code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.wixpress.petri.laboratory
import org.joda.time.DateTime
/**
 * Concrete [[EligibilityCriterion]] implementations used by the petri
 * laboratory. Each criterion simply wraps a single value and exposes it
 * through `getValue`.
 *
 * @author talyag
 * @since 9/20/14
 */
object EligibilityCriteriaTypes {
// Criterion carrying the date the user account was created.
class UserCreationDateCriterion(creationDate: DateTime) extends EligibilityCriterion[DateTime] {
override def getValue: DateTime = creationDate
}
// Criterion carrying the user's language code.
class LanguageCriterion(language: String) extends EligibilityCriterion[String] {
override def getValue: String = language
}
// Criterion carrying the user's country (geo) code.
class CountryCriterion(geo: String) extends EligibilityCriterion[String] {
override def getValue: String = geo
}
// Criterion carrying the company-employee flag (java.lang.Boolean for Java interop).
class CompanyEmployeeCriterion(isCompanyEmployee: java.lang.Boolean) extends EligibilityCriterion[java.lang.Boolean] {
override def getValue: java.lang.Boolean = isCompanyEmployee
}
// Criterion carrying arbitrary custom context key/value pairs (java.util.Map for Java interop).
class CustomContextCriterion(customContextMap: java.util.Map[String,String]) extends EligibilityCriterion[java.util.Map[String,String]] {
override def getValue: java.util.Map[String,String] = customContextMap
}
}
| wix/petri | wix-petri-core/src/main/java/com/wixpress/petri/laboratory/EligibilityCriteriaTypes.scala | Scala | bsd-3-clause | 972 |
import org.jsoup.Jsoup
/**
 * Scrapes www.grandecinema3.it for the cinemas of a given province and prints,
 * for each cinema, its name and address followed by the scheduled films with
 * genre, showtimes and a link to each film's page.
 *
 * NOTE(review): all scraping and printing happens as a side effect of
 * construction — instantiating this class performs blocking network I/O
 * via Jsoup and writes directly to stdout.
 *
 * Created by filippo on 02/05/16.
 *
 * @param city province identifier appended to the site's query string
 */
class cTParser( city : String) {
// Results page listing all cinemas for the requested province.
val site = s"http://www.grandecinema3.it/cinemalistresult.aspx?provincia=$city"
// Fetch and parse the page (blocking HTTP call).
val doc = Jsoup.connect(site).get
// One ".cinemalist-scheda-cinema" element per cinema card.
val cineList = doc.select(".cinemalist-scheda-cinema").listIterator
while(cineList.hasNext){
val c = cineList.next()
// Cinema name and address — first matching descendants of the card.
val nome = c.getElementsByClass("descrizione").first()
val indirizzo = c.getElementsByTag("span").first()
println(Console.BOLD + Console.GREEN + nome.text() + Console.RESET +" -+- "+Console.BOLD +Console.GREEN + indirizzo.text() + Console.RESET)
// The programme table is absent when the cinema has nothing scheduled.
val program = c.getElementsByTag("table").first()
if(program == null){
println(Console.CYAN +" =+= "+ "Nessun film in programmazione qui.")
}else{
// One "box-film" element per scheduled film.
val films = program.getElementsByClass("box-film").listIterator
while(films.hasNext){
val film = films.next()
val scheda = film.getElementsByClass("scheda").first
// Title, relative link to the film page, genre and showtimes.
val titolo = scheda.getElementsByClass("titolofilm").first().text()
val trama = scheda.getElementsByClass("titolofilm").first().getElementsByTag("a").first.attr("href")
val root = "http://www.grandecinema3.it"
// Genre is the text node following the "label" element, stripped of spaces.
val genere = scheda.getElementsByClass("label").first.nextSibling.toString.replace(" ","")
val orari = scheda.getElementsByClass("orario").first.text
println(Console.CYAN +" =+= "+ titolo +" - " + root+trama )
println(Console.WHITE + " "+ genere + " - "+ orari)
}
}
// NOTE(review): this prints the two characters '\' and 'n', not a blank
// line — possibly an escaping artifact; confirm whether "\n" was intended.
println("\\n")
}
// Reset console colours once all cinemas have been printed.
println(Console.RESET)
}
| filirnd/cineTre | src/main/scala/cTParser.scala | Scala | gpl-3.0 | 1,602 |
package info.folone.scala.poi
import scalaz._
import std.map._
import std.list._
import syntax.monoid._
/**
 * An immutable spreadsheet row: a row `index` plus the set of cells it
 * contains. Rendering and equality delegate to the scalaz `Show[Row]` and
 * `Equal[Row]` instances (defined elsewhere in this project).
 */
class Row(val index: Int)(val cells: Set[Cell]) {
/**
 * Collects this row's cell styles within `sheet`, keyed by style, mapped to
 * the addresses of every cell using that style. Per-cell maps are merged
 * with the Map monoid (`|+|`), which concatenates the address lists of
 * cells that share a style.
 */
def styles(sheet: String): Map[CellStyle, List[CellAddr]] =
cells.foldRight(Map[CellStyle, List[CellAddr]]()) { case (cell, map) =>
map |+| cell.styles(sheet, index)
}
override def toString: String = Show[Row].shows(this)
// Null-safe, type-checked equality delegating to the scalaz Equal[Row] instance.
override def equals(obj: Any): Boolean =
obj != null && obj.isInstanceOf[Row] && Equal[Row].equal(obj.asInstanceOf[Row], this)
// Combines index and cells hashes; must remain consistent with equals above.
override def hashCode: Int = index.hashCode + cells.hashCode
}
/** Factory and extractor mirroring the curried constructor of [[Row]]. */
object Row {
// Curried to match the class constructor shape: Row(index)(cells).
def apply(index: Int)(cells: Set[Cell]): Row = new Row(index)(cells)
// Always matches, exposing (index, cells) for pattern matching.
def unapply(row: Row): Some[(Int, Set[Cell])] = Some((row.index, row.cells))
}
| folone/poi.scala | src/main/scala/info.folone/scala.poi/Row.scala | Scala | apache-2.0 | 760 |
/**
* Copyright (c) 2013, The National Archives <digitalpreservation@nationalarchives.gov.uk>
* https://www.nationalarchives.gov.uk
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package uk.gov.nationalarchives.csv.validator
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scalaz._
import uk.gov.nationalarchives.csv.validator.schema.Schema
import uk.gov.nationalarchives.csv.validator.api.{TextFile, CsvValidator}
import scalax.file.Path
import java.io.StringReader
import java.io
/**
 * Acceptance tests for CSV metadata validation. Exercises global directives
 * (@separator, @quoted, @optional, @ignoreCase), column rules (regex, in, any,
 * if/switch, xsdDateTime variants, range, checksum, fileExists, identical,
 * string providers), fail-fast versus all-errors validators, and schema-level
 * validation errors. Fixture CSV/CSVS files live under `acceptancePath`.
 */
@RunWith(classOf[JUnitRunner])
class MetaDataValidatorAcceptanceSpec extends Specification with TestResources {
// Root directory containing the acceptance-test fixture files.
val base = acceptancePath
// Validator that accumulates all errors; path checks are case-insensitive.
val v = new CsvValidator with AllErrorsMetaDataValidator {
val pathSubstitutions = List[(String,String)]()
val enforceCaseSensitivePathChecks = false
val trace = false
// Convenience overload validating from a Reader with no progress callback.
def validateR(csv: io.Reader, schema: Schema): this.type#MetaDataValidation[Any] = validate(csv, schema, None)
}
// Validator variant that enforces case-sensitive file-path comparisons.
val ve = new CsvValidator with AllErrorsMetaDataValidator {
val pathSubstitutions = List[(String,String)]()
val enforceCaseSensitivePathChecks = true
val trace = false
}
import v.{validate, validateR, parseSchema}
import ve.{validate => validateE, parseSchema => parseSchemaE}
// Parse helpers: load a schema and throw if parsing fails (tests assume valid schemas).
def parse(filePath: String): Schema = parseSchema(TextFile(Path.fromString(filePath))) fold (f => throw new IllegalArgumentException(f.toString()), s => s)
def parseE(filePath: String): Schema = parseSchemaE(TextFile(Path.fromString(filePath))) fold (f => throw new IllegalArgumentException(f.toString()), s => s)
def parse(reader: io.Reader): Schema = parseSchema(reader) fold (f => throw new IllegalArgumentException(f.toString()), s => s)
def parseE(reader: io.Reader): Schema = parseSchemaE(reader) fold (f => throw new IllegalArgumentException(f.toString()), s => s)
// --- Global directives ---
"@separator global directive" should {
"succeed for '$' separator" in {
validate(TextFile(Path.fromString(base) / "separated1.dsv"), parse(base + "/separated1.csvs"), None).isSuccess mustEqual true
}
"succeed for TAB separator" in {
validate(TextFile(Path.fromString(base) / "separated2.tsv"), parse(base + "/separated2.csvs"), None).isSuccess mustEqual true
}
"succeed for '\\t' separator" in {
validate(TextFile(Path.fromString(base) / "separated2.tsv"), parse(base + "/separated2-1.csvs"), None).isSuccess mustEqual true
}
"with @quoted global directive" should {
"succeed for '$' separator" in {
validate(TextFile(Path.fromString(base) / "separated3.dsv"), parse(base + "/separated3.csvs"), None).isSuccess mustEqual true
}
"succeed for TAB separator" in {
validate(TextFile(Path.fromString(base) / "separated4.tsv"), parse(base + "/separated4.csvs"), None).isSuccess mustEqual true
}
"succeed for '\\t' separator" in {
validate(TextFile(Path.fromString(base) / "separated4.tsv"), parse(base + "/separated4-1.csvs"), None).isSuccess mustEqual true
}
}
}
// --- Column rules ---
"Regex rule" should {
"succeed for metadata file with column that passes regex rule" in {
validate(TextFile(Path.fromString(base) / "regexRulePassMetaData.csv"), parse(base + "/regexRuleSchema.csvs"), None).isSuccess mustEqual true
}
"fail when @noHeader not set" in {
validate(TextFile(Path.fromString(base) / "regexRuleFailMetaData.csv"), parse(base + "/regexRuleSchemaWithoutNoHeaderSet.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
// NOTE(review): backslash-escaped quotes here differ from the triple-quoted
// style used by sibling expectations — confirm against the repository source.
FailMessage(ValidationError, "regex(\\"[0-9]+\\") fails for line: 1, column: Age, value: \\"twenty\\"",Some(1),Some(1))
)
}
}
}
"Not empty rule" should {
"succeed for metadata file with column that passes regex rule" in {
validate(TextFile(Path.fromString(base) / "notempty.csv"), parse(base + "/notempty.csvs"), None).isSuccess mustEqual true
}
}
"Multiple errors " should {
"all be reported" in {
validate(TextFile(Path.fromString(base) / "multipleErrorsMetaData.csv"), parse(base + "/regexRuleSchemaWithNoHeaderSet.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """regex("[0-9]+") fails for line: 1, column: Age, value: "twenty"""",Some(1),Some(1)),
FailMessage(ValidationError, """regex("[0-9]+") fails for line: 2, column: Age, value: "thirty"""",Some(2),Some(1)))
}
}
}
"Combining two rules" should {
"succeed when metadata valid" in {
validate(TextFile(Path.fromString(base) / "twoRulesPassMetaData.csv"), parse(base + "/twoRuleSchema.csvs"), None).isSuccess mustEqual true
}
"fail when rules fail for all permutations" in {
validate(TextFile(Path.fromString(base) / "twoRulesFailMetaData.csv"), parse(base + "/twoRuleSchemaFail.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """in($FullName) fails for line: 1, column: Name, value: "Ben"""",Some(1),Some(0)),
FailMessage(ValidationError, """regex("[a-z]+") fails for line: 1, column: Name, value: "Ben"""",Some(1),Some(0)),
FailMessage(ValidationError, """in($FullName) fails for line: 2, column: Name, value: "Dave"""",Some(2),Some(0)),
FailMessage(ValidationError, """regex("[a-z]+") fails for line: 2, column: Name, value: "Dave"""",Some(2),Some(0)))
}
}
}
"An if rule" should {
"succeed if the conditionExpr and thenExpr are respected" in {
validate(TextFile(Path.fromString(base) / "ifRulePassMetaData.csv"), parse(base + "/ifRuleSchema.csvs"), None).isSuccess mustEqual true
}
"succeed if the condition and thenExpr or elseExpr are respected" in {
validate(TextFile(Path.fromString(base) / "ifRulePassMetaData.csv"), parse(base + "/ifElseRuleSchema.csvs"), None).isSuccess mustEqual true
}
"fail if the conditionExpr is true but the thenExpr is false" in {
validate(TextFile(Path.fromString(base) / "ifRuleFailThenMetaData.csv"), parse(base + "/ifElseRuleSchema.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """is("hello world") fails for line: 1, column: SomeIfRule, value: "hello world1"""",Some(1),Some(1))
)
}
}
// NOTE(review): "fasle" below is a typo for "false" in the example description
// (a runtime string, deliberately left unchanged in this documentation pass).
"fail if the conditionExpr is fasle but the elseExpr is false" in {
validate(TextFile(Path.fromString(base) / "ifRuleFailElseMetaData.csv"), parse(base + "/ifElseRuleSchema.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """upperCase fails for line: 3, column: SomeIfRule, value: "EFQWeGW"""",Some(3),Some(1))
)
}
}
}
"An switch rule" should {
"succeed if the conditionExpr and thenExpr are respected - 1" in {
validate(TextFile(Path.fromString(base) / "switch1RulePassMetaData.csv"), parse(base + "/switch1RuleSchema.csvs"), None).isSuccess mustEqual true
}
"succeed if the conditionExpr and thenExpr are respected - 2" in {
validate(TextFile(Path.fromString(base) / "switch2RulePassMetaData.csv"), parse(base + "/switch2RuleSchema.csvs"), None).isSuccess mustEqual true
}
"fail if the conditionExpr is true but the thenExpr is false - 1" in {
validate(TextFile(Path.fromString(base) / "switch1RuleFailMetaData.csv"), parse(base + "/switch1RuleSchema.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """is("hello world") fails for line: 1, column: SomeSwitchRule, value: "hello world1"""",Some(1),Some(1))
)
}
}
"fail if the conditionExpr is true but the thenExpr is false - 2" in {
validate(TextFile(Path.fromString(base) / "switch2RuleFailMetaData.csv"), parse(base + "/switch2RuleSchema.csvs"), None) must beLike {
case Failure(errors) =>errors.list mustEqual IList(
FailMessage(ValidationError, """is("hello world") fails for line: 1, column: SomeSwitchRule, value: "hello world1"""",Some(1),Some(1)),
FailMessage(ValidationError, """is("HELLO WORLD") fails for line: 2, column: SomeSwitchRule, value: "HELLO WORLD1"""",Some(2),Some(1))
)
}
}
}
"An in rule" should {
"succeed if the column value is in the rule's literal string" in {
validate(TextFile(Path.fromString(base) / "inRulePassMetaData.csv"), parse(base + "/inRuleSchema.csvs"), None).isSuccess mustEqual true
}
"fail if the column value is not in the rule's literal string" in {
validate(TextFile(Path.fromString(base) / "inRuleFailMetaData.csv"), parse(base + "/inRuleSchema.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """in("thevaluemustbeinthisstring") fails for line: 1, column: SomeInRule, value: "valuenotinrule"""",Some(1),Some(1)),
FailMessage(ValidationError, """in("thevaluemustbeinthisstring") fails for line: 3, column: SomeInRule, value: "thisonewillfailtoo"""",Some(3),Some(1)))
}
}
"succeed if the column value is in the rule's cross referenced column" in {
validate(TextFile(Path.fromString(base) / "inRuleCrossReferencePassMetaData.csv"), parse(base + "/inRuleCrossReferenceSchema.csvs"), None).isSuccess mustEqual true
}
"fail if the column value is not in the rule's cross referenced column" in {
validate(TextFile(Path.fromString(base) / "inRuleCrossReferenceFailMetaData.csv"), parse(base + "/inRuleCrossReferenceSchema.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(FailMessage(ValidationError, """in($FullName) fails for line: 2, column: FirstName, value: "Dave"""",Some(2),Some(0)))
}
}
}
"An any rule" should {
"succeed if the column value is in the rule's literal string" in {
validate(TextFile(Path.fromString(base) / "anyRulePassMetaData.csv"), parse(base + "/anyRuleSchema.csvs"), None).isSuccess mustEqual true
}
"fail if the column value is not in the rule's literal string" in {
validate(TextFile(Path.fromString(base) / "anyRuleFailMetaData.csv"), parse(base + "/anyRuleSchema.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """any("value1", "value2", "value3") fails for line: 4, column: SomeAnyRule, value: "value4"""",Some(4),Some(1))
)
}
}
}
// --- Date/time rules ---
"A xsdDateTime rule" should {
"succeed if the column value is in the rule's literal string" in {
validate(TextFile(Path.fromString(base) / "xsdDateTimePass.csv"), parse(base + "/xsdDateTime.csvs"), None).isSuccess mustEqual true
}
"fail if the column value is not in the rule's literal string" in {
validate(TextFile(Path.fromString(base) / "xsdDateTimeFail.csv"), parse(base + "/xsdDateTime.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """xDateTime fails for line: 2, column: date, value: "2013-03-22"""",Some(2),Some(0))
)
}
}
}
"A xsdDateTimeRange rule" should {
"succeed if the column value is in the rule's literal string" in {
validate(TextFile(Path.fromString(base) / "xsdDateTimeRangePass.csv"), parse(base + "/xsdDateTimeRange.csvs"), None).isSuccess mustEqual true
}
"fail if the column value is not in the rule's literal string" in {
validate(TextFile(Path.fromString(base) / "xsdDateTimeRangeFail.csv"), parse(base + "/xsdDateTimeRange.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """xDateTime("2012-01-01T01:00:00, 2013-01-01T01:00:00") fails for line: 2, column: date, value: "2014-01-01T01:00:00"""",Some(2),Some(0))
)
}
}
}
//FIXME
"A xsdDateTimeWithTimeZone rule" should {
"succeed if the column value is in the rule's literal string" in {
validate(TextFile(Path.fromString(base) / "xsdDateTimeTzPass.csv"), parse(base + "/xsdDateTimeTz.csvs"), None).isSuccess mustEqual true
}
"fail if the column value is invalid" in {
validate(TextFile(Path.fromString(base) / "xsdDateTimeTzFail.csv"), parse(base + "/xsdDateTimeTz.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """xDateTimeWithTimeZone fails for line: 4, column: date, value: "2012-01-01T00:00:00"""",Some(4),Some(0))
)
}
}
}
"A xsdDateTimeWithTimeZoneRange rule" should {
"succeed if the column value is in the rule's literal string" in {
validate(TextFile(Path.fromString(base) / "xsdDateTimeTzRangePass.csv"), parse(base + "/xsdDateTimeTzRange.csvs"), None).isSuccess mustEqual true
}
"fail if the column value is not in the rule's literal string" in {
validate(TextFile(Path.fromString(base) / "xsdDateTimeTzRangeFail.csv"), parse(base + "/xsdDateTimeTzRange.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """xDateTimeWithTimeZone("2012-01-01T01:00:00+00:00, 2013-01-01T01:00:00+00:00") fails for line: 2, column: date, value: "2014-01-01T01:00:00+00:00"""",Some(2),Some(0))
)
}
}
}
// --- Column directives ---
"An @optional column directive" should {
"allow a column to have an empty value and ignore other rules" in {
validate(TextFile(Path.fromString(base) / "optionalPassMetaData.csv"), parse(base + "/optionalSchema.csvs"), None).isSuccess mustEqual true
}
"fail if a non empty value fails a rule" in {
validate(TextFile(Path.fromString(base) / "optionalFailMetaData.csv"), parse(base + "/optionalSchema.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(FailMessage(ValidationError, "in($FullName) fails for line: 1, column: Name, value: \\"BP\\"",Some(1),Some(0)))
}
}
}
"An @ignoreCase column directive" should {
"pass a rule ignoring case" in {
validate(TextFile(Path.fromString(base) / "ignoreCasePassMetaData.csv"), parse(base + "/ignoreCaseSchema.csvs"), None).isSuccess mustEqual true
}
}
// --- File-system dependent rules ---
"A fileExists rule" should {
// Schema templates embed "$$acceptancePath$$", substituted with the fixture root at runtime.
val schemaPath = Path.fromString(base) / "fileExistsSchema.csvs"
val schemaTemplate = schemaPath.lines(includeTerminator = true).mkString
val schema = schemaTemplate.replace("$$acceptancePath$$", base)
"ensure the file exists on the file system" in {
validate(TextFile(Path.fromString(base) / "fileExistsPassMetaData.csv"), parse(new StringReader(schema)), None).isSuccess mustEqual true
}
"ensure the file exists on the file system" in {
val csvPath = Path.fromString(base) / "fileExistsCrossRefPassMetaData.csv"
val csvTemplate = csvPath.lines(includeTerminator = true).mkString
val csv = csvTemplate.replace("$$acceptancePath$$", base)
validateR(new StringReader(csv), parse(base + "/fileExistsCrossRefSchema.csvs")).isSuccess mustEqual true
}
"fail if the file does not exist on the file system" in {
validate(TextFile(Path.fromString(base) / "fileExistsPassMetaData.csv"), parse(base + "/fileExistsSchemaWithBadBasePath.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """fileExists("src/test/resources/uk/gov/nationalarchives") fails for line: 1, column: PasswordFile, value: "benPass.csvs"""",Some(1),Some(2)),
FailMessage(ValidationError, """fileExists("src/test/resources/uk/gov/nationalarchives") fails for line: 2, column: PasswordFile, value: "andyPass.csvs"""",Some(2),Some(2)))
}
}
"enforce case-sensitive comparisons when case-sensitive comparisons are set" in {
val csfSchemaPath = Path.fromString(base) / "caseSensitiveFiles.csvs"
val csfSchemaTemplate = csfSchemaPath.lines(includeTerminator = true).mkString
val csfSchema = csfSchemaTemplate.replace("$$acceptancePath$$", base)
validateE(TextFile(Path.fromString(base) / "caseSensitiveFiles.csv"), parseE(new StringReader(csfSchema)), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """fileExists("$$acceptance$$") fails for line: 2, column: filename, value: "casesensitivefiles.csv"""".replace("$$acceptance$$", base),Some(2),Some(0)),
FailMessage(ValidationError, """fileExists("$$acceptance$$") fails for line: 3, column: filename, value: "CASESENSITIVEFILES.csv"""".replace("$$acceptance$$", base),Some(3),Some(0))
)
}
}
}
"A range rule" should {
"enforce all element in a call on to be in a range of value" in {
validate(TextFile(Path.fromString(base) / "rangeRulePassMetaData.csv"), parse(base + "/rangeRuleSchema.csvs"), None).isSuccess mustEqual true
}
"enforce all element in a call on to be in a range of value" in {
validate(TextFile(Path.fromString(base) / "rangeRuleFailMetaData.csv"), parse(base + "/rangeRuleSchema.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(FailMessage(ValidationError, """range(1910,*) fails for line: 2, column: Year_of_birth, value: "1909"""",Some(2),Some(1)))
}
}
"fail with no limit set" in {
parseSchema(TextFile(Path.fromString(base) / "rangeRuleFailSchema.csvs")) must beLike {
case Failure(errors) => errors.list mustEqual IList(FailMessage(SchemaDefinitionError, """Column: Year_of_birth: Invalid range in 'range(*,*)' at least one value needs to be defined""",None,None))
}
}
"fail with inconsistent limit" in {
parseSchema(TextFile(Path.fromString(base) / "rangeRuleInvalidSchema.csvs")) must beLike {
case Failure(errors) => errors.list mustEqual IList(FailMessage(SchemaDefinitionError, """Column: Year_of_birth: Invalid range, minimum greater than maximum in: 'range(100,1)' at line: 4, column: 16""",None,None))
}
}
}
"A checksum rule" should {
"enforce case-sensitive comparisons when case-sensitive comparisons are set" in {
val csfSchemaPath = Path.fromString(base) / "caseSensitiveFilesChecksum.csvs"
val csfSchemaTemplate = csfSchemaPath.lines(includeTerminator = true).mkString
val csfSchema = csfSchemaTemplate.replace("$$acceptancePath$$", base)
validateE(TextFile(Path.fromString(base) / "caseSensitiveFilesChecksum.csv"), parseE(new StringReader(csfSchema)), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """checksum(file("$$acceptance$$", $filename), "MD5") file "$$acceptance$$$$file-sep$$casesensitivefileschecksum.csvs" not found for line: 2, column: checksum, value: "41424313f6052b7f062358ed38640b6e"""".replace("$$acceptance$$", base).replace("$$file-sep$$", FILE_SEPARATOR.toString),Some(2),Some(1)),
FailMessage(ValidationError, """checksum(file("$$acceptance$$", $filename), "MD5") file "$$acceptance$$$$file-sep$$CASESENSITIVEFILESCHECKSUM.csvs" not found for line: 3, column: checksum, value: "41424313f6052b7f062358ed38640b6e"""".replace("$$acceptance$$", base).replace("$$file-sep$$", FILE_SEPARATOR.toString),Some(3),Some(1))
)
}
}
}
"A identical rule" should {
"enforce all rows of the same column to be identical between themselves " in {
validate(TextFile(Path.fromString(base) / "identicalPassMetaData.csv"), parse(base + "/identicalSchema.csvs"), None).isSuccess mustEqual true
}
"enforce all rows of the same column to be identical between themselves with header" in {
validate(TextFile(Path.fromString(base) / "identicalHeaderMetaData.csv"), parse(base + "/identicalHeaderSchema.csvs"), None).isSuccess mustEqual true
}
"fail for different rows in the same column " in {
validateE(TextFile(Path.fromString(base) / "identicalFailMetaData.csv"), parseE(base + "/identicalSchema.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """identical fails for line: 3, column: FullName, value: "fff"""",Some(3),Some(1))
)
}
}
"fail for empty value " in {
validate(TextFile(Path.fromString(base) / "identicalEmptyMetaData.csv"), parse(base + "/identicalSchema.csvs"), None).isFailure mustEqual true
}
}
// --- Fail-fast validator behaviour ---
"Validate fail fast" should {
// Fail-fast variant: stops at the first error but still accumulates warnings.
val app = new CsvValidator with FailFastMetaDataValidator { val pathSubstitutions = List[(String,String)](); val enforceCaseSensitivePathChecks = false; val trace = false }
"only report first error for invalid @TotalColumns" in {
app.validate(TextFile(Path.fromString(base) / "totalColumnsFailMetaData.csv"), parse(base + "/totalColumnsSchema.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(FailMessage(ValidationError, "Expected @totalColumns of 1 and found 2 on line 2",Some(2),Some(2)))
}
}
"only report first rule fail for multiple rules on a column" in {
app.validate(TextFile(Path.fromString(base) / "rulesFailMetaData.csv"), parse(base + "/rulesSchema.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(FailMessage(ValidationError, """regex("[A-Z][a-z]+") fails for line: 2, column: Name, value: "ben"""",Some(2),Some(0)))
}
}
"succeed for multiple rules with valid metadata" in {
app.validate(TextFile(Path.fromString(base) / "twoRulesPassMetaData.csv"), parse(base + "/twoRuleSchema.csvs"), None).isSuccess mustEqual true
}
"report warnings" in {
app.validate(TextFile(Path.fromString(base) / "warnings.csv"), parse(base + "/warnings.csvs"), None) must beLike {
case Failure(warnings) => warnings.list mustEqual IList(
FailMessage(ValidationWarning, """is("WO") fails for line: 2, column: department, value: "BT"""", Some(2), Some(0)),
FailMessage(ValidationWarning, """is("WO") fails for line: 3, column: department, value: "ED"""", Some(3), Some(0))
)
}
}
"report warnings and only first error" in {
app.validate(TextFile(Path.fromString(base) / "warningsAndErrors.csv"), parse(base + "/warnings.csvs"), None) must beLike {
case Failure(warningsAndError) => warningsAndError.list mustEqual IList(
FailMessage(ValidationWarning, """is("WO") fails for line: 2, column: department, value: "BT"""", Some(2), Some(0)),
FailMessage(ValidationWarning, """is("WO") fails for line: 3, column: department, value: "ED"""", Some(3), Some(0)),
FailMessage(ValidationError, """is("13") fails for line: 4, column: division, value: "15"""", Some(4), Some(1))
)
}
}
}
// --- Schema-level validation ---
"validate schema" should {
"fail with duplicate column ids" in {
parseSchema(TextFile(Path.fromString(base) / "duplicateColumnIdsFailSchema.csvs")) must beLike {
case Failure(errors) => errors.list mustEqual IList(FailMessage(SchemaDefinitionError, """Column: Age has duplicates on lines 3, 8
|Column: Country has duplicates on lines 4, 5, 7""".stripMargin))
}
}
"fail with unique column ids" in {
validate(TextFile(Path.fromString(base) / "duplicateColumnIdsMetaData.csv"), parse(base + "/duplicateColumnIdsPassSchema.csvs"), None).isFailure mustEqual true
}
}
"An 'or' rule" should {
"succeed if either the lhs or rhs succeeds" in {
validate(TextFile(Path.fromString(base) / "orWithTwoRulesPassMetaData.csv"), parse(base + "/orWithTwoRulesSchema.csvs"), None) must beLike {
case Success(_) => ok
}
}
"fail if both the lhs or rhs are fail" in {
validate(TextFile(Path.fromString(base) / "orWithTwoRulesFailMetaData.csv"), parse(base + "/orWithTwoRulesSchema.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(FailMessage(ValidationError, """regex("[A-Z][a-z]+") or regex("[0-9]+") fails for line: 4, column: CountryOrCountryCode, value: "Andromeda9"""",Some(4),Some(1)))
}
}
"succeed for 2 'or' rules with an 'and' rule" in {
validate(TextFile(Path.fromString(base) / "orWithFourRulesPassMetaData.csv"), parse(base + "/orWithFourRulesSchema.csvs"), None).isSuccess mustEqual true
}
"fail if 'or' rules pass and 'and' rule fails" in {
validate(TextFile(Path.fromString(base) / "orWithFourRulesFailMetaData.csv"), parse(base + "/orWithFourRulesSchema.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(FailMessage(ValidationError, """regex("[A-Z].+") fails for line: 2, column: Country, value: "ngland"""",Some(2),Some(1)))
}
}
}
"No arg standard rules" should {
"succeed if all the rules are valid" in {
validate(TextFile(Path.fromString(base) / "standardRulesPassMetaData.csv"), parse(base + "/standardRulesSchema.csvs"), None).isSuccess mustEqual true
}
"fail if all the rules are not" in {
validate(TextFile(Path.fromString(base) / "standardRulesFailMetaData.csv"), parse(base + "/standardRulesSchema.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(
FailMessage(ValidationError, """uri fails for line: 1, column: uri, value: "http:##datagov.nationalarchives.gov.uk#66#WO#409#9999#0#aaaaaaaa-aaaa-4aaa-9eee-0123456789ab"""", Some(1), Some(0)),
FailMessage(ValidationError, """xDateTime fails for line: 1, column: xDateTime, value: "2002-999-30T09:00:10"""", Some(1), Some(1)),
FailMessage(ValidationError, """xDate fails for line: 1, column: xDate, value: "02-99-30"""", Some(1), Some(2)),
FailMessage(ValidationError, """ukDate fails for line: 1, column: ukDate, value: "99/00/0009"""", Some(1), Some(3)),
FailMessage(ValidationError, """xTime fails for line: 1, column: xTime, value: "99:00:889"""", Some(1), Some(4)),
FailMessage(ValidationError, """uuid4 fails for line: 1, column: uuid4, value: "aaaaaaaab-aaaab-4aaa-9eee-0123456789ab"""", Some(1), Some(5)),
FailMessage(ValidationError, """positiveInteger fails for line: 1, column: positiveInteger, value: "12-0912459"""", Some(1), Some(6))
)
}
}
}
"Schema 1.1 should be backward compatible" in {
parseSchema(TextFile(Path.fromString(base + "/rule1_0.csvs"))).isSuccess mustEqual true
}
// --- String providers ---
"No ext string provider" should {
"should remove filename extension" in {
validate(TextFile(Path.fromString(base) / "noextPass.csv"), parse(base + "/noext.csvs"), None).isSuccess mustEqual true
}
"fail for incorrect extension removal" in {
validate(TextFile(Path.fromString(base) / "noextFail.csv"), parse(base + "/noext.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(FailMessage(ValidationError, """is(noext($identifier)) fails for line: 3, column: noext, value: "file:/a/b/c.txt"""",Some(3),Some(1)))
}
}
}
"Url decode string provider" should {
"decode url string to normal string" in {
validate(TextFile(Path.fromString(base) / "uriDecodePass.csv"), parse(base + "/uriDecode.csvs"), None).isSuccess mustEqual true
}
"fail for wrong url" in {
validate(TextFile(Path.fromString(base) / "uriDecodeFail.csv"), parse(base + "/uriDecode.csvs"), None).isFailure mustEqual true
}
"decode URL with optional charset parameter" in {
validate(TextFile(Path.fromString(base) / "uriDecodeWithCharsetPass.csv"), parse(base + "/uriDecodeWithCharset.csvs"), None).isSuccess mustEqual true
}
}
"Concat string provider" should {
"should concatenate string provider" in {
val x = validate(TextFile(Path.fromString(base) / "concatPass.csv"), parse(base + "/concat.csvs"), None)
x.isSuccess mustEqual true
}
"fail for incorrect concatenation" in {
validate(TextFile(Path.fromString(base) / "concatFail.csv"), parse(base + "/concat.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(FailMessage(ValidationError, """is(concat($c1, $c2)) fails for line: 3, column: c3, value: "ccccc"""",Some(3),Some(2)))
}
}
"should concatenate string provider (various arguments)" in {
validate(TextFile(Path.fromString(base) / "concat4Pass.csv"), parse(base + "/concat4.csvs"), None).isSuccess mustEqual true
}
"fail for incorrect concatenation (various arguments)" in {
validate(TextFile(Path.fromString(base) / "concat4Fail.csv"), parse(base + "/concat4.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(FailMessage(ValidationError, """is(concat($c1, $c2, "hello", $c4)) fails for line: 2, column: c5, value: "aabbccdd"""",Some(2),Some(4)))
}
}
}
"Redacted schema" should {
"should remove filename extension" in {
validate(TextFile(Path.fromString(base) / "redactedPass.csv"), parse(base + "/redacted.csvs"), None).isSuccess mustEqual true
}
"fail for incorrect concatenation" in {
validate(TextFile(Path.fromString(base) / "redactedFail.csv"), parse(base + "/redacted.csvs"), None) must beLike {
case Failure(errors) => errors.list mustEqual IList(FailMessage(ValidationError, """is(concat(noext($original_identifier), "_R.pdf")) fails for line: 2, column: identifier, value: "file:/some/folder/TNA%20Digital%20Preservation%20Strategy%20v0.3%5BA1031178%5D_R1.pdf"""",Some(2),Some(0)))
}
}
}
"Reader" should {
"successfully parse byte order marks" in {
validate(TextFile(Path.fromString(base) / "bom.csv"), parse(base + "/bom.csvs"), None).isSuccess mustEqual true
}
}
}
| adamretter/csv-validator | csv-validator-core/src/test/scala/uk/gov/nationalarchives/csv/validator/MetaDataValidatorAcceptanceSpec.scala | Scala | mpl-2.0 | 30,111 |
package byteme
/**
 * Left-nested pair written infix as `A ~ B`. Chaining `a ~ b ~ c`
 * yields `(a ~ b) ~ c`, so combined results nest to the left.
 */
case class ~[+A, +B](_1: A, _2: B) {
  /** Pair this value with `c`, keeping the left-nested shape. */
  def ~[C](c: C): (A ~ B) ~ C = new ~(this, c)
}
package scala.tasty
package reflect
/** TASTy Reflect tree accumulator: folds a value of type `X` over every node
 *  of a reflected tree. Implementors supply `foldTree`; calling
 *  `foldOverTree(x, tree)` from it recurses structurally into `tree`'s children.
 *
 *  Usage:
 *  ```
 *  class MyTreeAccumulator[R <: scala.tasty.Reflection & Singleton](val reflect: R)
 *      extends scala.tasty.reflect.TreeAccumulator[X] {
 *    import reflect.{given _, _}
 *    def foldTree(x: X, tree: Tree)(using ctx: Context): X = ...
 *  }
 *  ```
 */
trait TreeAccumulator[X] {
  val reflect: Reflection
  import reflect.{given _, _}
  // Ties the knot of the traversal: call `foldOver(x, tree))` to dive in the `tree` node.
  def foldTree(x: X, tree: Tree)(using ctx: Context): X
  // Folds `foldTree` left-to-right over a sequence of trees.
  def foldTrees(x: X, trees: Iterable[Tree])(using ctx: Context): X = trees.foldLeft(x)(foldTree)
  // Folds over the direct children of `tree`. Definition nodes open a local
  // context for their own symbol before descending.
  def foldOverTree(x: X, tree: Tree)(using ctx: Context): X = {
    def localCtx(definition: Definition): Context = definition.symbol.localContext
    tree match {
      // Term trees
      case Ident(_) =>
        x
      case Select(qualifier, _) =>
        foldTree(x, qualifier)
      case This(qual) =>
        x
      case Super(qual, _) =>
        foldTree(x, qual)
      case Apply(fun, args) =>
        foldTrees(foldTree(x, fun), args)
      case TypeApply(fun, args) =>
        foldTrees(foldTree(x, fun), args)
      case Literal(const) =>
        x
      case New(tpt) =>
        foldTree(x, tpt)
      case Typed(expr, tpt) =>
        foldTree(foldTree(x, expr), tpt)
      case NamedArg(_, arg) =>
        foldTree(x, arg)
      case Assign(lhs, rhs) =>
        foldTree(foldTree(x, lhs), rhs)
      case Block(stats, expr) =>
        foldTree(foldTrees(x, stats), expr)
      case If(cond, thenp, elsep) =>
        foldTree(foldTree(foldTree(x, cond), thenp), elsep)
      case While(cond, body) =>
        foldTree(foldTree(x, cond), body)
      case Closure(meth, tpt) =>
        foldTree(x, meth)
      case Match(selector, cases) =>
        foldTrees(foldTree(x, selector), cases)
      case Return(expr) =>
        foldTree(x, expr)
      case Try(block, handler, finalizer) =>
        foldTrees(foldTrees(foldTree(x, block), handler), finalizer)
      case Repeated(elems, elemtpt) =>
        foldTrees(foldTree(x, elemtpt), elems)
      case Inlined(call, bindings, expansion) =>
        foldTree(foldTrees(x, bindings), expansion)
      // Definitions: each installs the definition's local context for its subtrees
      case vdef @ ValDef(_, tpt, rhs) =>
        val ctx = localCtx(vdef)
        given Context = ctx
        foldTrees(foldTree(x, tpt), rhs)
      case ddef @ DefDef(_, tparams, vparamss, tpt, rhs) =>
        val ctx = localCtx(ddef)
        given Context = ctx
        foldTrees(foldTree(vparamss.foldLeft(foldTrees(x, tparams))(foldTrees), tpt), rhs)
      case tdef @ TypeDef(_, rhs) =>
        val ctx = localCtx(tdef)
        given Context = ctx
        foldTree(x, rhs)
      case cdef @ ClassDef(_, constr, parents, derived, self, body) =>
        val ctx = localCtx(cdef)
        given Context = ctx
        foldTrees(foldTrees(foldTrees(foldTrees(foldTree(x, constr), parents), derived), self), body)
      case Import(expr, _) =>
        foldTree(x, expr)
      case clause @ PackageClause(pid, stats) =>
        foldTrees(foldTree(x, pid), stats)(using clause.symbol.localContext)
      // Type trees
      case Inferred() => x
      case TypeIdent(_) => x
      case TypeSelect(qualifier, _) => foldTree(x, qualifier)
      case Projection(qualifier, _) => foldTree(x, qualifier)
      case Singleton(ref) => foldTree(x, ref)
      case Refined(tpt, refinements) => foldTrees(foldTree(x, tpt), refinements)
      case Applied(tpt, args) => foldTrees(foldTree(x, tpt), args)
      case ByName(result) => foldTree(x, result)
      case Annotated(arg, annot) => foldTree(foldTree(x, arg), annot)
      case LambdaTypeTree(typedefs, arg) => foldTree(foldTrees(x, typedefs), arg)
      case TypeBind(_, tbt) => foldTree(x, tbt)
      case TypeBlock(typedefs, tpt) => foldTree(foldTrees(x, typedefs), tpt)
      case MatchTypeTree(boundopt, selector, cases) =>
        foldTrees(foldTree(boundopt.fold(x)(foldTree(x, _)), selector), cases)
      case WildcardTypeTree() => x
      case TypeBoundsTree(lo, hi) => foldTree(foldTree(x, lo), hi)
      // Patterns and case clauses
      case CaseDef(pat, guard, body) => foldTree(foldTrees(foldTree(x, pat), guard), body)
      case TypeCaseDef(pat, body) => foldTree(foldTree(x, pat), body)
      case Bind(_, body) => foldTree(x, body)
      case Unapply(fun, implicits, patterns) => foldTrees(foldTrees(foldTree(x, fun), implicits), patterns)
      case Alternatives(patterns) => foldTrees(x, patterns)
    }
  }
}
| som-snytt/dotty | library/src/scala/tasty/reflect/TreeAccumulator.scala | Scala | apache-2.0 | 4,439 |
package play2.tools.xml
import scala.xml.{Attribute, NamespaceBinding}
import scala.collection._
/** Type class for decoding a value of `T` from an XML node sequence.
 *  Returns `None` when the input is absent or cannot be parsed. */
trait XMLReader[T] {
  // No error management for the time being... maybe later
  def read(x: xml.NodeSeq): Option[T]
}
/** Type class for encoding a `T` as XML. `base` serves as the element template
 *  the value is written into (contravariant so a writer for a supertype applies). */
trait XMLWriter[-T] {
  def write(t: T, base: xml.NodeSeq): xml.NodeSeq
}
/** Combines reading and writing of `T` in a single type class. */
trait XMLFormatter[T] extends XMLReader[T] with XMLWriter[T]
object EXML extends EXML
/** Entry points: serialize / deserialize values via implicit reader/writer instances. */
trait EXML {
  def toXML[T](t: T, base: xml.NodeSeq = xml.NodeSeq.Empty)(implicit w: XMLWriter[T]): xml.NodeSeq = w.write(t, base)
  def fromXML[T](x: xml.NodeSeq)(implicit r: XMLReader[T]): Option[T] = r.read(x)
}
object BasicReaders extends BasicReaders
/** Stock [[XMLReader]] instances for primitive types.
 *
 *  Each reader yields `None` for an empty node sequence or unparseable text,
 *  and `Some(value)` otherwise.
 */
trait BasicReaders {
  import scala.util.control.Exception._
  implicit object StringReader extends XMLReader[String] {
    def read(x: xml.NodeSeq): Option[String] = if(x.isEmpty) None else Some(x.text)
  }
  implicit object IntReader extends XMLReader[Int] {
    def read(x: xml.NodeSeq): Option[Int] = if(x.isEmpty) None else catching(classOf[NumberFormatException]) opt x.text.toInt
  }
  implicit object LongReader extends XMLReader[Long] {
    def read(x: xml.NodeSeq): Option[Long] = if(x.isEmpty) None else catching(classOf[NumberFormatException]) opt x.text.toLong
  }
  implicit object ShortReader extends XMLReader[Short] {
    def read(x: xml.NodeSeq): Option[Short] = if(x.isEmpty) None else catching(classOf[NumberFormatException]) opt x.text.toShort
  }
  implicit object FloatReader extends XMLReader[Float] {
    def read(x: xml.NodeSeq): Option[Float] = if(x.isEmpty) None else catching(classOf[NumberFormatException]) opt x.text.toFloat
  }
  implicit object DoubleReader extends XMLReader[Double] {
    def read(x: xml.NodeSeq): Option[Double] = if(x.isEmpty) None else catching(classOf[NumberFormatException]) opt x.text.toDouble
  }
  implicit object BooleanReader extends XMLReader[Boolean] {
    // Bug fix: String#toBoolean throws IllegalArgumentException (not
    // NumberFormatException) on malformed input, so the old catch let the
    // exception escape. Catching IllegalArgumentException (the superclass of
    // NumberFormatException) makes a bad boolean yield None as intended.
    def read(x: xml.NodeSeq): Option[Boolean] = if(x.isEmpty) None else catching(classOf[IllegalArgumentException]) opt x.text.toBoolean
  }
}
object SpecialReaders extends SpecialReaders
/** Readers for container types built on top of element readers. */
trait SpecialReaders {
  // Option: an element carrying nil="true" decodes to None; a missing element
  // also decodes successfully (as Some(None)), so optional fields never fail.
  implicit def OptionReader[T](implicit r: XMLReader[T]) = new XMLReader[Option[T]] {
    def read(x: xml.NodeSeq): Option[Option[T]] = {
      x.collectFirst{ case e: xml.Elem => e }.map{ e =>
        if(e.attributes.exists{ a => a.key == "nil" && a.value.text == "true" }) None
        else r.read(e)
      }.orElse(Some(None))
    }
  }
  // Any collection with a CanBuildFrom: reads each node independently and
  // silently drops nodes the element reader rejects.
  implicit def traversableReader[F[_], A](implicit bf: generic.CanBuildFrom[F[_], A, F[A]], r: XMLReader[A]) = new XMLReader[F[A]] {
    def read(x: xml.NodeSeq): Option[F[A]] = {
      val builder = bf()
      x.foreach{ n => r.read(n).foreach{ builder += _ } }
      Some(builder.result)
    }
  }
  // Map: each entry element is expected to contain <key> and <value> children
  // (written by SpecialWriters.mapWriter); unreadable entries are discarded.
  implicit def mapReader[K, V](implicit rk: XMLReader[K], rv: XMLReader[V]): XMLReader[collection.immutable.Map[K,V]] = new XMLReader[collection.immutable.Map[K,V]] {
    def read(x: xml.NodeSeq): Option[collection.immutable.Map[K, V]] = {
      Some(x.collect{ case e: xml.Elem =>
        for(k <- EXML.fromXML[K](e \\ "key");
            v <- EXML.fromXML[V](e \\ "value")
        ) yield( k -> v )
      }.filter(_.isDefined).map(_.get).toMap[K,V])
    }
  }
}
object BasicWriters extends BasicWriters
/** Stock [[XMLWriter]] instances for primitive values. Every writer renders
 *  the value as text: when `base` contains an element, the text becomes that
 *  element's only child; otherwise a bare text node is emitted.
 */
trait BasicWriters {
  implicit object StringWriter extends XMLWriter[String] {
    def write(value: String, base: xml.NodeSeq): xml.NodeSeq = {
      val filled = base.collectFirst { case elem: xml.Elem => elem.copy(child = xml.Text(value)) }
      filled getOrElse xml.Text(value)
    }
  }
  implicit object IntWriter extends XMLWriter[Int] {
    def write(value: Int, base: xml.NodeSeq): xml.NodeSeq = StringWriter.write(value.toString, base)
  }
  implicit object LongWriter extends XMLWriter[Long] {
    def write(value: Long, base: xml.NodeSeq): xml.NodeSeq = StringWriter.write(value.toString, base)
  }
  implicit object FloatWriter extends XMLWriter[Float] {
    def write(value: Float, base: xml.NodeSeq): xml.NodeSeq = StringWriter.write(value.toString, base)
  }
  implicit object ShortWriter extends XMLWriter[Short] {
    def write(value: Short, base: xml.NodeSeq): xml.NodeSeq = StringWriter.write(value.toString, base)
  }
  implicit object DoubleWriter extends XMLWriter[Double] {
    def write(value: Double, base: xml.NodeSeq): xml.NodeSeq = StringWriter.write(value.toString, base)
  }
  implicit object BooleanWriter extends XMLWriter[Boolean] {
    def write(value: Boolean, base: xml.NodeSeq): xml.NodeSeq = StringWriter.write(value.toString, base)
  }
}
object SpecialWriters extends SpecialWriters
/** Writers for container types, mirroring [[SpecialReaders]]. */
trait SpecialWriters {
  // xsi namespace used to mark absent optional values with xsi:nil="true".
  val xsiNS = xml.NamespaceBinding("xsi", "http://www.w3.org/2001/XMLSchema-instance", xml.TopScope)
  implicit def optionWriter[T](implicit w: XMLWriter[T]) = new XMLWriter[Option[T]] {
    def write(t: Option[T], base: xml.NodeSeq) = {
      t match {
        // None: emit the base element tagged xsi:nil="true" (or nothing if base has no element).
        case None => base.collectFirst{ case e: xml.Elem => e.copy(scope = xsiNS) % Attribute("xsi", "nil", "true", xml.Null) }.getOrElse(xml.NodeSeq.Empty)
        case Some(t) => w.write(t, base)
      }
    }
  }
  // Collections: one copy of `base` is written per element, concatenated in order.
  implicit def traversableWriter[T](implicit w: XMLWriter[T]) = new XMLWriter[Traversable[T]] {
    def write(t: Traversable[T], base: xml.NodeSeq) = {
      t.foldLeft(xml.NodeSeq.Empty)( (acc, n) => acc ++ w.write(n, base) )
    }
  }
  // Maps: each entry becomes a copy of `base` containing <key> and <value> children,
  // the shape SpecialReaders.mapReader expects when reading back.
  implicit def mapWriter[K, V](implicit kw: XMLWriter[K], vw: XMLWriter[V]) = new XMLWriter[Map[K, V]] {
    def write(m: Map[K, V], base: xml.NodeSeq) = {
      m.foldLeft(xml.NodeSeq.Empty){ (acc, n) =>
        base.collectFirst{ case e:xml.Elem =>
          e.copy( child = kw.write(n._1, <key/>) ++ vw.write(n._2, <value/>) )
        }.map( acc ++ _ ).getOrElse(acc)
      }
    }
  }
}
| mandubian/scala-xmlsoap-ersatz | src/main/scala/play2/tools/xml/EXML.scala | Scala | apache-2.0 | 5,491 |
package com.tuvistavie.xserver.bridge
import akka.actor.{ Actor, ActorRef, ActorSystem, ActorLogging, Props }
import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.slf4j.Logging
import com.tuvistavie.xserver.backend.util.{ Config, RuntimeConfig }
import com.tuvistavie.xserver.backend.model.Window
import com.tuvistavie.xserver.protocol.request.CreateRootRequest
import messages._
import messages.Register
/** Selects the bridge implementation at startup: a no-op client in
 *  stand-alone mode, otherwise the TCP-backed client. */
object BridgeClient {
  val current = if(RuntimeConfig.standAlone) DummyBridgeClient
                else TCPBridgeClient
}
/** Minimal bridge-client contract: register with the server, then send messages. */
trait BridgeClientLike {
  // Announces this client to the bridge server.
  def register(): Unit
  // Forwards a message to the bridge server (fire-and-forget).
  def !(msg: Any): Unit
}
/** Actor endpoint of the TCP bridge. It currently handles no messages:
 *  everything it receives is silently discarded (same as the original). */
class TCPBridgeClient extends Actor with ActorLogging {
  def receive = {
    case _ => ()
  }
}
/** TCP-backed bridge client: owns the client actor system and talks to the
 *  remote bridge server whose path is derived from the display number. */
object TCPBridgeClient extends Logging with BridgeClientLike {
  val system = ActorSystem("XBridgeClient", ConfigFactory.load.getConfig("bridge"))
  val ref = system.actorOf(Props[TCPBridgeClient], "bridge-" + RuntimeConfig.displayNumber)
  // Server actor path template, parameterised by the display number.
  private val serverPath = Config.getString("bridge.server.path").format(RuntimeConfig.displayNumber)
  lazy val server = system.actorFor(serverPath)
  override def register() {
    logger.debug(s"registering to actor with path ${serverPath}")
    server ! Register(ref)
    // Immediately asks the server to create the root window sized from the model.
    server ! RequestMessage(-1, CreateRootRequest(
      Window.root.id,
      Window.root.width,
      Window.root.height
    ))
  }
  override def !(msg: Any) {
    server ! msg
  }
}
/** No-op bridge client used in stand-alone mode: both operations do nothing. */
object DummyBridgeClient extends BridgeClientLike {
  override def register(): Unit = ()
  override def !(msg: Any): Unit = ()
}
| tuvistavie/scala-x-server | backend/src/main/scala/com/tuvistavie/xserver/xbridge/BridgeClient.scala | Scala | mit | 1,544 |
/*
* Copyright 2014 Christos KK Loverdos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ckkloverdos.topsort
/**
 * Result parent type for topological sorting.
 * Two are the possible outcomes: success is denoted by [[com.ckkloverdos.topsort.TopSortOk]]
 * and failure is denoted by [[com.ckkloverdos.topsort.TopSortCycle]].
 *
 * @tparam N is the graph node type
 *
 * @see TopSortOk, TopSortCycle
 */
sealed trait TopSortResult[N]
/**
 * A successful topological sorting result.
 *
 * @param sorted The nodes in topologically sorted order: every node appears
 *               after the nodes it depends on.
 * @tparam N is the graph node type
 */
final case class TopSortOk[N](sorted: Traversable[N]) extends TopSortResult[N]
/**
 * A failed topological sorting result due to a cyclic dependency.
 * The `path` provided is the detected cycle.
 *
 * @param path The detected cycle.
 * @tparam N is the graph node type
 *
 */
final case class TopSortCycle[N](path: Traversable[N]) extends TopSortResult[N]
| loverdos/topsort | src/main/scala/com/ckkloverdos/topsort/TopSortResult.scala | Scala | apache-2.0 | 1,447 |
package lila.analyse
import akka.actor._
import akka.pattern.pipe
import com.typesafe.config.Config
import lila.notify.NotifyApi
import scala.util.{ Success, Failure }
import spray.caching.{ LruCache, Cache }
import lila.common.PimpedConfig._
/** Dependency wiring for the analyse module: reads its configuration keys and
 *  lazily constructs the analysis collection, requester API, analyser and annotator. */
final class Env(
    config: Config,
    db: lila.db.Env,
    system: ActorSystem,
    roundSocket: ActorSelection,
    indexer: ActorSelection) {
  private val CollectionAnalysis = config getString "collection.analysis"
  private val CollectionRequester = config getString "collection.requester"
  private val NetDomain = config getString "net.domain"
  // NOTE(review): CachedNbTtl, PaginatorMaxPerPage and ActorName are read but
  // not used in this visible portion of the class.
  private val CachedNbTtl = config duration "cached.nb.ttl"
  private val PaginatorMaxPerPage = config getInt "paginator.max_per_page"
  private val ActorName = config getString "actor.name"
  lazy val analysisColl = db(CollectionAnalysis)
  lazy val requesterApi = new RequesterApi(db(CollectionRequester))
  lazy val analyser = new Analyser(
    indexer = indexer,
    requesterApi = requesterApi,
    roundSocket = roundSocket,
    bus = system.lilaBus)
  lazy val annotator = new Annotator(NetDomain)
}
/** Boots the analyse environment from the global application configuration. */
object Env {
  lazy val current = "analyse" boot new Env(
    config = lila.common.PlayApp loadConfig "analyse",
    db = lila.db.Env.current,
    system = lila.common.PlayApp.system,
    roundSocket = lila.hub.Env.current.socket.round,
    indexer = lila.hub.Env.current.actor.gameSearch)
}
| clarkerubber/lila | modules/analyse/src/main/Env.scala | Scala | agpl-3.0 | 1,406 |
package org.jetbrains.plugins.scala
package worksheet.ui
import java.awt.event.{ActionEvent, ActionListener, AdjustmentEvent, AdjustmentListener}
import java.awt.{BorderLayout, Color, Dimension}
import java.util
import javax.swing.{JComponent, JLayeredPane, Timer}
import com.intellij.ide.DataManager
import com.intellij.lang.Language
import com.intellij.openapi.actionSystem.{CommonDataKeys, DataProvider}
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.command.CommandProcessor
import com.intellij.openapi.diff.impl.EditingSides
import com.intellij.openapi.diff.impl.util.SyncScrollSupport
import com.intellij.openapi.editor._
import com.intellij.openapi.editor.event.{CaretAdapter, CaretEvent}
import com.intellij.openapi.editor.ex.EditorEx
import com.intellij.openapi.editor.highlighter.EditorHighlighterFactory
import com.intellij.openapi.editor.impl.{EditorImpl, FoldingModelImpl}
import com.intellij.openapi.editor.markup.TextAttributes
import com.intellij.openapi.fileTypes.LanguageFileType
import com.intellij.openapi.project.Project
import com.intellij.openapi.ui.Splitter
import com.intellij.openapi.util.Key
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.vfs.VirtualFile
import com.intellij.openapi.vfs.newvfs.FileAttribute
import com.intellij.psi._
import com.intellij.ui.JBSplitter
import org.jetbrains.plugins.scala
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.settings.ScalaProjectSettings
import org.jetbrains.plugins.scala.worksheet.processor.{FileAttributeUtilCache, WorksheetSourceProcessor}
import org.jetbrains.plugins.scala.worksheet.runconfiguration.WorksheetViewerInfo
import _root_.scala.collection.mutable.ArrayBuffer
import _root_.scala.util.Random
/**
 * Couples a worksheet source editor with its right-hand viewer editor and
 * incrementally prints evaluation output into the viewer, keeping the two
 * sides aligned line-by-line with fold regions.
 *
 * User: Dmitry Naydanov
 * Date: 1/20/14
 */
class WorksheetEditorPrinter(originalEditor: Editor, worksheetViewer: Editor, file: ScalaFile) {
  private val project = originalEditor.getProject
  private val originalDocument = originalEditor.getDocument
  private val viewerDocument = worksheetViewer.getDocument
  // Periodically flushes buffered output while the evaluation is still running.
  private val timer = new Timer(WorksheetEditorPrinter.IDLE_TIME_MLS, TimerListener)
  private val outputBuffer = new StringBuilder
  // Pending fold descriptors: (left line, viewer offset, visible line count, original end line).
  private val foldingOffsets = ArrayBuffer.apply[(Int, Int, Int, Int)]()
  private var linesCount = 0
  private var totalCount = 0
  private var insertedToOriginal = 0
  private var prefix = ""
  @volatile private var terminated = false
  private var inited = false
  private var cutoffPrinted = false
  private val viewerFolding = worksheetViewer.getFoldingModel.asInstanceOf[FoldingModelImpl]
  private lazy val group =
    new WorksheetFoldGroup(getViewerEditor, originalEditor, project, worksheetViewer.getUserData(WorksheetEditorPrinter.DIFF_SPLITTER_KEY))
  @volatile private var buffed = 0
  originalEditor.asInstanceOf[EditorImpl].setScrollToCaret(false)
  worksheetViewer.asInstanceOf[EditorImpl].setScrollToCaret(false)
  def getViewerEditor = worksheetViewer
  def getOriginalEditor = originalEditor
  def scheduleWorksheetUpdate() {
    timer.start()
  }
  // Consumes one line of interpreter output; returns true when the end-of-output
  // marker has been seen and the printer has terminated.
  def processLine(line: String): Boolean = {
    if (line.stripSuffix("\n") == WorksheetSourceProcessor.END_OUTPUT_MARKER) {
      flushBuffer()
      terminated = true
      return true
    }
    if (!isInsideOutput && line.trim.length == 0) {
      outputBuffer append line
      totalCount += 1
    } else if (isResultEnd(line)) {
      WorksheetSourceProcessor extractLineInfoFrom line match {
        case Some((start, end)) =>
          if (!inited) {
            val first = init()
            val diffBetweenFirst = first map {
              case i => Math.min(i, start)
            } getOrElse start
            if (diffBetweenFirst > 0) prefix = StringUtil.repeat("\n", diffBetweenFirst)
          }
          // Pad or fold so the output chunk lines up with its source lines.
          val differ = end - start + 1 - linesCount
          if (differ > 0) {
            outputBuffer append getNewLines(differ)
          } else if (0 > differ) {
            insertedToOriginal -= differ
            foldingOffsets += (
              (start + insertedToOriginal + differ,
                outputBuffer.length - outputBuffer.reverseIterator.takeWhile(_ == '\n').length,
                end - start + 1, end)
              )
          }
          buffed += linesCount
          if (buffed > WorksheetEditorPrinter.BULK_COUNT) midFlush()
          clear()
        case _ =>
      }
    } else if (!cutoffPrinted) {
      linesCount += 1
      totalCount += 1
      if (linesCount > getOutputLimit) {
        outputBuffer append WorksheetEditorPrinter.END_MESSAGE
        cutoffPrinted = true
      } else outputBuffer append line
    }
    false
  }
  // One-time setup on the first result: wires scroll/caret sync and clears old
  // folds. Returns the offset of the first real (non-generated) element in the file.
  private def init(): Option[Int] = {
    inited = true
    val oldSync = originalEditor getUserData WorksheetEditorPrinter.DIFF_SYNC_SUPPORT
    if (oldSync != null) oldSync.dispose()
    WorksheetEditorPrinter.synch(originalEditor, worksheetViewer,
      Option(worksheetViewer.getUserData(WorksheetEditorPrinter.DIFF_SPLITTER_KEY)), Some(group))
    extensions.invokeLater {
      viewerFolding runBatchFoldingOperation new Runnable {
        override def run() {
          viewerFolding.clearFoldRegions()
        }
      }
      getViewerEditor.getCaretModel.moveToVisualPosition(new VisualPosition(0, 0))
    }
    if (file != null) {
      // Skips leading whitespace and synthetic pre-class elements.
      @inline def checkFlag(psi: PsiElement) =
        psi != null && psi.getCopyableUserData(WorksheetSourceProcessor.WORKSHEET_PRE_CLASS_KEY) != null
      var s = file.getFirstChild
      var f = checkFlag(s)
      while (s.isInstanceOf[PsiWhiteSpace] || f) {
        s = s.getNextSibling
        f = checkFlag(s)
      }
      if (s != null) Some(s.getTextRange.getStartOffset) else None
    } else None
  }
  private def isResultEnd(line: String) = line startsWith WorksheetSourceProcessor.END_TOKEN_MARKER
  private def getNewLines(count: Int) = StringUtil.repeatSymbol('\n', count)
  // Resets per-chunk counters between results.
  private def clear() {
    linesCount = 0
    cutoffPrinted = false
  }
  // Final flush: writes all buffered output into the viewer and persists it.
  def flushBuffer() {
    if (!inited) init()
    if (terminated) return
    val str = getCurrentText
    if (timer.isRunning) timer.stop()
    updateWithPersistentScroll(viewerDocument, str)
    outputBuffer.clear()
    prefix = ""
    extensions.invokeLater {
      getViewerEditor.getMarkupModel.removeAllHighlighters()
    }
    scala.extensions.inReadAction {
      PsiDocumentManager.getInstance(project).getPsiFile(originalEditor.getDocument) match {
        case scalaFile: ScalaFile =>
          WorksheetEditorPrinter.saveWorksheetEvaluation(scalaFile, str,
            worksheetViewer.getUserData(WorksheetEditorPrinter.DIFF_SPLITTER_KEY).getProportion)
        case _ =>
      }
    }
//    flushFolding()
  }
  // Intermediate flush triggered by the timer or by BULK_COUNT buffered lines.
  def midFlush() {
    if (terminated || buffed == 0) return
    val str = getCurrentText
    buffed = 0
    updateWithPersistentScroll(viewerDocument, str)
//    flushFolding()
//    incUpdate(str)
  }
  def getCurrentText = prefix + outputBuffer.toString()
  // Reports an internal error in the viewer and stops further printing.
  def internalError(errorMessage: String) {
    extensions.invokeLater {
      extensions.inWriteAction {
        simpleUpdate("Internal error: " + errorMessage, viewerDocument)
      }
    }
    terminated = true
  }
  // Rewrites the viewer document while preserving both editors' scroll
  // positions, then re-creates fold regions from the pending descriptors.
  private def updateWithPersistentScroll(document: Document, text: String) {// TODO: revisit
    val foldingOffsetsCopy = foldingOffsets.clone()
    foldingOffsets.clear()
    val ed = getViewerEditor
    extensions.invokeLater {
      extensions.inWriteAction {
        val scroll = originalEditor.getScrollingModel.getVerticalScrollOffset
        val worksheetScroll = worksheetViewer.getScrollingModel.getVerticalScrollOffset
        simpleUpdate(text, document)
        originalEditor.getScrollingModel.scrollVertically(scroll)
        worksheetViewer.getScrollingModel.scrollHorizontally(worksheetScroll)
        CommandProcessor.getInstance().executeCommand(project, new Runnable {
          override def run() {
            viewerFolding runBatchFoldingOperation(new Runnable {
              override def run() {
                foldingOffsetsCopy map {
                  case (start, end, limit, originalEnd) =>
                    val offset = originalDocument getLineEndOffset Math.min(originalEnd, originalDocument.getLineCount)
                    val linesCount = viewerDocument.getLineNumber(end) - start - limit + 1
                    new WorksheetFoldRegionDelegate(
                      ed, viewerDocument.getLineStartOffset(start + limit - 1), end,
                      offset, linesCount, group, limit
                    )
                } foreach {
                  case region =>
                    viewerFolding addFoldRegion region
                }
                WorksheetFoldGroup.save(file, group)
              }
            }, false)
          }
        }, null, null)
      }
    }
  }
  private def commitDocument(doc: Document) {
    if (project.isDisposed) return //EA-70786
    PsiDocumentManager getInstance project commitDocument doc
  }
  private def isInsideOutput = linesCount != 0
  private def getOutputLimit = ScalaProjectSettings.getInstance(project).getOutputLimit
  private def simpleUpdate(text: String, document: Document) {
    document setText text
    commitDocument(document)
  }
  // Incremental update path; currently unused (see midFlush). TODO
  private def incUpdate(text: String, document: Document) {//todo
    val linesOld = viewerDocument.getLineCount
    val total = totalCount
    if (total >= linesOld || text.length >= viewerDocument.getTextLength) {
      document setText text
      commitDocument(document)
    } else {
      CommandProcessor.getInstance().executeCommand(project, new Runnable {
        override def run() {
          if (linesOld != viewerDocument.getLineCount) return
          viewerDocument.deleteString(0, text.length)
          viewerDocument.insertString(0, text)
          for (i <- total until viewerDocument.getLineCount) getViewerEditor.getMarkupModel.addLineHighlighter(i, 0,
            new TextAttributes(Color.gray, null, null, null, 0))
          commitDocument(viewerDocument)
        }
      }, null, null)
    }
  }
  // Swing timer callback: periodically flush buffered output.
  object TimerListener extends ActionListener {
    override def actionPerformed(e: ActionEvent): Unit = midFlush()
  }
}
/**
 * Companion utilities: creation and wiring of the worksheet viewer editor,
 * caret/scroll synchronisation between the two sides, and persistence of the
 * last run's output and splitter ratio in virtual-file attributes.
 */
object WorksheetEditorPrinter {
  val END_MESSAGE = "Output exceeds cutoff limit.\n"
  val BULK_COUNT = 15
  val IDLE_TIME_MLS = 1000
  val DIFF_SPLITTER_KEY = Key.create[WorksheetDiffSplitters.SimpleWorksheetSplitter]("SimpleWorksheetViewerSplitter")
  val DIFF_SYNC_SUPPORT = Key.create[SyncScrollSupport]("WorksheetSyncScrollSupport")
  // Persisted per-file attributes: last evaluation output and last splitter ratio.
  private val LAST_WORKSHEET_RUN_RESULT = new FileAttribute("LastWorksheetRunResult", 2, false)
  private val LAST_WORKSHEET_RUN_RATIO = new FileAttribute("ScalaWorksheetLastRatio", 1, false)
  // Editors already given a caret listener: "50" = plain sync, "100" = fold-aware sync.
  private val patched = new util.WeakHashMap[Editor, String]()
  def getPatched = patched
  // Installs one-way caret synchronisation (original -> viewer) plus scroll sync.
  private def synch(originalEditor: Editor, worksheetViewer: Editor,
                    diffSplitter: Option[WorksheetDiffSplitters.SimpleWorksheetSplitter] = None,
                    foldGroup: Option[WorksheetFoldGroup] = None) {
    // All instances compare equal so removeCaretListener can drop a previously installed one.
    class MyCaretAdapterBase extends CaretAdapter {
      override def equals(obj: Any): Boolean = obj match {
        case _: MyCaretAdapterBase => true
        case _ => false
      }
      override def hashCode(): Int = 12345
    }
    // With a fold group, map lines through the fold mapping; otherwise mirror positions directly.
    def createListener(recipient: Editor, don: Editor) = foldGroup map {
      case group => new CaretAdapter {
        override def caretPositionChanged(e: CaretEvent) {
          if (!e.getEditor.asInstanceOf[EditorImpl].getContentComponent.hasFocus) return
          recipient.getCaretModel.moveToVisualPosition(
            new VisualPosition(Math.min(group left2rightOffset don.getCaretModel.getVisualPosition.getLine, recipient.getDocument.getLineCount), 0))
        }
      }
    } getOrElse new CaretAdapter {
      override def caretPositionChanged(e: CaretEvent) {
        if (!e.getEditor.asInstanceOf[EditorImpl].getContentComponent.hasFocus) return
        recipient.getCaretModel.moveToVisualPosition(don.getCaretModel.getVisualPosition)
      }
    }
    // Upgrades a plain listener to a fold-aware one at most once per editor.
    def checkAndAdd(don: Editor, recipient: Editor) {
      patched get don match {
        case "50" | null =>
          patched remove don
          don.getCaretModel.removeCaretListener(new MyCaretAdapterBase)
          don.getCaretModel.addCaretListener(createListener(recipient, don))
          patched.put(don, if (foldGroup.isDefined) "100" else "50")
        case _ =>
      }
    }
    (originalEditor, worksheetViewer) match {
      case (originalImpl: EditorImpl, viewerImpl: EditorImpl) =>
        ApplicationManager.getApplication invokeLater new Runnable {
          override def run() {
            checkAndAdd(originalImpl, viewerImpl)
//            checkAndAdd(viewerImpl, originalImpl)
            viewerImpl.getCaretModel.moveToVisualPosition(
              new VisualPosition(Math.min(originalImpl.getCaretModel.getVisualPosition.line, viewerImpl.getDocument.getLineCount), 0)
            )
            val syncSupport = new SyncScrollSupport
            syncSupport.install(Array[EditingSides](new WorksheetDiffSplitters.WorksheetEditingSides(originalEditor, worksheetViewer)))
            originalEditor.putUserData(DIFF_SYNC_SUPPORT, syncSupport)
            diffSplitter foreach {
              case splitter =>
                viewerImpl.getScrollPane.getVerticalScrollBar.addAdjustmentListener(new AdjustmentListener {
                  override def adjustmentValueChanged(e: AdjustmentEvent): Unit = splitter.redrawDiffs()
                })
            }
          }
        }
      case _ =>
    }
  }
  /** Persists the evaluation output and the splitter ratio for `file`. */
  def saveWorksheetEvaluation(file: ScalaFile, result: String, ratio: Float = 0.5f) {
    FileAttributeUtilCache.writeAttribute(LAST_WORKSHEET_RUN_RESULT, file, result)
    FileAttributeUtilCache.writeAttribute(LAST_WORKSHEET_RUN_RATIO, file, ratio.toString)
  }
  def saveOnlyRatio(file: ScalaFile, ratio: Float = 0.5f) {
    FileAttributeUtilCache.writeAttribute(LAST_WORKSHEET_RUN_RATIO, file, ratio.toString)
  }
  /** Restores the persisted output and splitter ratio; ratio falls back to 0.5. */
  def loadWorksheetEvaluation(file: ScalaFile): Option[(String, Float)] = {
    val ratio = FileAttributeUtilCache.readAttribute(LAST_WORKSHEET_RUN_RATIO, file) map {
      case rr =>
        try {
          java.lang.Float.parseFloat(rr)
        } catch {
          case _: NumberFormatException => 0.5f
        }
    } getOrElse 0.5f
    FileAttributeUtilCache.readAttribute(LAST_WORKSHEET_RUN_RESULT, file).map(s => (s, ratio))
  }
  /** Clears the persisted output and resets the splitter ratio to its default. */
  def deleteWorksheetEvaluation(file: ScalaFile) {
    FileAttributeUtilCache.writeAttribute(LAST_WORKSHEET_RUN_RESULT, file, "")
    // Bug fix: this second write previously targeted LAST_WORKSHEET_RUN_RESULT
    // again (copy-paste), leaving the stored ratio stale after deletion.
    FileAttributeUtilCache.writeAttribute(LAST_WORKSHEET_RUN_RATIO, file, 0.5f.toString)
  }
  def newWorksheetUiFor(editor: Editor, virtualFile: VirtualFile) = newUiFor(editor, virtualFile, true)
  def newMacrosheetUiFor(editor: Editor, virtualFile: VirtualFile) = newUiFor(editor, virtualFile, false)
  def newUiFor(editor: Editor, virtualFile: VirtualFile, isPlain: Boolean) =
    new WorksheetEditorPrinter(editor, createRightSideViewer(editor, virtualFile, getOrCreateViewerEditorFor(editor, isPlain)),
      PsiManager getInstance editor.getProject findFile virtualFile match {
        case scalaFile: ScalaFile => scalaFile
        case _ => null
      }
    )
  def createWorksheetEditor(editor: Editor) = getOrCreateViewerEditorFor(editor, true)
  def createMacroEditor(editor: Editor) = getOrCreateViewerEditorFor(editor, false)
  // Embeds the viewer editor to the right of `editor` behind a diff splitter,
  // preserving the previously used proportion when available.
  def createRightSideViewer(editor: Editor, virtualFile: VirtualFile, rightSideEditor: Editor, modelSync: Boolean = false): Editor = {
    val editorComponent = editor.getComponent
    val editorContentComponent = editor.getContentComponent
    val worksheetViewer = rightSideEditor.asInstanceOf[EditorImpl]
    val prop = if (editorComponent.getComponentCount > 0) editorComponent.getComponent(0) match {
      case splitter: JBSplitter => splitter.getProportion
      case _ if worksheetViewer.getUserData(DIFF_SPLITTER_KEY) != null =>
        worksheetViewer.getUserData(DIFF_SPLITTER_KEY).getProportion
      case _ => 0.5f
    } else 0.5f
    val dimension = editorComponent.getSize()
    val prefDim = new Dimension(dimension.width / 2, dimension.height)
    editor.getSettings setFoldingOutlineShown false
    worksheetViewer.getComponent setPreferredSize prefDim
    if (modelSync) synch(editor, worksheetViewer)
    editorContentComponent.setPreferredSize(prefDim)
    if (!ApplicationManager.getApplication.isUnitTestMode) {
      val child = editorComponent.getParent
      val parent = child.getParent
      val diffPane = WorksheetDiffSplitters.createSimpleSplitter(editor, worksheetViewer, List.empty, List.empty, prop)
      worksheetViewer.putUserData(DIFF_SPLITTER_KEY, diffPane)
      // Re-parenting steals focus; restore it if the source editor had it.
      @inline def preserveFocus(body: => Unit) {
        val hadFocus = editorContentComponent.hasFocus
        body
        if (hadFocus) editorContentComponent.requestFocusInWindow()
      }
      @inline def patchEditor(): Unit = preserveFocus {
        (parent, child) match {
          case (parentPane: JLayeredPane, _) =>
            parentPane remove child
            parentPane.add(diffPane, BorderLayout.CENTER)
          case (_, childPane: JLayeredPane) =>
            childPane remove editorComponent
            childPane.add(diffPane, BorderLayout.CENTER)
          case _ =>
        }
      }
      if (parent.getComponentCount > 1) parent.getComponent(1) match {
        case splitter: Splitter =>
          preserveFocus {
            parent.remove(1)
            parent.add(diffPane, 1)
          }
        case _ => patchEditor()
      } else patchEditor()
    }
    WorksheetViewerInfo.addViewer(worksheetViewer, editor)
    worksheetViewer
  }
  // Reuses a previously registered viewer for `editor`, creating one otherwise.
  private def getOrCreateViewerEditorFor(editor: Editor, isPlain: Boolean) = {
    WorksheetViewerInfo getViewer editor match {
      case editorImpl: EditorImpl => editorImpl
      case _ => if (isPlain) createBlankEditor(editor.getProject) else
        createBlankEditorWithLang(editor.getProject, ScalaFileType.SCALA_LANGUAGE, ScalaFileType.SCALA_FILE_TYPE)
    }
  }
  private def createBlankEditor(project: Project): Editor = {
    val factory: EditorFactory = EditorFactory.getInstance
    val editor: Editor = factory.createViewer(factory createDocument "", project)
    editor setBorder null
    editor.getContentComponent.getParent match {
      case jComp: JComponent =>
        jComp.putClientProperty(
          DataManager.CLIENT_PROPERTY_DATA_PROVIDER, new DataProvider {
            override def getData(dataId: String) = if (CommonDataKeys.HOST_EDITOR.is(dataId)) editor else null
          })
      case _ =>
    }
    editor
  }
  // Viewer with Scala syntax highlighting, backed by a throwaway dummy file.
  private def createBlankEditorWithLang(project: Project, lang: Language, fileType: LanguageFileType): Editor = {
    val file = PsiFileFactory.getInstance(project).createFileFromText("dummy_" + Random.nextString(10), lang, "")
    val editor = EditorFactory.getInstance.createViewer(PsiDocumentManager.getInstance(project).getDocument(file), project)
    val editorHighlighter = EditorHighlighterFactory.getInstance.createEditorHighlighter(project, fileType)
    editor.asInstanceOf[EditorEx].setHighlighter(editorHighlighter)
    editor setBorder null
    editor
  }
}
| LPTK/intellij-scala | src/org/jetbrains/plugins/scala/worksheet/ui/WorksheetEditorPrinter.scala | Scala | apache-2.0 | 19,376 |
/**
* Copyright 2015 Otto (GmbH & Co KG)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.schedoscope.dsl.transformations
import java.io.{ FileInputStream, InputStream }
import org.apache.commons.lang.StringUtils
import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege
import org.apache.hadoop.hive.ql.udf.UDFLength
import org.apache.hive.hcatalog.data.schema.HCatSchema
import org.apache.hive.hcatalog.pig.HCatLoader
import parquet.pig.ParquetStorer
/**
 * Pig Transformation - compute a view by running a Pig Latin script.
 *
 * @param latin Pig Latin script to execute
 * @param dirsToDelete List of directories to empty before Pig execution. Does not include the view's fullPath!
 *
 */
case class PigTransformation(latin: String, dirsToDelete: List[String] = List()) extends Transformation {
  override def name = "pig"
  // Only the script text participates in the change-detection checksum.
  override def stringsToChecksum = List(latin)
  // Single-line, whitespace-collapsed preview of the script, capped at 60 chars.
  description = "[..]" + StringUtils.abbreviate(latin.replaceAll("\\n", "").replaceAll("\\t", "").replaceAll("\\s+", " "), 60)
  /**
   * Jar files to ship with the Pig job, located via representative classes.
   * Classes whose code source cannot be determined are skipped silently.
   */
  def defaultLibraries = {
    import scala.util.control.NonFatal
    // FIXME: declare jars instead of any random class included in this jar
    val classes = List(
      // needed for usage of HCatalog table management
      classOf[HCatLoader], classOf[HCatSchema], classOf[HiveObjectPrivilege], classOf[UDFLength],
      // needed for usage of storage format Parquet with pig
      classOf[ParquetStorer])
    classes.flatMap { cl =>
      try {
        // Option(...) absorbs a null getFile; previously a raw null leaked into the list.
        Option(cl.getProtectionDomain().getCodeSource().getLocation().getFile)
      } catch {
        // Was `case t: Throwable => null`: never swallow fatal errors, and avoid null.
        case NonFatal(_) => None
      }
    }.filter(cl => !"".equals(cl.trim))
  }
}
/** Helpers for loading Pig Latin scripts from streams, resources, or files. */
object PigTransformation {
  /**
   * Reads the whole stream as a UTF-8 string.
   * Fix: the Source (and the underlying stream) is now closed; previously it leaked.
   */
  def scriptFrom(inputStream: InputStream): String = {
    val source = scala.io.Source.fromInputStream(inputStream, "UTF-8")
    try source.mkString finally source.close()
  }
  /** Loads a script bundled on the classpath. */
  def scriptFromResource(resourcePath: String): String = scriptFrom(getClass().getClassLoader().getResourceAsStream(resourcePath))
  /** Loads a script from the local filesystem. */
  def scriptFrom(filePath: String): String = scriptFrom(new FileInputStream(filePath))
}
| hpzorn/schedoscope | schedoscope-core/src/main/scala/org/schedoscope/dsl/transformations/PigTransformation.scala | Scala | apache-2.0 | 2,467 |
package com.outr.arango.api.model
import io.circe.Json
/**
 * Request body model for ArangoDB's task creation endpoint (POST /_api/tasks).
 *
 * @param name    human-readable name of the task
 * @param command JavaScript code to execute (presumably; confirm against the ArangoDB tasks API docs)
 * @param offset  initial delay before the first run, in seconds (assumed — TODO confirm)
 * @param params  parameters passed to the command (assumed — TODO confirm)
 * @param period  seconds between repeated executions; absent for one-off tasks (assumed — TODO confirm)
 */
case class PostAPINewTasks(name: String,
                           command: Option[String] = None,
                           offset: Option[Long] = None,
                           params: Option[String] = None,
                           period: Option[Long] = None) | outr/arangodb-scala | api/src/main/scala/com/outr/arango/api/model/PostAPINewTasks.scala | Scala | mit | 327 |
package es.us.cluster
import es.us.linkage.{Distance, Linkage}
import org.apache.spark.internal.Logging
import org.apache.spark.mllib.clustering.{BisectingKMeans, KMeans, KMeansEmpleo}
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.rdd.RDD
/**
* This object contains three methods that calculates the optimal number for
* clustering using Kmeans, Bisecting KMeans or Linkage in SPARK MLLIB
*
* @author José María Luna and José David Martín
* @version 1.0
* @since v1.0 Dev
*/
object ClusterIndex extends Logging {
//Return 0 if the data is empty, else return data parsed to Double
def dataToDouble(s: String): Double = {
return if (s.isEmpty) 0 else s.toDouble
}
/**
* Return Silhouette, Dunn, Davies-Bouldin and WSSSE validity clustering indices and its time after using KMeans from Mllib
* @param parsedData RDD with parsed data ready to cluster. Its values are set in Vector from mllib
* @param numClusters Set the number of clusters to apply
* @param numIterations Set the number of iterations that Kmeans does
* @return A tuple composed by the Silhouette, Dunn, Davies-Bouldin and WSSSE and its corresponding time
* @example getIndicesKMeans(parsedData, 2, 500)
*/
def getIndicesKMeans(parsedData: RDD[org.apache.spark.mllib.linalg.Vector], numClusters: Int, numIterations: Int): (Double, Double, Double, Double, Long, Long, Long, Long) = {
var i = 1
var s = ""
val sc = parsedData.sparkContext
val clusters = KMeansEmpleo.train(parsedData, numClusters, numIterations, 1, "k-means||", Utils.giveMeTime())
//Global Center
val centroides = sc.parallelize(clusters.clusterCenters)
val centroidesCartesian = centroides.cartesian(centroides).filter(x => x._1 != x._2).cache()
var startTimeK = System.currentTimeMillis
val intraMean = clusters.computeCost(parsedData) / parsedData.count()
val interMeanAux = centroidesCartesian.map(x => Vectors.sqdist(x._1, x._2)).reduce(_ + _)
val interMean = interMeanAux / centroidesCartesian.count()
/*val clusterCentroides = KMeans.train(centroides, 1, numIterations)
val interMean = clusterCentroides.computeCost(centroides) / centroides.count()
*/
//Get Silhoutte index: (intercluster - intracluster)/Max(intercluster,intracluster)
val silhoutte = (interMean - intraMean) / (if (interMean > intraMean) interMean else intraMean)
s += i + ";" + silhoutte + ";"
var stopTimeK = System.currentTimeMillis
val elapsedTimeSil = (stopTimeK - startTimeK)
//DUNN
startTimeK = System.currentTimeMillis
//Min distance between centroids
val minA = centroidesCartesian.map(x => Vectors.sqdist(x._1, x._2)).min()
/*
//Min distance from centroids to global centroid
val minA = centroides.map { x =>
Vectors.sqdist(x, clusterCentroides.clusterCenters.head)
}.min()
*/
//Max distance from points to its centroid
val maxB = parsedData.map { x =>
Vectors.sqdist(x, clusters.clusterCenters(clusters.predict(x)))
}.max
//Get Dunn index: Mín(Dist centroides al centroide)/Max(dist punto al centroide)
val dunn = minA / maxB
stopTimeK = System.currentTimeMillis
val elapsedTime = (stopTimeK - startTimeK)
//DAVIES-BOULDIN
startTimeK = System.currentTimeMillis
val avgCentroid = parsedData.map { x =>
//Vectors.sqdist(x, clusters.clusterCenters(clusters.predict(x)))
(clusters.predict(x), x)
}.map(x => (x._1, (Vectors.sqdist(x._2, clusters.clusterCenters(x._1)))))
.mapValues(x => (x, 1))
.reduceByKey((x, y) => (x._1 + y._1, x._2 + y._2))
.mapValues(y => 1.0 * y._1 / y._2)
.collectAsMap()
val bcAvgCentroid = sc.broadcast(avgCentroid)
val centroidesWithId = centroides.zipWithIndex()
.map(_.swap).cache()
val cartesianCentroides = centroidesWithId.cartesian(centroidesWithId).filter(x => x._1._1 != x._2._1)
val davis = cartesianCentroides.map { case (x, y) => (x._1.toInt, (bcAvgCentroid.value(x._1.toInt) + bcAvgCentroid.value(y._1.toInt)) / Vectors.sqdist(x._2, y._2)) }
.groupByKey()
.map(_._2.max)
.reduce(_ + _)
val bouldin = davis / numClusters
stopTimeK = System.currentTimeMillis
val elapsedTimeDavies = (stopTimeK - startTimeK)
//WSSSE
startTimeK = System.currentTimeMillis
val wssse = clusters.computeCost(parsedData)
stopTimeK = System.currentTimeMillis
val elapsedTimeW = (stopTimeK - startTimeK)
(silhoutte, dunn, bouldin, wssse, elapsedTimeSil, elapsedTime, elapsedTimeDavies, elapsedTimeW)
}
/**
* Return Silhouette, Dunn, Davies-Bouldin and WSSSE validity clustering indices and its time after using Bisecting KMeans from Mllib
* @param parsedData RDD with parsed data ready to cluster. Its values are set in Vector from mllib
* @param numClusters Set the number of clusters to apply
* @param numIterations Set the number of iterations that Kmeans does
* @return A tuple composed by the Silhouette, Dunn, Davies-Bouldin and WSSSE and its corresponding time
* @example getIndicesBKM(parsedData, 2, 500)
*/
def getIndicesBKM(parsedData: RDD[org.apache.spark.mllib.linalg.Vector], numClusters: Int, numIterations: Int): (Double, Double, Double, Double, Long, Long, Long, Long) = {
var i = 1
var s = ""
val sc = parsedData.sparkContext
//val clusters = KMeans.train(parsedData, numClusters, numIterations, 1, "k-means||", Utils.giveMeTime())
val clusters = new BisectingKMeans()
.setK(numClusters)
.setMaxIterations(numIterations)
.setSeed(Utils.giveMeTime())
.run(parsedData)
//Global Center
val centroides = sc.parallelize(clusters.clusterCenters)
val centroidesCartesian = centroides.cartesian(centroides).filter(x => x._1 != x._2).cache()
var startTimeK = System.currentTimeMillis
val intraMean = clusters.computeCost(parsedData) / parsedData.count()
val interMeanAux = centroidesCartesian.map(x => Vectors.sqdist(x._1, x._2)).reduce(_ + _)
val interMean = interMeanAux / centroidesCartesian.count()
/*val clusterCentroides = KMeans.train(centroides, 1, numIterations)
val interMean = clusterCentroides.computeCost(centroides) / centroides.count()
*/
//Get Silhoutte index: (intercluster - intracluster)/Max(intercluster,intracluster)
val silhoutte = (interMean - intraMean) / (if (interMean > intraMean) interMean else intraMean)
s += i + ";" + silhoutte + ";"
var stopTimeK = System.currentTimeMillis
val elapsedTimeSil = (stopTimeK - startTimeK)
//DUNN
startTimeK = System.currentTimeMillis
//Min distance between centroids
val minA = centroidesCartesian.map(x => Vectors.sqdist(x._1, x._2)).min()
/*
//Min distance from centroids to global centroid
val minA = centroides.map { x =>
Vectors.sqdist(x, clusterCentroides.clusterCenters.head)
}.min()
*/
//Max distance from points to its centroid
val maxB = parsedData.map { x =>
Vectors.sqdist(x, clusters.clusterCenters(clusters.predict(x)))
}.max
//Get Dunn index: Mín(Dist centroides al centroide)/Max(dist punto al centroide)
val dunn = minA / maxB
stopTimeK = System.currentTimeMillis
val elapsedTime = (stopTimeK - startTimeK)
//DAVIES-BOULDIN
startTimeK = System.currentTimeMillis
val avgCentroid = parsedData.map { x =>
//Vectors.sqdist(x, clusters.clusterCenters(clusters.predict(x)))
(clusters.predict(x), x)
}.map(x => (x._1, (Vectors.sqdist(x._2, clusters.clusterCenters(x._1)))))
.mapValues(x => (x, 1))
.reduceByKey((x, y) => (x._1 + y._1, x._2 + y._2))
.mapValues(y => 1.0 * y._1 / y._2)
.collectAsMap()
val bcAvgCentroid = sc.broadcast(avgCentroid)
val centroidesWithId = centroides.zipWithIndex()
.map(_.swap).cache()
val cartesianCentroides = centroidesWithId.cartesian(centroidesWithId).filter(x => x._1._1 != x._2._1)
val davis = cartesianCentroides.map { case (x, y) => (x._1.toInt, (bcAvgCentroid.value(x._1.toInt) + bcAvgCentroid.value(y._1.toInt)) / Vectors.sqdist(x._2, y._2)) }
.groupByKey()
.map(_._2.max)
.reduce(_ + _)
val bouldin = davis / numClusters
stopTimeK = System.currentTimeMillis
val elapsedTimeDavies = (stopTimeK - startTimeK)
//WSSSE
startTimeK = System.currentTimeMillis
val wssse = clusters.computeCost(parsedData)
stopTimeK = System.currentTimeMillis
val elapsedTimeW = (stopTimeK - startTimeK)
(silhoutte, dunn, bouldin, wssse, elapsedTimeSil, elapsedTime, elapsedTimeDavies, elapsedTimeW)
}
/**
* Return Silhouette, Dunn, Davies-Bouldin and WSSSE validity clustering indices and its time after using Linkage
* @param parsedData RDD with parsed data ready to cluster. Its values are set in Vector from mllib
* @param coordinates RDD with the values of each point and its id. The format is (Int, Vector)
* @param distances RDD with the distances between all points. Its values are Distances class instances
* @param numPoints The total number of points to the data
* @param clusterFilterNumber A Int number to filter the minimum number of points in each centroids
* @param strategyDistance Strategy to run Linkage algorithm in String (min, max or avg)
* @param minClusters Set the minimum number of clusters
* @param maxClusters Set the maximum number of clusters
* @return A RDD composed by each iteration and the Silhouette, Dunn, Davies-Bouldin and WSSSE and its corresponding time
* @example getIndicesLinkage(parsedData, coordinates, distances, 150, 1, "avg", 2, 10)
*/
def getIndicesLinkage(parsedData: RDD[org.apache.spark.mllib.linalg.Vector], coordinates: RDD[(Int,Vector)], distances: RDD[Distance],
numPoints: Int, clusterFilterNumber: Int, strategyDistance: String, minClusters: Int, maxClusters: Int):
RDD[(Int, (Double, Double, Double, Double, Long, Long, Long, Long))] = {
var i = 1
var s = ""
val sc = parsedData.sparkContext
//Create a Map to save the result model
var modelResult = scala.collection.mutable.Map[Int, (Double, Double, Double, Double, Long, Long, Long, Long)]()
//Set up the checkpoint directory
sc.setCheckpointDir("checkpoints")
//Initialize the number of clusters with the minimum number of clusters
var numberClusters = minClusters
println("Number of points: " + numPoints)
//Create a Linkage object with the number of clusters and the strategy distance choose (min,max,avg)
val linkage = new Linkage(numberClusters, strategyDistance)
println("New Linkage with strategy: " + strategyDistance)
//Run the Linkage algorithm and create the clusters variable
var clusters = linkage.runAlgorithm(distances, numPoints)
//Initialize an RDD from 1 to the number of points in our database
val totalPoints = sc.parallelize(1 to numPoints).cache()
for (k <- minClusters to maxClusters) {
val start = System.nanoTime
//For each iteration the number of clusters is equal to the K iteration
val numClusters = k
//In function of the number of clusters the centroids will chance
val resultPoints = clusters.createClusters(numPoints, numClusters, totalPoints)
val centroids = clusters.inicializeCenters(coordinates, numClusters, numPoints, clusterFilterNumber, resultPoints)
clusters.setClusterCenters(centroids)
//Global Center
val centroides = sc.parallelize(clusters.clusterCenters)
val centroidesCartesian = centroides.cartesian(centroides).filter(x => x._1 != x._2).cache()
var startTimeK = System.currentTimeMillis
val intraMean = clusters.computeCost(parsedData) / parsedData.count()
val interMeanAux = centroidesCartesian.map(x => Vectors.sqdist(x._1, x._2)).reduce(_ + _)
val interMean = interMeanAux / centroidesCartesian.count()
//Get Silhoutte index: (intercluster - intracluster)/Max(intercluster,intracluster)
val silhoutte = (interMean - intraMean) / (if (interMean > intraMean) interMean else intraMean)
s += i + ";" + silhoutte + ";"
var stopTimeK = System.currentTimeMillis
val elapsedTimeSil = (stopTimeK - startTimeK)
//DUNN
startTimeK = System.currentTimeMillis
//Min distance between centroids
val minA = centroidesCartesian.map(x => Vectors.sqdist(x._1, x._2)).min()
//Max distance from points to its centroid
val maxB = parsedData.map { x =>
Vectors.sqdist(x, clusters.clusterCenters(clusters.predict(x)))
}.max
//Get Dunn index: Mín(Dist centroides al centroide)/Max(dist punto al centroide)
val dunn = minA / maxB
stopTimeK = System.currentTimeMillis
val elapsedTime = (stopTimeK - startTimeK)
//DAVIES-BOULDIN
startTimeK = System.currentTimeMillis
val avgCentroid = parsedData.map { x =>
(clusters.predict(x), x)
}.map(x => (x._1, (Vectors.sqdist(x._2, clusters.clusterCenters(x._1)))))
.mapValues(x => (x, 1))
.reduceByKey((x, y) => (x._1 + y._1, x._2 + y._2))
.mapValues(y => 1.0 * y._1 / y._2)
.collectAsMap()
val bcAvgCentroid = sc.broadcast(avgCentroid)
val centroidesWithId = centroides.zipWithIndex()
.map(_.swap).cache()
val cartesianCentroides = centroidesWithId.cartesian(centroidesWithId).filter(x => x._1._1 != x._2._1)
val davis = cartesianCentroides.map { case (x, y) => (x._1.toInt, (bcAvgCentroid.value(x._1.toInt) + bcAvgCentroid.value(y._1.toInt)) / Vectors.sqdist(x._2, y._2)) }
.groupByKey
.map(_._2.max)
.reduce(_ + _)
val bouldin = davis / numClusters
stopTimeK = System.currentTimeMillis
val elapsedTimeDavies = (stopTimeK - startTimeK)
//WSSSE
startTimeK = System.currentTimeMillis
val wssse = clusters.computeCost(parsedData)
stopTimeK = System.currentTimeMillis
val elapsedTimeW = (stopTimeK - startTimeK)
println("*** K = " + k + " ***")
println("Executing Indices")
println("VALUES:")
println("\\tSilhouette: " + silhoutte)
println("\\tDunn: " + dunn)
println("\\tDavies-Bouldin: " + bouldin)
println("\\tWSSSE: " + wssse)
println("Elapsed Time:")
println("\\tTime Silhouette: " + elapsedTimeSil)
println("\\tTime Dunn: " + elapsedTime)
println("\\tTime Davies-Bouldin: " + elapsedTimeDavies)
println("\\tTime WSSSE: " + elapsedTimeW)
println("\\n")
//Show the duration to run the algorithm with eack K
val duration = (System.nanoTime - start) / 1e9d
logInfo(s"Time for iteration $k: " + duration)
//Save in the model the results of each iteration
modelResult += k -> (silhoutte, dunn, bouldin, wssse, elapsedTimeSil, elapsedTime, elapsedTimeDavies, elapsedTimeW)
}
sc.parallelize(modelResult.toSeq)
}
}
| josemarialuna/ClusterIndices | src/main/scala/es/us/cluster/ClusterIndex.scala | Scala | apache-2.0 | 15,052 |
package org.openjdk.jmh.samples
import org.openjdk.jmh.annotations._
import scala.util.Random.nextInt
object VectorBenchmark {

  /** Shared JMH fixture; one instance per benchmark trial. */
  @State(Scope.Benchmark)
  class BenchmarkState {

    // Collection size under test; JMH runs each benchmark once per value.
    @Param(Array("10", "100", "1000", "10000"))
    var size: Int = 0

    // The same random data materialized as three collection types.
    var list: List[Int] = _
    var vector: Vector[Int] = _
    var array: Array[Int] = _

    // Populated once per trial, before measurement starts.
    @Setup(Level.Trial)
    def setup(): Unit = {
      list = (1 to size).map(_ => nextInt).toList
      vector = list.toVector
      array = vector.toArray
    }
  }
}
/** JMH benchmarks comparing element prepend/append on immutable List and Vector. */
class VectorBenchmark {
  import VectorBenchmark.BenchmarkState

  // Cons onto a List (constant-time prepend).
  @Benchmark
  def prependList(st: BenchmarkState): List[Int] =
    999 :: st.list

  @Benchmark
  def prependVector(st: BenchmarkState): Vector[Int] =
    999 +: st.vector

  @Benchmark
  def appendVector(st: BenchmarkState): Vector[Int] =
    st.vector :+ 999

  // Kept for reference: the equivalent copy-based Array prepend, currently disabled.
  // @Benchmark
  // def prependArray(st: BenchmarkState): Array[Int] = {
  // val arr = new Array[Int](st.size + 1)
  // System.arraycopy(st.array, 0, arr, 1, st.array.length)
  // arr(0) = 999
  // arr
  // }
}
| non/debox | benchmark/src/main/scala/debox/benchmark/VectorBenchmark.scala | Scala | mit | 1,065 |
package models.querymodels
import java.time.OffsetDateTime
import ore.data.project.ProjectNamespace
import ore.db.DbRef
import ore.models.admin.Review
/**
 * Query projection of a review together with its project's namespace,
 * presumably used to render a user's review activity — confirm against callers.
 *
 * @param endedAt when the review ended, if it has
 * @param id      id of the underlying review row
 * @param project namespace of the reviewed project
 */
case class ReviewActivity(
    endedAt: Option[OffsetDateTime],
    id: DbRef[Review],
    project: ProjectNamespace
)
/**
 * Query projection of a flag together with its project's namespace,
 * presumably used to render a user's flag activity — confirm against callers.
 *
 * @param resolvedAt when the flag was resolved, if it has been
 * @param project    namespace of the flagged project
 */
case class FlagActivity(
    resolvedAt: Option[OffsetDateTime],
    project: ProjectNamespace
)
| SpongePowered/Ore | ore/app/models/querymodels/activity.scala | Scala | mit | 370 |
package im.actor.server.session
import im.actor.server.ActorSpecification
import scala.concurrent.duration._
import scala.util.Random
import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import im.actor.api.rpc._
import im.actor.api.rpc.auth.{ RequestSendAuthCodeObsolete, ResponseSendAuthCodeObsolete }
import im.actor.api.rpc.codecs.RequestCodec
import im.actor.api.rpc.contacts.UpdateContactRegistered
import im.actor.server.mtproto.codecs.protocol.MessageBoxCodec
import im.actor.server.mtproto.protocol._
import im.actor.server.push.SeqUpdatesManager
/**
 * Tests of the session layer's re-send behaviour: message boxes and sequence
 * updates that are not acknowledged must be re-delivered after the configured
 * ack-timeout (shortened to 5 seconds here so the Thread.sleep(5000) calls
 * below line up with it), and must stop being re-sent once acknowledged.
 */
class SessionResendSpec extends BaseSessionSpec(
  ActorSpecification.createSystem(ConfigFactory.parseString(
    """
      |session {
      |  resend {
      |    ack-timeout = 5 seconds
      |  }
      |}
    """.stripMargin
  ))
) {
  behavior of "Session's ReSender"
  it should "Resend messages if no ack received within ack-timeout" in Sessions().e1
  it should "Resend messages to new client" in Sessions().e2
  it should "not Resend messages if ack received within ack-timeout" in Sessions().e3
  it should "Resend updates if no ack received within ack-timeout" in Sessions().e4
  case class Sessions() {
    // An un-acked RPC result is re-sent after every ack-timeout until finally acked.
    def e1() = {
      implicit val probe = TestProbe()
      val authId = createAuthId()
      val sessionId = Random.nextLong()
      val messageId = Random.nextLong()
      val encodedRequest = RequestCodec.encode(Request(RequestSendAuthCodeObsolete(75553333333L, 1, "apiKey"))).require
      sendMessageBox(authId, sessionId, sessionRegion.ref, messageId, RpcRequestBox(encodedRequest))
      ignoreNewSession(authId, sessionId)
      expectMessageAck(authId, sessionId, messageId)
      expectRpcResult(sendAckAt = None) should matchPattern {
        case RpcOk(ResponseSendAuthCodeObsolete(_, _)) ⇒
      }
      // We didn't send Ack
      Thread.sleep(5000)
      expectRpcResult(sendAckAt = None) should matchPattern {
        case RpcOk(ResponseSendAuthCodeObsolete(_, _)) ⇒
      }
      // Still no ack
      Thread.sleep(5000)
      expectRpcResult(sendAckAt = None) should matchPattern {
        case RpcOk(ResponseSendAuthCodeObsolete(_, _)) ⇒
      }
      // Still no ack
      Thread.sleep(5000)
      // This time the result is acked (default sendAckAt), so re-sending stops.
      expectRpcResult() should matchPattern {
        case RpcOk(ResponseSendAuthCodeObsolete(_, _)) ⇒
      }
      probe.expectNoMsg(5.seconds)
    }
    // A pending (un-acked) response is delivered to a new client of the same session.
    def e2() = {
      val authId = createAuthId()
      val sessionId = Random.nextLong()
      val messageId = Random.nextLong()
      // First client: sends the request but never acks the response.
      {
        implicit val probe = TestProbe()
        val encodedRequest = RequestCodec.encode(Request(RequestSendAuthCodeObsolete(75553333333L, 1, "apiKey"))).require
        sendMessageBox(authId, sessionId, sessionRegion.ref, messageId, RpcRequestBox(encodedRequest))
        expectNewSession(authId, sessionId, messageId)
        expectMessageAck(authId, sessionId, messageId)
        expectRpcResult(sendAckAt = None) should matchPattern {
          case RpcOk(ResponseSendAuthCodeObsolete(_, _)) ⇒
        }
      }
      // We didn't send Ack
      Thread.sleep(5000)
      // Second client (new probe) attaches to the same session and receives the pending response.
      {
        implicit val probe = TestProbe()
        sendMessageBox(authId, sessionId, sessionRegion.ref, messageId, SessionHello)
        // response to previous request
        expectRpcResult() should matchPattern {
          case RpcOk(ResponseSendAuthCodeObsolete(_, _)) ⇒
        }
        expectMessageAck(authId, sessionId, messageId)
        probe.expectNoMsg(6.seconds)
      }
    }
    // Once acked (directly or inside a Container), a response is never re-sent.
    def e3() = {
      implicit val probe = TestProbe()
      val authId = createAuthId()
      val sessionId = Random.nextLong()
      ignoreNewSession(authId, sessionId)
      // Single ack
      {
        val messageId = Random.nextLong()
        val encodedRequest = RequestCodec.encode(Request(RequestSendAuthCodeObsolete(75553333333L, 1, "apiKey"))).require
        sendMessageBox(authId, sessionId, sessionRegion.ref, messageId, RpcRequestBox(encodedRequest))
        expectMessageAck(authId, sessionId, messageId)
        val mb = expectMessageBox(authId, sessionId)
        sendMessageBox(authId, sessionId, sessionRegion.ref, Random.nextLong(), MessageAck(Vector(mb.messageId)))
        probe.expectNoMsg(6.seconds)
      }
      // Ack inside Container
      {
        val messageId = Random.nextLong()
        val encodedRequest = RequestCodec.encode(Request(RequestSendAuthCodeObsolete(75553333333L, 1, "apiKey"))).require
        sendMessageBox(authId, sessionId, sessionRegion.ref, messageId, RpcRequestBox(encodedRequest))
        expectMessageAck(authId, sessionId, messageId)
        val mb = expectMessageBox(authId, sessionId)
        val containerMessageId = Random.nextLong()
        sendMessageBox(authId, sessionId, sessionRegion.ref, containerMessageId, Container(Seq(MessageBox(Random.nextLong, MessageAck(Vector(mb.messageId))))))
        expectMessageAck(authId, sessionId, containerMessageId)
        probe.expectNoMsg(6.seconds)
      }
    }
    // Sequence updates are re-sent on ack-timeout, just like RPC results.
    def e4() = {
      implicit val probe = TestProbe()
      val authId = createAuthId()
      val sessionId = Random.nextLong()
      val helloMessageId = Random.nextLong()
      sendMessageBox(authId, sessionId, sessionRegion.ref, helloMessageId, SessionHello)
      expectNewSession(authId, sessionId, helloMessageId)
      expectMessageAck(authId, sessionId, helloMessageId)
      val update = UpdateContactRegistered(1, false, 1L, 2L)
      SeqUpdatesManager.persistAndPushUpdate(authId, update, None, isFat = false)
      expectSeqUpdate(authId, sessionId, None)
      // Still no ack
      Thread.sleep(5000)
      // The update is re-delivered and acked this time, so nothing further arrives.
      expectSeqUpdate(authId, sessionId)
      probe.expectNoMsg(6.seconds)
    }
  }
} | Just-D/actor-platform | actor-server/actor-tests/src/test/scala/im/actor/server/session/SessionResendSpec.scala | Scala | mit | 5,693 |
package org.scaladebugger.api.lowlevel.requests.filters
import org.scaladebugger.api.lowlevel.requests.JDIRequestProcessor
import org.scaladebugger.api.lowlevel.requests.filters.processors.ClassInclusionFilterProcessor
/**
* Represents a filter used to limit requests to only classes specified by
* this filter. Requests are checked by verifying the class containing the
* current method being invoked.
*
* @note Only used by AccessWatchpointRequest, ClassPrepareRequest,
* ClassUnloadRequest, ExceptionRequest, MethodEntryRequest,
* MethodExitRequest, ModificationWatchpointRequest,
* MonitorContendedEnteredRequest, MonitorContendedEnterRequest,
* MonitorWaitedRequest, MonitorWaitRequest, and StepRequest.
*
* @param classPattern Classes whose names do match this pattern will be
* included, can only take normal characters and wildcard
* "*", meaning "*.Foo" or "java.*"
*/
case class ClassInclusionFilter(classPattern: String) extends JDIRequestFilter {

  /** Builds the processor that applies this inclusion filter to a JDI request. */
  override def toProcessor: JDIRequestFilterProcessor = new ClassInclusionFilterProcessor(this)
}
| chipsenkbeil/scala-debugger | scala-debugger-api/src/main/scala/org/scaladebugger/api/lowlevel/requests/filters/ClassInclusionFilter.scala | Scala | apache-2.0 | 1,273 |
package org.openurp.edu.eams.teach.lesson.task.service.impl
import org.beangle.commons.collection.Collections
import org.openurp.edu.teach.code.CourseTakeType
import org.openurp.edu.teach.lesson.LessonLimitGroup
import org.openurp.edu.teach.lesson.CourseTake
import org.openurp.edu.teach.lesson.Lesson
import org.openurp.edu.teach.lesson.TeachClass
import org.openurp.edu.eams.teach.lesson.task.service.LessonMergeSplitService
import org.openurp.edu.eams.teach.lesson.task.splitter.AbstractTeachClassSplitter
import org.beangle.commons.dao.impl.BaseServiceImpl
import org.openurp.edu.eams.teach.lesson.dao.LessonDao
import org.openurp.edu.eams.teach.lesson.service.LessonLimitService
import org.openurp.edu.eams.teach.lesson.service.TeachClassNameStrategy
import org.openurp.edu.eams.teach.lesson.util.LessonElectionUtil
import org.openurp.edu.eams.teach.lesson.service.LessonLogHelper
import scala.collection.mutable.HashSet
import org.openurp.edu.eams.teach.lesson.service.LessonLogBuilder
import org.openurp.edu.teach.code.model.CourseTakeTypeBean
/**
 * Merges several lessons (course tasks) into one surviving lesson, or splits
 * one lesson's teach class into several lessons, keeping course takes, limit
 * groups and student counts consistent and logging every change.
 */
class LessonMergeSplitServiceImpl extends BaseServiceImpl with LessonMergeSplitService {

  var lessonDao: LessonDao = _
  var lessonLimitService: LessonLimitService = _
  var lessonLogHelper: LessonLogHelper = _
  var teachClassNameStrategy: TeachClassNameStrategy = _

  /** Merges the given lessons, keeping the first one as the surviving lesson. */
  def merge(taskIds: Array[Long]): Lesson = merge(taskIds, 0)

  /**
   * Merges the given lessons, keeping the lesson with id `reservedId`.
   * Returns null when the input is empty/null or `reservedId` is not among the ids.
   */
  def merge(lessonIds: Array[Long], reservedId: java.lang.Long): Lesson = {
    if (null == lessonIds || lessonIds.length < 1 || null == reservedId) {
      return null
    }
    for (i <- 0 until lessonIds.length if reservedId == lessonIds(i)) {
      return merge(lessonIds, i)
    }
    null
  }

  /** Loads the lessons by id and merges them into the one at index `target`; null on invalid input. */
  private def merge(taskIds: Array[java.lang.Long], target: Int): Lesson = {
    if (null == taskIds || taskIds.length == 0 || target >= taskIds.length ||
      target < 0) {
      return null
    }
    val taskList = entityDao.find(classOf[Lesson], taskIds)
    // Bail out if any of the requested lessons no longer exists.
    if (taskList.isEmpty || taskList.size != taskIds.length) {
      return null
    }
    val tasks = Array.ofDim[Lesson](taskList.size)
    taskList.toArray(tasks)
    merge(tasks, target)
  }

  /** Folds every lesson except the reserved one into the reserved lesson, then persists and logs. */
  private def merge(tasks: Array[Lesson], target: Int): Lesson = {
    for (i <- 0 until tasks.length) {
      // Skip the reserved lesson itself. The original code only carried a
      // "//continue" comment here (the Java continue was lost in conversion),
      // so the target was merged with itself, doubling its limit/std counts.
      if (i != target) {
        dirtywork(tasks(target), tasks(i))
      }
    }
    lessonDao.saveMergeResult(tasks, target)
    logMergeResult(tasks, target)
    tasks(target)
  }

  /** Logs the merge outcome: the reserved lesson as updated, the others as deleted. */
  private def logMergeResult(tasks: Array[Lesson], target: Int) {
    for (i <- 0 until tasks.length) {
      if (i == target) {
        lessonLogHelper.log(LessonLogBuilder.update(tasks(i), "合并任务,更新任务"))
      } else {
        lessonLogHelper.log(LessonLogBuilder.delete(tasks(i), "合并任务,删除任务"))
      }
    }
  }

  /**
   * Splits a lesson's teach class into `num` lessons using the given splitter.
   * The original lesson keeps the first split class; the clones get the rest.
   *
   * @param lesson        lesson whose teach class is split
   * @param num           number of resulting lessons; values <= 1 return the lesson unchanged
   * @param mode          strategy that partitions the teach class
   * @param splitUnitNums optional per-part student counts forwarded to the splitter (may be null)
   */
  def split(lesson: Lesson,
            num: Int,
            mode: AbstractTeachClassSplitter,
            splitUnitNums: Array[Integer]): Array[Lesson] = {
    if (num <= 1) {
      return Array(lesson)
    }
    // Slot 0 keeps the original lesson; the rest are clones of it.
    val lessons = Array.ofDim[Lesson](num)
    lessons(0) = lesson
    for (i <- 1 until lessons.length) {
      lessons(i) = lesson.clone().asInstanceOf[Lesson]
    }
    if (splitUnitNums != null) {
      mode.setSplitStdNums(splitUnitNums)
    }
    val splitClasses = mode.splitClass(lesson.teachClass, num)
    LessonElectionUtil.normalizeTeachClass(lesson)
    for (j <- 1 until lessons.length) {
      lessons(j).teachClass = splitClasses(j)
      for (take <- lessons(j).teachClass.courseTakes) {
        // take.courseTakeType = new CourseTakeType(CourseTakeType.NORMAL )
        take.courseTakeType = new CourseTakeTypeBean
      }
      LessonElectionUtil.normalizeTeachClass(lessons(j))
      if (Collections.isNotEmpty(lessons(j).teachClass.courseTakes) &&
        lessons(j).teachClass.courseTakes.iterator.next.persisted) {
        // Snapshot the already-persisted takes before clearing the collection.
        // The original created an EMPTY set here (the HashSet copy in the old
        // commented-out line was lost in conversion), which silently dropped
        // every persisted take of the new lesson.
        val persistedTakes = Collections.newSet[CourseTake]
        persistedTakes ++= lessons(j).teachClass.courseTakes
        lessons(j).teachClass.courseTakes.clear()
        // Temporarily point the takes at the original lesson so the clone can
        // be saved without them, then re-attach and save again.
        for (persistedTake <- persistedTakes) {
          persistedTake.lesson = lesson
        }
        lessonDao.saveOrUpdate(lessons(j))
        lessons(j).teachClass.courseTakes ++= persistedTakes
        for (persistedTake <- persistedTakes) {
          persistedTake.lesson = lessons(j)
        }
        lessonDao.saveOrUpdate(lessons(j))
      } else {
        lessonDao.saveOrUpdate(lessons(j))
      }
      lessonLogHelper.log(LessonLogBuilder.create(lessons(j), "拆分任务,新建任务"))
    }
    // The original lesson takes over the first split class' name, limits and takes.
    lesson.teachClass.limitGroups.clear()
    lessonDao.saveOrUpdate(lesson)
    lesson.teachClass.limitGroups ++= splitClasses(0).limitGroups
    lesson.teachClass.name = splitClasses(0).name
    lesson.teachClass.courseTakes = Collections.newBuffer[CourseTake]
    LessonElectionUtil.addCourseTakes(lesson.teachClass, splitClasses(0).courseTakes)
    lesson.teachClass.stdCount = splitClasses(0).stdCount
    lesson.teachClass.limitCount = splitClasses(0).limitCount
    LessonElectionUtil.normalizeTeachClass(lesson)
    lessonDao.saveOrUpdate(lesson)
    lessonLogHelper.log(LessonLogBuilder.update(lesson, "拆分任务,更新任务"))
    lessons
  }

  /** Re-points source's takes and limit groups at target and accumulates the counts into target. */
  private def dirtywork(target: Lesson, source: Lesson): Lesson = {
    entityDao.executeUpdate("update " + classOf[CourseTake].getName + " take set take.lesson=?1 where take.lesson=?2",
      target, source)
    entityDao.executeUpdate("update " + classOf[LessonLimitGroup].getName + " clg set clg.lesson=?1 where clg.lesson=?2",
      target, source)
    teachClassNameStrategy.autoName(target.teachClass)
    var limitCount = 0
    limitCount += target.teachClass.limitCount
    limitCount += source.teachClass.limitCount
    target.teachClass.limitCount = limitCount
    var stdCount = 0
    stdCount += target.teachClass.stdCount
    stdCount += source.teachClass.stdCount
    target.teachClass.stdCount = stdCount
    target
  }
}
| openurp/edu-eams-webapp | schedule/src/main/scala/org/openurp/edu/eams/teach/lesson/task/service/impl/LessonMergeSplitServiceImpl.scala | Scala | gpl-3.0 | 5,950 |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/**
* @author Mario Pastorelli (mario.pastorelli@teralitycs.ch)
*/
package pureconfig.example
import java.nio.file.{Path, Paths}
import pureconfig.StringConvert
import scala.util.Try
/*
This is an example of configuration for our directory watcher. The configuration needs:
- the path that is the directory to watch
- a filter that will be used to decide if a path should be notified or not
- the email configuration used to send emails
The root namespace will be "dirwatch". For instance, valid property file for this
configuration will contain:
dirwatch.path="/path/to/observe"
dirwatch.filter="*"
dirwatch.email.host=host_of_email_service
dirwatch.email.port=port_of_email_service
dirwatch.email.message="Dirwatch new path found report"
dirwatch.email.recipients="recipient1,recipient2"
dirwatch.email.sender="sender"
*/
package object conf {

  /** Root of the configuration tree; everything lives under the "dirwatch" namespace. */
  case class Config(dirwatch: DirWatchConfig)

  /** Directory-watcher settings: the directory to observe, a path filter, and email settings. */
  case class DirWatchConfig(path: Path, filter: String, email: EmailConfig)

  /** Settings of the email service used to send notifications. */
  case class EmailConfig(host: String, port: Int, message: String, recipients: Set[String], sender: String)

  // Path doesn't have a StringConvert instance, so we provide one here.
  // The result type is annotated explicitly: implicit definitions without an
  // explicit type are fragile under type inference (and illegal in Scala 3).
  implicit val deriveStringConvertForPath: StringConvert[Path] = new StringConvert[Path] {
    override def from(str: String): Try[Path] = Try(Paths.get(str))
    override def to(path: Path): String = path.toString
  }
}
| 13h3r/pureconfig | example/src/main/scala/pureconfig/example/conf/package.scala | Scala | mpl-2.0 | 1,572 |
package ahlers.michael.basic
import ahlers.michael.basic.BasicActor._
import akka.serialization.SerializerWithStringManifest
import com.google.protobuf.ByteString
import scala.collection.convert.ImplicitConversionsToJava._
import scala.collection.convert.ImplicitConversionsToScala._
/**
* @author <a href="mailto:michael@ahlers.consulting">Michael Ahlers</a>
*/
object BasicProtoBufSerializer extends SerializerWithStringManifest {

  // Unique serializer id; must match the Akka serialization configuration.
  override def identifier: Int = 1001

  // Manifests are the fully-qualified names of the generated protobuf classes.
  // V1 carried string payloads; V2 carries raw bytes.
  final val CommandManifestV1 = classOf[V1.Command].getName
  final val EventManifestV1 = classOf[V1.Event].getName
  final val StateManifestV1 = classOf[V1.State].getName

  final val CommandManifestV2 = classOf[V2.Command].getName
  final val EventManifestV2 = classOf[V2.Event].getName
  final val StateManifestV2 = classOf[V2.State].getName

  // Only V2 manifests are produced for newly written data; V1 stays read-only below.
  override def manifest(o: AnyRef): String =
    o match {
      /* Version 1 is deprecated; it's not longer written. */
      //case _: Command => CommandManifestV1
      //case _: Event => EventManifestV1
      //case _: State => StateManifestV1
      case _: Command => CommandManifestV2
      case _: Event => EventManifestV2
      case _: State => StateManifestV2
    }

  // Serialization always writes the V2 (byte-oriented) representation.
  override def toBinary(o: AnyRef): Array[Byte] =
    o match {
      /* Version 1 is deprecated; it's not longer written. */
      //case Command(message) =>
      // V1.Command.newBuilder
      // .setData(message)
      // .build
      // .toByteArray
      //
      //case Event(message) =>
      // V1.Event.newBuilder
      // .setData(message)
      // .build
      // .toByteArray
      //
      //case State(messages) =>
      // V1.State.newBuilder
      // .addAllDatas(messages)
      // .build
      // .toByteArray
      case Command(data) =>
        V2.Command.newBuilder
          .setData(ByteString.copyFrom(data.toArray))
          .build
          .toByteArray
      case Event(data) =>
        V2.Event.newBuilder
          .setData(ByteString.copyFrom(data.toArray))
          .build
          .toByteArray
      case State(datas) =>
        V2.State.newBuilder
          .addAllDatas(datas map { data => ByteString.copyFrom(data.toArray) })
          .build
          .toByteArray
    }

  // Deserialization still supports both schema versions for stored legacy data.
  override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef =
    manifest match {
      case CommandManifestV1 =>
        val command = V1.Command.parseFrom(bytes)
        // NOTE(review): getBytes uses the platform default charset here —
        // presumably matching how V1 data was originally written; confirm
        // before changing, as UTF-8 would alter legacy round-trips.
        Command(command.getData.getBytes)
      case EventManifestV1 =>
        val event = V1.Event.parseFrom(bytes)
        Event(event.getData.getBytes)
      case StateManifestV1 =>
        val state = V1.State.parseFrom(bytes)
        State(state.getDatasList.toList map {
          _.getBytes.toSeq
        })
      case CommandManifestV2 =>
        val command = V2.Command.parseFrom(bytes)
        Command(command.getData.toByteArray.toSeq)
      case EventManifestV2 =>
        val event = V2.Event.parseFrom(bytes)
        Event(event.getData.toByteArray.toSeq)
      case StateManifestV2 =>
        val state = V2.State.parseFrom(bytes)
        State(state.getDatasList.toList map {
          _.toByteArray.toSeq
        })
      case _ =>
        throw new IllegalArgumentException(s"""Don't know how to consume serialization with manifest "$manifest".""")
    }
}
| michaelahlers/sandbox-akka-serialization | src/main/scala/ahlers/michael/basic/BasicProtoBufSerializer.scala | Scala | mit | 3,313 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.api.generation.scala
import ai.h2o.sparkling.api.generation.common._
/** Emits the Scala source of one generated algorithm wrapper: a class extending the
  * algorithm base type and its parameters trait, plus a companion object providing
  * `H2OParamsReadable` support.
  */
object AlgorithmTemplate
  extends ((AlgorithmSubstitutionContext, Seq[ParameterSubstitutionContext]) => String)
  with ScalaEntityTemplate {
  def apply(
      algorithmSubstitutionContext: AlgorithmSubstitutionContext,
      parameterSubstitutionContext: Seq[ParameterSubstitutionContext]): String = {
    val entityName = algorithmSubstitutionContext.entityName
    val algorithmClass =
      generateEntity(substitutionContextFor(algorithmSubstitutionContext, entityName), "class")(
        classBody(entityName))
    val algorithmObject = s"object $entityName extends H2OParamsReadable[$entityName]"
    s"""$algorithmClass
       |$algorithmObject
      """.stripMargin
  }
  /** Assembles the parents, imports and constructor parameters of the generated class. */
  private def substitutionContextFor(
      context: AlgorithmSubstitutionContext,
      entityName: String): EntitySubstitutionContext = {
    val h2oSchemaClassName = context.h2oSchemaClass.getSimpleName
    val parents =
      Seq(s"${context.algorithmType}[${h2oSchemaClassName}]", s"${entityName}Params") ++
        context.extraInheritedEntities
    val imports = Seq(
      s"ai.h2o.sparkling.ml.params.${entityName}Params",
      "ai.h2o.sparkling.ml.utils.H2OParamsReadable",
      context.h2oSchemaClass.getCanonicalName,
      "org.apache.spark.ml.util.Identifiable",
      s"ai.h2o.sparkling.ml.models.${entityName}MOJOModel",
      "org.apache.spark.sql.Dataset")
    EntitySubstitutionContext(context.namespace, entityName, parents, imports, "(override val uid: String)")
  }
  /** Body of the generated class: a no-arg constructor plus a `fit` override that
    * narrows the result to the entity-specific MOJO model. */
  private def classBody(entityName: String): String =
    s""" def this() = this(Identifiable.randomUID(classOf[$entityName].getSimpleName))
       |
       | override def fit(dataset: Dataset[_]): ${entityName}MOJOModel = {
       |   super.fit(dataset).asInstanceOf[${entityName}MOJOModel]
       | }""".stripMargin
}
| h2oai/sparkling-water | api-generation/src/main/scala/ai/h2o/sparkling/api/generation/scala/AlgorithmTemplate.scala | Scala | apache-2.0 | 2,608 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.features.serialization
import java.util.{Collections => JCollections, Map => JMap, UUID}
import com.vividsolutions.jts.geom.Geometry
import org.geotools.factory.Hints
import org.locationtech.geomesa.features.serialization.AbstractWriter._
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes._
/** Combines all readers.
*
*/
trait AbstractReader[Reader]
    extends PrimitiveReader[Reader]
    with NullableReader[Reader]
    with CollectionReader[Reader]
    with GeometryReader[Reader]
    with HintKeyReader[Reader] {
  /** A [[DatumReader]] that reads a [[UUID]] as two raw longs:
    * most significant bits first, then least significant bits. */
  def readUUID: DatumReader[Reader, UUID] = (in: Reader) => {
    val mostSignificantBits = readLong(in)
    val leastSignificantBits = readLong(in)
    new UUID(mostSignificantBits, leastSignificantBits)
  }
  /** A [[DatumReader]] which reads a class name and then an object of that class. If the class name is a null marker
    * then ``null`` will be returned.
    */
  def readGeneric(version: Version): DatumReader[Reader, AnyRef] = (reader) => {
    val className = readString(reader)
    if (className == NULL_MARKER_STR) {
      null
    } else {
      val clazz = Class.forName(className)
      selectReader(clazz, version).apply(reader)
    }
  }
  /**
   * A [[DatumReader]] for reading a map where the key and values may be any type. The map may not be null. The reader
   * will call ``readArrayStart(reader)`` and then, for each entry, read up to four items.
   */
  def readGenericMap(version: Version): DatumReader[Reader, JMap[AnyRef, AnyRef]] = (reader) => {
    var toRead = readArrayStart(reader)
    // Build the generic element reader once: readGeneric allocates a new closure per
    // call, so hoisting it avoids two allocations on every map entry.
    val genericReader = readGeneric(version)
    val map = new java.util.HashMap[AnyRef, AnyRef](toRead)
    while (toRead > 0) {
      val key = genericReader.apply(reader)
      val value = genericReader.apply(reader)
      map.put(key, value)
      toRead -= 1
    }
    map
  }
  /**
   * @param cls the [[Class]] of the object to be read
   * @param version the serialization version being read
   * @param metadata metadata describing element types of collections
   *                 (keys ``USER_DATA_LIST_TYPE``, ``USER_DATA_MAP_KEY_TYPE``, ``USER_DATA_MAP_VALUE_TYPE``)
   * @param isNullable predicate deciding whether values of a given class may carry a null marker
   * @return a [[DatumReader]] capable of reading object of the given ``cls``
   */
  def selectReader(cls: Class[_], version: Version,
                   metadata: JMap[_ <: AnyRef, _ <: AnyRef] = JCollections.emptyMap(),
                   isNullable: isNullableFn = notNullable): DatumReader[Reader, AnyRef] = {
    val reader: DatumReader[Reader, AnyRef] = {
      if (classOf[java.lang.String].isAssignableFrom(cls)) readString
      else if (classOf[java.lang.Integer].isAssignableFrom(cls)) readInt
      else if (classOf[java.lang.Long].isAssignableFrom(cls)) readLong
      else if (classOf[java.lang.Float].isAssignableFrom(cls)) readFloat
      else if (classOf[java.lang.Double].isAssignableFrom(cls)) readDouble
      else if (classOf[java.lang.Boolean].isAssignableFrom(cls)) readBoolean
      else if (classOf[java.util.Date].isAssignableFrom(cls)) readDate
      else if (classOf[UUID].isAssignableFrom(cls)) readUUID
      else if (classOf[Geometry].isAssignableFrom(cls)) selectGeometryReader(version)
      else if (classOf[Hints.Key].isAssignableFrom(cls)) readHintKey
      else if (classOf[java.util.List[_]].isAssignableFrom(cls)) {
        // Element type is carried out-of-band in the metadata map.
        val elemClass = metadata.get(USER_DATA_LIST_TYPE).asInstanceOf[Class[_]]
        val elemReader = selectReader(elemClass, version, isNullable = isNullable)
        readList(elemReader)
      }
      else if (classOf[java.util.Map[_, _]].isAssignableFrom(cls)) {
        val keyClass = metadata.get(USER_DATA_MAP_KEY_TYPE).asInstanceOf[Class[_]]
        val valueClass = metadata.get(USER_DATA_MAP_VALUE_TYPE).asInstanceOf[Class[_]]
        val keyDecoding = selectReader(keyClass, version, isNullable = isNullable)
        val valueDecoding = selectReader(valueClass, version, isNullable = isNullable)
        readMap(keyDecoding, valueDecoding)
      }
      else if (classOf[Array[Byte]].isAssignableFrom(cls)) readBytes
      else throw new IllegalArgumentException("Unsupported class: " + cls)
    }.asInstanceOf[DatumReader[Reader, AnyRef]]
    // Wrap in null handling only when this class's values may be null-marked.
    if (isNullable(cls)) {
      readNullable(reader)
    } else {
      reader
    }
  }
}
| mdzimmerman/geomesa | geomesa-features/geomesa-feature-common/src/main/scala/org/locationtech/geomesa/features/serialization/AbstractReader.scala | Scala | apache-2.0 | 4,477 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.stream.table
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.streaming.api.datastream.DataStream
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
import org.apache.flink.table.api.Types
import org.apache.flink.table.api.scala._
import org.apache.flink.table.sources.{DefinedProctimeAttribute, DefinedRowtimeAttribute, StreamTableSource}
import org.apache.flink.table.utils.TableTestUtil._
import org.apache.flink.table.utils.TableTestBase
import org.apache.flink.types.Row
import org.junit.Test
// Plan tests: verify that rowtime/proctime attributes declared by a TableSource are
// appended to the source's fields and translate into the expected logical plans.
class TableSourceTest extends TableTestBase {
  // The rowtime attribute can be projected like an ordinary field.
  @Test
  def testRowTimeTableSourceSimple(): Unit = {
    val util = streamTestUtil()
    util.tableEnv.registerTableSource("rowTimeT", new TestRowtimeSource("addTime"))
    val t = util.tableEnv.scan("rowTimeT").select("addTime, id, name, val")
    val expected =
      unaryNode(
        "DataStreamCalc",
        "StreamTableSourceScan(table=[[rowTimeT]], fields=[id, val, name, addTime])",
        term("select", "addTime", "id", "name", "val")
      )
    util.verifyTable(t, expected)
  }
  // The rowtime attribute can key a tumbling group window; the filter is pushed
  // below the window aggregation in the expected plan.
  @Test
  def testRowTimeTableSourceGroupWindow(): Unit = {
    val util = streamTestUtil()
    util.tableEnv.registerTableSource("rowTimeT", new TestRowtimeSource("addTime"))
    val t = util.tableEnv.scan("rowTimeT")
      .filter("val > 100")
      .window(Tumble over 10.minutes on 'addTime as 'w)
      .groupBy('name, 'w)
      .select('name, 'w.end, 'val.avg)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamGroupWindowAggregate",
          unaryNode(
            "DataStreamCalc",
            "StreamTableSourceScan(table=[[rowTimeT]], fields=[id, val, name, addTime])",
            term("select", "name", "val", "addTime"),
            term("where", ">(val, 100)")
          ),
          term("groupBy", "name"),
          term("window", "TumblingGroupWindow('w, 'addTime, 600000.millis)"),
          term("select", "name", "AVG(val) AS TMP_1", "end('w) AS TMP_0")
        ),
        term("select", "name", "TMP_0", "TMP_1")
      )
    util.verifyTable(t, expected)
  }
  // Projecting the proctime attribute wraps it in a PROCTIME(...) materialization call.
  @Test
  def testProcTimeTableSourceSimple(): Unit = {
    val util = streamTestUtil()
    util.tableEnv.registerTableSource("procTimeT", new TestProctimeSource("pTime"))
    val t = util.tableEnv.scan("procTimeT").select("pTime, id, name, val")
    val expected =
      unaryNode(
        "DataStreamCalc",
        "StreamTableSourceScan(table=[[procTimeT]], fields=[id, val, name, pTime])",
        term("select", "PROCTIME(pTime) AS pTime", "id", "name", "val")
      )
    util.verifyTable(t, expected)
  }
  // The proctime attribute can order an OVER window; 2.hours becomes a 7200000 ms range.
  @Test
  def testProcTimeTableSourceOverWindow(): Unit = {
    val util = streamTestUtil()
    util.tableEnv.registerTableSource("procTimeT", new TestProctimeSource("pTime"))
    val t = util.tableEnv.scan("procTimeT")
      .window(Over partitionBy 'id orderBy 'pTime preceding 2.hours as 'w)
      .select('id, 'name, 'val.sum over 'w as 'valSum)
      .filter('valSum > 100)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          "StreamTableSourceScan(table=[[procTimeT]], fields=[id, val, name, pTime])",
          term("partitionBy", "id"),
          term("orderBy", "pTime"),
          term("range", "BETWEEN 7200000 PRECEDING AND CURRENT ROW"),
          term("select", "id", "val", "name", "pTime", "SUM(val) AS w0$o0")
        ),
        term("select", "id", "name", "w0$o0 AS valSum"),
        term("where", ">(w0$o0, 100)")
      )
    util.verifyTable(t, expected)
  }
}
class TestRowtimeSource(timeField: String)
extends StreamTableSource[Row] with DefinedRowtimeAttribute {
override def getDataStream(execEnv: StreamExecutionEnvironment): DataStream[Row] = ???
override def getRowtimeAttribute: String = timeField
override def getReturnType: TypeInformation[Row] = {
new RowTypeInfo(
Array(Types.INT, Types.LONG, Types.STRING).asInstanceOf[Array[TypeInformation[_]]],
Array("id", "val", "name"))
}
}
class TestProctimeSource(timeField: String)
extends StreamTableSource[Row] with DefinedProctimeAttribute {
override def getDataStream(execEnv: StreamExecutionEnvironment): DataStream[Row] = ???
override def getProctimeAttribute: String = timeField
override def getReturnType: TypeInformation[Row] = {
new RowTypeInfo(
Array(Types.INT, Types.LONG, Types.STRING).asInstanceOf[Array[TypeInformation[_]]],
Array("id", "val", "name"))
}
}
| zohar-mizrahi/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/table/TableSourceTest.scala | Scala | apache-2.0 | 5,447 |
package im.actor.server.activation.internal
import akka.actor.ActorSystem
import akka.http.scaladsl.util.FastFuture
import cats.data.{ Xor, XorT }
import im.actor.api.rpc.messaging.ApiTextMessage
import im.actor.api.rpc.peers.{ ApiPeer, ApiPeerType }
import im.actor.concurrent.FutureResult
import im.actor.server.acl.ACLUtils
import im.actor.server.activation.common._
import im.actor.server.db.DbExtension
import im.actor.server.dialog.DialogExtension
import im.actor.server.model.{ AuthEmailTransaction, AuthPhoneTransaction }
import im.actor.server.db.ActorPostgresDriver.api._
import im.actor.server.persist.auth.AuthTransactionRepo
import im.actor.server.persist.presences.UserPresenceRepo
import im.actor.server.persist.{ AuthCodeRepo, UserEmailRepo, UserPhoneRepo }
import scala.concurrent.Future
/** Activation provider that delivers the auth code as an in-app message: if the user
  * already exists and was recently online, the code is sent to them as a private
  * text message from a configured sender user.
  */
private[activation] final class InternalCodeProvider(system: ActorSystem)
  extends ActivationProvider
  with CommonAuthCodes
  with FutureResult[String] {
  private val config = InternalActivationConfig.load.getOrElse(throw new RuntimeException("Failed to load InternalActivationConfig"))
  // Window (ms) within which a user's last-seen time counts as "recently online".
  private val onlineTimeWindow = config.onlineWindow.toMillis
  protected val activationConfig: ActivationConfig = ActivationConfig.load.getOrElse(throw new RuntimeException("Failed to load activation config"))
  protected val db = DbExtension(system).db
  protected implicit val ec = system.dispatcher
  /** Sends `code` to the user behind the auth transaction `txHash`.
    *
    * Delivery is best-effort: any step failing in the XorT chain (unknown user,
    * no presence, not recently online) is logged and still folded to Xor.right,
    * so activation is never blocked by an undeliverable in-app message. Only a
    * RuntimeException escaping the chain is surfaced as a SendFailure.
    */
  override def send(txHash: String, code: Code): Future[Xor[CodeFailure, Unit]] = {
    val response = (for {
      userId ← findUserId(txHash)
      presence ← fromFutureOption("No presence found for user")(db.run(UserPresenceRepo.find(userId)))
      lastSeen ← fromOption("No last seen date for user presence")(presence.lastSeenAt)
      _ ← fromFuture(
        if (wasOnlineRecently(lastSeen.getMillis))
          sendCode(userId, code.code)
        else FastFuture.successful(())
      )
    } yield ()).fold(
      failure ⇒ {
        system.log.debug("Failed to send message via internal code provider: {}", failure)
        Xor.right[CodeFailure, Unit](())
      },
      success ⇒ Xor.right[CodeFailure, Unit](())
    ) recover { case e: RuntimeException ⇒ Xor.left(SendFailure(e.toString)) }
    for {
      resp ← response
      _ ← createAuthCodeIfNeeded(resp, txHash, code.code)
    } yield resp
  }
  // we just validate code here, don't expire it
  /** Checks `code` against the stored code for `txHash`: expired-by-time or
    * too-many-attempts yield ExpiredCode, a mismatch yields InvalidCode, an
    * unknown hash yields InvalidHash. */
  override def validate(txHash: String, code: String): Future[ValidationResponse] = {
    val action = for {
      optCode ← AuthCodeRepo.findByTransactionHash(txHash)
      result = optCode map {
        case s if isExpired(s, activationConfig.expiration.toMillis) ⇒ ExpiredCode
        case s if s.code != code ⇒
          // This attempt counts: reaching the attempt limit expires the code.
          if (s.attempts + 1 >= activationConfig.attempts) ExpiredCode else InvalidCode
        case _ ⇒ Validated
      } getOrElse InvalidHash
    } yield result
    db.run(action)
  }
  /** Removes the stored auth code for the given transaction. */
  override def cleanup(txHash: String): Future[Unit] = deleteAuthCode(txHash)
  /** Sends the code as a private text message, substituting it for the
    * `$$CODE$$` placeholder in the configured message template. */
  private def sendCode(userId: Int, code: String): Future[Unit] = {
    val messageText = config.messageTemplate.replace("$$CODE$$", code)
    val userPeer = ApiPeer(ApiPeerType.Private, userId)
    val message = ApiTextMessage(messageText, Vector.empty, None)
    DialogExtension(system).sendMessageInternal(
      peer = userPeer,
      senderUserId = config.senderUserId,
      randomId = ACLUtils.randomLong(),
      message = message
    ) map { _ ⇒ system.log.debug("Successfully sent activation code to user: {}", userId) }
  }
  // True when the last-seen timestamp falls within the configured online window.
  private def wasOnlineRecently(lastSeenMillis: Long): Boolean =
    (lastSeenMillis + onlineTimeWindow) > System.currentTimeMillis
  /** Resolves the user id owning the phone/email of the auth transaction's child
    * record; fails (left) for unknown transactions or not-yet-registered users. */
  private def findUserId(txHash: String): XorT[Future, String, Int] =
    for {
      tx ← fromFutureOption("No auth transaction found")(db.run(AuthTransactionRepo.findChildren(txHash)))
      userId ← fromFutureOption("User does not exist(possibly it is new user)")(db.run(tx match {
        case phone: AuthPhoneTransaction ⇒ UserPhoneRepo.findByPhoneNumber(phone.phoneNumber).headOption map (_.map(_.userId))
        case email: AuthEmailTransaction ⇒ UserEmailRepo.find(email.email) map (_.map(_.userId))
        case _ ⇒ DBIO.successful(None)
      }))
    } yield userId
}
| EaglesoftZJ/actor-platform | actor-server/actor-activation/src/main/scala/im/actor/server/activation/internal/InternalCodeProvider.scala | Scala | agpl-3.0 | 4,262 |
package tu.model.knowledge.narrative
import tu.model.knowledge.{Resource, Probability, KnowledgeURI}
/**
* Narrative is the sequence of Resources.
* @author toschev alex, talanov max
* Date: 03.05.12
* Time: 12:22
*/
/** An ordered sequence of resources that is itself addressable as a [[Resource]]. */
class Narrative[Type <: Resource](_resources: List[Type], _uri: KnowledgeURI, _probability: Probability = new Probability())
  extends Resource(_uri, _probability) {
  /** Auxiliary constructor using a default probability. */
  def this(_resources: List[Type], _uri: KnowledgeURI) {
    this(_resources, _uri, new Probability())
  }
  /** The resources making up this narrative, in order. */
  def resources: List[Type] = _resources
  /** Number of resources in this narrative. */
  def size: Int = _resources.size
}
}
/** Factory methods for [[Narrative]]. */
object Narrative {
  /** Builds an untyped narrative; same as `apply` specialized to [[Resource]]. */
  def applyResource(name: String, resources: List[Resource]): Narrative[Resource] =
    apply(name, resources)
  /** Builds a narrative over `resources`, keyed by a URI derived from `name`. */
  def apply[Type <: Resource](name: String, resources: List[Type]): Narrative[Type] =
    new Narrative(resources, KnowledgeURI(name), new Probability())
}
| tu-team/2 | model.knowledge/src/main/scala/tu/model/knowledge/narrative/Narrative.scala | Scala | gpl-3.0 | 928 |
package test_data.v23
import scala.xml.Elem
/** Test-data extractor: pulls the "About Employment" answers out of a DWP claim XML
  * document and flattens them into display strings (question label + answer pairs).
  */
case class SectionAboutEmployment(xml: Elem) {
  // Root of each job's details within the claim's Employment section.
  val rootPathJobDetails = xml \\\\ "DWPCATransaction" \\\\ "DWPCAClaim" \\\\ "Employment" \\\\ "JobDetails"
  // Employer address lines joined with spaces, empty lines dropped.
  val address = (rootPathJobDetails \\\\ "Employer" \\\\ "Address" \\\\ "Answer" \\\\"Line").map(x => x.text).filterNot(x => x.isEmpty).mkString(" ")
  val postCode = rootPathJobDetails \\\\ "Employer" \\\\ "Address" \\\\ "Answer" \\\\ "PostCode"
  // One string per question/answer, in presentation order: employer details,
  // then pay details, then pension/expenses — flattened across all jobs.
  val employmentDetails: Seq[String] = {
    (rootPathJobDetails
      map (y =>
      (y \\\\ "Employer").
        map(x => {
        Seq((x \\\\ "CurrentlyEmployed" \\ "QuestionLabel").text+" "+(x \\\\ "CurrentlyEmployed" \\ "Answer").text,
          (x \\\\ "Name" \\\\ "QuestionLabel").text+ " "+(x \\\\ "Name" \\\\ "Answer").text,
          (x \\\\ "DidJobStartBeforeClaimDate" \\\\ "QuestionLabel").text+ " "+(x \\\\ "DidJobStartBeforeClaimDate" \\\\ "Answer").text,
          (x \\\\ "DateJobStarted" \\\\ "QuestionLabel").text+" "+(x \\\\ "DateJobStarted" \\\\ "Answer").text,
          (x \\\\ "DateJobEnded" \\\\ "QuestionLabel").text+" "+(x \\\\ "DateJobEnded" \\\\ "Answer").text,
          (x \\\\ "P45LeavingDate" \\\\ "QuestionLabel").text+" "+(x \\\\ "P45LeavingDate" \\\\ "Answer").text,
          "Employer's contact details",
          (x \\\\ "Address" \\\\ "QuestionLabel").text+" "+(x \\\\ "Address" \\\\ "Answer" \\\\"Line").map(x => x.text).filterNot(x => x.isEmpty).mkString(" "),
          (x \\\\ "Address" \\\\ "Answer" \\\\ "PostCode").text,
          (x \\\\ "EmployersPhoneNumber" \\\\ "QuestionLabel").text+" "+(x \\\\ "EmployersPhoneNumber" \\\\ "Answer").text,
          (x \\\\ "JobType" \\\\ "QuestionLabel").text+" "+(x \\\\ "JobType" \\\\ "Answer").text
        )
      }).flatten ++
        (y \\\\ "Pay").
          map(x => {
          Seq((x \\\\ "WeeklyHoursWorked" \\\\ "QuestionLabel").text+" "+(x \\\\ "WeeklyHoursWorked" \\\\ "Answer").text,
            "Your last wage",
            (x \\\\ "DateLastPaid" \\\\ "QuestionLabel").text+" "+(x \\\\ "DateLastPaid" \\\\ "Answer").text,
            (x \\\\ "GrossPayment" \\\\ "QuestionLabel").text+" "+(x \\\\ "GrossPayment" \\\\ "Answer" \\\\ "Amount").text,
            (x \\\\ "IncludedInWage" \\\\ "QuestionLabel").text+" "+(x \\\\ "IncludedInWage" \\\\ "Answer").text,
            (x \\\\ "ConstantEarnings" \\\\ "QuestionLabel").text+" "+(x \\\\ "ConstantEarnings" \\\\ "Answer").text,
            "Additional details on your last wage",
            (x \\\\ "PayFrequency" \\\\ "QuestionLabel").text+" "+(x \\\\ "PayFrequency" \\\\ "Answer").text,
            (x \\\\ "PayFrequency" \\\\ "Other").text,
            (x \\\\ "UsualPayDay" \\\\ "QuestionLabel").text+" "+(x \\\\ "UsualPayDay" \\\\ "Answer").text
          )
        }).flatten ++
        (y).map(x => {
          Seq((x \\\\ "OweMoney" \\\\ "QuestionLabel").text+" "+(x \\\\ "OweMoney" \\\\ "Answer").text,
            "Pension And Expenses",
            (x \\\\ "PaidForPension" \\\\ "QuestionLabel").text+" "+(x \\\\ "PaidForPension" \\\\ "Answer").text,
            (x \\\\ "PensionExpenses" \\\\ "Expense" \\\\ "QuestionLabel").text+" "+(x \\\\ "PensionExpenses" \\\\ "Expense" \\\\ "Answer" ).text,
            (x \\\\ "PaidForThingsToDoJob" \\\\ "QuestionLabel").text+" "+(x \\\\ "PaidForThingsToDoJob" \\\\ "Answer").text,
            (x \\\\ "ExpensesToDoJob" \\\\ "Expense" \\\\ "QuestionLabel").text+" "+(x \\\\ "ExpensesToDoJob" \\\\ "Expense" \\\\ "Answer" ).text,
            (x \\\\ "PaidForJobExpenses" \\\\ "QuestionLabel").text+" "+(x \\\\ "PaidForJobExpenses" \\\\ "Answer").text,
            (x \\\\ "JobExpenses" \\\\ "Expense" \\\\ "QuestionLabel").text+" "+(x \\\\ "JobExpenses" \\\\ "Expense" \\\\ "Answer").text
          )
        }).flatten
      )
    ).flatten
  }
}
| Department-for-Work-and-Pensions/RenderingService | test/test_data/v23/SectionAboutEmployment.scala | Scala | mit | 3,524 |
/*
* Copyright (C) 2012 Pavel Fatin <http://pavelfatin.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.pavelfatin.fs
/** A base random access data interface.
*
* Data is an indexed sequence of bytes that has a specific length and can be randomly read or written.
*
* `Data` implementation may allow to increase the data length (i.e. to extend the data)
* by writing additional bytes at `length` position.
*
* @note Non-sequential access time may be not constant (depending on implementation).
*
* @define entity data
*
* @see [[com.pavelfatin.fs.File]]
*/
trait Data {
  /** Returns the length of this $entity (in bytes). */
  def length: Long
  /** Reads `length` bytes from this $entity at `position` into `buffer` at `offset`.
   *
   * @param position the start position in this $entity
   * @param length the number of bytes to read
   * @param buffer the buffer into which the data is read
   * @param offset the start offset in the buffer
   * @throws IllegalArgumentException if (position, length, offset) combination are invalid or conflicts
   *                                  either with this $entity length or with the buffer length
   * @throws java.io.IOException if an I/O error occurs
   */
  def read(position: Long, length: Int, buffer: Array[Byte], offset: Int = 0): Unit
  /** Writes `length` bytes of `buffer` bytes at `offset` into this $entity at `position`.
   *
   * `Data` implementations may allow to extend this $entity by writing
   * additional bytes at the `length` position.
   *
   * @note Writing at positions greater than this $entity `length` is always prohibited.
   *
   * @param position the start position in this $entity
   * @param length the number of bytes to write
   * @param buffer the data to be written
   * @param offset the start offset in the data
   * @throws IllegalArgumentException if (position, length, offset) combination are invalid or conflicts
   *                                  either with this $entity length or with the buffer length
   * @throws java.io.IOException if an I/O error occurs
   */
  def write(position: Long, length: Int, buffer: Array[Byte], offset: Int = 0): Unit
  /** Returns a new `Data` that is a projection on this $entity.
   *
   * @note The projected data cannot be extended even if this $entity is extendable.
   *
   * @param position the projection position
   * @param length the projection length
   * @throws IllegalArgumentException if (position, length) combination are invalid or conflicts
   *                                  with this $entity length
   */
  def projection(position: Long, length: Long): Data
}
| pavelfatin/toyfs | src/main/scala/com/pavelfatin/fs/Data.scala | Scala | gpl-3.0 | 3,288 |
/*
This file is part of Static Web Gallery (SWG).
MathMaster is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MathMaster is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SWG. If not, see <http://www.gnu.org/licenses/>.
*/
package eu.lateral.swg.db
import org.squeryl.KeyedEntity
import org.squeryl.PrimitiveTypeMode._
import org.squeryl.Schema
import org.squeryl.annotations.Column
import org.squeryl.dsl.OneToMany
/** Row of the translations table: one named translation string belonging to a
  * project and one of its languages. */
class Translation(
  @Column("id") val id: Long,
  @Column("project_id") val projectId: Long,
  @Column("project_language_id") val projectLanguageId: Long,
  @Column("translation_name") val translationName: String,
  @Column("translation") val translation: String) extends KeyedEntity[Long] {
  // No-arg constructor with empty defaults — presumably required by Squeryl to
  // materialize rows; confirm against the ORM's requirements.
  def this() = this(0, 0, 0, "", "")
}
/** Read-only variant of [[Translation]] that additionally carries the language's
  * code and name — presumably backed by a database view joining the languages
  * table; confirm against the schema. */
class TranslationView(
  @Column("id") val id: Long,
  @Column("project_id") val projectId: Long,
  @Column("project_language_id") val projectLanguageId: Long,
  @Column("translation_name") val translationName: String,
  @Column("translation") val translation: String,
  @Column("language_code") val languageCode: String,
  @Column("language_name") val languageName: String) extends KeyedEntity[Long] {
  // No-arg constructor with empty defaults — presumably required by Squeryl to
  // materialize rows; confirm against the ORM's requirements.
  def this() = this(0, 0, 0, "", "", "", "")
}
| orest-d/swg | swg/src/main/scala/eu/lateral/swg/db/Translations.scala | Scala | gpl-3.0 | 1,642 |
package org.sisioh.aws4s.eb.model
import java.util.Date
import com.amazonaws.services.elasticbeanstalk.model._
import org.sisioh.aws4s.PimpedType
/** Factory for empty [[CreateEnvironmentResult]] instances. */
object CreateEnvironmentResultFactory {
  def create(): CreateEnvironmentResult = new CreateEnvironmentResult()
}
/** Value-class wrapper adding Option-based accessors to [[CreateEnvironmentResult]].
  *
  * For each property `X` of the underlying AWS result object, three members are provided:
  *  - `xOpt` reads the property, mapping a Java `null` to `None`;
  *  - `xOpt_=` writes it, mapping `None` back to `null`;
  *  - `withXOpt` delegates to the builder-style `withX`, returning the underlying result.
  */
class RichCreateEnvironmentResult(val underlying: CreateEnvironmentResult)
    extends AnyVal
    with PimpedType[CreateEnvironmentResult] {
  def environmentNameOpt: Option[String] = Option(underlying.getEnvironmentName)
  def environmentNameOpt_=(value: Option[String]): Unit =
    underlying.setEnvironmentName(value.orNull)
  def withEnvironmentNameOpt(value: Option[String]): CreateEnvironmentResult =
    underlying.withEnvironmentName(value.orNull)
  // ---
  def environmentIdOpt: Option[String] = Option(underlying.getEnvironmentId)
  def environmentIdOpt_=(value: Option[String]): Unit =
    underlying.setEnvironmentId(value.orNull)
  def withEnvironmentIdOpt(value: Option[String]): CreateEnvironmentResult =
    underlying.withEnvironmentId(value.orNull)
  // ---
  def applicationNameOpt: Option[String] = Option(underlying.getApplicationName)
  def applicationNameOpt_=(value: Option[String]): Unit =
    underlying.setApplicationName(value.orNull)
  def withApplicationNameOpt(value: Option[String]): CreateEnvironmentResult =
    underlying.withApplicationName(value.orNull)
  // ---
  def versionLabelOpt: Option[String] = Option(underlying.getVersionLabel)
  def versionLabelOpt_=(value: Option[String]): Unit =
    underlying.setVersionLabel(value.orNull)
  def withVersionLabelOpt(value: Option[String]): CreateEnvironmentResult =
    underlying.withVersionLabel(value.orNull)
  // ---
  def solutionStackNameOpt: Option[String] =
    Option(underlying.getSolutionStackName)
  def solutionStackNameOpt_=(value: Option[String]): Unit =
    underlying.setSolutionStackName(value.orNull)
  def withSolutionStackNameOpt(value: Option[String]): CreateEnvironmentResult =
    underlying.withSolutionStackName(value.orNull)
  // ---
  def templateNameOpt: Option[String] = Option(underlying.getTemplateName)
  def templateNameOpt_=(value: Option[String]): Unit =
    underlying.setTemplateName(value.orNull)
  def withTemplateNameOpt(value: Option[String]): CreateEnvironmentResult =
    underlying.withTemplateName(value.orNull)
  // ---
  def descriptionOpt: Option[String] = Option(underlying.getDescription)
  def descriptionOpt_=(value: Option[String]): Unit =
    underlying.setDescription(value.orNull)
  def withDescriptionOpt(value: Option[String]): CreateEnvironmentResult =
    underlying.withDescription(value.orNull)
  // ---
  def endpointURLOpt: Option[String] = Option(underlying.getEndpointURL)
  def endpointURLOpt_=(value: Option[String]): Unit =
    underlying.setEndpointURL(value.orNull)
  def withEndpointURLOpt(value: Option[String]): CreateEnvironmentResult =
    underlying.withEndpointURL(value.orNull)
  // ---
  def cNAMEOpt: Option[String] = Option(underlying.getCNAME)
  def cNAMEOpt_=(value: Option[String]): Unit =
    underlying.setCNAME(value.orNull)
  def withCNAMEOpt(value: Option[String]): CreateEnvironmentResult =
    underlying.withCNAME(value.orNull)
  // ---
  def dateCreatedOpt: Option[Date] = Option(underlying.getDateCreated)
  def dateCreatedOpt_=(value: Option[Date]): Unit =
    underlying.setDateCreated(value.orNull)
  def withDateCreatedOpt(value: Option[Date]): CreateEnvironmentResult =
    underlying.withDateCreated(value.orNull)
  // ---
  def dateUpdatedOpt: Option[Date] = Option(underlying.getDateUpdated)
  def dateUpdatedOpt_=(value: Option[Date]): Unit =
    underlying.setDateUpdated(value.orNull)
  def withDateUpdatedOpt(value: Option[Date]): CreateEnvironmentResult =
    underlying.withDateUpdated(value.orNull)
  // ---
  def statusOpt: Option[String] = Option(underlying.getStatus)
  def statusOpt_=(value: Option[String]): Unit =
    underlying.setStatus(value.orNull)
  def withStatusOpt(value: Option[String]): CreateEnvironmentResult =
    underlying.withStatus(value.orNull)
  // ---
  def healthOpt: Option[String] = Option(underlying.getHealth)
  def healthOpt_=(value: Option[String]): Unit =
    underlying.setHealth(value.orNull)
  def withHealthOpt(value: Option[String]): CreateEnvironmentResult =
    underlying.withHealth(value.orNull)
  // ---
  def resourcesOpt: Option[EnvironmentResourcesDescription] =
    Option(underlying.getResources)
  def resourcesOpt_=(value: Option[EnvironmentResourcesDescription]): Unit =
    underlying.setResources(value.orNull)
  def withResourcesOpt(value: Option[EnvironmentResourcesDescription]): CreateEnvironmentResult =
    underlying.withResources(value.orNull)
  // ---
  def tierOpt: Option[EnvironmentTier] = Option(underlying.getTier)
  def tierOpt_=(value: Option[EnvironmentTier]): Unit =
    underlying.setTier(value.orNull)
  def withTierOpt(value: Option[EnvironmentTier]): CreateEnvironmentResult =
    underlying.withTier(value.orNull)
}
| sisioh/aws4s | aws4s-eb/src/main/scala/org/sisioh/aws4s/eb/model/RichCreateEnvironmentResult.scala | Scala | mit | 5,013 |
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package swave.core.hash
import scodec.bits.ByteVector
import swave.compat.scodec._
import swave.core._
// Verifies the md5 hashing stage: the UTF-8 bytes of "swave rocks!" must hash to
// the expected digest.
class HashTransformationSpec extends SwaveSpec {
  import swave.core.text._
  implicit val env = StreamEnv()
  "HashTransformations" - {
    "md5" in {
      Spout
        .one("swave rocks!")
        .utf8Encode
        .md5
        // .value.get.get unwraps Option[Try[...]] of the drained Future's result —
        // assumes the small stream completes synchronously here; confirm if flaky.
        .drainToHead()
        .value
        .get
        .get shouldEqual ByteVector.fromHex("e1b2b603f9cca4a909c07d42a5788fe3").get
    }
  }
}
| sirthias/swave | core-tests/src/test/scala/swave/core/hash/HashTransformationSpec.scala | Scala | mpl-2.0 | 691 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import java.util.Collections
import java.util.concurrent._
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import scala.util.control.NonFatal
import org.apache.hadoop.yarn.api.records._
import org.apache.hadoop.yarn.client.api.AMRMClient
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.deploy.yarn.YarnSparkHadoopUtil._
import org.apache.spark.deploy.yarn.config._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.rpc.{RpcCallContext, RpcEndpointRef}
import org.apache.spark.scheduler.{ExecutorExited, ExecutorLossReason}
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.RemoveExecutor
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.RetrieveLastAllocatedExecutorId
import org.apache.spark.scheduler.cluster.SchedulerBackendUtils
import org.apache.spark.util.{Clock, SystemClock, ThreadUtils}
/**
* YarnAllocator is charged with requesting containers from the YARN ResourceManager and deciding
* what to do with containers when YARN fulfills these requests.
*
* This class makes use of YARN's AMRMClient APIs. We interact with the AMRMClient in three ways:
* * Making our resource needs known, which updates local bookkeeping about containers requested.
* * Calling "allocate", which syncs our local container requests with the RM, and returns any
* containers that YARN has granted to us. This also functions as a heartbeat.
* * Processing the containers granted to us to possibly launch executors inside of them.
*
* The public methods of this class are thread-safe. All methods that mutate state are
* synchronized.
*/
private[yarn] class YarnAllocator(
    driverUrl: String,
    driverRef: RpcEndpointRef,
    conf: YarnConfiguration,
    sparkConf: SparkConf,
    amClient: AMRMClient[ContainerRequest],
    appAttemptId: ApplicationAttemptId,
    securityMgr: SecurityManager,
    localResources: Map[String, LocalResource],
    resolver: SparkRackResolver,
    clock: Clock = new SystemClock)
  extends Logging {
  import YarnAllocator._
  // Visible for testing.
  val allocatedHostToContainersMap = new HashMap[String, collection.mutable.Set[ContainerId]]
  val allocatedContainerToHostMap = new HashMap[ContainerId, String]
  // Containers that we no longer care about. We've either already told the RM to release them or
  // will on the next heartbeat. Containers get removed from this map after the RM tells us they've
  // completed.
  private val releasedContainers = Collections.newSetFromMap[ContainerId](
    new ConcurrentHashMap[ContainerId, java.lang.Boolean])
  // Executor IDs of currently running executors. Concurrent set: it is mutated both from
  // synchronized allocator methods and from container-launch threads (see updateInternalState).
  private val runningExecutors = Collections.newSetFromMap[String](
    new ConcurrentHashMap[String, java.lang.Boolean]())
  // Count of executors whose containers were granted but whose launch has not yet completed.
  private val numExecutorsStarting = new AtomicInteger(0)
  /**
   * Used to generate a unique ID per executor
   *
   * Init `executorIdCounter`. when AM restart, `executorIdCounter` will reset to 0. Then
   * the id of new executor will start from 1, this will conflict with the executor has
   * already created before. So, we should initialize the `executorIdCounter` by getting
   * the max executorId from driver.
   *
   * And this situation of executorId conflict is just in yarn client mode, so this is an issue
   * in yarn client mode. For more details, can check in jira.
   *
   * @see SPARK-12864
   */
  private var executorIdCounter: Int =
    driverRef.askSync[Int](RetrieveLastAllocatedExecutorId)
  // Tracks executor failures over time; backs getNumExecutorsFailed.
  private[spark] val failureTracker = new FailureTracker(sparkConf, clock)
  private val allocatorBlacklistTracker =
    new YarnAllocatorBlacklistTracker(sparkConf, amClient, failureTracker)
  // Desired total number of executors; updated via requestTotalExecutorsWithPreferredLocalities.
  @volatile private var targetNumExecutors =
    SchedulerBackendUtils.getInitialTargetExecutorNumber(sparkConf)
  // Executor loss reason requests that are pending - maps from executor ID for inquiry to a
  // list of requesters that should be responded to once we find out why the given executor
  // was lost.
  private val pendingLossReasonRequests = new HashMap[String, mutable.Buffer[RpcCallContext]]
  // Maintain loss reasons for already released executors, it will be added when executor loss
  // reason is got from AM-RM call, and be removed after querying this loss reason.
  private val releasedExecutorLossReasons = new HashMap[String, ExecutorLossReason]
  // Keep track of which container is running which executor to remove the executors later
  // Visible for testing.
  private[yarn] val executorIdToContainer = new HashMap[String, Container]
  private var numUnexpectedContainerRelease = 0L
  private val containerIdToExecutorId = new HashMap[ContainerId, String]
  // Executor memory in MiB.
  protected val executorMemory = sparkConf.get(EXECUTOR_MEMORY).toInt
  // Additional memory overhead.
  protected val memoryOverhead: Int = sparkConf.get(EXECUTOR_MEMORY_OVERHEAD).getOrElse(
    math.max((MEMORY_OVERHEAD_FACTOR * executorMemory).toInt, MEMORY_OVERHEAD_MIN)).toInt
  // Extra memory (MiB) reserved for PySpark workers when running a Python application.
  protected val pysparkWorkerMemory: Int = if (sparkConf.get(IS_PYTHON_APP)) {
    sparkConf.get(PYSPARK_EXECUTOR_MEMORY).map(_.toInt).getOrElse(0)
  } else {
    0
  }
  // Number of cores per executor.
  protected val executorCores = sparkConf.get(EXECUTOR_CORES)
  // User-configured custom YARN resource types (e.g. GPUs), keyed by resource name.
  private val executorResourceRequests =
    sparkConf.getAllWithPrefix(config.YARN_EXECUTOR_RESOURCE_TYPES_PREFIX).toMap
  // Resource capability requested for each executor
  private[yarn] val resource: Resource = {
    val resource = Resource.newInstance(
      executorMemory + memoryOverhead + pysparkWorkerMemory, executorCores)
    ResourceRequestHelper.setResourceRequests(executorResourceRequests, resource)
    logDebug(s"Created resource capability: $resource")
    resource
  }
  // Thread pool used to launch ExecutorRunnables without blocking the allocator thread.
  private val launcherPool = ThreadUtils.newDaemonCachedThreadPool(
    "ContainerLauncher", sparkConf.get(CONTAINER_LAUNCH_MAX_THREADS))
  // For testing
  private val launchContainers = sparkConf.getBoolean("spark.yarn.launchContainers", true)
  private val labelExpression = sparkConf.get(EXECUTOR_NODE_LABEL_EXPRESSION)
  // A map to store preferred hostname and possible task numbers running on it.
  private var hostToLocalTaskCounts: Map[String, Int] = Map.empty
  // Number of tasks that have locality preferences in active stages
  private[yarn] var numLocalityAwareTasks: Int = 0
  // A container placement strategy based on pending tasks' locality preference
  private[yarn] val containerPlacementStrategy =
    new LocalityPreferredContainerPlacementStrategy(sparkConf, conf, resource, resolver)
  /** Number of executors currently running. */
  def getNumExecutorsRunning: Int = runningExecutors.size()
  /** Number of containers released (or pending release) that the RM hasn't reported completed. */
  def getNumReleasedContainers: Int = releasedContainers.size()
  /** Number of executors that have failed so far, as seen by the failure tracker. */
  def getNumExecutorsFailed: Int = failureTracker.numFailedExecutors
  /** Whether every node in the cluster is currently blacklisted. */
  def isAllNodeBlacklisted: Boolean = allocatorBlacklistTracker.isAllNodeBlacklisted
  /**
   * A sequence of pending container requests that have not yet been fulfilled.
   */
  def getPendingAllocate: Seq[ContainerRequest] = getPendingAtLocation(ANY_HOST)
  /** Number of container requests currently pending with the RM. */
  def numContainersPendingAllocate: Int = synchronized {
    getPendingAllocate.size
  }
  /**
   * A sequence of pending container requests at the given location that have not yet been
   * fulfilled.
   */
  private def getPendingAtLocation(location: String): Seq[ContainerRequest] = {
    amClient.getMatchingRequests(RM_REQUEST_PRIORITY, location, resource).asScala
      .flatMap(_.asScala)
      .toSeq
  }
  /**
   * Request as many executors from the ResourceManager as needed to reach the desired total. If
   * the requested total is smaller than the current number of running executors, no executors will
   * be killed.
   * @param requestedTotal total number of containers requested
   * @param localityAwareTasks number of locality aware tasks to be used as container placement hint
   * @param hostToLocalTaskCount a map of preferred hostname to possible task counts to be used as
   *                             container placement hint.
   * @param nodeBlacklist blacklisted nodes, which is passed in to avoid allocating new containers
   *                      on them. It will be used to update the application master's blacklist.
   * @return Whether the new requested total is different than the old value.
   */
  def requestTotalExecutorsWithPreferredLocalities(
      requestedTotal: Int,
      localityAwareTasks: Int,
      hostToLocalTaskCount: Map[String, Int],
      nodeBlacklist: Set[String]): Boolean = synchronized {
    this.numLocalityAwareTasks = localityAwareTasks
    this.hostToLocalTaskCounts = hostToLocalTaskCount
    if (requestedTotal != targetNumExecutors) {
      logInfo(s"Driver requested a total number of $requestedTotal executor(s).")
      targetNumExecutors = requestedTotal
      allocatorBlacklistTracker.setSchedulerBlacklistedNodes(nodeBlacklist)
      true
    } else {
      false
    }
  }
  /**
   * Request that the ResourceManager release the container running the specified executor.
   */
  def killExecutor(executorId: String): Unit = synchronized {
    executorIdToContainer.get(executorId) match {
      case Some(container) if !releasedContainers.contains(container.getId) =>
        internalReleaseContainer(container)
        runningExecutors.remove(executorId)
      case _ => logWarning(s"Attempted to kill unknown executor $executorId!")
    }
  }
  /**
   * Request resources such that, if YARN gives us all we ask for, we'll have a number of containers
   * equal to maxExecutors.
   *
   * Deal with any containers YARN has granted to us by possibly launching executors in them.
   *
   * This must be synchronized because variables read in this method are mutated by other methods.
   */
  def allocateResources(): Unit = synchronized {
    updateResourceRequests()
    val progressIndicator = 0.1f
    // Poll the ResourceManager. This doubles as a heartbeat if there are no pending container
    // requests.
    val allocateResponse = amClient.allocate(progressIndicator)
    val allocatedContainers = allocateResponse.getAllocatedContainers()
    allocatorBlacklistTracker.setNumClusterNodes(allocateResponse.getNumClusterNodes)
    if (allocatedContainers.size > 0) {
      logDebug(("Allocated containers: %d. Current executor count: %d. " +
        "Launching executor count: %d. Cluster resources: %s.")
        .format(
          allocatedContainers.size,
          runningExecutors.size,
          numExecutorsStarting.get,
          allocateResponse.getAvailableResources))
      handleAllocatedContainers(allocatedContainers.asScala)
    }
    val completedContainers = allocateResponse.getCompletedContainersStatuses()
    if (completedContainers.size > 0) {
      logDebug("Completed %d containers".format(completedContainers.size))
      processCompletedContainers(completedContainers.asScala)
      logDebug("Finished processing %d completed containers. Current running executor count: %d."
        .format(completedContainers.size, runningExecutors.size))
    }
  }
  /**
   * Update the set of container requests that we will sync with the RM based on the number of
   * executors we have currently running and our target number of executors.
   *
   * Visible for testing.
   */
  def updateResourceRequests(): Unit = {
    val pendingAllocate = getPendingAllocate
    val numPendingAllocate = pendingAllocate.size
    // missing > 0 means we need more containers; missing < 0 means we have over-requested.
    val missing = targetNumExecutors - numPendingAllocate -
      numExecutorsStarting.get - runningExecutors.size
    logDebug(s"Updating resource requests, target: $targetNumExecutors, " +
      s"pending: $numPendingAllocate, running: ${runningExecutors.size}, " +
      s"executorsStarting: ${numExecutorsStarting.get}")
    if (missing > 0) {
      if (log.isInfoEnabled()) {
        var requestContainerMessage = s"Will request $missing executor container(s), each with " +
          s"${resource.getVirtualCores} core(s) and " +
          s"${resource.getMemory} MB memory (including $memoryOverhead MB of overhead)"
        if (ResourceRequestHelper.isYarnResourceTypesAvailable() &&
          executorResourceRequests.nonEmpty) {
          requestContainerMessage ++= s" with custom resources: " + resource.toString
        }
        logInfo(requestContainerMessage)
      }
      // Split the pending container request into three groups: locality matched list, locality
      // unmatched list and non-locality list. Take the locality matched container request into
      // consideration of container placement, treat as allocated containers.
      // For locality unmatched and locality free container requests, cancel these container
      // requests, since required locality preference has been changed, recalculating using
      // container placement strategy.
      val (localRequests, staleRequests, anyHostRequests) = splitPendingAllocationsByLocality(
        hostToLocalTaskCounts, pendingAllocate)
      // cancel "stale" requests for locations that are no longer needed
      staleRequests.foreach { stale =>
        amClient.removeContainerRequest(stale)
      }
      val cancelledContainers = staleRequests.size
      if (cancelledContainers > 0) {
        logInfo(s"Canceled $cancelledContainers container request(s) (locality no longer needed)")
      }
      // consider the number of new containers and cancelled stale containers available
      val availableContainers = missing + cancelledContainers
      // to maximize locality, include requests with no locality preference that can be cancelled
      val potentialContainers = availableContainers + anyHostRequests.size
      val containerLocalityPreferences = containerPlacementStrategy.localityOfRequestedContainers(
        potentialContainers, numLocalityAwareTasks, hostToLocalTaskCounts,
        allocatedHostToContainersMap, localRequests)
      val newLocalityRequests = new mutable.ArrayBuffer[ContainerRequest]
      containerLocalityPreferences.foreach {
        case ContainerLocalityPreferences(nodes, racks) if nodes != null =>
          newLocalityRequests += createContainerRequest(resource, nodes, racks)
        case _ =>
      }
      if (availableContainers >= newLocalityRequests.size) {
        // more containers are available than needed for locality, fill in requests for any host
        for (i <- 0 until (availableContainers - newLocalityRequests.size)) {
          newLocalityRequests += createContainerRequest(resource, null, null)
        }
      } else {
        val numToCancel = newLocalityRequests.size - availableContainers
        // cancel some requests without locality preferences to schedule more local containers
        anyHostRequests.slice(0, numToCancel).foreach { nonLocal =>
          amClient.removeContainerRequest(nonLocal)
        }
        if (numToCancel > 0) {
          logInfo(s"Canceled $numToCancel unlocalized container requests to resubmit with locality")
        }
      }
      newLocalityRequests.foreach { request =>
        amClient.addContainerRequest(request)
      }
      if (log.isInfoEnabled()) {
        val (localized, anyHost) = newLocalityRequests.partition(_.getNodes() != null)
        if (anyHost.nonEmpty) {
          logInfo(s"Submitted ${anyHost.size} unlocalized container requests.")
        }
        localized.foreach { request =>
          logInfo(s"Submitted container request for host ${hostStr(request)}.")
        }
      }
    } else if (numPendingAllocate > 0 && missing < 0) {
      // Over-requested: cancel up to -missing of the still-pending (unallocated) requests.
      val numToCancel = math.min(numPendingAllocate, -missing)
      logInfo(s"Canceling requests for $numToCancel executor container(s) to have a new desired " +
        s"total $targetNumExecutors executors.")
      val matchingRequests = amClient.getMatchingRequests(RM_REQUEST_PRIORITY, ANY_HOST, resource)
      if (!matchingRequests.isEmpty) {
        matchingRequests.iterator().next().asScala
          .take(numToCancel).foreach(amClient.removeContainerRequest)
      } else {
        logWarning("Expected to find pending requests, but found none.")
      }
    }
  }
  /** Renders a container request's node list for logging; "Any" when unconstrained. */
  private def hostStr(request: ContainerRequest): String = {
    Option(request.getNodes) match {
      case Some(nodes) => nodes.asScala.mkString(",")
      case None => "Any"
    }
  }
  /**
   * Creates a container request, handling the reflection required to use YARN features that were
   * added in recent versions.
   */
  private def createContainerRequest(
      resource: Resource,
      nodes: Array[String],
      racks: Array[String]): ContainerRequest = {
    new ContainerRequest(resource, nodes, racks, RM_REQUEST_PRIORITY, true, labelExpression.orNull)
  }
  /**
   * Handle containers granted by the RM by launching executors on them.
   *
   * Due to the way the YARN allocation protocol works, certain healthy race conditions can result
   * in YARN granting containers that we no longer need. In this case, we release them.
   *
   * Visible for testing.
   */
  def handleAllocatedContainers(allocatedContainers: Seq[Container]): Unit = {
    val containersToUse = new ArrayBuffer[Container](allocatedContainers.size)
    // Match incoming requests by host
    val remainingAfterHostMatches = new ArrayBuffer[Container]
    for (allocatedContainer <- allocatedContainers) {
      matchContainerToRequest(allocatedContainer, allocatedContainer.getNodeId.getHost,
        containersToUse, remainingAfterHostMatches)
    }
    // Match remaining by rack
    val remainingAfterRackMatches = new ArrayBuffer[Container]
    for (allocatedContainer <- remainingAfterHostMatches) {
      val rack = resolver.resolve(conf, allocatedContainer.getNodeId.getHost)
      matchContainerToRequest(allocatedContainer, rack, containersToUse,
        remainingAfterRackMatches)
    }
    // Assign remaining that are neither node-local nor rack-local
    val remainingAfterOffRackMatches = new ArrayBuffer[Container]
    for (allocatedContainer <- remainingAfterRackMatches) {
      matchContainerToRequest(allocatedContainer, ANY_HOST, containersToUse,
        remainingAfterOffRackMatches)
    }
    if (!remainingAfterOffRackMatches.isEmpty) {
      logDebug(s"Releasing ${remainingAfterOffRackMatches.size} unneeded containers that were " +
        s"allocated to us")
      for (container <- remainingAfterOffRackMatches) {
        internalReleaseContainer(container)
      }
    }
    runAllocatedContainers(containersToUse)
    logInfo("Received %d containers from YARN, launching executors on %d of them."
      .format(allocatedContainers.size, containersToUse.size))
  }
  /**
   * Looks for requests for the given location that match the given container allocation. If it
   * finds one, removes the request so that it won't be submitted again. Places the container into
   * containersToUse or remaining.
   *
   * @param allocatedContainer container that was given to us by YARN
   * @param location resource name, either a node, rack, or *
   * @param containersToUse list of containers that will be used
   * @param remaining list of containers that will not be used
   */
  private def matchContainerToRequest(
      allocatedContainer: Container,
      location: String,
      containersToUse: ArrayBuffer[Container],
      remaining: ArrayBuffer[Container]): Unit = {
    // SPARK-6050: certain Yarn configurations return a virtual core count that doesn't match the
    // request; for example, capacity scheduler + DefaultResourceCalculator. So match on requested
    // memory, but use the asked vcore count for matching, effectively disabling matching on vcore
    // count.
    val matchingResource = Resource.newInstance(allocatedContainer.getResource.getMemory,
      resource.getVirtualCores)
    ResourceRequestHelper.setResourceRequests(executorResourceRequests, matchingResource)
    logDebug(s"Calling amClient.getMatchingRequests with parameters: " +
      s"priority: ${allocatedContainer.getPriority}, " +
      s"location: $location, resource: $matchingResource")
    val matchingRequests = amClient.getMatchingRequests(allocatedContainer.getPriority, location,
      matchingResource)
    // Match the allocation to a request
    if (!matchingRequests.isEmpty) {
      val containerRequest = matchingRequests.get(0).iterator.next
      logDebug(s"Removing container request via AM client: $containerRequest")
      amClient.removeContainerRequest(containerRequest)
      containersToUse += allocatedContainer
    } else {
      remaining += allocatedContainer
    }
  }
  /**
   * Launches executors in the allocated containers.
   */
  private def runAllocatedContainers(containersToUse: ArrayBuffer[Container]): Unit = {
    for (container <- containersToUse) {
      executorIdCounter += 1
      val executorHostname = container.getNodeId.getHost
      val containerId = container.getId
      val executorId = executorIdCounter.toString
      assert(container.getResource.getMemory >= resource.getMemory)
      logInfo(s"Launching container $containerId on host $executorHostname " +
        s"for executor with ID $executorId")
      // Records the successfully launched executor in all the bookkeeping maps.
      // Synchronized because it runs on launcher-pool threads.
      def updateInternalState(): Unit = synchronized {
        runningExecutors.add(executorId)
        numExecutorsStarting.decrementAndGet()
        executorIdToContainer(executorId) = container
        containerIdToExecutorId(container.getId) = executorId
        val containerSet = allocatedHostToContainersMap.getOrElseUpdate(executorHostname,
          new HashSet[ContainerId])
        containerSet += containerId
        allocatedContainerToHostMap.put(containerId, executorHostname)
      }
      if (runningExecutors.size() < targetNumExecutors) {
        numExecutorsStarting.incrementAndGet()
        if (launchContainers) {
          launcherPool.execute(new Runnable {
            override def run(): Unit = {
              try {
                new ExecutorRunnable(
                  Some(container),
                  conf,
                  sparkConf,
                  driverUrl,
                  executorId,
                  executorHostname,
                  executorMemory,
                  executorCores,
                  appAttemptId.getApplicationId.toString,
                  securityMgr,
                  localResources
                ).run()
                updateInternalState()
              } catch {
                case e: Throwable =>
                  numExecutorsStarting.decrementAndGet()
                  if (NonFatal(e)) {
                    logError(s"Failed to launch executor $executorId on container $containerId", e)
                    // Assigned container should be released immediately
                    // to avoid unnecessary resource occupation.
                    amClient.releaseAssignedContainer(containerId)
                  } else {
                    throw e
                  }
              }
            }
          })
        } else {
          // For test only
          updateInternalState()
        }
      } else {
        logInfo(("Skip launching executorRunnable as running executors count: %d " +
          "reached target executors count: %d.").format(
          runningExecutors.size, targetNumExecutors))
      }
    }
  }
  // Visible for testing.
  /**
   * Processes container-completed statuses reported by the RM: updates internal bookkeeping,
   * derives an `ExecutorExited` loss reason for each container, answers any pending
   * loss-reason requests, and notifies the driver about unexpected container releases.
   */
  private[yarn] def processCompletedContainers(completedContainers: Seq[ContainerStatus]): Unit = {
    for (completedContainer <- completedContainers) {
      val containerId = completedContainer.getContainerId
      // remove() returns true if we had previously asked for this container to be released
      val alreadyReleased = releasedContainers.remove(containerId)
      val hostOpt = allocatedContainerToHostMap.get(containerId)
      val onHostStr = hostOpt.map(host => s" on host: $host").getOrElse("")
      val exitReason = if (!alreadyReleased) {
        // Decrement the number of executors running. The next iteration of
        // the ApplicationMaster's reporting thread will take care of allocating.
        containerIdToExecutorId.get(containerId) match {
          case Some(executorId) => runningExecutors.remove(executorId)
          case None => logWarning(s"Cannot find executorId for container: ${containerId.toString}")
        }
        logInfo("Completed container %s%s (state: %s, exit status: %s)".format(
          containerId,
          onHostStr,
          completedContainer.getState,
          completedContainer.getExitStatus))
        // Hadoop 2.2.X added a ContainerExitStatus we should switch to use
        // there are some exit status' we shouldn't necessarily count against us, but for
        // now I think its ok as none of the containers are expected to exit.
        val exitStatus = completedContainer.getExitStatus
        val (exitCausedByApp, containerExitReason) = exitStatus match {
          case ContainerExitStatus.SUCCESS =>
            (false, s"Executor for container $containerId exited because of a YARN event (e.g., " +
              "pre-emption) and not because of an error in the running job.")
          case ContainerExitStatus.PREEMPTED =>
            // Preemption is not the fault of the running tasks, since YARN preempts containers
            // merely to do resource sharing, and tasks that fail due to preempted executors could
            // just as easily finish on any other executor. See SPARK-8167.
            (false, s"Container ${containerId}${onHostStr} was preempted.")
          // Should probably still count memory exceeded exit codes towards task failures
          case VMEM_EXCEEDED_EXIT_CODE =>
            val vmemExceededPattern = raw"$MEM_REGEX of $MEM_REGEX virtual memory used".r
            val diag = vmemExceededPattern.findFirstIn(completedContainer.getDiagnostics)
              .map(_.concat(".")).getOrElse("")
            val message = "Container killed by YARN for exceeding virtual memory limits. " +
              s"$diag Consider boosting ${EXECUTOR_MEMORY_OVERHEAD.key} or boosting " +
              s"${YarnConfiguration.NM_VMEM_PMEM_RATIO} or disabling " +
              s"${YarnConfiguration.NM_VMEM_CHECK_ENABLED} because of YARN-4714."
            (true, message)
          case PMEM_EXCEEDED_EXIT_CODE =>
            val pmemExceededPattern = raw"$MEM_REGEX of $MEM_REGEX physical memory used".r
            val diag = pmemExceededPattern.findFirstIn(completedContainer.getDiagnostics)
              .map(_.concat(".")).getOrElse("")
            val message = "Container killed by YARN for exceeding physical memory limits. " +
              s"$diag Consider boosting ${EXECUTOR_MEMORY_OVERHEAD.key}."
            (true, message)
          case _ =>
            // all the failures which not covered above, like:
            // disk failure, kill by app master or resource manager, ...
            allocatorBlacklistTracker.handleResourceAllocationFailure(hostOpt)
            (true, "Container marked as failed: " + containerId + onHostStr +
              ". Exit status: " + completedContainer.getExitStatus +
              ". Diagnostics: " + completedContainer.getDiagnostics)
        }
        if (exitCausedByApp) {
          logWarning(containerExitReason)
        } else {
          logInfo(containerExitReason)
        }
        ExecutorExited(exitStatus, exitCausedByApp, containerExitReason)
      } else {
        // If we have already released this container, then it must mean
        // that the driver has explicitly requested it to be killed
        ExecutorExited(completedContainer.getExitStatus, exitCausedByApp = false,
          s"Container $containerId exited from explicit termination request.")
      }
      for {
        host <- hostOpt
        containerSet <- allocatedHostToContainersMap.get(host)
      } {
        containerSet.remove(containerId)
        if (containerSet.isEmpty) {
          allocatedHostToContainersMap.remove(host)
        } else {
          allocatedHostToContainersMap.update(host, containerSet)
        }
        allocatedContainerToHostMap.remove(containerId)
      }
      containerIdToExecutorId.remove(containerId).foreach { eid =>
        executorIdToContainer.remove(eid)
        pendingLossReasonRequests.remove(eid) match {
          case Some(pendingRequests) =>
            // Notify application of executor loss reasons so it can decide whether it should abort
            pendingRequests.foreach(_.reply(exitReason))
          case None =>
            // We cannot find executor for pending reasons. This is because completed container
            // is processed before querying pending result. We should store it for later query.
            // This is usually happened when explicitly killing a container, the result will be
            // returned in one AM-RM communication. So query RPC will be later than this completed
            // container process.
            releasedExecutorLossReasons.put(eid, exitReason)
        }
        if (!alreadyReleased) {
          // The executor could have gone away (like no route to host, node failure, etc)
          // Notify backend about the failure of the executor
          numUnexpectedContainerRelease += 1
          driverRef.send(RemoveExecutor(eid, exitReason))
        }
      }
    }
  }
  /**
   * Register that some RpcCallContext has asked the AM why the executor was lost. Note that
   * we can only find the loss reason to send back in the next call to allocateResources().
   */
  private[yarn] def enqueueGetLossReasonRequest(
      eid: String,
      context: RpcCallContext): Unit = synchronized {
    if (executorIdToContainer.contains(eid)) {
      pendingLossReasonRequests
        .getOrElseUpdate(eid, new ArrayBuffer[RpcCallContext]) += context
    } else if (releasedExecutorLossReasons.contains(eid)) {
      // Executor is already released explicitly before getting the loss reason, so directly send
      // the pre-stored lost reason
      context.reply(releasedExecutorLossReasons.remove(eid).get)
    } else {
      logWarning(s"Tried to get the loss reason for non-existent executor $eid")
      context.sendFailure(
        new SparkException(s"Fail to find loss reason for non-existent executor $eid"))
    }
  }
  /** Records the container as released and asks the RM to release it. */
  private def internalReleaseContainer(container: Container): Unit = {
    releasedContainers.add(container.getId())
    amClient.releaseAssignedContainer(container.getId())
  }
  /** Number of containers that exited without the driver explicitly requesting their release. */
  private[yarn] def getNumUnexpectedContainerRelease = numUnexpectedContainerRelease
  /** Number of executor-loss-reason requests still awaiting an answer. */
  private[yarn] def getNumPendingLossReasonRequests: Int = synchronized {
    pendingLossReasonRequests.size
  }
  /**
   * Split the pending container requests into 3 groups based on current localities of pending
   * tasks.
   * @param hostToLocalTaskCount a map of preferred hostname to possible task counts to be used as
   *                             container placement hint.
   * @param pendingAllocations A sequence of pending allocation container request.
   * @return A tuple of 3 sequences, first is a sequence of locality matched container
   *         requests, second is a sequence of locality unmatched container requests, and third is a
   *         sequence of locality free container requests.
   */
  private def splitPendingAllocationsByLocality(
      hostToLocalTaskCount: Map[String, Int],
      pendingAllocations: Seq[ContainerRequest]
    ): (Seq[ContainerRequest], Seq[ContainerRequest], Seq[ContainerRequest]) = {
    val localityMatched = ArrayBuffer[ContainerRequest]()
    val localityUnMatched = ArrayBuffer[ContainerRequest]()
    val localityFree = ArrayBuffer[ContainerRequest]()
    val preferredHosts = hostToLocalTaskCount.keySet
    pendingAllocations.foreach { cr =>
      val nodes = cr.getNodes
      if (nodes == null) {
        localityFree += cr
      } else if (nodes.asScala.toSet.intersect(preferredHosts).nonEmpty) {
        localityMatched += cr
      } else {
        localityUnMatched += cr
      }
    }
    (localityMatched.toSeq, localityUnMatched.toSeq, localityFree.toSeq)
  }
}
private object YarnAllocator {
  // Matches memory amounts (e.g. "2.1 GB") in YARN container diagnostics messages.
  val MEM_REGEX = "[0-9.]+ [KMG]B"
  // YARN exit code reported when a container is killed for exceeding virtual memory limits.
  val VMEM_EXCEEDED_EXIT_CODE = -103
  // YARN exit code reported when a container is killed for exceeding physical memory limits.
  val PMEM_EXCEEDED_EXIT_CODE = -104
}
| mdespriee/spark | resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala | Scala | apache-2.0 | 32,972 |
package org.kimbasoft.scala
/**
 * Package-level aliases and helpers for `org.kimbasoft.scala.general`.
 *
 * @author talyag
 * @since 1.0
 */
package object general {
  // Specifying an out-of-package Class and Companion Object to appear as package local:
  // the type alias exposes the class, the val exposes its companion object, so clients
  // can use `PackagePerson` as if it were declared in this package.
  type PackagePerson = org.kimbasoft.scala.helper.PackagePerson
  val PackagePerson = org.kimbasoft.scala.helper.PackagePerson

  /** Squares the given integer (package-level utility method). */
  def sqr(x: Int): Int = x * x
}
| kimba74/sandbox-scala | src/main/scala/org/kimbasoft/scala/general/package.scala | Scala | gpl-3.0 | 386 |
/*
* Code Pulse: A real-time code coverage testing tool. For more information
* see http://code-pulse.com
*
* Copyright (C) 2014 Applied Visions - http://securedecisions.avi.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.secdec.codepulse.data.model
import scala.collection.immutable.Queue
import com.fasterxml.jackson.core.{ JsonFactory, JsonGenerator }
import com.secdec.codepulse.data.bytecode.CodeTreeNodeKind
/** A node of the full code tree, pairing a node's data record with its children.
  *
  * @param data the data record for this node
  * @param children this node's child nodes
  * @param treeNodeData accessor providing extended node data (traced state, flags)
  */
case class TreeNode(data: TreeNodeData, children: List[TreeNode])(treeNodeData: TreeNodeDataAccess) {
	import treeNodeData.ExtendedTreeNodeData

	/** Traced state of this node, via the extended node data. */
	def traced = data.traced

	/** For group nodes, whether the vulnerability flag is set; `None` for all other node kinds. */
	def vulnerable = data.kind match {
		case CodeTreeNodeKind.Grp => Some(data.flags contains TreeNodeFlag.HasVulnerability)
		case _ => None
	}
}
/** A node of the package tree projection sent to the client.
  *
  * @param id the underlying tree node id; `None` when this node is represented by a
  *           separate "self" child node instead (see `TreeBuilder.packageTree`)
  * @param kind the kind of code tree node (package/group/class/method)
  * @param label display label for this node
  * @param methodCount number of method nodes counted under this node
  * @param isSurfaceMethod whether this node itself is flagged as a surface method
  * @param hasSurfaceDescendants whether any considered descendant is a surface method
  * @param otherDescendantIds ids of class/method descendants not shown as package tree children
  * @param children child package tree nodes
  * @param tracedLookup by-name lookup of the traced state, re-evaluated on each access
  * @param vulnLookup by-name lookup of the vulnerability state, re-evaluated on each access
  */
case class PackageTreeNode(
	id: Option[Int],
	kind: CodeTreeNodeKind,
	label: String,
	methodCount: Int,
	isSurfaceMethod: Boolean,
	hasSurfaceDescendants: Boolean,
	otherDescendantIds: List[Int],
	children: List[PackageTreeNode])(
	tracedLookup: => Option[Boolean],
	vulnLookup: => Option[Boolean]) {
	// These delegate to the by-name constructor arguments, so each access re-runs the lookup.
	def traced = tracedLookup
	def vulnerable = vulnLookup
}
/** Builds/projects treemap and package tree data as JSON for client.
* TODO: manage lifetime of cached data internally
*
* @author robertf
*/
class TreeBuilder(treeNodeData: TreeNodeDataAccess) {
	/** Full set of tree roots and nodes.
	  * In a single pass over the tree node data, collects the root node ids, a map from node id
	  * to its data, and a parent-to-children index; then recursively materializes immutable
	  * `TreeNode` instances starting from the roots.
	  */
	lazy val (roots, nodes) = {
		val roots = List.newBuilder[Int]
		val nodes = Map.newBuilder[Int, TreeNodeData]
		// parent id -> builder of child ids, created lazily as children are encountered
		val children = collection.mutable.HashMap.empty[Int, collection.mutable.Builder[Int, List[Int]]]
		def childrenFor(id: Int) = children.getOrElseUpdate(id, List.newBuilder[Int])
		// single pass: record each node's data and register its id with its parent (or as a root)
		treeNodeData foreach { data =>
			nodes += data.id -> data
			(data.parentId match {
				case Some(parent) => childrenFor(parent)
				case None => roots
			}) += data.id
		}
		val nodeMap = nodes.result
		// recursively build the immutable TreeNode structure from the collected child ids
		def buildNode(id: Int): TreeNode = {
			val children = childrenFor(id).result.map(buildNode)
			val node = nodeMap(id)
			TreeNode(node, children)(treeNodeData)
		}
		(roots.result.map(buildNode), nodeMap)
	}
	/** Full package tree, with self nodes.
	  * Projects the full tree down to package/group nodes. Packages that mix package children
	  * with class/method children get an extra "self" child node carrying the non-package part.
	  */
	lazy val packageTree = {
		// build up a package tree with the relevant data
		import treeNodeData.ExtendedTreeNodeData
		/** A node is eligible for a self node if it is a package node that has at least one
		  * package child and one non-package child (class/method).
		  */
		def isEligibleForSelfNode(node: TreeNode) = {
			(node.data.kind == CodeTreeNodeKind.Pkg || node.data.kind == CodeTreeNodeKind.Grp) &&
			node.children.exists {
				case TreeNode(data, _) if data.kind == node.data.kind => true
				case _ => false
			} &&
			node.children.exists {
				case TreeNode(data, _) if data.kind == CodeTreeNodeKind.Cls || data.kind == CodeTreeNodeKind.Mth => true
				case _ => false
			}
		}
		// Counts the method nodes in the subtree rooted at `node` (a method counts as 1).
		def countMethods(node: TreeNode): Int = {
			(node.data.kind match {
				case CodeTreeNodeKind.Mth => 1
				case _ => node.children.map(countMethods).sum
			})
		}
		// Collects the ids of all class/method descendants (stopping at nested package/group nodes).
		def getOtherDescendants(node: TreeNode): List[Int] = {
			val builder = List.newBuilder[Int]
			def recurse(from: TreeNode): Unit = from.data.kind match {
				case CodeTreeNodeKind.Mth | CodeTreeNodeKind.Cls =>
					// add the node's id and all of its descendants
					builder += from.data.id
					from.children foreach recurse
				case _ =>
					// do not act on package|group nodes
			}
			node.children foreach recurse
			builder.result
		}
		// Breadth-first search below `node` (through children passing `descendantFilter`)
		// for any node flagged as a surface method.
		def hasSurfaceDescendants(node: TreeNode, descendantFilter: TreeNode => Boolean): Boolean = {
			val nodes = new collection.mutable.Queue[TreeNode]
			nodes ++= node.children.filter(descendantFilter)
			while(!nodes.isEmpty) {
				val potentialNode = nodes.dequeue()
				if(potentialNode.data.isSurfaceMethod.getOrElse(false)) {
					return true
				} else {
					nodes ++= potentialNode.children.filter(descendantFilter)
				}
			}
			false
		}
		// Converts a TreeNode into a PackageTreeNode, inserting a "self" child where eligible.
		def transform(isRoot: Boolean)(node: TreeNode): PackageTreeNode = {
			// we only want groups and packages
			def filterChildren(children: List[TreeNode]) = children.filter { n => n.data.kind == CodeTreeNodeKind.Grp || n.data.kind == CodeTreeNodeKind.Pkg }
			val otherDescendants = getOtherDescendants(node)
			if (isEligibleForSelfNode(node)) {
				// split the node children depending on where they belong
				val (selfChildren, children) = node.children.partition {
					case TreeNode(data, _) if data.kind == CodeTreeNodeKind.Cls || data.kind == CodeTreeNodeKind.Mth => true
					case _ => false
				}
				val nodeHasClassOrMethodSurfaceDescendants = hasSurfaceDescendants(node, n => n.data.kind == CodeTreeNodeKind.Cls || n.data.kind == CodeTreeNodeKind.Mth)
				// build the self node
				val selfNode = PackageTreeNode(Some(node.data.id), node.data.kind, if (isRoot) "<root>" else "<self>", selfChildren.map(countMethods).sum, node.data.isSurfaceMethod.getOrElse(false), nodeHasClassOrMethodSurfaceDescendants, otherDescendants, Nil)(node.traced, node.vulnerable)
				PackageTreeNode(None, node.data.kind, node.data.label, countMethods(node), node.data.isSurfaceMethod.getOrElse(false), nodeHasClassOrMethodSurfaceDescendants, Nil, selfNode :: filterChildren(children).map(transform(false)))(node.traced, node.vulnerable)
			} else {
				val nodeHasSurfaceDescendants = hasSurfaceDescendants(node, n => true)
				PackageTreeNode(Some(node.data.id), node.data.kind, node.data.label, countMethods(node), node.data.isSurfaceMethod.getOrElse(false), nodeHasSurfaceDescendants, otherDescendants, filterChildren(node.children).map(transform(false)))(node.traced, node.vulnerable)
			}
		}
		roots.map(transform(true))
	}
/** Projects a tree containing the selected packages and their immediate children */
def projectTree(selectedNodes: Set[Int]) = {
val incidentalNodes = collection.mutable.HashSet.empty[Int]
// recursively mark all parents of `selectedNodes` as incidental nodes (partially accepted)
def markIncidentalPath(node: Int) {
incidentalNodes += node
for {
node <- nodes get node
parent <- node.parentId
} markIncidentalPath(parent)
}
selectedNodes.foreach(markIncidentalPath)
// build the projected tree
def filterNode(node: TreeNode): Option[TreeNode] = {
def isSubstantialChild(node: TreeNode) = node.data.kind != CodeTreeNodeKind.Grp && node.data.kind != CodeTreeNodeKind.Pkg
// only include this node if it is incidental
if (incidentalNodes contains node.data.id) {
val isSelected = selectedNodes contains node.data.id
// filter children; don't include substantial data if only incidental
// the only exception to this is if that child was requested specifically
val filteredChildren = node.children.flatMap {
case child if isSubstantialChild(child) =>
if (isSelected) Some(child)
else filterNode(child)
case child => filterNode(child)
}
if (isSelected || !filteredChildren.isEmpty)
Some(TreeNode(node.data, filteredChildren)(treeNodeData))
else
None
} else None
}
roots.flatMap(filterNode)
}
} | secdec/codepulse | codepulse/src/main/scala/com/secdec/codepulse/data/model/TreeBuilder.scala | Scala | apache-2.0 | 7,543 |
package com.twitter.finagle.toggle
/**
* `Toggles` are used for modifying behavior without changing code.
*
* @param id the identifying name of the `Toggle`.
* These should generally be fully qualified names to avoid conflicts
* between libraries. For example, "com.twitter.finagle.CoolThing".
* Valid characters are `A-Z`, `a-z`, `0-9`, `_`, `-`, `.`.
*
* @see [[http://martinfowler.com/articles/feature-toggles.html Feature Toggles]]
* for detailed discussion on the topic.
* @see [[ToggleMap]]
*/
abstract class Toggle[-T](
    private[toggle] val id: String)
  extends PartialFunction[T, Boolean] { outer =>

  // Fail fast at construction time if the id is not a valid package-like name.
  Toggle.validateId(id)

  /**
   * Similar to `PartialFunction.orElse` but specialized
   * for [[Toggle Toggles]].
   *
   * The combined [[Toggle]] keeps this instance's `id`, is defined wherever
   * either side is defined, and consults `that` only for inputs where this
   * instance is undefined.
   */
  def orElse[T1 <: T](that: Toggle[T1]): Toggle[T1] =
    new Toggle[T1](outer.id) {
      override def toString: String =
        s"${outer.toString}.orElse(${that.toString})"

      def isDefinedAt(x: T1): Boolean =
        outer.isDefinedAt(x) || that.isDefinedAt(x)

      def apply(v1: T1): Boolean =
        if (outer.isDefinedAt(v1)) outer(v1) else that(v1)
    }
}
object Toggle {

  /**
   * The metadata about a [[Toggle]].
   *
   * @param id the identifying name of the `Toggle`.
   *           These should generally be fully qualified names to avoid conflicts
   *           between libraries. For example, "com.twitter.finagle.CoolThing".
   *           Valid characters are `A-Z`, `a-z`, `0-9`, `_`, `-`, `.`.
   *           See [[Toggle$.isValidId]].
   * @param fraction must be between `0.0 and 1.0`, inclusive.
   *                 This represents the fraction of inputs that will
   *                 return `true`. See [[Toggle$.isValidFraction]].
   * @param description human-readable description of the Toggle's purpose.
   * @param source the origin of this [[Toggle]] which is often given by
   *               `toString` of the [[ToggleMap]] that created it.
   */
  case class Metadata(
      id: String,
      fraction: Double,
      description: Option[String],
      source: String) {
    // Validate eagerly so invalid metadata fails at construction time.
    validateId(id)
    validateFraction(id, fraction)
    validateDescription(id, description)
  }

  /**
   * Whether or not the given `fraction` is valid.
   *
   * @param fraction must be between `0.0 and 1.0`, inclusive.
   *                 This represents the fraction of inputs that will
   *                 return `true`.
   */
  def isValidFraction(fraction: Double): Boolean =
    fraction >= 0.0 && fraction <= 1.0

  // Throws IllegalArgumentException if `f` is outside [0.0, 1.0].
  private[toggle] def validateFraction(id: String, f: Double): Unit = {
    if (!isValidFraction(f))
      throw new IllegalArgumentException(
        s"fraction for $id must be between 0.0 and 1.0 inclusive: $f")
  }

  // A description, when present, must contain at least one non-whitespace character.
  private[this] def validateDescription(id: String, desc: Option[String]): Unit = {
    desc match {
      case None =>
      case Some(d) =>
        if (d.trim.isEmpty)
          throw new IllegalArgumentException(
            s"description for $id must not be empty: '$d'")
    }
  }

  // The full set of characters permitted in a Toggle id.
  private[this] val AllowedIdChars: Set[Char] = {
    ('A'.to('Z') ++
      'a'.to('z') ++
      '0'.to('9') ++
      Set('_', '-', '.')).toSet
  }

  /** Return `Some(ErrorMessage)` when invalid. */
  private def checkId(id: String): Option[String] = {
    val invalidCh = id.find { ch =>
      !AllowedIdChars.contains(ch)
    }
    invalidCh match {
      case Some(ch) =>
        Some(s"invalid char '$ch' in id: '$id'")
      case None =>
        // do some minimal verification to make sure it looks "packagey".
        if (id.length < 3) {
          Some(s"id too short: '$id'")
        } else {
          // test that it has at least one "." that is not the first character
          val firstDot = id.indexOf('.')
          if (firstDot <= 0)
            Some(s"id must be package-like: '$id'")
          else
            None
        }
    }
  }

  /**
   * Whether or not the given `id` is valid.
   *
   * @param id the identifying name of the `Toggle`.
   *           These should generally be fully qualified names to avoid conflicts
   *           between libraries. For example, "com.twitter.finagle.CoolThing".
   *           Valid characters are `A-Z`, `a-z`, `0-9`, `_`, `-`, `.`.
   */
  def isValidId(id: String): Boolean =
    checkId(id).isEmpty

  // Throws IllegalArgumentException with a descriptive message for invalid ids.
  private[toggle] def validateId(id: String): Unit = {
    checkId(id) match {
      case Some(msg) => throw new IllegalArgumentException(msg)
      case None =>
    }
  }

  // Builds a Toggle backed by an arbitrary partial function.
  private[toggle] def apply[T](
    id: String,
    pf: PartialFunction[T, Boolean]
  ): Toggle[T] = new Toggle[T](id) {
    override def toString: String = s"Toggle($id)"
    def isDefinedAt(x: T): Boolean = pf.isDefinedAt(x)
    def apply(v1: T): Boolean = pf(v1)
  }

  private[this] val AlwaysTrue: PartialFunction[Any, Boolean] =
    { case _ => true }

  /**
   * A [[Toggle]] which is defined for all inputs and always returns `true`.
   */
  def on[T](id: String): Toggle[T] =
    apply(id, AlwaysTrue)

  private[this] val AlwaysFalse: PartialFunction[Any, Boolean] =
    { case _ => false }

  /**
   * A [[Toggle]] which is defined for all inputs and always returns `false`.
   */
  def off[T](id: String): Toggle[T] =
    apply(id, AlwaysFalse)

  /**
   * A [[Toggle]] which is defined for no inputs.
   */
  private[toggle] val Undefined: Toggle[Any] =
    new Toggle[Any]("com.twitter.finagle.toggle.Undefined") {
      def isDefinedAt(x: Any): Boolean = false
      def apply(v1: Any): Boolean = throw new UnsupportedOperationException()
      override def toString: String = "Undefined"

      // an optimization that allows for avoiding unnecessary Toggles
      // by "flattening" them out.
      override def orElse[T](that: Toggle[T]): Toggle[T] = that
    }
}
| adriancole/finagle | finagle-toggle/src/main/scala/com/twitter/finagle/toggle/Toggle.scala | Scala | apache-2.0 | 5,772 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.words
import org.scalatest._
import Matchers._
class ResultOfBeWordForNoExceptionSpec extends Spec {
object `ResultOfBeWordForNoException ` {
def `should have pretty toString when used` {
val result = noException should be
result.toString should be ("ResultOfBeWordForNoException")
}
}
} | travisbrown/scalatest | src/test/scala/org/scalatest/words/ResultOfBeWordForNoExceptionSpec.scala | Scala | apache-2.0 | 952 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.data.jdbc.engine
import java.sql.Types._
import org.beangle.commons.lang.Strings
import org.beangle.data.jdbc.meta.{Column, Index, Table}
/** SQL engine definition for Microsoft SQL Server (T-SQL dialect). */
class SQLServer(v: String) extends AbstractEngine(Version(v)) {
  this.registerReserved("t-sql.txt")

  // JDBC type -> T-SQL type mappings; unbounded character/binary data uses MAX types.
  registerTypes(
    CHAR -> "char($l)", VARCHAR -> "varchar(MAX)", NVARCHAR -> "nvarchar(MAX)",
    BIT -> "bit", BOOLEAN -> "bit",
    TINYINT -> "smallint", SMALLINT -> "smallint", INTEGER -> "int", BIGINT -> "bigint",
    FLOAT -> "float", DOUBLE -> "double precision",
    DECIMAL -> "double precision", NUMERIC -> "numeric($p,$s)",
    DATE -> "date", TIME -> "time", TIMESTAMP -> "datetime2",
    BINARY -> "binary", VARBINARY -> "varbinary(MAX)",
    LONGVARCHAR -> "text", LONGVARBINARY -> "varbinary(MAX)",
    BLOB -> "varbinary(MAX)", CLOB -> "varchar(MAX)")
  // Length-bounded variants, used when the declared length fits these limits.
  registerTypes2(
    (VARCHAR, 8000, "varchar($l)"),
    (VARBINARY, 8000, "varbinary($l)"),
    (NVARCHAR, 4000, "nvarchar($l)"))

  options.comment.supportsCommentOn = false
  options.sequence.supports = false
  options.alter { a =>
    a.table.changeType = "alter {column} {type}"
    a.table.setDefault = "add constraint {column}_dflt default {value} for {column}"
    a.table.dropDefault = "drop constraint {column}_dflt"
    a.table.setNotNull = "alter column {column} {type} not null"
    a.table.dropNotNull = "alter column {column} {type}"
    a.table.addColumn = "add {column} {type}"
    a.table.dropColumn = "drop column {column}"
    a.table.renameColumn = "EXEC sp_rename '{table}.{oldcolumn}', '{newcolumn}', 'COLUMN'"
    a.table.addPrimaryKey = "add constraint {name} primary key ({column-list})"
    a.table.dropConstraint = "drop constraint {name}"
  }
  options.validate()

  /**
   * Rewrites `querySql` into a ROW_NUMBER()-based pagination query.
   *
   * The original ORDER BY clause (if any) is moved into the ROW_NUMBER() window,
   * the query is wrapped in a CTE, and rows are filtered by a `_rownum_` range.
   *
   * @return the rewritten SQL plus the two bind parameters (1-based, inclusive).
   */
  override def limit(querySql: String, offset: Int, limit: Int): (String, List[Int]) = {
    val sb: StringBuilder = new StringBuilder(querySql)
    // Lowercasing preserves length, so this index is valid against `sb` too.
    val orderByIndex: Int = querySql.toLowerCase().indexOf("order by")
    // ROW_NUMBER() requires an ORDER BY; fall back to a non-deterministic ordering
    // when the original query has none.
    // BUG FIX: was "order by current_timestmap" (typo), which is not valid T-SQL.
    var orderby: CharSequence = "order by current_timestamp"
    if (orderByIndex > 0) {
      orderby = sb.subSequence(orderByIndex, sb.length())
      // Delete the order by clause at the end of the query; it is re-added
      // inside the ROW_NUMBER() window below.
      sb.delete(orderByIndex, orderByIndex + orderby.length())
    }
    // HHH-5715 bug fix
    replaceDistinctWithGroupBy(sb)
    insertRowNumberFunction(sb, orderby)
    // Wrap the query within a with statement:
    sb.insert(0, "with query as (").append(") select * from query ")
    sb.append("where _rownum_ between ? and ?")
    (sb.toString(), List(offset + 1, offset + limit))
  }

  /** Builds the sp_rename statement used to rename a column on SQL Server. */
  override def alterTableRenameColumn(table: Table, col: Column, newName: String): String = {
    var renameClause = options.alter.table.renameColumn
    renameClause = Strings.replace(renameClause, "{oldcolumn}", col.name.toLiteral(table.engine))
    renameClause = Strings.replace(renameClause, "{newcolumn}", newName)
    renameClause = Strings.replace(renameClause, "{table}", table.qualifiedName)
    renameClause
  }

  // Replaces a leading DISTINCT with an equivalent GROUP BY over the select list.
  protected def replaceDistinctWithGroupBy(sql: StringBuilder): Unit = {
    val distinctIndex = sql.indexOf("distinct")
    if (distinctIndex > 0) {
      sql.delete(distinctIndex, distinctIndex + "distinct".length() + 1)
      sql.append(" group by").append(getSelectFieldsWithoutAliases(sql))
    }
  }

  // Adds a ROW_NUMBER() window (named _rownum_) to the select list, ordered by `orderby`.
  protected def insertRowNumberFunction(sql: StringBuilder, orderby: CharSequence): Unit = {
    // Find the start of the from statement
    val fromIndex = sql.toString().toLowerCase().indexOf("from")
    // Insert before the from statement the row_number() function:
    sql.insert(fromIndex, ",ROW_NUMBER() OVER (" + orderby + ") as _rownum_ ")
  }

  // Extracts the select list (between "select" and "from") with aliases removed.
  protected def getSelectFieldsWithoutAliases(sql: StringBuilder): String = {
    val select = sql.substring(sql.indexOf("select") + "select".length(), sql.indexOf("from"))
    // Strip the as clauses
    stripAliases(select)
  }

  // Removes " as alias" fragments from a select list, keeping trailing commas.
  protected def stripAliases(str: String): String = {
    str.replaceAll("\\\\sas[^,]+(,?)", "$1")
  }

  override def dropIndex(i: Index): String = {
    "drop index " + i.table.qualifiedName + "." + i.literalName
  }

  /** SQL Server's default schema is `dbo`. */
  override def defaultSchema: String = {
    "dbo"
  }

  override def name: String = "Microsoft SQL Server"
}
| beangle/data | jdbc/src/main/scala/org/beangle/data/jdbc/engine/SQLServer.scala | Scala | lgpl-3.0 | 4,937 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.util.{Date, UUID}
import scala.collection.mutable
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.io.{FileCommitProtocol, SparkHadoopWriterUtils}
import org.apache.spark.internal.io.FileCommitProtocol.TaskCommitMessage
import org.apache.spark.shuffle.FetchFailedException
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, ExternalCatalogUtils}
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.physical.HashPartitioning
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeUtils}
import org.apache.spark.sql.execution.{SortExec, SparkPlan, SQLExecution}
import org.apache.spark.sql.types.StringType
import org.apache.spark.util.{SerializableConfiguration, Utils}
/** A helper object for writing FileFormat data out to a location. */
object FileFormatWriter extends Logging {
  /**
   * Max number of files a single task writes out due to file size. In most cases the number of
   * files written should be very small. This is just a safe guard to protect some really bad
   * settings, e.g. maxRecordsPerFile = 1.
   */
  private val MAX_FILE_COUNTER = 1000 * 1000

  /** Describes how output files should be placed in the filesystem. */
  case class OutputSpec(
    outputPath: String, customPartitionLocations: Map[TablePartitionSpec, String])

  /** A shared job description for all the write tasks.
   *
   * Serialized once on the driver and shipped to every executor-side write task.
   */
  private class WriteJobDescription(
      val uuid: String, // prevent collision between different (appending) write jobs
      val serializableHadoopConf: SerializableConfiguration,
      val outputWriterFactory: OutputWriterFactory,
      val allColumns: Seq[Attribute],
      val dataColumns: Seq[Attribute],
      val partitionColumns: Seq[Attribute],
      val bucketIdExpression: Option[Expression],
      val path: String,
      val customPartitionLocations: Map[TablePartitionSpec, String],
      val maxRecordsPerFile: Long,
      val timeZoneId: String)
    extends Serializable {

    // Sanity check: every output column must be either a partition or a data column.
    assert(AttributeSet(allColumns) == AttributeSet(partitionColumns ++ dataColumns),
      s"""
         |All columns: ${allColumns.mkString(", ")}
         |Partition columns: ${partitionColumns.mkString(", ")}
         |Data columns: ${dataColumns.mkString(", ")}
       """.stripMargin)
  }

  /** The result of a successful write task. */
  private case class WriteTaskResult(commitMsg: TaskCommitMessage, summary: ExecutedWriteSummary)
/**
* Basic work flow of this command is:
* 1. Driver side setup, including output committer initialization and data source specific
* preparation work for the write job to be issued.
* 2. Issues a write job consists of one or more executor side tasks, each of which writes all
* rows within an RDD partition.
* 3. If no exception is thrown in a task, commits that task, otherwise aborts that task; If any
* exception is thrown during task commitment, also aborts that task.
* 4. If all tasks are committed, commit the job, otherwise aborts the job; If any exception is
* thrown during job commitment, also aborts the job.
*/
  def write(
      sparkSession: SparkSession,
      plan: SparkPlan,
      fileFormat: FileFormat,
      committer: FileCommitProtocol,
      outputSpec: OutputSpec,
      hadoopConf: Configuration,
      partitionColumns: Seq[Attribute],
      bucketSpec: Option[BucketSpec],
      refreshFunction: (Seq[ExecutedWriteSummary]) => Unit,
      options: Map[String, String]): Unit = {

    val job = Job.getInstance(hadoopConf)
    job.setOutputKeyClass(classOf[Void])
    job.setOutputValueClass(classOf[InternalRow])
    FileOutputFormat.setOutputPath(job, new Path(outputSpec.outputPath))

    val allColumns = plan.output
    val partitionSet = AttributeSet(partitionColumns)
    // data columns = all output columns minus the partition columns
    val dataColumns = allColumns.filterNot(partitionSet.contains)

    val bucketIdExpression = bucketSpec.map { spec =>
      val bucketColumns = spec.bucketColumnNames.map(c => dataColumns.find(_.name == c).get)
      // Use `HashPartitioning.partitionIdExpression` as our bucket id expression, so that we can
      // guarantee the data distribution is same between shuffle and bucketed data source, which
      // enables us to only shuffle one side when join a bucketed table and a normal one.
      HashPartitioning(bucketColumns, spec.numBuckets).partitionIdExpression
    }
    val sortColumns = bucketSpec.toSeq.flatMap {
      spec => spec.sortColumnNames.map(c => dataColumns.find(_.name == c).get)
    }

    val caseInsensitiveOptions = CaseInsensitiveMap(options)

    // Note: prepareWrite has side effect. It sets "job".
    val outputWriterFactory =
      fileFormat.prepareWrite(sparkSession, job, caseInsensitiveOptions, dataColumns.toStructType)

    val description = new WriteJobDescription(
      uuid = UUID.randomUUID().toString,
      serializableHadoopConf = new SerializableConfiguration(job.getConfiguration),
      outputWriterFactory = outputWriterFactory,
      allColumns = allColumns,
      dataColumns = dataColumns,
      partitionColumns = partitionColumns,
      bucketIdExpression = bucketIdExpression,
      path = outputSpec.outputPath,
      customPartitionLocations = outputSpec.customPartitionLocations,
      maxRecordsPerFile = caseInsensitiveOptions.get("maxRecordsPerFile").map(_.toLong)
        .getOrElse(sparkSession.sessionState.conf.maxRecordsPerFile),
      timeZoneId = caseInsensitiveOptions.get(DateTimeUtils.TIMEZONE_OPTION)
        .getOrElse(sparkSession.sessionState.conf.sessionLocalTimeZone)
    )

    // We should first sort by partition columns, then bucket id, and finally sorting columns.
    val requiredOrdering = partitionColumns ++ bucketIdExpression ++ sortColumns
    // the sort order doesn't matter
    val actualOrdering = plan.outputOrdering.map(_.child)
    val orderingMatched = if (requiredOrdering.length > actualOrdering.length) {
      false
    } else {
      requiredOrdering.zip(actualOrdering).forall {
        case (requiredOrder, childOutputOrder) =>
          requiredOrder.semanticEquals(childOutputOrder)
      }
    }

    SQLExecution.checkSQLExecutionId(sparkSession)

    // This call shouldn't be put into the `try` block below because it only initializes and
    // prepares the job, any exception thrown from here shouldn't cause abortJob() to be called.
    committer.setupJob(job)

    try {
      // Reuse the child plan's output when it is already ordered as required;
      // otherwise add a per-partition (non-global) sort so rows arrive grouped
      // by partition/bucket, which the dynamic-partition write task relies on.
      val rdd = if (orderingMatched) {
        plan.execute()
      } else {
        SortExec(
          requiredOrdering.map(SortOrder(_, Ascending)),
          global = false,
          child = plan).execute()
      }

      val ret = new Array[WriteTaskResult](rdd.partitions.length)
      sparkSession.sparkContext.runJob(
        rdd,
        (taskContext: TaskContext, iter: Iterator[InternalRow]) => {
          executeTask(
            description = description,
            sparkStageId = taskContext.stageId(),
            sparkPartitionId = taskContext.partitionId(),
            sparkAttemptNumber = taskContext.attemptNumber(),
            committer,
            iterator = iter)
        },
        0 until rdd.partitions.length,
        (index, res: WriteTaskResult) => {
          // Collect each task's commit message on the driver as tasks finish.
          committer.onTaskCommit(res.commitMsg)
          ret(index) = res
        })

      val commitMsgs = ret.map(_.commitMsg)
      committer.commitJob(job, commitMsgs)
      logInfo(s"Job ${job.getJobID} committed.")
      refreshFunction(ret.map(_.summary))
    } catch { case cause: Throwable =>
      logError(s"Aborting job ${job.getJobID}.", cause)
      committer.abortJob(job)
      throw new SparkException("Job aborted.", cause)
    }
  }
  /** Writes data out in a single Spark task. */
  private def executeTask(
      description: WriteJobDescription,
      sparkStageId: Int,
      sparkPartitionId: Int,
      sparkAttemptNumber: Int,
      committer: FileCommitProtocol,
      iterator: Iterator[InternalRow]): WriteTaskResult = {

    val jobId = SparkHadoopWriterUtils.createJobID(new Date, sparkStageId)
    val taskId = new TaskID(jobId, TaskType.MAP, sparkPartitionId)
    val taskAttemptId = new TaskAttemptID(taskId, sparkAttemptNumber)

    // Set up the attempt context required to use in the output committer.
    val taskAttemptContext: TaskAttemptContext = {
      // Set up the configuration object
      val hadoopConf = description.serializableHadoopConf.value
      hadoopConf.set("mapreduce.job.id", jobId.toString)
      hadoopConf.set("mapreduce.task.id", taskAttemptId.getTaskID.toString)
      hadoopConf.set("mapreduce.task.attempt.id", taskAttemptId.toString)
      hadoopConf.setBoolean("mapreduce.task.ismap", true)
      hadoopConf.setInt("mapreduce.task.partition", 0)
      new TaskAttemptContextImpl(hadoopConf, taskAttemptId)
    }

    committer.setupTask(taskAttemptContext)

    // Use the simple single-directory task when there is neither partitioning
    // nor bucketing; otherwise the dynamic-partition task handles key changes.
    val writeTask =
      if (description.partitionColumns.isEmpty && description.bucketIdExpression.isEmpty) {
        new SingleDirectoryWriteTask(description, taskAttemptContext, committer)
      } else {
        new DynamicPartitionWriteTask(description, taskAttemptContext, committer)
      }

    try {
      Utils.tryWithSafeFinallyAndFailureCallbacks(block = {
        // Execute the task to write rows out and commit the task.
        val summary = writeTask.execute(iterator)
        writeTask.releaseResources()
        WriteTaskResult(committer.commitTask(taskAttemptContext), summary)
      })(catchBlock = {
        // If there is an error, release resource and then abort the task
        try {
          writeTask.releaseResources()
        } finally {
          committer.abortTask(taskAttemptContext)
          logError(s"Job $jobId aborted.")
        }
      })
    } catch {
      case e: FetchFailedException =>
        // Rethrow unchanged so Spark's scheduler can handle the fetch failure.
        throw e
      case t: Throwable =>
        throw new SparkException("Task failed while writing rows.", t)
    }
  }
/**
* A simple trait for writing out data in a single Spark task, without any concerns about how
* to commit or abort tasks. Exceptions thrown by the implementation of this trait will
* automatically trigger task aborts.
*/
  private trait ExecuteWriteTask {

    /**
     * The data structures used to measure metrics during writing.
     */
    protected var numOutputRows: Long = 0L
    protected var numOutputBytes: Long = 0L

    /**
     * Writes data out to files, and then returns the summary of relative information which
     * includes the list of partition strings written out. The list of partitions is sent back
     * to the driver and used to update the catalog. Other information will be sent back to the
     * driver too and used to update the metrics in UI.
     */
    def execute(iterator: Iterator[InternalRow]): ExecutedWriteSummary

    // Implementations close any currently open writer; safe to call repeatedly.
    def releaseResources(): Unit

    /**
     * A helper function used to determine the size in bytes of a written file.
     * Returns 0 when `filePath` is null (i.e. no file was opened).
     */
    protected def getFileSize(conf: Configuration, filePath: String): Long = {
      if (filePath != null) {
        val path = new Path(filePath)
        val fs = path.getFileSystem(conf)
        fs.getFileStatus(path).getLen()
      } else {
        0L
      }
    }
  }
/** Writes data to a single directory (used for non-dynamic-partition writes). */
private class SingleDirectoryWriteTask(
description: WriteJobDescription,
taskAttemptContext: TaskAttemptContext,
committer: FileCommitProtocol) extends ExecuteWriteTask {
private[this] var currentWriter: OutputWriter = _
private[this] var currentPath: String = _
private def newOutputWriter(fileCounter: Int): Unit = {
val ext = description.outputWriterFactory.getFileExtension(taskAttemptContext)
currentPath = committer.newTaskTempFile(
taskAttemptContext,
None,
f"-c$fileCounter%03d" + ext)
currentWriter = description.outputWriterFactory.newInstance(
path = currentPath,
dataSchema = description.dataColumns.toStructType,
context = taskAttemptContext)
}
override def execute(iter: Iterator[InternalRow]): ExecutedWriteSummary = {
var fileCounter = 0
var recordsInFile: Long = 0L
newOutputWriter(fileCounter)
while (iter.hasNext) {
if (description.maxRecordsPerFile > 0 && recordsInFile >= description.maxRecordsPerFile) {
fileCounter += 1
assert(fileCounter < MAX_FILE_COUNTER,
s"File counter $fileCounter is beyond max value $MAX_FILE_COUNTER")
recordsInFile = 0
releaseResources()
numOutputRows += recordsInFile
newOutputWriter(fileCounter)
}
val internalRow = iter.next()
currentWriter.write(internalRow)
recordsInFile += 1
}
releaseResources()
numOutputRows += recordsInFile
ExecutedWriteSummary(
updatedPartitions = Set.empty,
numOutputFile = fileCounter + 1,
numOutputBytes = numOutputBytes,
numOutputRows = numOutputRows)
}
override def releaseResources(): Unit = {
if (currentWriter != null) {
try {
currentWriter.close()
numOutputBytes += getFileSize(taskAttemptContext.getConfiguration, currentPath)
} finally {
currentWriter = null
}
}
}
}
/**
* Writes data to using dynamic partition writes, meaning this single function can write to
* multiple directories (partitions) or files (bucketing).
*/
  private class DynamicPartitionWriteTask(
      desc: WriteJobDescription,
      taskAttemptContext: TaskAttemptContext,
      committer: FileCommitProtocol) extends ExecuteWriteTask {

    // currentWriter is initialized whenever we see a new key
    private var currentWriter: OutputWriter = _
    private var currentPath: String = _

    /** Expressions that given partition columns build a path string like: col1=val/col2=val/... */
    private def partitionPathExpression: Seq[Expression] = {
      desc.partitionColumns.zipWithIndex.flatMap { case (c, i) =>
        // Each partition column renders as "name=escapedValue"; a path separator is
        // prepended for every column after the first.
        val partitionName = ScalaUDF(
          ExternalCatalogUtils.getPartitionPathString _,
          StringType,
          Seq(Literal(c.name), Cast(c, StringType, Option(desc.timeZoneId))))
        if (i == 0) Seq(partitionName) else Seq(Literal(Path.SEPARATOR), partitionName)
      }
    }

    /**
     * Opens a new OutputWriter given a partition key and optional bucket id.
     * If bucket id is specified, we will append it to the end of the file name, but before the
     * file extension, e.g. part-r-00009-ea518ad4-455a-4431-b471-d24e03814677-00002.gz.parquet
     *
     * @param partColsAndBucketId a row consisting of partition columns and a bucket id for the
     *                            current row.
     * @param getPartitionPath a function that projects the partition values into a path string.
     * @param fileCounter the number of files that have been written in the past for this specific
     *                    partition. This is used to limit the max number of records written for a
     *                    single file. The value should start from 0.
     * @param updatedPartitions the set of updated partition paths, we should add the new partition
     *                          path of this writer to it.
     */
    private def newOutputWriter(
        partColsAndBucketId: InternalRow,
        getPartitionPath: UnsafeProjection,
        fileCounter: Int,
        updatedPartitions: mutable.Set[String]): Unit = {
      val partDir = if (desc.partitionColumns.isEmpty) {
        None
      } else {
        Option(getPartitionPath(partColsAndBucketId).getString(0))
      }
      partDir.foreach(updatedPartitions.add)

      // If the bucketId expression is defined, the bucketId column is right after the partition
      // columns.
      val bucketId = if (desc.bucketIdExpression.isDefined) {
        BucketingUtils.bucketIdToString(partColsAndBucketId.getInt(desc.partitionColumns.length))
      } else {
        ""
      }

      // This must be in a form that matches our bucketing format. See BucketingUtils.
      val ext = f"$bucketId.c$fileCounter%03d" +
        desc.outputWriterFactory.getFileExtension(taskAttemptContext)

      // Honor a user-specified custom location for this partition, if one exists.
      val customPath = partDir match {
        case Some(dir) =>
          desc.customPartitionLocations.get(PartitioningUtils.parsePathFragment(dir))
        case _ =>
          None
      }
      currentPath = if (customPath.isDefined) {
        committer.newTaskTempFileAbsPath(taskAttemptContext, customPath.get, ext)
      } else {
        committer.newTaskTempFile(taskAttemptContext, partDir, ext)
      }

      currentWriter = desc.outputWriterFactory.newInstance(
        path = currentPath,
        dataSchema = desc.dataColumns.toStructType,
        context = taskAttemptContext)
    }

    override def execute(iter: Iterator[InternalRow]): ExecutedWriteSummary = {
      // Projects each input row to its (partition columns ++ bucket id) key.
      val getPartitionColsAndBucketId = UnsafeProjection.create(
        desc.partitionColumns ++ desc.bucketIdExpression, desc.allColumns)

      // Generates the partition path given the row generated by `getPartitionColsAndBucketId`.
      val getPartPath = UnsafeProjection.create(
        Seq(Concat(partitionPathExpression)), desc.partitionColumns)

      // Returns the data columns to be written given an input row
      val getOutputRow = UnsafeProjection.create(desc.dataColumns, desc.allColumns)

      // If anything below fails, we should abort the task.
      var recordsInFile: Long = 0L
      var fileCounter = 0
      var totalFileCounter = 0
      var currentPartColsAndBucketId: UnsafeRow = null
      val updatedPartitions = mutable.Set[String]()

      // NOTE(review): rows are assumed to arrive grouped by partition/bucket key
      // (the `write` method sorts by partition columns ++ bucket id when needed),
      // so every key change here really starts a new partition/bucket.
      for (row <- iter) {
        val nextPartColsAndBucketId = getPartitionColsAndBucketId(row)
        if (currentPartColsAndBucketId != nextPartColsAndBucketId) {
          if (currentPartColsAndBucketId != null) {
            // Tally the files written for the partition/bucket we are leaving.
            totalFileCounter += (fileCounter + 1)
          }

          // See a new partition or bucket - write to a new partition dir (or a new bucket file).
          currentPartColsAndBucketId = nextPartColsAndBucketId.copy()
          logDebug(s"Writing partition: $currentPartColsAndBucketId")

          numOutputRows += recordsInFile
          recordsInFile = 0
          fileCounter = 0

          releaseResources()
          newOutputWriter(currentPartColsAndBucketId, getPartPath, fileCounter, updatedPartitions)
        } else if (desc.maxRecordsPerFile > 0 &&
            recordsInFile >= desc.maxRecordsPerFile) {
          // Exceeded the threshold in terms of the number of records per file.
          // Create a new file by increasing the file counter.
          numOutputRows += recordsInFile
          recordsInFile = 0
          fileCounter += 1
          assert(fileCounter < MAX_FILE_COUNTER,
            s"File counter $fileCounter is beyond max value $MAX_FILE_COUNTER")

          releaseResources()
          newOutputWriter(currentPartColsAndBucketId, getPartPath, fileCounter, updatedPartitions)
        }

        currentWriter.write(getOutputRow(row))
        recordsInFile += 1
      }
      if (currentPartColsAndBucketId != null) {
        // Tally files for the final partition/bucket.
        totalFileCounter += (fileCounter + 1)
      }
      releaseResources()
      numOutputRows += recordsInFile

      ExecutedWriteSummary(
        updatedPartitions = updatedPartitions.toSet,
        numOutputFile = totalFileCounter,
        numOutputBytes = numOutputBytes,
        numOutputRows = numOutputRows)
    }

    override def releaseResources(): Unit = {
      if (currentWriter != null) {
        try {
          currentWriter.close()
          // Account for the bytes of the file just closed.
          numOutputBytes += getFileSize(taskAttemptContext.getConfiguration, currentPath)
        } finally {
          currentWriter = null
        }
      }
    }
  }
}
/**
* Wrapper class for the metrics of writing data out.
*
* @param updatedPartitions the partitions updated during writing data out. Only valid
* for dynamic partition.
* @param numOutputFile the total number of files.
* @param numOutputRows the number of output rows.
* @param numOutputBytes the bytes of output data.
*/
// Constructed by the write tasks with named arguments; see the scaladoc above
// for the meaning of each field.
case class ExecutedWriteSummary(
  updatedPartitions: Set[String],
  numOutputFile: Int,
  numOutputRows: Long,
  numOutputBytes: Long)
| aokolnychyi/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatWriter.scala | Scala | apache-2.0 | 21,549 |
package com.lambtors.poker_api.module.poker.domain
import cats.data.OptionT
import com.lambtors.poker_api.module.poker.domain.model.{GameId, PokerGame}
/**
 * Repository abstraction for poker games, parameterized on the effect type `P[_]`
 * (tagless-final style) so implementations may choose their own effect (e.g. Future, IO).
 */
trait PokerGameRepository[P[_]] {
  /** Looks up a game by its id; absence is modeled with `OptionT` rather than null. */
  def search(gameId: GameId): OptionT[P, PokerGame]
  /** Persists changes to an already-stored game. */
  def update(pokerGame: PokerGame): P[Unit]
  /** Stores a new game. */
  def insert(pokerGame: PokerGame): P[Unit]
}
| lambtors/poker-api | src/main/scala/com/lambtors/poker_api/module/poker/domain/PokerGameRepository.scala | Scala | mit | 332 |
package com.twitter.finagle.http.codec
import com.twitter.concurrent.AsyncQueue
import com.twitter.conversions.time._
import com.twitter.finagle.{Service, Status}
import com.twitter.finagle.http.{Fields, Request, Response, Status => HttpStatus, Version}
import com.twitter.finagle.http.exp.StreamTransport
import com.twitter.finagle.http.netty3.Bijections._
import com.twitter.finagle.netty3.ChannelBufferBuf
import com.twitter.finagle.netty3.http.Netty3ServerStreamTransport
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.finagle.transport.{QueueTransport, Transport}
import com.twitter.io.Reader
import com.twitter.util.{Await, Future, Promise}
import org.jboss.netty.buffer.ChannelBuffers
import org.jboss.netty.handler.codec.http.{
DefaultHttpChunk,
HttpChunk,
HttpResponse,
HttpResponseStatus
}
import org.scalatest.FunSuite
// Note: This is shared between Netty3 and Netty4 implementations, but we need a concrete impl
// to test it so the finagle-http package is most appropriate even though the implementation
// is in finagle-base-http.
/**
 * Tests for the HTTP server dispatcher: message validation, header pass-through,
 * chunked (streaming) bodies, and client-abort handling before/after dispatch.
 * Uses in-memory queue-backed transports built by `mkPair` (see companion object).
 */
class HttpServerDispatcherTest extends FunSuite {
  import HttpServerDispatcherTest._
  // Writes `chunk` to `trans` and asserts that the same chunk content is read back.
  def testChunk(trans: Transport[Any, Any], chunk: HttpChunk) = {
    val f = trans.read()
    assert(!f.isDefined)
    Await.ready(trans.write(chunk), 5.seconds)
    val c = Await.result(f, 5.seconds).asInstanceOf[HttpChunk]
    assert(c.getContent == chunk.getContent)
  }
  // A non-HTTP message on the wire must close the transport.
  test("invalid message") {
    val (in, out) = mkPair[Any, Any]
    val service = Service.mk { req: Request =>
      Future.value(Response())
    }
    val disp = new HttpServerDispatcher(out, service, NullStatsReceiver)
    in.write("invalid")
    Await.ready(out.onClose, 5.seconds)
    assert(out.status == Status.Closed)
  }
  // A service-supplied "Connection: close" header must survive to the wire response.
  test("don't clobber service 'Connection: close' headers set by service") {
    val service = Service.mk { _: Request =>
      val resp = Response()
      resp.setContentString("foo")
      resp.headerMap.set(Fields.Connection, "close")
      Future.value(resp)
    }
    val (in, out) = mkPair[Any, Any]
    val disp = new HttpServerDispatcher(out, service, NullStatsReceiver)
    in.write(from(Request("/foo")))
    Await.result(in.read, 5.seconds) match {
      case resp: HttpResponse =>
        assert(resp.getStatus == HttpResponseStatus.OK)
        assert(resp.headers().get(Fields.Connection) == "close")
      case other => fail(s"Received unknown type: ${other.getClass.getSimpleName}")
    }
  }
  // Chunks written by the service's reader must be echoed to the transport in order,
  // terminated by the LAST_CHUNK marker.
  test("streaming request body") {
    val service = Service.mk { req: Request =>
      ok(req.reader)
    }
    val (in, out) = mkPair[Any, Any]
    val disp = new HttpServerDispatcher(out, service, NullStatsReceiver)
    val req = Request()
    req.setChunked(true)
    in.write(from(req))
    Await.result(in.read, 5.seconds)
    testChunk(in, chunk("a"))
    testChunk(in, chunk("foo"))
    testChunk(in, HttpChunk.LAST_CHUNK)
  }
  // Closing the transport while the service future is pending must interrupt it.
  test("client abort before dispatch") {
    val promise = new Promise[Response]
    val service = Service.mk { _: Request =>
      promise
    }
    val (in, out) = mkPair[Any, Any]
    val disp = new HttpServerDispatcher(out, service, NullStatsReceiver)
    in.write(from(Request()))
    // Simulate channel closure
    out.close()
    assert(promise.isInterrupted.isDefined)
  }
  // Closing the transport mid-stream must discard the response writer.
  test("client abort after dispatch") {
    val req = Request()
    val res = req.response
    val service = Service.mk { _: Request =>
      Future.value(res)
    }
    val (in, out) = mkPair[Any, Any]
    val disp = new HttpServerDispatcher(out, service, NullStatsReceiver)
    req.response.setChunked(true)
    in.write(from(req))
    Await.result(in.read(), 5.seconds)
    // Simulate channel closure
    out.close()
    intercept[Reader.ReaderDiscarded] { Await.result(res.writer.write(buf(".")), 5.seconds) }
  }
}
/** Test helpers: in-memory transport pairs and small chunk/buffer constructors. */
object HttpServerDispatcherTest {
  // Builds a pair of transports backed by two shared queues, so whatever one side
  // writes the other side reads; the second element is the server-side stream transport.
  def mkPair[A: Manifest, B: Manifest]: (Transport[A, B], StreamTransport[Response, Request]) = {
    val inQ = new AsyncQueue[Any]
    val outQ = new AsyncQueue[Any]
    (
      Transport.cast[A, B](new QueueTransport(outQ, inQ)),
      new Netty3ServerStreamTransport(new QueueTransport(inQ, outQ))
    )
  }
  // Wraps a string into a Netty ChannelBuffer (UTF-8 bytes).
  def wrap(msg: String) = ChannelBuffers.wrappedBuffer(msg.getBytes("UTF-8"))
  def buf(msg: String) = ChannelBufferBuf.Owned(wrap(msg))
  def chunk(msg: String) = new DefaultHttpChunk(wrap(msg))
  // A 200 OK response whose body is streamed from the given reader.
  def ok(reader: Reader): Future[Response] =
    Future.value(Response(Version.Http11, HttpStatus.Ok, reader))
}
| mkhq/finagle | finagle-http/src/test/scala/com/twitter/finagle/http/codec/HttpServerDispatcherTest.scala | Scala | apache-2.0 | 4,492 |
package me.m1key.audioliciousmigration.mining
import com.google.inject.Inject
import me.m1key.audioliciousmigration.persistence.mongodb.MorphiaMongoDbPersistenceProvider
import com.mongodb.Mongo
import com.mongodb.BasicDBList
import com.mongodb.BasicDBObject
import java.text.NumberFormat
import java.text.ParsePosition
import java.util.Locale
/**
 * Computes, per genre, the average song rating for one library by running a MongoDB
 * `group` command over the `MongoDbSong` collection, then returns the genres sorted
 * by average rating, highest first.
 */
class HighestRatedGenreMining @Inject() (private val persistenceProvider: MorphiaMongoDbPersistenceProvider) {
  // This is a hack.
  // It won't allow two statements in the reduce call.
  // Therefore the totalAlbums++ one has to be used in the if statement.
  // NOTE: '%s' below is substituted with the library UUID via String.format in mine().
  private val query = "db.MongoDbSong.group(" +
    "{key: {genre: 1}," +
    "initial: {totalRatings: 0, totalSongs: 0, totalRatingPerGenre: 0}," +
    "reduce: function(obj, prev) {obj.statsList.forEach(function(item){if(item.libraryUuid == '%s' && ++prev.totalSongs){ prev.totalRatings += item.percentage}})}," +
    "finalize: function(prev) {prev.totalRatingPerGenre = prev.totalRatings / prev.totalSongs}" +
    "})";
  // NOTE(review): java.text.NumberFormat instances are not thread-safe; this shared
  // instance assumes single-threaded use — confirm against callers.
  private val formatter = NumberFormat.getInstance(Locale.ENGLISH)
  /**
   * Runs the group query for `libraryUuid` and returns (genre, averageRating) pairs
   * sorted descending by rating, or None if the server returned an unexpected type.
   */
  def mine(libraryUuid: String): Option[List[(String, Double)]] = {
    val mongo = persistenceProvider.getMongo
    val result = mongo.getDB("audiolicious").eval(query.format(libraryUuid))
    result match {
      case list: BasicDBList => return Some(processAndRetrieveResults(list))
      case _ => println("Error while obtaining stats. Result of unknown type [%s].".format(result))
        return None;
    }
  }
  // Converts the raw result list to pairs and sorts highest rating first.
  private def processAndRetrieveResults(list: BasicDBList): List[(String, Double)] = {
    return processResults(list).sortWith(compareSecondValueInteger)
  }
  // Descending comparison on the second (rating) component of each pair.
  def compareSecondValueInteger(e1: (String, Double), e2: (String, Double)) = e1._2 > e2._2
  // Extracts one (genre, rating) pair per BasicDBObject entry; non-object entries
  // are logged and skipped.
  private def processResults(list: BasicDBList): List[(String, Double)] = {
    var results : List[(String, Double)] = List()
    for (i <- 0 until list.size()) {
      val item = list.get(i)
      item match {
        case dbObject: BasicDBObject => results ::= processResult(dbObject)
        case _ => println("Error while obtaining stats. Result item of unknown type [%s].".format(item.getClass()))
      }
    }
    return results
  }
  // Reads the "genre" key and the finalized "totalRatingPerGenre" average from one row.
  private def processResult(dbObject: BasicDBObject): (String, Double) = {
    val genre = dbObject.get("genre").toString()
    val totalPlays = parseDouble(dbObject.get("totalRatingPerGenre").toString(), formatter)
    return (genre, totalPlays)
  }
  // http://stackoverflow.com/a/9542323
  // Locale-aware parse; returns 0 when the string is not a valid number.
  def parseDouble(s: String, nf: NumberFormat) = {
    val pp = new ParsePosition(0)
    val d = nf.parse(s, pp)
    if (pp.getErrorIndex == -1) d.doubleValue else 0
  }
}
| m1key/audiolicious-migration | src/main/scala/me/m1key/audioliciousmigration/mining/HighestRatedGenreMining.scala | Scala | gpl-3.0 | 2,672 |
package io.vamp.common
import io.vamp.common.util.HashUtil
/** Shared constants: the hashing scheme version and common metadata field names. */
object Artifact {
  // Version tag mixed into lookup hashes (see trait Lookup below).
  val version = "v1"
  val kind: String = "kind"
  val metadata = "metadata"
}
/** Base contract for all persisted artifacts: a name, a kind tag, and free-form metadata. */
trait Artifact {
  def name: String
  def kind: String
  def metadata: Map[String, Any]
}
/** An artifact that only refers to another by name, so it carries no metadata. */
trait Reference extends Artifact {
  val metadata = Map[String, Any]()
}
/** Mixin for artifacts that expose a type discriminator (backticked: `type` is a keyword). */
trait Type {
  def `type`: String
}
/** Field name under which the lookup hash is stored. */
object Lookup {
  val entry = "lookup_name"
}
/** Artifacts addressable by a stable hash derived from their name. */
trait Lookup extends Artifact {
  // Hash of this artifact's own name.
  def lookupName = lookup(name)
  // SHA-1 hex of "<runtime class>@<string>", salted with the scheme version, so the
  // same name in different artifact classes yields different lookups.
  def lookup(string: String) = HashUtil.hexSha1(s"$getClass@$string", Artifact.version)
}
| magneticio/vamp | common/src/main/scala/io/vamp/common/Artifact.scala | Scala | apache-2.0 | 569 |
package ch.megard.akka.http.cors.scaladsl.model
import akka.http.scaladsl.model.headers.HttpOrigin
import org.scalatest.Inspectors
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
/**
 * Specs for HttpOriginMatcher: the `*` wildcard matcher, the strict (exact) matcher
 * built by `strict()`, and the sub-domain wildcard matcher built by `apply()`.
 */
class HttpOriginMatcherSpec extends AnyWordSpec with Matchers with Inspectors {
  "The `*` matcher" should {
    // Any scheme/host/port combination must match.
    "match any Origin" in {
      val origins = Seq(
        "http://localhost",
        "http://192.168.1.1",
        "http://test.com",
        "http://test.com:8080",
        "https://test.com",
        "https://test.com:4433"
      ).map(HttpOrigin.apply)
      forAll(origins) { o => HttpOriginMatcher.*.matches(o) shouldBe true }
    }
    "be printed as `*`" in {
      HttpOriginMatcher.*.toString shouldBe "*"
    }
  }
  "The strict() method" should {
    // Exact comparison: scheme, host, and port must all be identical; `*` is literal.
    "build a strict matcher, comparing exactly the origins" in {
      val positives = Seq(
        "http://localhost",
        "http://test.com",
        "https://test.ch:12345",
        "https://*.test.uk.co"
      ).map(HttpOrigin.apply)
      val negatives = Seq(
        "http://localhost:80",
        "https://localhost",
        "http://test.com:8080",
        "https://test.ch",
        "https://abc.test.uk.co"
      ).map(HttpOrigin.apply)
      val matcher = HttpOriginMatcher.strict(positives: _*)
      forAll(positives) { o => matcher.matches(o) shouldBe true }
      forAll(negatives) { o => matcher.matches(o) shouldBe false }
    }
    "build a matcher with a toString() method that is a valid range" in {
      val matcher = HttpOriginMatcher(Seq("http://test.com", "https://test.ch:12345").map(HttpOrigin.apply): _*)
      matcher.toString shouldBe "http://test.com https://test.ch:12345"
    }
  }
  "The apply() method" should {
    // A leading `*.` matches any sub-domain depth; wildcards elsewhere are literal.
    "build a matcher accepting sub-domains with wildcards" in {
      val matcher = HttpOriginMatcher(
        Seq(
          "http://test.com",
          "https://test.ch:12345",
          "https://*.test.uk.co",
          "http://*.abc.com:8080",
          "http://*abc.com", // Must start with `*.`
          "http://abc.*.middle.com" // The wildcard can't be in the middle
        ).map(HttpOrigin.apply): _*
      )
      val positives = Seq(
        "http://test.com",
        "https://test.ch:12345",
        "https://sub.test.uk.co",
        "https://sub1.sub2.test.uk.co",
        "http://sub.abc.com:8080"
      ).map(HttpOrigin.apply)
      val negatives = Seq(
        "http://test.com:8080",
        "http://sub.test.uk.co", // must compare the scheme
        "http://sub.abc.com", // must compare the port
        "http://abc.test.com", // no wildcard
        "http://sub.abc.com",
        "http://subabc.com",
        "http://abc.sub.middle.com",
        "http://abc.middle.com"
      ).map(HttpOrigin.apply)
      forAll(positives) { o => matcher.matches(o) shouldBe true }
      forAll(negatives) { o => matcher.matches(o) shouldBe false }
    }
    "build a matcher with a toString() method that is a valid range" in {
      val matcher = HttpOriginMatcher(Seq("http://test.com", "https://*.test.ch:12345").map(HttpOrigin.apply): _*)
      matcher.toString shouldBe "http://test.com https://*.test.ch:12345"
    }
  }
}
| lomigmegard/akka-http-cors | akka-http-cors/src/test/scala/ch/megard/akka/http/cors/scaladsl/model/HttpOriginMatcherSpec.scala | Scala | apache-2.0 | 3,206 |
package mesosphere.marathon.event.http
import mesosphere.marathon.Protos
import mesosphere.marathon.Protos.StorageVersion
import mesosphere.marathon.api.validation.FieldConstraints.FieldJsonProperty
import mesosphere.marathon.state.MarathonState

import scala.collection.JavaConverters._
import collection.JavaConversions._
/**
 * Persisted set of HTTP event-callback URLs, (de)serialized via the
 * `Protos.EventSubscribers` protobuf message.
 *
 * @param urls the registered callback URLs (exposed as "callbackUrls" in JSON)
 */
case class EventSubscribers(
  @FieldJsonProperty("callbackUrls") urls: Set[String] = Set.empty[String]) extends MarathonState[Protos.EventSubscribers, EventSubscribers] {

  override def mergeFromProto(message: Protos.EventSubscribers): EventSubscribers =
    // Use the explicit `asScala` decorator (scala.collection.JavaConverters) instead of
    // relying on the deprecated implicit JavaConversions to turn the Java list into a Set.
    EventSubscribers(message.getCallbackUrlsList.asScala.toSet)

  /** Parses the raw protobuf bytes and delegates to the message-based overload. */
  override def mergeFromProto(bytes: Array[Byte]): EventSubscribers = {
    val proto = Protos.EventSubscribers.parseFrom(bytes)
    mergeFromProto(proto)
  }

  /** Serializes every registered URL into a protobuf message. */
  override def toProto: Protos.EventSubscribers = {
    val builder = Protos.EventSubscribers.newBuilder()
    urls.foreach(builder.addCallbackUrls(_))
    builder.build()
  }
}
| tnachen/marathon | src/main/scala/mesosphere/marathon/event/http/EventSubscribers.scala | Scala | apache-2.0 | 943 |
package fpinscala.streamingio
import fpinscala.iomonad.{IO,Monad,Free,unsafePerformIO}
/**
 * Motivating examples: an imperative line-counting program wrapped in `IO`, and the
 * "lazy I/O" workaround, both of which the streaming `Process` type later replaces.
 */
object ImperativeAndLazyIO {
  /*
  We are going to consider various approaches to the simple task of
  checking whether a file contains more than 40,000 lines.
  Our first implementation is an imperative implementation, embedded
  into `IO`.
  */
  import java.io._
  def linesGt40k(filename: String): IO[Boolean] = IO {
    // There are a number of convenience functions in scala.io.Source
    // for reading from external sources such as files.
    val src = io.Source.fromFile(filename)
    try {
      var count = 0
      // Obtain a stateful iterator from the Source
      val lines: Iterator[String] = src.getLines
      while (count <= 40000 && lines.hasNext) {
        lines.next // has side effect of advancing to next element
        count += 1
      }
      count > 40000
    }
    finally src.close
  }
  /*
  The above code is rather low-level, and it's not compositional,
  either. Consider the following scenarios:
  * Check whether the number of _nonempty_ lines in the file exceeds
    40,000
  * Find a line index before 40,000 where the first letter of
    consecutive lines spells out `"abracadabra"`.
  We cannot just compose our existing implementation with some
  other combinator(s) to implement these tasks. Our implementation is
  a monolithic loop, and we must modify this loop directly if we want
  to change its behavior.
  Now imagine if we had a `Stream[String]` for the lines of the file
  and we could assemble functionality using all the `Stream` functions
  we know and love.
  */
  object Examples {
    val lines: Stream[String] = sys.error("defined elsewhere")
    val ex1 = lines.zipWithIndex.exists(_._2 + 1 >= 40000)
    val ex2 = lines.filter(!_.trim.isEmpty).zipWithIndex.exists(_._2 + 1 >= 40000)
    val ex3 = lines.take(40000).map(_.head).indexOfSlice("abracadabra".toList)
  }
  /*
  Could we actually write the above? Not quite. We could 'cheat' and
  return an `IO[Stream[String]]` representing the lines of a file:
  */
  def lines(filename: String): IO[Stream[String]] = IO {
    val src = io.Source.fromFile(filename)
    // The appended thunk closes the source only once the stream is fully forced.
    src.getLines.toStream append { src.close; Stream.empty }
  }
  /*
  This is called _lazy I/O_, and it's problematic for a number of
  reasons, discussed in the book text. However, it would be nice to
  recover the same high-level, compositional style we are used to
  from our use of `List` and `Stream`.
  */
}
object SimpleStreamTransducers {
/*
We now introduce a type, `Process`, representing pure, single-input
stream transducers. It can be in of three states - it can be
emitting a value to the output (`Emit`), reading a value from its
input (`Await`) or signaling termination via `Halt`.
*/
  /**
   * A pure, single-input stream transducer: at any moment it is emitting an output
   * (`Emit`), awaiting an input (`Await`), or finished (`Halt`).
   */
  sealed trait Process[I,O] {
    import Process._
    /*
     * A `Process[I,O]` can be used to transform a `Stream[I]` to a
     * `Stream[O]`.
     */
    def apply(s: Stream[I]): Stream[O] = this match {
      case Halt() => Stream()
      case Await(recv) => s match {
        case h #:: t => recv(Some(h))(t)
        case xs => recv(None)(xs) // Stream is empty
      }
      case Emit(h,t) => h #:: t(s)
    }
    /*
     * `Process` can be thought of as a sequence of values of type `O`
     * and many of the operations that would be defined for `List[O]`
     * can be defined for `Process[I,O]`, for instance `map`, `++` and
     * `flatMap`. The definitions are analogous.
     */
    def map[O2](f: O => O2): Process[I,O2] = this match {
      case Halt() => Halt()
      case Emit(h, t) => Emit(f(h), t map f)
      case Await(recv) => Await(recv andThen (_ map f))
    }
    def ++(p: => Process[I,O]): Process[I,O] = this match {
      case Halt() => p
      case Emit(h, t) => Emit(h, t ++ p)
      case Await(recv) => Await(recv andThen (_ ++ p))
    }
    def flatMap[O2](f: O => Process[I,O2]): Process[I,O2] = this match {
      case Halt() => Halt()
      case Emit(h, t) => f(h) ++ t.flatMap(f)
      case Await(recv) => Await(recv andThen (_ flatMap f))
    }
    /*
     * Exercise 5: Implement `|>`. Let the types guide your implementation.
     */
    // Fuses two transducers: the output of `this` becomes the input of `p2`.
    def |>[O2](p2: Process[O,O2]): Process[I,O2] = {
      p2 match {
        case Halt() => Halt()
        case Emit(h,t) => Emit(h, this |> t)
        case Await(f) => this match {
          case Emit(h,t) => t |> f(Some(h))
          case Halt() => Halt() |> f(None)
          case Await(g) => Await((i: Option[I]) => g(i) |> p2)
        }
      }
    }
    /*
     * Feed `in` to this `Process`. Uses a tail recursive loop as long
     * as `this` is in the `Await` state.
     */
    def feed(in: Seq[I]): Process[I,O] = {
      @annotation.tailrec
      def go(in: Seq[I], cur: Process[I,O]): Process[I,O] =
        cur match {
          case Halt() => Halt()
          case Await(recv) =>
            if (in.nonEmpty) go(in.tail, recv(Some(in.head)))
            else cur
          case Emit(h, t) => Emit(h, t.feed(in))
        }
      go(in, this)
    }
    /*
     * See `Process.lift` for a typical repeating `Process`
     * definition expressed with explicit recursion.
     */
    /*
     * `Process` definitions can often be expressed without explicit
     * recursion, by repeating some simpler `Process` forever.
     */
    def repeat: Process[I,O] = {
      def go(p: Process[I,O]): Process[I,O] = p match {
        case Halt() => go(this)
        case Await(recv) => Await {
          case None => recv(None)
          case i => go(recv(i))
        }
        case Emit(h, t) => Emit(h, go(t))
      }
      go(this)
    }
    // Like `repeat`, but restarts this process at most `n` additional times.
    def repeatN(n: Int): Process[I,O] = {
      def go(n: Int, p: Process[I,O]): Process[I,O] = p match {
        case Halt() => if (n > 0) go(n-1, this) else Halt()
        case Await(recv) => Await {
          case None => recv(None)
          case i => go(n,recv(i))
        }
        case Emit(h, t) => Emit(h, go(n,t))
      }
      go(n, this)
    }
    /*
     * As an example of `repeat`, see `Process.filter`. We define
     * a convenience function here for composing this `Process`
     * with a `Process` that filters the output type `O`.
     */
    def filter(f: O => Boolean): Process[I,O] =
      this |> Process.filter(f)
    /** Exercise 7: see definition below. */
    def zip[O2](p: Process[I,O2]): Process[I,(O,O2)] =
      Process.zip(this, p)
    /*
     * Exercise 6: Implement `zipWithIndex`.
     */
    def zipWithIndex: Process[I,(O,Int)] =
      this zip (count map (_ - 1))
    /* Add `p` to the fallback branch of this process */
    def orElse(p: Process[I,O]): Process[I,O] = this match {
      case Halt() => p
      case Await(recv) => Await {
        case None => p
        case x => recv(x)
      }
      case _ => this
    }
  }
  /** Constructors and stock combinators for the single-input `Process` transducer. */
  object Process {
    case class Emit[I,O](
        head: O,
        tail: Process[I,O] = Halt[I,O]())
      extends Process[I,O]
    case class Await[I,O](
        recv: Option[I] => Process[I,O])
      extends Process[I,O]
    case class Halt[I,O]() extends Process[I,O]
    def emit[I,O](head: O,
                  tail: Process[I,O] = Halt[I,O]()): Process[I,O] =
      Emit(head, tail)
    // Process forms a monad, and we provide monad syntax for it
    import fpinscala.iomonad.Monad
    def monad[I]: Monad[({ type f[x] = Process[I,x]})#f] =
      new Monad[({ type f[x] = Process[I,x]})#f] {
        def unit[O](o: => O): Process[I,O] = emit(o)
        def flatMap[O,O2](p: Process[I,O])(f: O => Process[I,O2]): Process[I,O2] =
          p flatMap f
      }
    // enable monadic syntax for `Process` type
    implicit def toMonadic[I,O](a: Process[I,O]) = monad[I].toMonadic(a)
    /**
     * A helper function to await an element or fall back to another process
     * if there is no input.
     */
    def await[I,O](f: I => Process[I,O],
                   fallback: Process[I,O] = Halt[I,O]()): Process[I,O] =
      Await[I,O] {
        case Some(i) => f(i)
        case None => fallback
      }
    /*
     * We can convert any function `f: I => O` to a `Process[I,O]`. We
     * simply `Await`, then `Emit` the value received, transformed by
     * `f`.
     */
    def liftOne[I,O](f: I => O): Process[I,O] =
      Await {
        case Some(i) => emit(f(i))
        case None => Halt()
      }
    def lift[I,O](f: I => O): Process[I,O] =
      liftOne(f).repeat
    /*
     * As an example of `repeat`, here's a definition of `filter` that
     * uses `repeat`.
     */
    def filter[I](f: I => Boolean): Process[I,I] =
      Await[I,I] {
        case Some(i) if f(i) => emit(i)
        case _ => Halt()
      }.repeat
    /*
     * Here's a typical `Process` definition that requires tracking some
     * piece of state (in this case, the running total):
     */
    def sum: Process[Double,Double] = {
      def go(acc: Double): Process[Double,Double] =
        await(d => emit(d+acc, go(d+acc)))
      go(0.0)
    }
    /*
     * Exercise 1: Implement `take`, `drop`, `takeWhile`, and `dropWhile`.
     */
    def take[I](n: Int): Process[I,I] =
      if (n <= 0) Halt()
      else await(i => emit(i, take[I](n-1)))
    def drop[I](n: Int): Process[I,I] =
      if (n <= 0) id
      else await(i => drop[I](n-1))
    def takeWhile[I](f: I => Boolean): Process[I,I] =
      await(i =>
        if (f(i)) emit(i, takeWhile(f))
        else Halt())
    def dropWhile[I](f: I => Boolean): Process[I,I] =
      await(i =>
        if (f(i)) dropWhile(f)
        else emit(i,id))
    /* The identity `Process`, just repeatedly echos its input. */
    def id[I]: Process[I,I] = lift(identity)
    /*
     * Exercise 2: Implement `count`.
     *
     * Here's one implementation, with three stages - we map all inputs
     * to 1.0, compute a running sum, then finally convert the output
     * back to `Int`. The three stages will be interleaved - as soon
     * as the first element is examined, it will be converted to 1.0,
     * then added to the running total, and then this running total
     * will be converted back to `Int`, then the `Process` will examine
     * the next element, and so on.
     */
    def count[I]: Process[I,Int] =
      lift((i: I) => 1.0) |> sum |> lift(_.toInt)
    /* For comparison, here is an explicit recursive implementation. */
    def count2[I]: Process[I,Int] = {
      def go(n: Int): Process[I,Int] =
        await((i: I) => emit(n+1, go(n+1)))
      go(0)
    }
    /*
     * Exercise 3: Implement `mean`.
     *
     * This is an explicit recursive definition. We'll factor out a
     * generic combinator shortly.
     */
    def mean: Process[Double,Double] = {
      def go(sum: Double, count: Double): Process[Double,Double] =
        await((d: Double) => emit((sum+d) / (count+1), go(sum+d,count+1)))
      go(0.0, 0.0)
    }
    // Generic stateful combinator: threads state `S` through `f`, emitting one output per input.
    def loop[S,I,O](z: S)(f: (I,S) => (O,S)): Process[I,O] =
      await((i: I) => f(i,z) match {
        case (o,s2) => emit(o, loop(s2)(f))
      })
    /* Exercise 4: Implement `sum` and `count` in terms of `loop` */
    def sum2: Process[Double,Double] =
      loop(0.0)((d:Double, acc) => (acc+d,acc+d))
    def count3[I]: Process[I,Int] =
      loop(0)((_:I,n) => (n+1,n+1))
    /*
     * Exercise 7: Can you think of a generic combinator that would
     * allow for the definition of `mean` in terms of `sum` and
     * `count`?
     *
     * Yes, it is `zip`, which feeds the same input to two processes.
     * The implementation is a bit tricky, as we have to make sure
     * that input gets fed to both `p1` and `p2`.
     */
    def zip[A,B,C](p1: Process[A,B], p2: Process[A,C]): Process[A,(B,C)] =
      (p1, p2) match {
        case (Halt(), _) => Halt()
        case (_, Halt()) => Halt()
        case (Emit(b, t1), Emit(c, t2)) => Emit((b,c), zip(t1, t2))
        case (Await(recv1), _) =>
          Await((oa: Option[A]) => zip(recv1(oa), feed(oa)(p2)))
        case (_, Await(recv2)) =>
          Await((oa: Option[A]) => zip(feed(oa)(p1), recv2(oa)))
      }
    // Pushes a single (optional) input into `p` if it is awaiting one.
    def feed[A,B](oa: Option[A])(p: Process[A,B]): Process[A,B] =
      p match {
        case Halt() => p
        case Emit(h,t) => Emit(h, feed(oa)(t))
        case Await(recv) => recv(oa)
      }
    /*
     * Using zip, we can then define `mean`. Again, this definition
     * operates in a single pass.
     */
    val mean2 = (sum zip count) |> lift { case (s,n) => s / n }
    /*
     * Exercise 6: Implement `zipWithIndex`.
     *
     * See definition on `Process` above.
     */
    /*
     * Exercise 8: Implement `exists`
     *
     * We choose to emit all intermediate values, and not halt.
     * See `existsResult` below for a trimmed version.
     */
    def exists[I](f: I => Boolean): Process[I,Boolean] =
      lift(f) |> any
    /* Emits whether a `true` input has ever been received. */
    def any: Process[Boolean,Boolean] =
      loop(false)((b:Boolean,s) => (s || b, s || b))
    /* A trimmed `exists`, containing just the final result. */
    def existsResult[I](f: I => Boolean) =
      exists(f) |> takeThrough(!_) |> dropWhile(!_) |> echo.orElse(emit(false))
    /*
     * Like `takeWhile`, but includes the first element that tests
     * false.
     */
    def takeThrough[I](f: I => Boolean): Process[I,I] =
      takeWhile(f) ++ echo
    /* Awaits then emits a single value, then halts. */
    def echo[I]: Process[I,I] = await(i => emit(i))
    // Consumes exactly one input, emitting nothing.
    def skip[I,O]: Process[I,O] = await(i => Halt())
    // Consumes all input, emitting nothing.
    def ignore[I,O]: Process[I,O] = skip.repeat
    // Wraps inputs in Some, emitting a final None once input is exhausted.
    def terminated[I]: Process[I,Option[I]] =
      await((i: I) => emit(Some(i), terminated[I]), emit(None))
    // Drives `p` over the lines of file `f`, folding its outputs with `g` from `z`;
    // the source is closed even if the fold throws.
    def processFile[A,B](f: java.io.File,
                         p: Process[String, A],
                         z: B)(g: (B, A) => B): IO[B] = IO {
      @annotation.tailrec
      def go(ss: Iterator[String], cur: Process[String, A], acc: B): B =
        cur match {
          case Halt() => acc
          case Await(recv) =>
            val next = if (ss.hasNext) recv(Some(ss.next))
                       else recv(None)
            go(ss, next, acc)
          case Emit(h, t) => go(ss, t, g(acc, h))
        }
      val s = io.Source.fromFile(f)
      try go(s.getLines, p, z)
      finally s.close
    }
    /*
     * Exercise 9: Write a program that reads degrees fahrenheit as `Double` values from a file,
     * converts each temperature to celsius, and writes results to another file.
     */
    // This process defines the here is core logic, a transducer that converts input lines
    // (assumed to be temperatures in degrees fahrenheit) to output lines (temperatures in
    // degress celsius). Left as an exercise to supply another wrapper like `processFile`
    // to actually do the IO and drive the process.
    def convertFahrenheit: Process[String,String] =
      filter((line: String) => !line.startsWith("#")) |>
      filter(line => line.trim.nonEmpty) |>
      lift(line => toCelsius(line.toDouble).toString)
    def toCelsius(fahrenheit: Double): Double =
      (5.0 / 9.0) * (fahrenheit - 32.0)
  }
}
object GeneralizedStreamTransducers {
/*
Our generalized process type is parameterized on the protocol used for
communicating with the driver. This works similarly to the `IO` type
we defined in chapter 13. The `Await` constructor emits a request of
type `F[A]`, and receives a response of type `Either[Throwable,A]`:
trait Process[F,A]
case class Await[F[_],A,O](
req: F[A],
recv: Either[Throwable,A] => Process[F,O]) extends Process[F,O]
case class Halt[F[_],O](err: Throwable) extends Process[F,O]
case class Emit[F[_],O](head: O, tail: Process[F,O]) extends Process[F,O]
The `Await` constructor may now receive a successful result or an error.
The `Halt` constructor now has a _reason_ for termination, which may be
either normal termination indicated by the special exception `End`,
forceful terimation, indicated by the special exception `Kill`,
or some other error.
We'll use the improved `Await` and `Halt` cases together to ensure
that all resources get released, even in the event of exceptions.
*/
  /**
   * The effectful, resource-safe `Process`: requests of type `F[A]` are issued to a
   * driver, and `Halt` carries the reason for termination (`End`, `Kill`, or an error)
   * so that finalizers can always be run.
   */
  trait Process[F[_],O] {
    import Process._
    /*
     * Many of the same operations can be defined for this generalized
     * `Process` type, regardless of the choice of `F`.
     */
    def map[O2](f: O => O2): Process[F,O2] = this match {
      case Await(req,recv) =>
        Await(req, recv andThen (_ map f))
      case Emit(h, t) => Try { Emit(f(h), t map f) }
      case Halt(err) => Halt(err)
    }
    def ++(p: => Process[F,O]): Process[F,O] =
      this.onHalt {
        case End => Try(p) // we consult `p` only on normal termination
        case err => Halt(err)
      }
    /*
     * Like `++`, but _always_ runs `p`, even if `this` halts with an error.
     */
    def onComplete(p: => Process[F,O]): Process[F,O] =
      this.onHalt {
        case End => p.asFinalizer
        case err => p.asFinalizer ++ Halt(err) // we always run `p`, but preserve any errors
      }
    // Makes this process ignore `Kill`, so it can complete its cleanup when used as a finalizer.
    def asFinalizer: Process[F,O] = this match {
      case Emit(h, t) => Emit(h, t.asFinalizer)
      case Halt(e) => Halt(e)
      case Await(req,recv) => await(req) {
        case Left(Kill) => this.asFinalizer
        case x => recv(x)
      }
    }
    // Runs `f` on the termination reason, splicing the resulting process in at the end.
    def onHalt(f: Throwable => Process[F,O]): Process[F,O] = this match {
      case Halt(e) => Try(f(e))
      case Emit(h, t) => Emit(h, t.onHalt(f))
      case Await(req,recv) => Await(req, recv andThen (_.onHalt(f)))
    }
    /*
     * Anywhere we _call_ `f`, we catch exceptions and convert them to `Halt`.
     * See the helper function `Try` defined below.
     */
    def flatMap[O2](f: O => Process[F,O2]): Process[F,O2] =
      this match {
        case Halt(err) => Halt(err)
        case Emit(o, t) => Try(f(o)) ++ t.flatMap(f)
        case Await(req,recv) =>
          Await(req, recv andThen (_ flatMap f))
      }
    def repeat: Process[F,O] =
      this ++ this.repeat
    def repeatNonempty: Process[F,O] = {
      val cycle = (this.map(o => Some(o): Option[O]) ++ emit(None)).repeat
      // cut off the cycle when we see two `None` values in a row, as this
      // implies `this` has produced no values during an iteration
      val trimmed = cycle |> window2 |> (takeWhile {
        case (Some(None), None) => false
        case _ => true
      })
      trimmed.map(_._2).flatMap {
        case None => Halt(End)
        case Some(o) => emit(o)
      }
    }
    /*
     * Exercise 10: This function is defined only if given a `MonadCatch[F]`.
     * Unlike the simple `runLog` interpreter defined in the companion object
     * below, this is not tail recursive and responsibility for stack safety
     * is placed on the `Monad` instance.
     */
    def runLog(implicit F: MonadCatch[F]): F[IndexedSeq[O]] = {
      def go(cur: Process[F,O], acc: IndexedSeq[O]): F[IndexedSeq[O]] =
        cur match {
          case Emit(h,t) => go(t, acc :+ h)
          case Halt(End) => F.unit(acc)
          case Halt(err) => F.fail(err)
          case Await(req,recv) => F.flatMap (F.attempt(req)) { e => go(Try(recv(e)), acc) }
        }
      go(this, IndexedSeq())
    }
    /*
     * We define `Process1` as a type alias - see the companion object
     * for `Process` below. Using that, we can then define `|>` once
     * more. The definition is extremely similar to our previous
     * definition. We again use the helper function, `feed`, to take
     * care of the case where `this` is emitting values while `p2`
     * is awaiting these values.
     *
     * The one subtlety is we make sure that if `p2` halts, we
     * `kill` this process, giving it a chance to run any cleanup
     * actions (like closing file handles, etc).
     */
    def |>[O2](p2: Process1[O,O2]): Process[F,O2] = {
      p2 match {
        case Halt(e) => this.kill onHalt { e2 => Halt(e) ++ Halt(e2) }
        case Emit(h, t) => Emit(h, this |> t)
        case Await(req,recv) => this match {
          case Halt(err) => Halt(err) |> recv(Left(err))
          case Emit(h,t) => t |> Try(recv(Right(h)))
          case Await(req0,recv0) => await(req0)(recv0 andThen (_ |> p2))
        }
      }
    }
    @annotation.tailrec
    final def kill[O2]: Process[F,O2] = this match {
      case Await(req,recv) => recv(Left(Kill)).drain.onHalt {
        case Kill => Halt(End) // we convert the `Kill` exception back to normal termination
        case e => Halt(e)
      }
      case Halt(e) => Halt(e)
      case Emit(h, t) => t.kill
    }
    /** Alias for `this |> p2`. */
    def pipe[O2](p2: Process1[O,O2]): Process[F,O2] =
      this |> p2
    // Discards all output while still running the process (used by `kill` for cleanup).
    final def drain[O2]: Process[F,O2] = this match {
      case Halt(e) => Halt(e)
      case Emit(h, t) => t.drain
      case Await(req,recv) => Await(req, recv andThen (_.drain))
    }
    def filter(f: O => Boolean): Process[F,O] =
      this |> Process.filter(f)
    def take(n: Int): Process[F,O] =
      this |> Process.take(n)
    def once: Process[F,O] = take(1)
    /*
     * Use a `Tee` to interleave or combine the outputs of `this` and
     * `p2`. This can be used for zipping, interleaving, and so forth.
     * Nothing requires that the `Tee` read elements from each
     * `Process` in lockstep. It could read fifty elements from one
     * side, then two elements from the other, then combine or
     * interleave these values in some way, etc.
     *
     * This definition uses two helper functions, `feedL` and `feedR`,
     * which feed the `Tee` in a tail-recursive loop as long as
     * it is awaiting input.
     */
    def tee[O2,O3](p2: Process[F,O2])(t: Tee[O,O2,O3]): Process[F,O3] = {
      t match {
        case Halt(e) => this.kill onComplete p2.kill onComplete Halt(e)
        case Emit(h,t) => Emit(h, (this tee p2)(t))
        case Await(side, recv) => side.get match {
          case Left(isO) => this match {
            case Halt(e) => p2.kill onComplete Halt(e)
            case Emit(o,ot) => (ot tee p2)(Try(recv(Right(o))))
            case Await(reqL, recvL) =>
              await(reqL)(recvL andThen (this2 => this2.tee(p2)(t)))
          }
          case Right(isO2) => p2 match {
            case Halt(e) => this.kill onComplete Halt(e)
            case Emit(o2,ot) => (this tee ot)(Try(recv(Right(o2))))
            case Await(reqR, recvR) =>
              await(reqR)(recvR andThen (p3 => this.tee(p3)(t)))
          }
        }
      }
    }
    def zipWith[O2,O3](p2: Process[F,O2])(f: (O,O2) => O3): Process[F,O3] =
      (this tee p2)(Process.zipWith(f))
    def zip[O2](p2: Process[F,O2]): Process[F,(O,O2)] =
      zipWith(p2)((_,_))
    // Feeds every output to the sink's per-element effect.
    def to[O2](sink: Sink[F,O]): Process[F,Unit] =
      join { (this zipWith sink)((o,f) => f(o)) }
    // Transforms each output through the channel's effectful function.
    def through[O2](p2: Channel[F, O, O2]): Process[F,O2] =
      join { (this zipWith p2)((o,f) => f(o)) }
  }
object Process {
case class Await[F[_],A,O](
req: F[A],
recv: Either[Throwable,A] => Process[F,O]) extends Process[F,O]
case class Emit[F[_],O](
head: O,
tail: Process[F,O]) extends Process[F,O]
case class Halt[F[_],O](err: Throwable) extends Process[F,O]
def emit[F[_],O](
head: O,
tail: Process[F,O] = Halt[F,O](End)): Process[F,O] =
Emit(head, tail)
def await[F[_],A,O](req: F[A])(recv: Either[Throwable,A] => Process[F,O]): Process[F,O] =
Await(req, recv)
/**
* Helper function to safely produce `p`, or gracefully halt
* with an error if an exception is thrown.
*/
def Try[F[_],O](p: => Process[F,O]): Process[F,O] =
try p
catch { case e: Throwable => Halt(e) }
/*
* Safely produce `p`, or run `cleanup` and halt gracefully with the
* exception thrown while evaluating `p`.
*/
def TryOr[F[_],O](p: => Process[F,O])(cleanup: Process[F,O]): Process[F,O] =
try p
catch { case e: Throwable => cleanup ++ Halt(e) }
/*
* Safely produce `p`, or run `cleanup` or `fallback` if an exception
* occurs while evaluating `p`.
*/
def TryAwait[F[_],O](p: => Process[F,O])(fallback: Process[F,O], cleanup: Process[F,O]): Process[F,O] =
try p
catch {
case End => fallback
case e: Throwable => cleanup ++ Halt(e)
}
/* Our generalized `Process` type can represent sources! */
import fpinscala.iomonad.IO
/* Special exception indicating normal termination */
case object End extends Exception
/* Special exception indicating forceful termination */
case object Kill extends Exception
/*
* A `Process[F,O]` where `F` is a monad like `IO` can be thought of
* as a source.
*/
/*
* Here is a simple tail recursive function to collect all the
* output of a `Process[IO,O]`. Notice we are using the fact
* that `IO` can be `run` to produce either a result or an
* exception.
*/
def runLog[O](src: Process[IO,O]): IO[IndexedSeq[O]] = IO {
val E = java.util.concurrent.Executors.newFixedThreadPool(4)
@annotation.tailrec
def go(cur: Process[IO,O], acc: IndexedSeq[O]): IndexedSeq[O] =
cur match {
case Emit(h,t) => go(t, acc :+ h)
case Halt(End) => acc
case Halt(err) => throw err
case Await(req,recv) =>
val next =
try recv(Right(fpinscala.iomonad.unsafePerformIO(req)(E)))
catch { case err: Throwable => recv(Left(err)) }
go(next, acc)
}
try go(src, IndexedSeq())
finally E.shutdown
}
/*
* We can write a version of collect that works for any `Monad`.
* See the definition in the body of `Process`.
*/
import java.io.{BufferedReader,FileReader}
val p: Process[IO, String] =
await(IO(new BufferedReader(new FileReader("lines.txt")))) {
case Right(b) =>
lazy val next: Process[IO,String] = await(IO(b.readLine)) {
case Left(e) => await(IO(b.close))(_ => Halt(e))
case Right(line) => Emit(line, next)
}
next
case Left(e) => Halt(e)
}
/*
* Generic combinator for producing a `Process[IO,O]` from some
* effectful `O` source. The source is tied to some resource,
* `R` (like a file handle) that we want to ensure is released.
* See `lines` below for an example use.
*/
def resource[R,O](acquire: IO[R])(
use: R => Process[IO,O])(
release: R => Process[IO,O]): Process[IO,O] =
eval(acquire) flatMap { r => use(r).onComplete(release(r)) }
/*
* Like `resource`, but `release` is a single `IO` action.
*/
def resource_[R,O](acquire: IO[R])(
use: R => Process[IO,O])(
release: R => IO[Unit]): Process[IO,O] =
resource(acquire)(use)(release andThen (eval_[IO,Unit,O]))
/*
* Create a `Process[IO,O]` from the lines of a file, using
* the `resource` combinator above to ensure the file is closed
* when processing the stream of lines is finished.
*/
def lines(filename: String): Process[IO,String] =
resource
{ IO(io.Source.fromFile(filename)) }
{ src =>
lazy val iter = src.getLines // a stateful iterator
def step = if (iter.hasNext) Some(iter.next) else None
lazy val lines: Process[IO,String] = eval(IO(step)).flatMap {
case None => Halt(End)
case Some(line) => Emit(line, lines)
}
lines
}
{ src => eval_ { IO(src.close) } }
/* Exercise 11: Implement `eval`, `eval_`, and use these to implement `lines`. */
def eval[F[_],A](a: F[A]): Process[F,A] =
await[F,A,A](a) {
case Left(err) => Halt(err)
case Right(a) => Emit(a, Halt(End))
}
/* Evaluate the action purely for its effects. */
def eval_[F[_],A,B](a: F[A]): Process[F,B] =
eval[F,A](a).drain[B]
/* Helper function with better type inference. */
def evalIO[A](a: IO[A]): Process[IO,A] =
eval[IO,A](a)
/*
* We now have nice, resource safe effectful sources, but we don't
* have any way to transform them or filter them. Luckily we can
* still represent the single-input `Process` type we introduced
* earlier, which we'll now call `Process1`.
*/
case class Is[I]() {
sealed trait f[X]
val Get = new f[I] {}
}
def Get[I] = Is[I]().Get
type Process1[I,O] = Process[Is[I]#f, O]
/* Some helper functions to improve type inference. */
def await1[I,O](
recv: I => Process1[I,O],
fallback: => Process1[I,O] = halt1[I,O]): Process1[I, O] =
Await(Get[I], (e: Either[Throwable,I]) => e match {
case Left(End) => fallback
case Left(err) => Halt(err)
case Right(i) => Try(recv(i))
})
def emit1[I,O](h: O, tl: Process1[I,O] = halt1[I,O]): Process1[I,O] =
emit(h, tl)
def halt1[I,O]: Process1[I,O] = Halt[Is[I]#f, O](End)
def lift[I,O](f: I => O): Process1[I,O] =
await1[I,O]((i:I) => emit(f(i))) repeat
def filter[I](f: I => Boolean): Process1[I,I] =
await1[I,I](i => if (f(i)) emit(i) else halt1) repeat
// we can define take, takeWhile, and so on as before
def take[I](n: Int): Process1[I,I] =
if (n <= 0) halt1
else await1[I,I](i => emit(i, take(n-1)))
def takeWhile[I](f: I => Boolean): Process1[I,I] =
await1(i =>
if (f(i)) emit(i, takeWhile(f))
else halt1)
def dropWhile[I](f: I => Boolean): Process1[I,I] =
await1(i =>
if (f(i)) dropWhile(f)
else emit(i,id))
def id[I]: Process1[I,I] =
await1((i: I) => emit(i, id))
def window2[I]: Process1[I,(Option[I],I)] = {
def go(prev: Option[I]): Process1[I,(Option[I],I)] =
await1[I,(Option[I],I)](i => emit(prev -> i) ++ go(Some(i)))
go(None)
}
/** Emits `sep` in between each input received. */
def intersperse[I](sep: I): Process1[I,I] =
await1[I,I](i => emit1(i) ++ id.flatMap(i => emit1(sep) ++ emit1(i)))
/*
We sometimes need to construct a `Process` that will pull values
from multiple input sources. For instance, suppose we want to
'zip' together two files, `f1.txt` and `f2.txt`, combining
corresponding lines in some way. Using the same trick we used for
`Process1`, we can create a two-input `Process` which can request
values from either the 'left' stream or the 'right' stream. We'll
call this a `Tee`, after the letter 'T', which looks like a
little diagram of two inputs being combined into one output.
*/
case class T[I,I2]() {
sealed trait f[X] { def get: Either[I => X, I2 => X] }
val L = new f[I] { def get = Left(identity) }
val R = new f[I2] { def get = Right(identity) }
}
def L[I,I2] = T[I,I2]().L
def R[I,I2] = T[I,I2]().R
type Tee[I,I2,O] = Process[T[I,I2]#f, O]
/* Again some helper functions to improve type inference. */
def haltT[I,I2,O]: Tee[I,I2,O] =
Halt[T[I,I2]#f,O](End)
def awaitL[I,I2,O](recv: I => Tee[I,I2,O],
fallback: => Tee[I,I2,O] = haltT[I,I2,O]): Tee[I,I2,O] =
await[T[I,I2]#f,I,O](L) {
case Left(End) => fallback
case Left(err) => Halt(err)
case Right(a) => Try(recv(a))
}
def awaitR[I,I2,O](recv: I2 => Tee[I,I2,O],
fallback: => Tee[I,I2,O] = haltT[I,I2,O]): Tee[I,I2,O] =
await[T[I,I2]#f,I2,O](R) {
case Left(End) => fallback
case Left(err) => Halt(err)
case Right(a) => Try(recv(a))
}
def emitT[I,I2,O](h: O, tl: Tee[I,I2,O] = haltT[I,I2,O]): Tee[I,I2,O] =
emit(h, tl)
def zipWith[I,I2,O](f: (I,I2) => O): Tee[I,I2,O] =
awaitL[I,I2,O](i =>
awaitR (i2 => emitT(f(i,i2)))) repeat
def zip[I,I2]: Tee[I,I2,(I,I2)] = zipWith((_,_))
/* Ignores all input from left. */
def passR[I,I2]: Tee[I,I2,I2] = awaitR(emitT(_, passR))
/* Ignores input from the right. */
def passL[I,I2]: Tee[I,I2,I] = awaitL(emitT(_, passL))
/* Alternate pulling values from the left and the right inputs. */
def interleaveT[I]: Tee[I,I,I] =
awaitL[I,I,I](i =>
awaitR (i2 => emitT(i) ++ emitT(i2))) repeat
/*
Our `Process` type can also represent effectful sinks (like a file).
A `Sink` is simply a source of effectful functions! See the
definition of `to` in `Process` for an example of how to feed a
`Process` to a `Sink`.
*/
type Sink[F[_],O] = Process[F, O => Process[F,Unit]]
import java.io.FileWriter
/* A `Sink` which writes input strings to the given file. */
def fileW(file: String, append: Boolean = false): Sink[IO,String] =
resource[FileWriter, String => Process[IO,Unit]]
{ IO { new FileWriter(file, append) }}
{ w => constant { (s: String) => eval[IO,Unit](IO(w.write(s))) }}
{ w => eval_(IO(w.close)) }
/* The infinite, constant stream. */
def constant[A](a: A): Process[IO,A] =
eval(IO(a)).flatMap { a => Emit(a, constant(a)) }
/* Exercise 12: Implement `join`. Notice this is the standard monadic combinator! */
def join[F[_],A](p: Process[F,Process[F,A]]): Process[F,A] =
p.flatMap(pa => pa)
/*
* An example use of the combinators we have so far: incrementally
* convert the lines of a file from fahrenheit to celsius.
*/
import fpinscala.iomonad.IO0.fahrenheitToCelsius
val converter: Process[IO,Unit] =
lines("fahrenheit.txt").
filter(line => !line.startsWith("#") && !line.trim.isEmpty).
map(line => fahrenheitToCelsius(line.toDouble).toString).
pipe(intersperse("\\n")).
to(fileW("celsius.txt")).
drain
/*
More generally, we can feed a `Process` through an effectful
channel which returns a value other than `Unit`.
*/
type Channel[F[_],I,O] = Process[F, I => Process[F,O]]
/*
* Here is an example, a JDBC query runner which returns the
* stream of rows from the result set of the query. We have
* the channel take a `Connection => PreparedStatement` as
* input, so code that uses this channel does not need to be
* responsible for knowing how to obtain a `Connection`.
*/
import java.sql.{Connection, PreparedStatement, ResultSet}
def query(conn: IO[Connection]):
Channel[IO, Connection => PreparedStatement, Map[String,Any]] =
resource_
{ conn }
{ conn => constant { (q: Connection => PreparedStatement) =>
resource_
{ IO {
val rs = q(conn).executeQuery
val ncols = rs.getMetaData.getColumnCount
val cols = (1 to ncols).map(rs.getMetaData.getColumnName)
(rs, cols)
}}
{ case (rs, cols) =>
def step =
if (!rs.next) None
else Some(cols.map(c => (c, rs.getObject(c): Any)).toMap)
lazy val rows: Process[IO,Map[String,Any]] =
eval(IO(step)).flatMap {
case None => Halt(End)
case Some(row) => Emit(row, rows)
}
rows
}
{ p => IO { p._1.close } } // close the ResultSet
}}
{ c => IO(c.close) }
/*
* We can allocate resources dynamically when defining a `Process`.
* As an example, this program reads a list of filenames to process
* _from another file_, opening each file, processing it and closing
* it promptly.
*/
val convertAll: Process[IO,Unit] = (for {
out <- fileW("celsius.txt").once
file <- lines("fahrenheits.txt")
_ <- lines(file).
map(line => fahrenheitToCelsius(line.toDouble)).
flatMap(celsius => out(celsius.toString))
} yield ()) drain
/*
* Just by switching the order of the `flatMap` calls, we can output
* to multiple files.
*/
val convertMultisink: Process[IO,Unit] = (for {
file <- lines("fahrenheits.txt")
_ <- lines(file).
map(line => fahrenheitToCelsius(line.toDouble)).
map(_ toString).
to(fileW(file + ".celsius"))
} yield ()) drain
/*
* We can attach filters or other transformations at any point in the
* program, for example:
*/
val convertMultisink2: Process[IO,Unit] = (for {
file <- lines("fahrenheits.txt")
_ <- lines(file).
filter(!_.startsWith("#")).
map(line => fahrenheitToCelsius(line.toDouble)).
filter(_ > 0). // ignore below zero temperatures
map(_ toString).
to(fileW(file + ".celsius"))
} yield ()) drain
}
}
object ProcessTest extends App {
import GeneralizedStreamTransducers._
import fpinscala.iomonad.IO
import Process._
val p = eval(IO { println("woot"); 1 }).repeat
val p2 = eval(IO { println("cleanup"); 2 } ).onHalt {
case Kill => println { "cleanup was killed, instead of bring run" }; Halt(Kill)
case e => Halt(e)
}
println { Process.runLog { p2.onComplete(p2).onComplete(p2).take(1).take(1) } }
println { Process.runLog(converter) }
// println { Process.collect(Process.convertAll) }
}
| ud3sh/coursework | functional-programming-in-scala-textbook/answers/src/main/scala/fpinscala/streamingio/StreamingIO.scala | Scala | unlicense | 37,806 |
package mesosphere.marathon
package core.deployment.impl
import akka.Done
import akka.actor._
import akka.event.EventStream
import com.typesafe.scalalogging.StrictLogging
import mesosphere.marathon.core.deployment._
import mesosphere.marathon.core.deployment.impl.DeploymentActor.{ Cancel, Fail, NextStep }
import mesosphere.marathon.core.deployment.impl.DeploymentManagerActor.DeploymentFinished
import mesosphere.marathon.core.event.{ DeploymentStatus, DeploymentStepFailure, DeploymentStepSuccess }
import mesosphere.marathon.core.health.HealthCheckManager
import mesosphere.marathon.core.instance.Instance
import mesosphere.marathon.core.launchqueue.LaunchQueue
import mesosphere.marathon.core.pod.PodDefinition
import mesosphere.marathon.core.readiness.ReadinessCheckExecutor
import mesosphere.marathon.core.task.termination.{ KillReason, KillService }
import mesosphere.marathon.core.task.tracker.InstanceTracker
import mesosphere.marathon.state.{ AppDefinition, RunSpec }
import mesosphere.mesos.Constraints
import scala.async.Async._
import scala.concurrent.{ Future, Promise }
import scala.util.control.NonFatal
import scala.util.{ Failure, Success }
private class DeploymentActor(
deploymentManager: ActorRef,
promise: Promise[Done],
killService: KillService,
scheduler: SchedulerActions,
plan: DeploymentPlan,
instanceTracker: InstanceTracker,
launchQueue: LaunchQueue,
healthCheckManager: HealthCheckManager,
eventBus: EventStream,
readinessCheckExecutor: ReadinessCheckExecutor) extends Actor with StrictLogging {
import context.dispatcher
val steps = plan.steps.iterator
var currentStepNr: Int = 0
// Default supervision strategy is overridden here to restart deployment child actors (responsible for individual
// deployment steps e.g. AppStartActor, TaskStartActor etc.) even if any exception occurs (even during initialisation).
// This is due to the fact that child actors tend to gather information during preStart about the tasks that are
// already running from the TaskTracker and LaunchQueue and those calls can timeout. In general deployment child
// actors are built idempotent which should make restarting them possible.
// Additionally a BackOffSupervisor is used to make sure child actor failures are not overloading other parts of the system
// (like LaunchQueue and InstanceTracker) and are not filling the log with exceptions.
import scala.concurrent.duration._
import akka.pattern.{ Backoff, BackoffSupervisor }
def childSupervisor(props: Props, name: String): Props = {
BackoffSupervisor.props(
Backoff.onFailure(
childProps = props,
childName = name,
minBackoff = 5.seconds,
maxBackoff = 1.minute,
randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
).withSupervisorStrategy(
OneForOneStrategy() {
case NonFatal(_) => SupervisorStrategy.Restart
case _ => SupervisorStrategy.Escalate
}
))
}
override def preStart(): Unit = {
self ! NextStep
}
override def postStop(): Unit = {
deploymentManager ! DeploymentFinished(plan)
}
def receive: Receive = {
case NextStep if steps.hasNext =>
val step = steps.next()
currentStepNr += 1
logger.debug(s"Process next deployment step: stepNumber=$currentStepNr step=$step planId=${plan.id}")
deploymentManager ! DeploymentStepInfo(plan, step, currentStepNr)
performStep(step) onComplete {
case Success(_) => self ! NextStep
case Failure(t) => self ! Fail(t)
}
case NextStep =>
// no more steps, we're done
logger.debug(s"No more deployment steps to process: planId=${plan.id}")
promise.success(Done)
context.stop(self)
case Cancel(t) =>
promise.failure(t)
context.stop(self)
case Fail(t) =>
logger.debug(s"Deployment failed: planId=${plan.id}", t)
promise.failure(t)
context.stop(self)
}
// scalastyle:off
def performStep(step: DeploymentStep): Future[Unit] = {
logger.debug(s"Perform deployment step: step=$step planId=${plan.id}")
if (step.actions.isEmpty) {
Future.successful(())
} else {
val status = DeploymentStatus(plan, step)
eventBus.publish(status)
val futures = step.actions.map { action =>
action.runSpec match {
case app: AppDefinition => healthCheckManager.addAllFor(app, Seq.empty)
case pod: PodDefinition => //ignore: no marathon based health check for pods
}
action match {
case StartApplication(run, scaleTo) => startRunnable(run, scaleTo, status)
case ScaleApplication(run, scaleTo, toKill) => scaleRunnable(run, scaleTo, toKill, status)
case RestartApplication(run) => restartRunnable(run, status)
case StopApplication(run) => stopRunnable(run.withInstances(0))
}
}
Future.sequence(futures).map(_ => ()) andThen {
case Success(_) =>
logger.debug(s"Deployment step successful: step=$step plandId=${plan.id}")
eventBus.publish(DeploymentStepSuccess(plan, step))
case Failure(e) =>
logger.debug(s"Deployment step failed: step=$step plandId=${plan.id}", e)
eventBus.publish(DeploymentStepFailure(plan, step))
}
}
}
// scalastyle:on
def startRunnable(runnableSpec: RunSpec, scaleTo: Int, status: DeploymentStatus): Future[Unit] = {
val promise = Promise[Unit]()
instanceTracker.specInstances(runnableSpec.id).map { instances =>
context.actorOf(childSupervisor(AppStartActor.props(deploymentManager, status, scheduler, launchQueue, instanceTracker,
eventBus, readinessCheckExecutor, runnableSpec, scaleTo, instances, promise), s"AppStart-${plan.id}"))
}
promise.future
}
@SuppressWarnings(Array("all")) /* async/await */
def scaleRunnable(runnableSpec: RunSpec, scaleTo: Int,
toKill: Option[Seq[Instance]],
status: DeploymentStatus): Future[Done] = {
logger.debug("Scale runnable {}", runnableSpec)
def killToMeetConstraints(notSentencedAndRunning: Seq[Instance], toKillCount: Int) = {
Constraints.selectInstancesToKill(runnableSpec, notSentencedAndRunning, toKillCount)
}
async {
val instances = await(instanceTracker.specInstances(runnableSpec.id))
val runningInstances = instances.filter(_.state.condition.isActive)
val ScalingProposition(tasksToKill, tasksToStart) = ScalingProposition.propose(
runningInstances, toKill, killToMeetConstraints, scaleTo, runnableSpec.killSelection)
def killTasksIfNeeded: Future[Done] = {
logger.debug("Kill tasks if needed")
tasksToKill.fold(Future.successful(Done)) { tasks =>
logger.debug("Kill tasks {}", tasks)
killService.killInstances(tasks, KillReason.DeploymentScaling).map(_ => Done)
}
}
await(killTasksIfNeeded)
def startTasksIfNeeded: Future[Done] = {
tasksToStart.fold(Future.successful(Done)) { tasksToStart =>
logger.debug(s"Start next $tasksToStart tasks")
val promise = Promise[Unit]()
context.actorOf(childSupervisor(TaskStartActor.props(deploymentManager, status, scheduler, launchQueue, instanceTracker, eventBus,
readinessCheckExecutor, runnableSpec, scaleTo, promise), s"TaskStart-${plan.id}"))
promise.future.map(_ => Done)
}
}
await(startTasksIfNeeded)
}
}
@SuppressWarnings(Array("all")) /* async/await */
def stopRunnable(runnableSpec: RunSpec): Future[Done] = async {
val instances = await(instanceTracker.specInstances(runnableSpec.id))
val launchedInstances = instances.filter(_.isLaunched)
// TODO: the launch queue is purged in stopRunnable, but it would make sense to do that before calling kill(tasks)
await(killService.killInstances(launchedInstances, KillReason.DeletingApp))
// Note: This is an asynchronous call. We do NOT wait for the run spec to stop. If we do, the DeploymentActorTest
// fails.
scheduler.stopRunSpec(runnableSpec)
Done
}
def restartRunnable(run: RunSpec, status: DeploymentStatus): Future[Done] = {
if (run.instances == 0) {
Future.successful(Done)
} else {
val promise = Promise[Unit]()
context.actorOf(childSupervisor(TaskReplaceActor.props(deploymentManager, status, killService,
launchQueue, instanceTracker, eventBus, readinessCheckExecutor, run, promise), s"TaskReplace-${plan.id}"))
promise.future.map(_ => Done)
}
}
}
object DeploymentActor {
case object NextStep
case object Finished
case class Cancel(reason: Throwable)
case class Fail(reason: Throwable)
case class DeploymentActionInfo(plan: DeploymentPlan, step: DeploymentStep, action: DeploymentAction)
@SuppressWarnings(Array("MaxParameters"))
def props(
deploymentManager: ActorRef,
promise: Promise[Done],
killService: KillService,
scheduler: SchedulerActions,
plan: DeploymentPlan,
taskTracker: InstanceTracker,
launchQueue: LaunchQueue,
healthCheckManager: HealthCheckManager,
eventBus: EventStream,
readinessCheckExecutor: ReadinessCheckExecutor): Props = {
Props(new DeploymentActor(
deploymentManager,
promise,
killService,
scheduler,
plan,
taskTracker,
launchQueue,
healthCheckManager,
eventBus,
readinessCheckExecutor
))
}
}
| Caerostris/marathon | src/main/scala/mesosphere/marathon/core/deployment/impl/DeploymentActor.scala | Scala | apache-2.0 | 9,504 |
package otherstuff
import akka.actor.ActorSystem
import akka.actor.Actor
import akka.actor.ActorLogging
import java.time.LocalDateTime
import akka.actor.Props
import scala.concurrent.duration._
class TickActor extends Actor with ActorLogging {
def receive: Actor.Receive = {
case "Tick" =>
log.debug(s"Received a tick at ${LocalDateTime.now}")
// log.info(s"Received a tick at ${LocalDateTime.now}")
// log.warning(s"Received a tick at ${LocalDateTime.now}")
// log.error(s"Received a tick at ${LocalDateTime.now}")
}
}
object ActorLoggingTest extends App {
val system = ActorSystem("ActorLoggingTest")
import system.dispatcher
val tickRef = system.actorOf(Props[TickActor])
system.scheduler.schedule(0.seconds, 1.second)(tickRef ! "Tick")
Thread.sleep(30000)
system.terminate
}
| jthompson-hiya/akka-streams-sandbox | src/main/scala/otherstuff/ActorLoggingTest.scala | Scala | mit | 828 |
package coursier.cli.publish.params
import java.nio.file.{Files, Path, Paths}
import cats.data.{Validated, ValidatedNel}
import cats.implicits._
import coursier.publish.dir.Dir
import coursier.cli.publish.options.DirectoryOptions
import coursier.publish.sbt.Sbt
final case class DirectoryParams(
directories: Seq[Path],
sbtDirectories: Seq[Path]
)
object DirectoryParams {
def apply(options: DirectoryOptions, args: Seq[String]): ValidatedNel[String, DirectoryParams] = {
val dirsV = options.dir.traverse { d =>
val dir0 = Paths.get(d)
if (Files.exists(dir0))
if (Files.isDirectory(dir0))
Validated.validNel(dir0)
else
Validated.invalidNel(s"$d not a directory")
else
Validated.invalidNel(s"$d not found")
}
val sbtDirsV = ((if (options.sbt) List(".") else Nil) ::: options.sbtDir).traverse { d =>
val dir0 = Paths.get(d)
if (Files.exists(dir0))
if (Files.isDirectory(dir0)) {
val buildProps = dir0.resolve("project/build.properties")
if (Files.exists(buildProps))
Validated.validNel(dir0)
else
Validated.invalidNel(s"project/build.properties not found under sbt directory $d")
}
else
Validated.invalidNel(s"$d not a directory")
else
Validated.invalidNel(s"$d not found")
}
val extraV = args
.toList
.traverse { a =>
val p = Paths.get(a)
if (Sbt.isSbtProject(p))
Validated.validNel((None, Some(p)))
else if (Dir.isRepository(p))
Validated.validNel((Some(p), None))
else
Validated.invalidNel(s"$a is neither an sbt project or a local repository")
}
(dirsV, sbtDirsV, extraV).mapN {
case (dirs, sbtDirs, extra) =>
DirectoryParams(
dirs ++ extra.flatMap(_._1),
sbtDirs ++ extra.flatMap(_._2)
)
}
}
}
| coursier/coursier | modules/cli/src/main/scala/coursier/cli/publish/params/DirectoryParams.scala | Scala | apache-2.0 | 1,939 |
package piecewise.utils
import java.nio.file._
import piecewise._
/**
* export splines to gnuplot
*/
object Gnuplot {
implicit val spline2Gnuplot: Spline[PieceFunction] => (Option[String], Option[String]) =
(get: Spline[PieceFunction]) => {
val defNote = {
val alphabet = 'a' to 'z'
alphabet.map(ch => ch + "(x)")
}
val changeable =
(get.sources zip defNote).map{t => {
val ((((lower, upper)), func), note) = t
val definition = s"${note}=${func.toString.replace("^", "**").replace(",", ".")}"
val interval = f"[${lower}%.10f:${upper}%.10f]".replace(",", ".")
val plot = interval + " " + note + " ls 1"
(definition, plot)
}
}
if(changeable.isEmpty) (None, None)
else{
val definitions = changeable.map(_._1).reduce(_ + System.lineSeparator() + _)
val plots = "plot sample " + changeable.map(_._2).reduce(_ + ", " + _)
(Some(definitions), Some(plots))
}}
implicit class Changeable[T](val get: T) extends AnyVal{
def apply(implicit fn: T => (Option[String], Option[String])): (Option[String], Option[String]) = {
fn(get)
}
}
abstract class LabelOptions(val whereIs: Char) {
val label: Option[String]
val scale: Option[Double]
val format: Option[String]
private def lab: Option[String] = label.map(l => s"set ${whereIs}label "+"\\""+s"${l}"+"\\"")
private def sc: Option[String] = scale.map(s => s"set ${whereIs}tics scale ${scale.map(a => f"$a%.2f".replace(",","."))}")
private def frmt: Option[String] = format.map(f => s"set ${whereIs}tics format " + "\\"" + f + "\\"")
def all: Option[String] = {
val res = lab ++ sc ++ frmt
if(res.isEmpty) None
else Some(res.reduce(_ + System.lineSeparator() + _))
}
}
case class XLabel(label: Option[String], scale: Option[Double], format: Option[String]) extends LabelOptions('x')
case class YLabel(label: Option[String], scale: Option[Double], format: Option[String]) extends LabelOptions('y')
case class GPBuilder[T](
val source: T,
val output: Path,
val size: (Double, Double),
val xLabel: XLabel = XLabel(None, None, None),
val yLabel: YLabel = YLabel(None, None, None),
val decSep: Option[Char] = None,
val encoding: String = "UTF-8"
)(implicit f: T => (Option[String], Option[String])){
def xlabel(lab: String): GPBuilder[T] = this.copy(xLabel = this.xLabel.copy(label = Some(lab)))
def ylabel(lab: String): GPBuilder[T] = this.copy(yLabel = this.yLabel.copy(label= Some(lab)))
def xylabels(xLab: String, yLab: String) = this.copy(xLabel = this.xLabel.copy(label = Some(xLab)),
yLabel = this.yLabel.copy(label = Some(yLab)))
def decimalsign(ds: Char): GPBuilder[T] = this.copy(decSep = Some(ds))
private def decimalSign: Option[String] = decSep.map{ds => s"set decimalsign \\'${ds}\\'"}
def run = {
val dir = Some(s"cd \\'${output.getParent.toString}\\'")
val out = Some(s"set output "+"\\""+s"${output.getFileName}"+"\\"")
val key = Some("unset key")
val lineStyle = Some("set linestyle 1 lw 0.5 lc -1 pt 1 ps 4")
val (definitions, plots) = new Changeable(source).apply
val terminal = Some(s"set terminal pdfcairo size ${size._1}cm, ${size._2}cm font" +
"\\"" + "Times-New-Roman,12" + "\\"" + " linewidth 1.0 fontscale 0.5")
val exit = Some("unset output")
val plottable = {dir :: decimalSign :: xLabel.all :: yLabel.all :: key ::
lineStyle :: definitions :: terminal :: out :: plots :: exit :: Nil}
.collect{
case Some(value) => value
}
if(Files.notExists(output.getParent)) Files.createDirectories(output.getParent)
val fName = output.getFileName.toString
val gpFileName = fName.take(fName.indexOf('.', fName.size - 5)) + ".gp"
val gp = output.getParent.resolve(gpFileName)
if(Files.notExists(gp)) Files.createFile(gp)
import java.nio.charset._
val writer = Files.newBufferedWriter(
gp, Charset.forName("UTF-8"),
StandardOpenOption.TRUNCATE_EXISTING
)
try{
writer.write(plottable.reduce(_ + System.lineSeparator() + _))
}
finally writer.close()
import scala.sys.process._
Process({"gnuplot " + "\\"" + gpFileName + "\\""} :: Nil, output.getParent.toFile).run()
}
}
def apply[T](data: T, path: Path, size: (Double, Double), encoding: String = "UTF-8")(
implicit f: T => (Option[String], Option[String])): GPBuilder[T] = {
new GPBuilder(data, path, size, encoding = encoding)
}
object Spline{
import java.nio.file._
def apply(spl: Spline[PieceFunction],
xLabel: String, yLabel: String,
path: Path, size: (Double, Double),
encoding: String = "UTF-8"
) = {
val defNote = {
val alphabet = 'a' to 'z'
alphabet.map(ch => ch + "(x)")
}
val changeable =
(spl.sources zip defNote).map{t => {
val ((((lower, upper)), func), note) = t
val definition = s"${note}=${func.toString.replace("^", "**").replace(",", ".")}"
val interval = f"[${lower}%.10f:${upper}%.10f]".replace(",", ".")
val plot = interval + " " + note + " ls 1"
(definition, plot)
}
}
val dir = s"cd \\'${path.getParent.toString}\\'"
val decimalSign = "set decimalsign \\',\\'"
val xLab = s"set xlabel "+"\\""+s"${xLabel}"+"\\"" +
System.lineSeparator() +
"set xtics scale 0.5" +
System.lineSeparator() +
"set xtics format "+"\\""+"%g"+"\\""
val yLab = s"set ylabel "+"\\""+s"${yLabel}"+"\\"" +
System.lineSeparator() +
"set ytics scale 0.5" +
System.lineSeparator() +
"set xtics format " + "\\"" + "%g" + "\\""
val key = "unset key"
val lineStyle = "set linestyle 1 lw 0.5 lc -1 pt 1 ps 4"
val definitions = changeable.map(_._1).reduce(_ + System.lineSeparator() + _)
val terminal = s"set terminal pdfcairo size ${size._1}cm, ${size._2}cm font" +
"\\"" + "Times-New-Roman,12" + "\\"" + " linewidth 1.0 fontscale 0.5"
val output = s"set output "+"\\""+s"${path.getFileName}"+"\\""
val plots = "plot sample " + changeable.map(_._2).reduce(_ + ", " + _)
val exit = "unset output"
val plottable = dir :: decimalSign :: xLab :: yLab :: key ::
lineStyle :: definitions :: terminal :: output :: plots :: exit :: Nil
if(Files.notExists(path.getParent)) Files.createDirectories(path.getParent)
val fName = path.getFileName.toString
val gpFileName = fName.take(fName.indexOf('.', fName.size - 5)) + ".gp"
val gp = path.getParent.resolve(gpFileName)
if(Files.notExists(gp)) Files.createFile(gp)
import java.nio.charset._
val writer = Files.newBufferedWriter(gp, Charset.forName(encoding),
StandardOpenOption.TRUNCATE_EXISTING)
try{
writer.write(plottable.reduce(_ + System.lineSeparator() + _))
}
finally writer.close()
import scala.sys.process._
Process({"gnuplot " + "\\"" + gpFileName + "\\""} :: Nil, path.getParent.toFile).run()
}
}
}
| daniil-timofeev/gridsplines | piecewise/src/main/scala/piecewise/utils/Gnuplot.scala | Scala | apache-2.0 | 7,418 |
//
// License: https://github.com/cadeworks/cade/blob/master/LICENSE
//
package works.cade
case class ImageReference(
registry: String,
vendor: Option[String],
name: String
) {
val reference: String = if (vendor.isDefined) {
s"${vendor.get}/$name"
} else {
s"$name"
}
}
object ImageReference {
def apply(str: String): ImageReference = {
val parts = str.split('/')
parts.length match {
case 1 => ImageReference("registry.hub.docker.com", None, parts(0))
case 2 => ImageReference("registry.hub.docker.com", Some(parts(0)), parts(1))
case _ => ImageReference(parts(0), Some(parts(1)), parts(2))
}
}
}
| webintrinsics/clusterlite | src/main/scala/works/cade/ImageReference.scala | Scala | mit | 709 |
package org.clulab.embeddings
import java.io.PrintWriter
import org.clulab.processors.clu.tokenizer.EnglishLemmatizer
import org.clulab.struct.Counter
import scala.collection.mutable
/**
* Generates embeddings for lemmas, by averaging GloVe embeddings for words that have the same lemma
* The averaging of embedding vectors is weighted by the frequency of the corresponding words in Gigaword
*
* @author Mihai
*/
class LemmatizeEmbeddings(val frequencyFile:String,
                          val embeddingFile:String) {

  /** Lower-cased word -> corpus frequency, loaded eagerly at construction. */
  val frequencies = loadFreqFile()
  /** Word -> GloVe embedding vector, loaded eagerly at construction. */
  val wordEmbeddings = loadEmbeddings()

  /** Loads the frequency file: one "word frequency" pair per whitespace-separated line. */
  def loadFreqFile(): Map[String, Double] = {
    val f = new mutable.HashMap[String, Double]()
    val source = io.Source.fromFile(frequencyFile)
    try {
      for(line <- source.getLines()) {
        // split on whitespace (was "\\\\s+", which matches a literal backslash + "s")
        val toks = line.split("\\s+")
        assert(toks.length == 2)
        val word = toks(0)
        val freq = toks(1).toDouble
        f += word -> freq
      }
    } finally {
      source.close() // don't leak the file handle
    }
    println(s"Loaded frequencies for ${f.keySet.size} words.")
    f.toMap
  }

  /** Loads the embedding file: each line holds a word followed by its vector components. */
  def loadEmbeddings(): Map[String, Array[Double]] = {
    val e = new mutable.HashMap[String, Array[Double]]()
    val source = io.Source.fromFile(embeddingFile)
    try {
      for(line <- source.getLines()) {
        val toks = line.split("\\s+")
        assert(toks.length > 2)
        val word = toks(0)
        val vector = new Array[Double](toks.length - 1)
        for(i <- 1 until toks.length) {
          vector(i - 1) = toks(i).toDouble
        }
        e += word -> vector
      }
    } finally {
      source.close() // don't leak the file handle
    }
    println(s"Loaded embeddings for ${e.keySet.size} words.")
    e.toMap
  }

  /**
    * Produces one embedding per lemma: the frequency-weighted average of the embeddings
    * of all words that lemmatize to it. Words with no known frequency are pooled under
    * the UNK token with weight 1.
    */
  def lemmatize(): Map[String, Array[Double]] = {
    val lemmatizer = new EnglishLemmatizer
    val ne = new mutable.HashMap[String, Array[Double]]()
    val totalWeights = new Counter[String]()
    var totalUnk = 0
    for(word <- wordEmbeddings.keySet) {
      val lemma = lemmatizer.lemmatizeWord(word)
      val lowerCaseWord = word.toLowerCase()
      val vector = wordEmbeddings(word)

      // known word
      if(frequencies.contains(lowerCaseWord)) { // our counts are for lower case words...
        val weight = frequencies(lowerCaseWord)
        multiply(vector, weight) // in place, but this is Ok: we only see each vector once
        totalWeights.incrementCount(lemma, weight)
        add(ne, lemma, vector)
      }
      // unknown word in Gigaword => add vector to the UNK token
      else {
        totalUnk += 1 // BUGFIX: this counter was reported below but never incremented
        totalWeights.incrementCount(SanitizedWordEmbeddingMap.UNK)
        add(ne, SanitizedWordEmbeddingMap.UNK, vector)
      }
    }

    // normalize each accumulated vector by the total weight seen for its lemma
    for(lemma <- ne.keySet) {
      val totalWeight = totalWeights.getCount(lemma)
      val vector = ne(lemma)
      divide(vector, totalWeight)
    }

    println(s"Processed ${wordEmbeddings.keySet.size} words, and found $totalUnk unknown words.")
    ne.toMap
  }

  /** Multiplies `v` by scalar `s`, in place. */
  def multiply(v:Array[Double], s:Double): Unit = {
    for(i <- v.indices) {
      v(i) *= s
    }
  }

  /** Divides `v` by scalar `s`, in place. */
  def divide(v:Array[Double], s:Double): Unit = {
    for(i <- v.indices) {
      v(i) /= s
    }
  }

  /** Adds `v` element-wise into the vector stored for `lemma`, copying `v` on first insert. */
  def add(e: mutable.HashMap[String, Array[Double]], lemma:String, v:Array[Double]): Unit = {
    e.get(lemma) match {
      case Some(ev) =>
        assert(ev.length == v.length)
        for(i <- ev.indices) {
          ev(i) += v(i)
        }
      case None =>
        // defensive copy so later in-place updates don't alias the caller's array
        e += lemma -> v.clone()
    }
  }
}
/**
  * Command-line entry point. Args: (0) word-frequency file, (1) embedding file.
  * Writes the lemma embeddings to "&lt;embedding file&gt;_lemmas", one lemma per line
  * followed by its vector components.
  */
object LemmatizeEmbeddings {
  def main(args: Array[String]): Unit = {
    val freqFile = args(0)
    val embedFile = args(1)
    val outputFile = embedFile + "_lemmas"

    val le = new LemmatizeEmbeddings(freqFile, embedFile)
    val lemmaEmbeddings = le.lemmatize()

    val pw = new PrintWriter(outputFile)
    try {
      for(lemma <- lemmaEmbeddings.keySet) {
        pw.print(lemma)
        val v = lemmaEmbeddings(lemma)
        for(i <- v.indices) {
          pw.print(" " + v(i))
        }
        pw.println()
      }
    } finally {
      pw.close() // close (and flush) even if writing fails midway
    }
  }
}
| sistanlp/processors | main/src/main/scala/org/clulab/embeddings/LemmatizeEmbeddings.scala | Scala | apache-2.0 | 3,972 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.stream.table
import org.apache.flink.api.scala._
import org.apache.flink.table.runtime.utils.JavaUserDefinedAggFunctions.WeightedAvgWithRetract
import org.apache.flink.table.api.scala._
import org.apache.flink.table.expressions.utils.Func1
import org.apache.flink.table.api.{Over, Table}
import org.apache.flink.table.utils.TableTestUtil._
import org.apache.flink.table.utils.{StreamTableTestUtil, TableTestBase}
import org.junit.Test
/** Table API plan tests for OVER windows on a streaming table: each test builds a
  * windowed aggregation and verifies the translated DataStream plan string
  * (partitioned/non-partitioned x proctime/rowtime x bounded/unbounded x ROWS/RANGE). */
class OverWindowTest extends TableTestBase {
  private val streamUtil: StreamTableTestUtil = streamTestUtil()
  // Source table with both a processing-time and an event-time attribute.
  val table: Table = streamUtil.addTable[(Int, String, Long)]("MyTable",
    'a, 'b, 'c, 'proctime.proctime, 'rowtime.rowtime)

  // Scalar functions applied on top of over-window aggregates (UDF, exp, +, concat, array).
  @Test
  def testScalarFunctionsOnOverWindow() = {
    val weightedAvg = new WeightedAvgWithRetract
    val plusOne = Func1
    val result = table
      .window(Over partitionBy 'b orderBy 'proctime preceding UNBOUNDED_ROW as 'w)
      .select(
        plusOne('a.sum over 'w as 'wsum) as 'd,
        ('a.count over 'w).exp(),
        (weightedAvg('c, 'a) over 'w) + 1,
        "AVG:".toExpr + (weightedAvg('c, 'a) over 'w),
        array(weightedAvg('c, 'a) over 'w, 'a.count over 'w))
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            // RexSimplify didn't simplify "CAST(1):BIGINT NOT NULL", see [CALCITE-2862]
            term("select", "a", "b", "c", "proctime", "1 AS $4")
          ),
          term("partitionBy", "b"),
          term("orderBy", "proctime"),
          term("rows", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
          term("select", "a", "b", "c", "proctime", "$4",
            "SUM(a) AS w0$o0",
            "COUNT(a) AS w0$o1",
            "WeightedAvgWithRetract(c, a) AS w0$o2")
        ),
        term("select",
          s"Func1$$(w0$$o0) AS d",
          "EXP(CAST(w0$o1)) AS _c1",
          "+(w0$o2, $4) AS _c2",
          "||('AVG:', CAST(w0$o2)) AS _c3",
          "ARRAY(w0$o2, w0$o1) AS _c4")
      )
    streamUtil.verifyTable(result, expected)
  }

  // proctime, partitioned, ROWS between 2 preceding and current row.
  @Test
  def testProcTimeBoundedPartitionedRowsOver() = {
    val weightedAvg = new WeightedAvgWithRetract
    val result = table
      .window(Over partitionBy 'b orderBy 'proctime preceding 2.rows following CURRENT_ROW as 'w)
      .select('c, weightedAvg('c, 'a) over 'w)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "b", "c", "proctime")
          ),
          term("partitionBy", "b"),
          term("orderBy", "proctime"),
          term("rows", "BETWEEN 2 PRECEDING AND CURRENT ROW"),
          term("select", "a", "b", "c", "proctime", "WeightedAvgWithRetract(c, a) AS w0$o0")
        ),
        term("select", "c", "w0$o0 AS _c1")
      )
    streamUtil.verifyTable(result, expected)
  }

  // proctime, partitioned, RANGE 2 hours preceding (7200000 ms in the plan).
  @Test
  def testProcTimeBoundedPartitionedRangeOver() = {
    val weightedAvg = new WeightedAvgWithRetract
    val result = table
      .window(
        Over partitionBy 'a orderBy 'proctime preceding 2.hours following CURRENT_RANGE as 'w)
      .select('a, weightedAvg('c, 'a) over 'w as 'myAvg)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "c", "proctime")
          ),
          term("partitionBy", "a"),
          term("orderBy", "proctime"),
          term("range", "BETWEEN 7200000 PRECEDING AND CURRENT ROW"),
          term(
            "select",
            "a",
            "c",
            "proctime",
            "WeightedAvgWithRetract(c, a) AS w0$o0"
          )
        ),
        term("select", "a", "w0$o0 AS myAvg")
      )
    streamUtil.verifyTable(result, expected)
  }

  // proctime, non-partitioned, RANGE 10 seconds preceding (10000 ms in the plan).
  @Test
  def testProcTimeBoundedNonPartitionedRangeOver() = {
    val result = table
      .window(Over orderBy 'proctime preceding 10.second as 'w)
      .select('a, 'c.count over 'w)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "c", "proctime")
          ),
          term("orderBy", "proctime"),
          term("range", "BETWEEN 10000 PRECEDING AND CURRENT ROW"),
          term("select", "a", "c", "proctime", "COUNT(c) AS w0$o0")
        ),
        term("select", "a", "w0$o0 AS _c1")
      )
    streamUtil.verifyTable(result, expected)
  }

  // proctime, non-partitioned, ROWS 2 preceding.
  @Test
  def testProcTimeBoundedNonPartitionedRowsOver() = {
    val result = table
      .window(Over orderBy 'proctime preceding 2.rows as 'w)
      .select('c, 'a.count over 'w)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "c", "proctime")
          ),
          term("orderBy", "proctime"),
          term("rows", "BETWEEN 2 PRECEDING AND CURRENT ROW"),
          term("select", "a", "c", "proctime", "COUNT(a) AS w0$o0")
        ),
        term("select", "c", "w0$o0 AS _c1")
      )
    streamUtil.verifyTable(result, expected)
  }

  // proctime, partitioned, UNBOUNDED RANGE; also checks the short form (no preceding) is equivalent.
  @Test
  def testProcTimeUnboundedPartitionedRangeOver() = {
    val weightedAvg = new WeightedAvgWithRetract
    val result = table
      .window(Over partitionBy 'c orderBy 'proctime preceding UNBOUNDED_RANGE as 'w)
      .select('a, 'c, 'a.count over 'w, weightedAvg('c, 'a) over 'w)
    val result2 = table
      .window(Over partitionBy 'c orderBy 'proctime as 'w)
      .select('a, 'c, 'a.count over 'w, weightedAvg('c, 'a) over 'w)
    streamUtil.verify2Tables(result, result2)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "c", "proctime")
          ),
          term("partitionBy", "c"),
          term("orderBy", "proctime"),
          term("range", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
          term(
            "select",
            "a",
            "c",
            "proctime",
            "COUNT(a) AS w0$o0",
            "WeightedAvgWithRetract(c, a) AS w0$o1"
          )
        ),
        term(
          "select",
          "a",
          "c",
          "w0$o0 AS _c2",
          "w0$o1 AS _c3"
        )
      )
    streamUtil.verifyTable(result, expected)
  }

  // proctime, partitioned, UNBOUNDED ROWS.
  @Test
  def testProcTimeUnboundedPartitionedRowsOver() = {
    val weightedAvg = new WeightedAvgWithRetract
    val result = table
      .window(
        Over partitionBy 'c orderBy 'proctime preceding UNBOUNDED_ROW following CURRENT_ROW as 'w)
      .select('c, 'a.count over 'w, weightedAvg('c, 'a) over 'w)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "c", "proctime")
          ),
          term("partitionBy", "c"),
          term("orderBy", "proctime"),
          term("rows", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
          term("select", "a", "c", "proctime",
            "COUNT(a) AS w0$o0",
            "WeightedAvgWithRetract(c, a) AS w0$o1")
        ),
        term("select", "c", "w0$o0 AS _c1", "w0$o1 AS _c2")
      )
    streamUtil.verifyTable(result, expected)
  }

  // proctime, non-partitioned, UNBOUNDED RANGE.
  @Test
  def testProcTimeUnboundedNonPartitionedRangeOver() = {
    val result = table
      .window(
        Over orderBy 'proctime preceding UNBOUNDED_RANGE as 'w)
      .select('a, 'c, 'a.count over 'w, 'a.sum over 'w)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "c", "proctime")
          ),
          term("orderBy", "proctime"),
          term("range", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
          term(
            "select",
            "a",
            "c",
            "proctime",
            "COUNT(a) AS w0$o0",
            "SUM(a) AS w0$o1"
          )
        ),
        term(
          "select",
          "a",
          "c",
          "w0$o0 AS _c2",
          "w0$o1 AS _c3"
        )
      )
    streamUtil.verifyTable(result, expected)
  }

  // proctime, non-partitioned, UNBOUNDED ROWS.
  @Test
  def testProcTimeUnboundedNonPartitionedRowsOver() = {
    val result = table
      .window(Over orderBy 'proctime preceding UNBOUNDED_ROW as 'w)
      .select('c, 'a.count over 'w)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "c", "proctime")
          ),
          term("orderBy", "proctime"),
          term("rows", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
          term("select", "a", "c", "proctime", "COUNT(a) AS w0$o0")
        ),
        term("select", "c", "w0$o0 AS _c1")
      )
    streamUtil.verifyTable(result, expected)
  }

  // rowtime, partitioned, ROWS 2 preceding.
  @Test
  def testRowTimeBoundedPartitionedRowsOver() = {
    val weightedAvg = new WeightedAvgWithRetract
    val result = table
      .window(
        Over partitionBy 'b orderBy 'rowtime preceding 2.rows following CURRENT_ROW as 'w)
      .select('c, 'b.count over 'w, weightedAvg('c, 'a) over 'w as 'wAvg)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "b", "c", "rowtime")
          ),
          term("partitionBy", "b"),
          term("orderBy", "rowtime"),
          term("rows", "BETWEEN 2 PRECEDING AND CURRENT ROW"),
          term("select", "a", "b", "c", "rowtime",
            "COUNT(b) AS w0$o0",
            "WeightedAvgWithRetract(c, a) AS w0$o1")
        ),
        term("select", "c", "w0$o0 AS _c1", "w0$o1 AS wAvg")
      )
    streamUtil.verifyTable(result, expected)
  }

  // rowtime, partitioned, RANGE 2 hours preceding.
  @Test
  def testRowTimeBoundedPartitionedRangeOver() = {
    val weightedAvg = new WeightedAvgWithRetract
    val result = table
      .window(
        Over partitionBy 'a orderBy 'rowtime preceding 2.hours following CURRENT_RANGE as 'w)
      .select('a, 'c.avg over 'w, weightedAvg('c, 'a) over 'w as 'wAvg)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "c", "rowtime")
          ),
          term("partitionBy", "a"),
          term("orderBy", "rowtime"),
          term("range", "BETWEEN 7200000 PRECEDING AND CURRENT ROW"),
          term(
            "select",
            "a",
            "c",
            "rowtime",
            "AVG(c) AS w0$o0",
            "WeightedAvgWithRetract(c, a) AS w0$o1"
          )
        ),
        term("select", "a", "w0$o0 AS _c1", "w0$o1 AS wAvg")
      )
    streamUtil.verifyTable(result, expected)
  }

  // rowtime, non-partitioned, RANGE 10 seconds preceding.
  @Test
  def testRowTimeBoundedNonPartitionedRangeOver() = {
    val result = table
      .window(Over orderBy 'rowtime preceding 10.second as 'w)
      .select('a, 'c.count over 'w)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "c", "rowtime")
          ),
          term("orderBy", "rowtime"),
          term("range", "BETWEEN 10000 PRECEDING AND CURRENT ROW"),
          term("select", "a", "c", "rowtime", "COUNT(c) AS w0$o0")
        ),
        term("select", "a", "w0$o0 AS _c1")
      )
    streamUtil.verifyTable(result, expected)
  }

  // rowtime, non-partitioned, ROWS 2 preceding.
  @Test
  def testRowTimeBoundedNonPartitionedRowsOver() = {
    val result = table
      .window(Over orderBy 'rowtime preceding 2.rows as 'w)
      .select('c, 'a.count over 'w)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "c", "rowtime")
          ),
          term("orderBy", "rowtime"),
          term("rows", "BETWEEN 2 PRECEDING AND CURRENT ROW"),
          term("select", "a", "c", "rowtime", "COUNT(a) AS w0$o0")
        ),
        term("select", "c", "w0$o0 AS _c1")
      )
    streamUtil.verifyTable(result, expected)
  }

  // rowtime, partitioned, UNBOUNDED RANGE; also checks equivalence with the short form.
  @Test
  def testRowTimeUnboundedPartitionedRangeOver() = {
    val weightedAvg = new WeightedAvgWithRetract
    val result = table
      .window(Over partitionBy 'c orderBy 'rowtime preceding UNBOUNDED_RANGE following
         CURRENT_RANGE as 'w)
      .select('a, 'c, 'a.count over 'w, weightedAvg('c, 'a) over 'w as 'wAvg)
    val result2 = table
      .window(Over partitionBy 'c orderBy 'rowtime as 'w)
      .select('a, 'c, 'a.count over 'w, weightedAvg('c, 'a) over 'w as 'wAvg)
    streamUtil.verify2Tables(result, result2)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "c", "rowtime")
          ),
          term("partitionBy", "c"),
          term("orderBy", "rowtime"),
          term("range", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
          term(
            "select",
            "a",
            "c",
            "rowtime",
            "COUNT(a) AS w0$o0",
            "WeightedAvgWithRetract(c, a) AS w0$o1"
          )
        ),
        term(
          "select",
          "a",
          "c",
          "w0$o0 AS _c2",
          "w0$o1 AS wAvg"
        )
      )
    streamUtil.verifyTable(result, expected)
  }

  // rowtime, partitioned, UNBOUNDED ROWS.
  @Test
  def testRowTimeUnboundedPartitionedRowsOver() = {
    val weightedAvg = new WeightedAvgWithRetract
    val result = table
      .window(Over partitionBy 'c orderBy 'rowtime preceding UNBOUNDED_ROW following
         CURRENT_ROW as 'w)
      .select('c, 'a.count over 'w, weightedAvg('c, 'a) over 'w as 'wAvg)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "c", "rowtime")
          ),
          term("partitionBy", "c"),
          term("orderBy", "rowtime"),
          term("rows", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
          term("select", "a", "c", "rowtime",
            "COUNT(a) AS w0$o0",
            "WeightedAvgWithRetract(c, a) AS w0$o1")
        ),
        term("select", "c", "w0$o0 AS _c1", "w0$o1 AS wAvg")
      )
    streamUtil.verifyTable(result, expected)
  }

  // rowtime, non-partitioned, UNBOUNDED RANGE.
  @Test
  def testRowTimeUnboundedNonPartitionedRangeOver() = {
    val result = table
      .window(
        Over orderBy 'rowtime preceding UNBOUNDED_RANGE as 'w)
      .select('a, 'c, 'a.count over 'w, 'a.sum over 'w)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "c", "rowtime")
          ),
          term("orderBy", "rowtime"),
          term("range", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
          term(
            "select",
            "a",
            "c",
            "rowtime",
            "COUNT(a) AS w0$o0",
            "SUM(a) AS w0$o1"
          )
        ),
        term(
          "select",
          "a",
          "c",
          "w0$o0 AS _c2",
          "w0$o1 AS _c3"
        )
      )
    streamUtil.verifyTable(result, expected)
  }

  // rowtime, non-partitioned, UNBOUNDED ROWS.
  @Test
  def testRowTimeUnboundedNonPartitionedRowsOver() = {
    val result = table
      .window(Over orderBy 'rowtime preceding UNBOUNDED_ROW as 'w)
      .select('c, 'a.count over 'w)
    val expected =
      unaryNode(
        "DataStreamCalc",
        unaryNode(
          "DataStreamOverAggregate",
          unaryNode(
            "DataStreamCalc",
            streamTableNode(0),
            term("select", "a", "c", "rowtime")
          ),
          term("orderBy", "rowtime"),
          term("rows", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
          term("select", "a", "c", "rowtime", "COUNT(a) AS w0$o0")
        ),
        term("select", "c", "w0$o0 AS _c1")
      )
    streamUtil.verifyTable(result, expected)
  }
}
| ueshin/apache-flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/stream/table/OverWindowTest.scala | Scala | apache-2.0 | 17,859 |
package org.skycastle.util.grid
import org.skycastle.util.Vec3i
import com.jme3.math.Vector3f
/**
 * A 3D position within a grid with some specified gridsize.
 */
case class GridPos(gridSize: GridSize, pos: Vec3i) extends GridBounds(gridSize, pos, pos + Vec3i.ONES) {

  /**
   * True if this pos is inside the specified cell, or equal to it.
   */
  def isInside(other: GridPos): Boolean =
    (other.gridSize <= gridSize) && other.contains(center)

  /**
   * Grid in larger grid that contains this grid cell
   */
  def parentPos: GridPos = GridPos(gridSize.largerSize, pos / 2)

  /**
   * Grid in smaller grid that is at bottom left near corner (or whichever corner is closer to origo)
   */
  def childPos: GridPos = GridPos(gridSize.smallerSize, pos * 2)

  /**
   * The grids in the next more detailed grid that are contained in this grid cell.
   */
  def childPositions: List[GridPos] = {
    val childSize = gridSize.smallerSize
    val base = pos * 2
    for (offset <- Vec3i.ONE_PERMUTATIONS) yield GridPos(childSize, base + offset)
  }

  /**
   * The grid pos that this position is in at the specified grid size.
   */
  def ancestorPos(rootSize: GridSize): GridPos =
    if (rootSize < gridSize) throw new UnsupportedOperationException("Ancestor pos not supported for smaller sizes than own size.")
    else if (rootSize == gridSize) this
    else parentPos.ancestorPos(rootSize)

  /**
   * The base position of this grid pos with the specified grid size.
   */
  def toGrid(targetSize: GridSize): GridPos =
    if (targetSize == gridSize) this
    else if (targetSize < gridSize) childPos.toGrid(targetSize)
    else parentPos.toGrid(targetSize)
}
| zzorn/skycastle | src/main/scala/org/skycastle/util/grid/GridPos.scala | Scala | gpl-2.0 | 1,716 |
//
// Scaled Scala Compiler - a front-end for Zinc used by Scaled's Scala support
// http://github.com/scaled/scala-compiler/blob/master/LICENSE
package scaled.zinc
import java.io.{File, PrintWriter, StringWriter}
import java.util.{Map => JMap, HashMap}
import scala.collection.mutable.ArrayBuffer
import scaled.prococol.{Receiver, Sender}
import sbt.util.{Logger, Level}
import xsbti.{CompileFailed, Position, Problem, Severity, Reporter}
/** Handles prococol messages from a Scaled client: `compile` requests (with optional
  * incremental compilation backed by cached Zinc compiler setups) and `status` queries. */
class Server (sender :Sender) extends Receiver.Listener {
  import scala.collection.JavaConverters._

  /** Scala compiler version used when the client supplies none. */
  val defScalacVersion = "2.12.4"
  /** sbt version used when the client supplies none. */
  val defSbtVersion = "0.13.5"

  // Cached compiler setups keyed by session id, so incremental compiles reuse analysis.
  // TODO: cap these at one or two in memory
  val setups = new HashMap[String, Zinc.CompilerSetup]()

  private def send (msg :String, args :Map[String, String]) = sender.send(msg, args.asJava)
  private def sendLog (msg :String) = send("log", Map("msg" -> msg))

  /** Dispatches an incoming prococol message to the matching handler. */
  def onMessage (command :String, data :JMap[String,String]) = command match {
    case "compile" => compile(data)
    case "status" => send("status", Map("text" -> status))
    case _ => send("error", Map("cause" -> s"Unknown command: $command"))
  }

  /** Renders a human readable summary of all cached compiler setups. */
  private def status :String = {
    val sw = new StringWriter ; val out = new PrintWriter(sw)
    out.println("Zinc daemon status:")
    setups.entrySet.asScala foreach { entry =>
      val id = entry.getKey ; val setup = entry.getValue
      out.println(s"* $id:")
      out.println(" Last analysis:")
      out.println(s" - ${setup.lastAnalysis}")
      out.println(" Last compiled:")
      setup.lastCompiledUnits.foreach { path => out.println(s" - $path") }
    }
    if (setups.isEmpty) out.println("No cached compiler setups.")
    sw.toString
  }

  /** Runs a (possibly incremental) compile described by `data` and reports the outcome
    * to the client as a `compile` message with result success/failure. */
  private def compile (data :JMap[String,String]) {
    // Reads `key` from the request, applying `fn` to its value, or yields `defval` when absent.
    def get[T] (key :String, defval :T, fn :String => T) = data.get(key) match {
      case null => defval
      case text => fn(text)
    }
    // Splits a tab-separated list; an empty string yields an empty array (not Array("")).
    def untabsep (text :String) = if (text == "") Array[String]() else text.split("\\t")

    val cwd = get("cwd", null, new File(_))
    val sessionId = get("sessionid", "<none>", s => s)
    val target = get("target", null, new File(_))
    val output = get("output", null, new File(_))
    val classpath = get("classpath", Array[File](), untabsep(_).map(new File(_)))
    val javacOpts = get("jcopts", Array[String](), untabsep(_))
    val scalacOpts = get("scopts", Array[String](), untabsep(_))
    val scalacVersion = get("scvers", defScalacVersion, s => s)
    val sbtVersion = get("sbtvers", defSbtVersion, s => s)
    val incremental = get("increment", false, _.toBoolean)
    val logTrace = get("trace", false, _.toBoolean)

    // Forwards compiler log output to the client; traces only when requested.
    val logger = new Logger {
      def trace (t : =>Throwable) :Unit = if (logTrace) sendLog(exToString(t))
      def log (level :Level.Value, message : =>String) :Unit =
        if (logTrace || level >= Level.Info) sendLog(s"scalac ($level): $message")
      def success (message : =>String) :Unit = sendLog(s"scalac: $message")
    }
    // Collects compiler problems and forwards each one to the client as it arrives.
    val reporter = new Reporter {
      var _problems = ArrayBuffer[Problem]()
      def reset () :Unit = _problems.clear()
      def hasWarnings = _problems.exists(_.severity == Severity.Warn)
      def hasErrors = _problems.exists(_.severity == Severity.Error)
      def problems :Array[Problem] = _problems.toArray
      def log (problem :Problem) {
        sendLog(s"P $problem")
        _problems += problem
      }
      def printSummary () {}
      def comment (pos :Position, msg :String) :Unit = sendLog(s"Reporter.comment $pos $msg")
    }

    // BUGFIX: use untabsep so an empty "sources" value yields no files instead of a single
    // File("") entry (matches how classpath/jcopts/scopts are parsed above).
    val sources = get("sources", Array[File](), untabsep(_).map(new File(_)))
    val sourceFiles = expand(sources, ArrayBuffer[File]()).toArray

    try {
      def newSetup = {
        val newSetup = Zinc.CompilerSetup(logger, reporter, target, scalacVersion)
        setups.put(sessionId, newSetup)
        newSetup
      }
      // Reuse the cached setup for incremental sessions; otherwise build a fresh one.
      val setup = if (!incremental) newSetup else setups.get(sessionId) match {
        case null => newSetup
        // TODO: validate config still matches (not likely, but if scalaVersion or target directory
        // somehow changed, we'd want to reset)
        case setup => setup
      }
      val options = setup.mkOptions(classpath, sourceFiles, output, scalacOpts, javacOpts)
      val result = setup.doCompile(options, reporter)
      if (logTrace) sendLog(s"Compile result: $result")
      send("compile", Map("result" -> "success"))

    } catch {
      case f :CompileFailed =>
        // problems were already reported individually via the reporter
        send("compile", Map("result" -> "failure"))
      case e :Exception =>
        sendLog(exToString(e))
        send("compile", Map("result" -> "failure"))
    }
  }

  /** Returns `ex`'s stack trace rendered as a string. */
  private def exToString (ex :Throwable) = {
    val sw = new java.io.StringWriter()
    ex.printStackTrace(new java.io.PrintWriter(sw))
    sw.toString
  }

  /** Recursively expands directories in `sources`, collecting .scala and .java files into `into`. */
  private def expand (sources :Array[File], into :ArrayBuffer[File]) :ArrayBuffer[File] = {
    sources foreach { s =>
      if (s.isDirectory) expand(s.listFiles, into)
      else {
        val name = s.getName
        if ((name endsWith ".scala") || (name endsWith ".java")) into += s
      }
    }
    into
  }
}
| scaled/scala-compiler | src/scala/scaled/zinc/Server.scala | Scala | bsd-3-clause | 5,078 |
package breeze.linalg.support
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
*
* This trait is for multi-dimensional tensors that can logically have one of their
* dimensions "collapsed", e.g. summing out all columns of a matrix to give a column
* vector.
*
* @author dlwh
* @tparam From the tensor being collapsed
* @tparam Axis which axis is being collapsed. Usually a subtype of [[breeze.linalg.Axis.Value]]
* @tparam ColType the type of the "column" (or row or...) being collapsed.
*/
trait CanIterateAxis[From, Axis, ColType] {
  /** Returns an iterator over the columns (or rows, etc.) of `from` along the given axis.
    * NOTE(review): the type parameter `A` appears unused by the signature — presumably
    * retained for source/binary compatibility; confirm before removing. */
  def apply[A](from: From, axis: Axis):Iterator[ColType]
}
| claydonkey/breeze | math/src/main/scala/breeze/linalg/support/CanIterateAxis.scala | Scala | apache-2.0 | 1,122 |
/*
* #%L
* Scalaz Instances for Shapeless
* %%
* Copyright (C) 2012 Instancez
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package org.instancez.shapeless
import scalaz.Monoid
import scalaz.syntax.monoid._
import shapeless.{ HList, Iso }
trait CaseClassInstances {

  /** Derives a `Monoid` for a case class `C` by mapping it through its `HList`
    * representation `L` and delegating to the monoid for `L`: zero is the image of
    * `L`'s zero, and append combines the `HList` forms before mapping back. */
  implicit def caseClassMonoid[C, L <: HList](implicit iso: Iso[C, L], ml: Monoid[L]): Monoid[C] =
    new Monoid[C] {
      val zero: C = iso from ml.zero
      def append(a: C, b: => C): C = iso from (iso.to(a) |+| iso.to(b))
    }
}
| travisbrown/instancez | shapeless/src/main/scala/org/instancez/shapeless/cc.scala | Scala | apache-2.0 | 1,022 |
/*
* Copyright 2012-2013 Stephane Godbillon (@sgodbillon) and Zenexity
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package reactivemongo.core.commands
import reactivemongo.bson._
import DefaultBSONHandlers._
import reactivemongo.core.protocol.Response
/** Drop a database. */
class DropDatabase() extends Command[Boolean] {
  // The command document sent to the server: { dropDatabase: 1 }
  def makeDocuments =
    BSONDocument("dropDatabase" -> BSONInteger(1))

  /** Maps the server reply to `Right(true)` on success or a `CommandError` on failure. */
  object ResultMaker extends BSONCommandResultMaker[Boolean] {
    def apply(doc: BSONDocument) = {
      // checkOk yields Some(error) on failure and None on success, hence toLeft(true)
      CommandError.checkOk(doc, Some("dropDatabase")).toLeft(true)
    }
  }
}
/*
* Licensed to The Apereo Foundation under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* The Apereo Foundation licenses this file to you under the Apache License,
* Version 2.0, (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tle.core.db.dao
import java.time.Instant
import java.util.UUID
import cats.data.OptionT
import com.tle.core.db._
import com.tle.core.db.tables.OEQEntity
import com.tle.core.db.types.{DbUUID, LocaleStrings, UserId}
import fs2.Stream
import io.circe.Json
import io.doolse.simpledba.jdbc._
import io.doolse.simpledba.{Iso, WriteOp}
import io.doolse.simpledba.syntax._
/** Type class tying a domain entity type `A` to its generic database row form. */
trait EntityDBExt[A] {
  /** Bidirectional mapping between the generic `OEQEntity` row and the domain type `A`. */
  def iso: Iso[OEQEntity, A]
  /** Discriminator value stored in the `typeid` column for rows representing `A`. */
  def typeId: String
}
/** Data-access helpers for the generic entity table, specialised to a domain
  * type `A` via an implicit [[EntityDBExt]] instance. */
object EntityDB {

  // Pre-built queries for the entity table, shared by all operations below.
  val queries = DBSchema.queries.entityQueries

  /** Builds a fresh (not yet persisted) row for the current institution and user,
    * stamped with the type id from the `EntityDBExt` instance and `now` timestamps. */
  def newEntity[A](uuid: UUID)(implicit ee: EntityDBExt[A]): DB[OEQEntity] =
    withContext(
      uc =>
        OEQEntity(
          uuid = DbUUID(uuid),
          inst_id = uc.inst,
          typeid = ee.typeId,
          "",
          LocaleStrings.empty,
          None,
          LocaleStrings.empty,
          UserId(uc.user.getUserBean.getUniqueID),
          Instant.now(),
          Instant.now(),
          Json.obj()
        )
    )

  /** Streams every entity of type `A` belonging to the current institution. */
  def readAll[A](implicit ee: EntityDBExt[A]): Stream[DB, A] =
    dbStream { uc =>
      queries.allByType(uc.inst, ee.typeId).map(ee.iso.to)
    }

  /** Deletes the entity with the given id; the delete is flushed immediately. */
  def delete(uuid: UUID): Stream[DB, Unit] = {
    dbStream { uc =>
      for {
        oeq <- queries.byId(uc.inst, DbUUID(uuid))
        _ <- queries.write.delete(oeq).flush
      } yield ()
    }
  }

  /** Reads one entity by id; empty when no matching row exists. */
  def readOne[A](uuid: UUID)(implicit ee: EntityDBExt[A]): OptionT[DB, A] =
    OptionT {
      dbStream { uc =>
        queries.byId(uc.inst, DbUUID(uuid)).map(ee.iso.to)
      }.compile.last
    }

  /** Produces the write op updating `original` to the row form of `editedData`. */
  def update[A](original: OEQEntity, editedData: A)(
      implicit ee: EntityDBExt[A]
  ): Stream[JDBCIO, WriteOp] = {
    queries.write.update(original, ee.iso.from(editedData))
  }

  /** Produces the write op inserting a brand new entity. */
  def create[A](newEntity: A)(implicit ee: EntityDBExt[A]): Stream[JDBCIO, WriteOp] = {
    queries.write.insert(ee.iso.from(newEntity))
  }
}
| equella/Equella | Source/Plugins/Core/com.equella.serverbase/scalasrc/com/tle/core/db/dao/EntityDB.scala | Scala | apache-2.0 | 2,657 |
package org.jetbrains.plugins.scala.lang
package parser
package scala3
import com.intellij.lang.Language
import org.jetbrains.plugins.scala.Scala3Language
/** Base trait for parser tests that must run under the Scala 3 dialect. */
trait SimpleScala3ParserTestBase extends SimpleScalaParserTestBase {
  // Forces the shared parser-test infrastructure to parse with the Scala 3 language level.
  override protected def language: Language = Scala3Language.INSTANCE
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/parser/scala3/SimpleScala3ParserTestBase.scala | Scala | apache-2.0 | 298 |
/**
* Copyright (C) 2014 TU Berlin (peel@dima.tu-berlin.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.peelframework.core.beans.system
import java.nio.file.{Files, Paths}
import com.samskivert.mustache.Mustache
import com.typesafe.config.ConfigFactory
import org.peelframework.core.beans.system.Lifespan.Lifespan
import org.peelframework.core.beans.experiment.Experiment.Run
import org.peelframework.core.config.{Configurable, SystemConfig}
import org.peelframework.core.graph.Node
import org.peelframework.core.util.{Version, shell}
import org.slf4j.LoggerFactory
import org.springframework.beans.factory.BeanNameAware
/** This class represents a System in the Peel framework.
*
* Most nodes in the Peel dependency-graph are systems. A [[System]] can specify it's dependencies which are then set
* up and torn down automatically, according to their [[Lifespan]] values.
*
* @param name The name of this bean. Deafults to the system name (e.g. "Flink")
* @param version Version of the system (e.g. "7.1")
* @param configKey The system configuration resides under `system.\${configKey}`.
* @param lifespan `Lifespan` of the system
* @param dependencies Set of dependencies that this system needs
* @param mc The moustache compiler to compile the templates that are used to generate property files for the system
*/
abstract class System(
val name : String,
val version : String,
val configKey : String,
val lifespan : Lifespan,
val dependencies : Set[System],
val mc : Mustache.Compiler) extends Node with Configurable with BeanNameAware {
import scala.language.postfixOps
  final val logger = LoggerFactory.getLogger(this.getClass)

  // Effective system configuration; populated by the framework before setUp is invoked.
  override var config = ConfigFactory.empty()

  // PROVIDED systems are managed externally, so they count as up from the start.
  var isUp = lifespan == Lifespan.PROVIDED

  /** The name of this bean. Defaults to the system name. */
  var beanName = name
  /** Creates a complete system installation with updated configuration and starts the system. */
  def setUp(): Unit = {
    if (isRunning) {
      if (isUp)
        logger.info(s"System '$toString' is already up and running")
      else
        logger.warn(s"System '$toString' appears to be running, but is not marked as 'up'. Shut down manually '$toString' or set its bean lifecycle to PROVIDED.")
    } else {
      logger.info(s"Starting system '$toString'")
      // materialize the installation directory from the configured archive on first use
      if (!Files.exists(Paths.get(config.getString(s"system.$configKey.path.home")))) {
        materializeHome()
      }
      // render/refresh configuration files before starting the system
      configuration().update()
      start()
      logger.info(s"System '$toString' is now up and running")
    }
  }
  /** Cleans up and shuts down the system. */
  def tearDown(): Unit = {
    if (!isRunning) {
      logger.info(s"System '$toString' is already down")
    } else {
      logger.info(s"Tearing down system '$toString'")
      stop()
      // block until the system process has actually exited
      awaitShutdown()
    }
  }
  /** Restarts the system if the system configuration has changed. */
  def update(): Unit = {
    val c = configuration()
    if (!c.hasChanged) {
      logger.info(s"System configuration of '$toString' did not change")
    } else {
      // a configuration change only takes effect after a full restart
      logger.info(s"System configuration of '$toString' changed, restarting...")
      tearDown()
      setUp()
    }
  }
  /** Executed before each experiment run that depends on this system. */
  def beforeRun(run: Run[System]): Unit = {
    // no-op by default; concrete systems override to prepare per-run state
  }
  /** Executed after each experiment run that depends on this system. */
  def afterRun(run: Run[System]): Unit = {
    // no-op by default; concrete systems override to collect per-run artifacts
  }
  /** Waits until the system is shut down (blocking). */
  private def awaitShutdown(): Unit = {
    var maxAttempts = config.getInt("system.default.stop.max.attempts")
    val wait = config.getInt("system.default.stop.polling.interval")
    while (isRunning) {
      // wait a bit
      Thread.sleep(wait)
      // give up once the polling budget is exhausted
      if (maxAttempts <= 0) {
        throw new RuntimeException(s"Unable to shut down system '$toString' in time (waited ${config.getInt("system.default.stop.max.attempts") * wait} ms).")
      }
      maxAttempts = maxAttempts - 1
    }
    logger.info(s"Shut down system '$toString'.")
  }
/** Bean name setter (`BeanNameAware` contract).
 *
 * @param n The configured bean name
 */
override def setBeanName(n: String) = beanName = n
/** Alias of the bean name.
 *
 * @return name of the bean
 */
override def toString: String = beanName
// ---------------------------------------------------
// Helper methods.
// ---------------------------------------------------
/** Returns an instance of the system configuration using the current `config`. */
protected def configuration(): SystemConfig
/** Starts up the system and polls to check whether everything is up.
 *
 * @throws SetUpTimeoutException If the system was not brought up after {startup.pollingCounter} times {startup.pollingInterval} milliseconds.
 */
protected def start(): Unit
/** Stops the system. */
protected def stop(): Unit
/** Checks whether a process for this system is already running.
 *
 * This is different from the value of `isUp`, as a system can be running, but not yet up and operational (i.e. if
 * not all worker nodes of a distributed system have connected).
 *
 * @return True if a system process for this system exists.
 */
def isRunning: Boolean
/** Materializes the system home from an archive.
 *
 * Depends on the following system parameters:
 *
 *  - `system.$configKey.path.archive.url` - A URL where the system binary archive can be found online (optional).
 *  - `system.$configKey.path.archive.md5` - The md5 sum of the system binary archive.
 *  - `system.$configKey.path.archive.src` - The path where the system binary archive should be stored locally.
 *  - `system.$configKey.path.archive.dst` - The path where the system binary archive should be extracted.
 */
private def materializeHome() = {
  val md5 = BigInt(config.getString(s"system.$configKey.path.archive.md5"), 16)
  val src = config.getString(s"system.$configKey.path.archive.src")
  val dst = config.getString(s"system.$configKey.path.archive.dst")

  if (Files.exists(Paths.get(src))) {
    // Archive already present locally: just verify its checksum.
    logger.info(s"Validating archive '$src' (md5: '$md5')")
    shell.checkMD5(src, md5)
  } else if (config.hasPath(s"system.$configKey.path.archive.url")) {
    // Archive missing: lazy-load it from the configured URL.
    val url = config.getString(s"system.$configKey.path.archive.url")
    logger.info(s"Downloading archive '$src' from '$url' (md5: '$md5')")
    shell.download(url, src, md5)
  } else {
    throw new RuntimeException(s"Cannot lazy-load archive for system '$configKey'. Please set an 'archive.url' configuration value.")
  }

  logger.info(s"Extracting archive '$src' to '$dst'")
  shell.extract(src, dst)

  val home = config.getString(s"system.$configKey.path.home")
  val user = config.getString(s"system.$configKey.user")
  val group = config.getString(s"system.$configKey.group")
  logger.info(s"Changing owner of '$home' to $user:$group")
  shell ! "chown -R %s:%s %s".format(user, group, home)
}
/** Returns the template path closest to the given system and version.
 *
 * Tries the version prefixes in order and falls back to the unversioned base
 * template when no versioned template resource exists on the classpath.
 */
protected def templatePath(path: String) = {
  def versionedPath(prefix: String) = s"/templates/$configKey/$prefix/$path.mustache"
  def existsOnClasspath(resource: String) = this.getClass.getResource(resource) != null

  Version(version).prefixes
    .find(p => existsOnClasspath(versionedPath(p)))
    .fold(s"/templates/$configKey/$path.mustache")(versionedPath)
}
} | carabolic/peel | peel-core/src/main/scala/org/peelframework/core/beans/system/System.scala | Scala | apache-2.0 | 8,325 |
package io.expressier
import java.util.regex.Pattern
import scala.reflect.api.Universe
/**
 * Pattern parsing functionality.
 */
trait PatternParser {

  /** A single capture produced by parsing a pattern.
   *
   * @param name      the optional name of the capturing group
   * @param tpe       the target type of the captured value
   * @param converter transforms the tree for a captured value into a tree of type `tpe`
   */
  case class ResultItem[U <: Universe](
    name: Option[String],
    tpe: U#Type,
    converter: U#Tree => U#Tree
  )

  /** Parses the given pattern into one [[ResultItem]] per capture, if the pattern is supported. */
  def parsePattern(u: Universe)(p: Pattern): Option[List[ResultItem[u.type]]]

  /** A capture kept as a plain `String`; the converter is the identity. */
  def stringResult(u: Universe)(name: Option[String]): ResultItem[u.type] = {
    import u._
    ResultItem[u.type](name, typeOf[String], identity)
  }

  /** A capture converted to `Int` via `toInt`. */
  def integerResult(u: Universe)(name: Option[String]): ResultItem[u.type] = {
    import u._
    ResultItem[u.type](name, typeOf[Int], tree => q"$tree.toInt")
  }

  /** A capture converted to `Char` by taking the head of the matched text. */
  def characterResult(u: Universe)(name: Option[String]): ResultItem[u.type] = {
    import u._
    ResultItem[u.type](name, typeOf[Char], tree => q"$tree.head")
  }
}
| travisbrown/expressier | core/shared/src/main/scala/io/expressier/PatternParser.scala | Scala | apache-2.0 | 823 |
package org.sisioh.aws4s.s3
import com.amazonaws.services.s3.model.BucketLifecycleConfiguration.{ NoncurrentVersionTransition, Rule, Transition }
import com.amazonaws.services.s3.model._
import com.amazonaws.services.s3.{ AmazonS3Client, AmazonS3EncryptionClient, UploadObjectObserver }
import org.sisioh.aws4s.s3.model._
/** Import target bringing every S3 enrichment into scope: `import Implicits._`. */
object Implicits extends Implicits
/** Enrichments for the S3 client and upload-observer classes, plus all model
 * enrichments inherited from [[ModelImplicits]].
 */
trait Implicits extends ModelImplicits {
implicit def richAmazonS3Client(underlying: AmazonS3Client): RichAmazonS3Client =
new RichAmazonS3Client(underlying)
implicit def richAmazonS3EncryptionClient(underlying: AmazonS3EncryptionClient): RichAmazonS3EncryptionClient =
new RichAmazonS3EncryptionClient(underlying)
implicit def richUploadObjectObserver(underlying: UploadObjectObserver): RichUploadObjectObserver =
new RichUploadObjectObserver(underlying)
}
/** Implicit enrichments ("Rich" wrappers) for the AWS S3 model classes.
 *
 * Each conversion wraps an AWS SDK model value in its corresponding `Rich*`
 * decorator; the full set becomes available via `import Implicits._`.
 */
trait ModelImplicits {
implicit def richAbortMultipartUploadRequest(
underlying: AbortMultipartUploadRequest
): RichAbortMultipartUploadRequest =
new RichAbortMultipartUploadRequest(underlying)
implicit def richAbstractPutObjectRequest(underlying: AbstractPutObjectRequest): RichAbstractPutObjectRequest =
new RichAbstractPutObjectRequest(underlying)
implicit def richBucket(underlying: Bucket): RichBucket =
new RichBucket(underlying)
implicit def richBucketCrossOriginConfiguration(
underlying: BucketCrossOriginConfiguration
): RichBucketCrossOriginConfiguration =
new RichBucketCrossOriginConfiguration(underlying)
implicit def richBucketLifecycleConfiguration(
underlying: BucketLifecycleConfiguration
): RichBucketLifecycleConfiguration =
new RichBucketLifecycleConfiguration(underlying)
implicit def richBucketLoggingConfiguration(underlying: BucketLoggingConfiguration): RichBucketLoggingConfiguration =
new RichBucketLoggingConfiguration(underlying)
implicit def richBucketNotificationConfiguration(
underlying: BucketNotificationConfiguration
): RichBucketNotificationConfiguration =
new RichBucketNotificationConfiguration(underlying)
implicit def richBucketPolicy(underlying: BucketPolicy): RichBucketPolicy =
new RichBucketPolicy(underlying)
implicit def richBucketTaggingConfiguration(underlying: BucketTaggingConfiguration): RichBucketTaggingConfiguration =
new RichBucketTaggingConfiguration(underlying)
implicit def richBucketVersioningConfiguration(
underlying: BucketVersioningConfiguration
): RichBucketVersioningConfiguration =
new RichBucketVersioningConfiguration(underlying)
implicit def richBucketWebsiteConfiguration(underlying: BucketWebsiteConfiguration): RichBucketWebsiteConfiguration =
new RichBucketWebsiteConfiguration(underlying)
implicit def richCORSRule(underlying: CORSRule): RichCORSRule =
new RichCORSRule(underlying)
implicit def richCanonicalGrantee(underlying: CanonicalGrantee): RichCanonicalGrantee =
new RichCanonicalGrantee(underlying)
implicit def richCompleteMultipartUploadRequest(
underlying: CompleteMultipartUploadRequest
): RichCompleteMultipartUploadRequest =
new RichCompleteMultipartUploadRequest(underlying)
implicit def richCompleteMultipartUploadResult(
underlying: CompleteMultipartUploadResult
): RichCompleteMultipartUploadResult =
new RichCompleteMultipartUploadResult(underlying)
implicit def richCopyObjectRequest(underlying: CopyObjectRequest): RichCopyObjectRequest =
new RichCopyObjectRequest(underlying)
implicit def richCopyObjectResult(underlying: CopyObjectResult): RichCopyObjectResult =
new RichCopyObjectResult(underlying)
implicit def richCopyPartRequest(underlying: CopyPartRequest): RichCopyPartRequest =
new RichCopyPartRequest(underlying)
implicit def richCopyPartResult(underlying: CopyPartResult): RichCopyPartResult =
new RichCopyPartResult(underlying)
implicit def richCreateBucketRequest(underlying: CreateBucketRequest): RichCreateBucketRequest =
new RichCreateBucketRequest(underlying)
implicit def richCryptoConfiguration(underlying: CryptoConfiguration): RichCryptoConfiguration =
new RichCryptoConfiguration(underlying)
implicit def richDeleteBucketPolicyRequest(underlying: DeleteBucketPolicyRequest): RichDeleteBucketPolicyRequest =
new RichDeleteBucketPolicyRequest(underlying)
implicit def richDeleteBucketRequest(underlying: DeleteBucketRequest): RichDeleteBucketRequest =
new RichDeleteBucketRequest(underlying)
implicit def richDeleteObjectRequest(underlying: DeleteObjectRequest): RichDeleteObjectRequest =
new RichDeleteObjectRequest(underlying)
implicit def richDeleteObjectsRequest(underlying: DeleteObjectsRequest): RichDeleteObjectsRequest =
new RichDeleteObjectsRequest(underlying)
implicit def richDeleteObjectsResult(underlying: DeleteObjectsResult): RichDeleteObjectsResult =
new RichDeleteObjectsResult(underlying)
implicit def richDeleteVersionRequest(underlying: DeleteVersionRequest): RichDeleteVersionRequest =
new RichDeleteVersionRequest(underlying)
implicit def richEmailAddressGrantee(underlying: EmailAddressGrantee): RichEmailAddressGrantee =
new RichEmailAddressGrantee(underlying)
implicit def richEncryptedGetObjectRequest(underlying: EncryptedGetObjectRequest): RichEncryptedGetObjectRequest =
new RichEncryptedGetObjectRequest(underlying)
implicit def richEncryptedInitiateMultipartUploadRequest(
underlying: EncryptedInitiateMultipartUploadRequest
): RichEncryptedInitiateMultipartUploadRequest =
new RichEncryptedInitiateMultipartUploadRequest(underlying)
implicit def richEncryptedPutObjectRequest(underlying: EncryptedPutObjectRequest): RichEncryptedPutObjectRequest =
new RichEncryptedPutObjectRequest(underlying)
implicit def richEncryptionMaterials(underlying: EncryptionMaterials): RichEncryptionMaterials =
new RichEncryptionMaterials(underlying)
implicit def richExtraMaterialsDescription(underlying: ExtraMaterialsDescription): RichExtraMaterialsDescription =
new RichExtraMaterialsDescription(underlying)
implicit def richGeneratePresignedUrlRequest(
underlying: GeneratePresignedUrlRequest
): RichGeneratePresignedUrlRequest =
new RichGeneratePresignedUrlRequest(underlying)
implicit def richGenericBucketRequest(underlying: GenericBucketRequest): RichGenericBucketRequest =
new RichGenericBucketRequest(underlying)
implicit def richGetBucketAclRequest(underlying: GetBucketAclRequest): RichGetBucketAclRequest =
new RichGetBucketAclRequest(underlying)
implicit def richGetBucketLocationRequest(underlying: GetBucketLocationRequest): RichGetBucketLocationRequest =
new RichGetBucketLocationRequest(underlying)
implicit def richGetBucketPolicyRequest(underlying: GetBucketPolicyRequest): RichGetBucketPolicyRequest =
new RichGetBucketPolicyRequest(underlying)
implicit def richGetBucketWebsiteConfigurationRequest(
underlying: GetBucketWebsiteConfigurationRequest
): RichGetBucketWebsiteConfigurationRequest =
new RichGetBucketWebsiteConfigurationRequest(underlying)
implicit def richGetObjectMetadataRequest(underlying: GetObjectMetadataRequest): RichGetObjectMetadataRequest =
new RichGetObjectMetadataRequest(underlying)
implicit def richGetObjectRequest(underlying: GetObjectRequest): RichGetObjectRequest =
new RichGetObjectRequest(underlying)
implicit def richGetRequestPaymentConfigurationRequest(
underlying: GetRequestPaymentConfigurationRequest
): RichGetRequestPaymentConfigurationRequest =
new RichGetRequestPaymentConfigurationRequest(underlying)
implicit def richGrant(underlying: Grant): RichGrant =
new RichGrant(underlying)
implicit def richHeadBucketRequest(underlying: HeadBucketRequest): RichHeadBucketRequest =
new RichHeadBucketRequest(underlying)
implicit def richInitiateMultipartUploadRequest(
underlying: InitiateMultipartUploadRequest
): RichInitiateMultipartUploadRequest =
new RichInitiateMultipartUploadRequest(underlying)
implicit def richInitiateMultipartUploadResult(
underlying: InitiateMultipartUploadResult
): RichInitiateMultipartUploadResult =
new RichInitiateMultipartUploadResult(underlying)
implicit def richListMultipartUploadsRequest(
underlying: ListMultipartUploadsRequest
): RichListMultipartUploadsRequest =
new RichListMultipartUploadsRequest(underlying)
implicit def richListObjectsRequest(underlying: ListObjectsRequest): RichListObjectsRequest =
new RichListObjectsRequest(underlying)
implicit def richListPartsRequest(underlying: ListPartsRequest): RichListPartsRequest =
new RichListPartsRequest(underlying)
implicit def richListVersionsRequest(underlying: ListVersionsRequest): RichListVersionsRequest =
new RichListVersionsRequest(underlying)
implicit def richMultiFactorAuthentication(underlying: MultiFactorAuthentication): RichMultiFactorAuthentication =
new RichMultiFactorAuthentication(underlying)
implicit def richMultipartUpload(underlying: MultipartUpload): RichMultipartUpload =
new RichMultipartUpload(underlying)
implicit def richMultipartUploadListing(underlying: MultipartUploadListing): RichMultipartUploadListing =
new RichMultipartUploadListing(underlying)
implicit def richNonCurrentVersionTransition(
underlying: NoncurrentVersionTransition
): RichNonCurrentVersionTransition =
new RichNonCurrentVersionTransition(underlying)
implicit def richNotificationConfiguration(underlying: NotificationConfiguration): RichNotificationConfiguration =
new RichNotificationConfiguration(underlying)
implicit def richObjectListing(underlying: ObjectListing): RichObjectListing =
new RichObjectListing(underlying)
implicit def richObjectMetadata(underlying: ObjectMetadata): RichObjectMetadata =
new RichObjectMetadata(underlying)
implicit def richOwner(underlying: Owner): RichOwner =
new RichOwner(underlying)
implicit def richPartETag(underlying: PartETag): RichPartETag =
new RichPartETag(underlying)
implicit def richPartListing(underlying: PartListing): RichPartListing =
new RichPartListing(underlying)
implicit def richPartSummary(underlying: PartSummary): RichPartSummary =
new RichPartSummary(underlying)
implicit def richPutInstructionFileRequest(underlying: PutInstructionFileRequest): RichPutInstructionFileRequest =
new RichPutInstructionFileRequest(underlying)
implicit def richPutObjectRequest(underlying: PutObjectRequest): RichPutObjectRequest =
new RichPutObjectRequest(underlying)
implicit def richPutObjectResult(underlying: PutObjectResult): RichPutObjectResult =
new RichPutObjectResult(underlying)
implicit def richQueueConfiguration(underlying: QueueConfiguration): RichQueueConfiguration =
new RichQueueConfiguration(underlying)
implicit def richRedirectRule(underlying: RedirectRule): RichRedirectRule =
new RichRedirectRule(underlying)
implicit def richRequestPaymentConfiguration(
underlying: RequestPaymentConfiguration
): RichRequestPaymentConfiguration =
new RichRequestPaymentConfiguration(underlying)
implicit def richResponseHeaderOverrides(underlying: ResponseHeaderOverrides): RichResponseHeaderOverrides =
new RichResponseHeaderOverrides(underlying)
implicit def richRestoreObjectRequest(underlying: RestoreObjectRequest): RichRestoreObjectRequest =
new RichRestoreObjectRequest(underlying)
implicit def richRoutingRule(underlying: RoutingRule): RichRoutingRule =
new RichRoutingRule(underlying)
implicit def richRoutingRuleCondition(underlying: RoutingRuleCondition): RichRoutingRuleCondition =
new RichRoutingRuleCondition(underlying)
implicit def richRule(underlying: Rule): RichRule = new RichRule(underlying)
implicit def richS3Object(underlying: S3Object): RichS3Object =
new RichS3Object(underlying)
implicit def richS3ObjectId(underlying: S3ObjectId): RichS3ObjectId =
new RichS3ObjectId(underlying)
implicit def richS3ObjectIdBuilder(underlying: S3ObjectIdBuilder): RichS3ObjectIdBuilder =
new RichS3ObjectIdBuilder(underlying)
implicit def richS3ObjectInputStream(underlying: S3ObjectInputStream): RichS3ObjectInputStream =
new RichS3ObjectInputStream(underlying)
implicit def richS3ObjectSummary(underlying: S3ObjectSummary): RichS3ObjectSummary =
new RichS3ObjectSummary(underlying)
implicit def richS3VersionSummary(underlying: S3VersionSummary): RichS3VersionSummary =
new RichS3VersionSummary(underlying)
implicit def richSSEAwsKeyManagementParams(underlying: SSEAwsKeyManagementParams): RichSSEAwsKeyManagementParams =
new RichSSEAwsKeyManagementParams(underlying)
implicit def richSSECustomerKey(underlying: SSECustomerKey): RichSSECustomerKey =
new RichSSECustomerKey(underlying)
implicit def richSetBucketAclRequest(underlying: SetBucketAclRequest): RichSetBucketAclRequest =
new RichSetBucketAclRequest(underlying)
implicit def richSetBucketCrossOriginConfigurationRequest(
underlying: SetBucketCrossOriginConfigurationRequest
): RichSetBucketCrossOriginConfigurationRequest =
new RichSetBucketCrossOriginConfigurationRequest(underlying)
implicit def richSetBucketLifecycleConfigurationRequest(
underlying: SetBucketLifecycleConfigurationRequest
): RichSetBucketLifecycleConfigurationRequest =
new RichSetBucketLifecycleConfigurationRequest(underlying)
implicit def richSetBucketLoggingConfigurationRequest(
underlying: SetBucketLoggingConfigurationRequest
): RichSetBucketLoggingConfigurationRequest =
new RichSetBucketLoggingConfigurationRequest(underlying)
implicit def richSetBucketNotificationConfigurationRequest(
underlying: SetBucketNotificationConfigurationRequest
): RichSetBucketNotificationConfigurationRequest =
new RichSetBucketNotificationConfigurationRequest(underlying)
implicit def richSetBucketPolicyRequest(underlying: SetBucketPolicyRequest): RichSetBucketPolicyRequest =
new RichSetBucketPolicyRequest(underlying)
implicit def richSetBucketTaggingConfigurationRequest(
underlying: SetBucketTaggingConfigurationRequest
): RichSetBucketTaggingConfigurationRequest =
new RichSetBucketTaggingConfigurationRequest(underlying)
implicit def richSetBucketVersioningConfigurationRequest(
underlying: SetBucketVersioningConfigurationRequest
): RichSetBucketVersioningConfigurationRequest =
new RichSetBucketVersioningConfigurationRequest(underlying)
implicit def richSetBucketWebsiteConfigurationRequest(
underlying: SetBucketWebsiteConfigurationRequest
): RichSetBucketWebsiteConfigurationRequest =
new RichSetBucketWebsiteConfigurationRequest(underlying)
implicit def richSetObjectAclRequest(underlying: SetObjectAclRequest): RichSetObjectAclRequest =
new RichSetObjectAclRequest(underlying)
implicit def richSetRequestPaymentConfigurationRequest(
underlying: SetRequestPaymentConfigurationRequest
): RichSetRequestPaymentConfigurationRequest =
new RichSetRequestPaymentConfigurationRequest(underlying)
implicit def richSimpleMaterialProvider(underlying: SimpleMaterialProvider): RichSimpleMaterialProvider =
new RichSimpleMaterialProvider(underlying)
implicit def richStaticEncryptionMaterialsProvider(
underlying: StaticEncryptionMaterialsProvider
): RichStaticEncryptionMaterialsProvider =
new RichStaticEncryptionMaterialsProvider(underlying)
implicit def richTagSet(underlying: TagSet): RichTagSet =
new RichTagSet(underlying)
implicit def richTopicConfiguration(underlying: TopicConfiguration): RichTopicConfiguration =
new RichTopicConfiguration(underlying)
implicit def richTransition(underlying: Transition): RichTransition =
new RichTransition(underlying)
implicit def richUploadObjectRequest(underlying: UploadObjectRequest): RichUploadObjectRequest =
new RichUploadObjectRequest(underlying)
implicit def richUploadPartRequest(underlying: UploadPartRequest): RichUploadPartRequest =
new RichUploadPartRequest(underlying)
implicit def richUploadPartResult(underlying: UploadPartResult): RichUploadPartResult =
new RichUploadPartResult(underlying)
implicit def richVersionListing(underlying: VersionListing): RichVersionListing =
new RichVersionListing(underlying)
implicit def richWebsiteConfiguration(underlying: WebsiteConfiguration): RichWebsiteConfiguration =
new RichWebsiteConfiguration(underlying)
}
| sisioh/aws4s | aws4s-s3/src/main/scala/org/sisioh/aws4s/s3/Implicits.scala | Scala | mit | 16,529 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.functions.ScalarFunction
import org.apache.flink.table.functions.python.{PythonEnv, PythonFunction}
import org.apache.flink.table.planner.expressions.utils.{Func1, RichFunc1}
import org.apache.flink.table.planner.utils.TableTestBase
import org.junit.Test
/**
 * Test for [[org.apache.flink.table.planner.codegen.ExpressionReducer]].
 */
class ExpressionReductionRulesTest extends TableTestBase {
private val util = batchTestUtil()
util.addTableSource[(Int, Long, Int)]("MyTable", 'a, 'b, 'c)
@Test
def testExpressionReductionWithUDF(): Unit = {
util.addFunction("MyUdf", Func1)
util.verifyPlan("SELECT MyUdf(1) FROM MyTable")
}
@Test
def testExpressionReductionWithRichUDF(): Unit = {
util.addFunction("MyUdf", new RichFunc1)
// Job parameter read by RichFunc1 — presumably consumed in its open(); TODO confirm.
util.getTableEnv.getConfig.addJobParameter("int.value", "10")
util.verifyPlan("SELECT myUdf(1) FROM MyTable")
}
@Test
def testExpressionReductionWithRichUDFAndInvalidOpen(): Unit = {
util.addFunction("MyUdf", new RichFunc1)
// FunctionContext.getCachedFile will fail during expression reduction
// it will be executed during runtime though
util.getTableEnv.getConfig.addJobParameter("fail-for-cached-file", "true")
util.verifyPlan("SELECT myUdf(1 + 1) FROM MyTable")
}
@Test
def testExpressionReductionWithPythonUDF(): Unit = {
// Mixes a Python UDF with a regular scalar UDF in the same projection.
util.addFunction("PyUdf", DeterministicPythonFunc)
util.addFunction("MyUdf", Func1)
util.verifyPlan("SELECT PyUdf(), MyUdf(1) FROM MyTable")
}
}
/** Stub Python scalar function used by the plan tests above; always evaluates to 1. */
@SerialVersionUID(1L)
object DeterministicPythonFunc extends ScalarFunction with PythonFunction {
def eval(): Long = 1
// Serialized payload and Python environment are irrelevant for plan-only tests.
override def getSerializedPythonFunction: Array[Byte] = null
override def getPythonEnv: PythonEnv = null
}
| darionyaphet/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/rules/logical/ExpressionReductionRulesTest.scala | Scala | apache-2.0 | 2,692 |
package scodec
package codecs
import scodec.bits._
import shapeless._
/** Tests for the HList-based codec combinators
 * (`::`, `:+`, `:::`, `:~>:`, `flatPrepend`, `dropUnits`, `derive`, `consume`, `polyxmap`).
 */
class HListCodecTest extends CodecSuite {
// Fixture case classes used as targets for `.as[...]` conversions.
case class Foo(x: Int, y: Int, s: String)
case class Bar(x: Int)
case class Baz(a: Int, b: Int, c: Int, d: Int)
case class Flags(x: Boolean, y: Boolean, z: Boolean)
"HList codec support" should {
"support construction via :: operator" in {
roundtripAll((uint8 :: uint8 :: utf8), Seq(1 :: 2 :: "test" :: HNil))
}
"support conversion HList codec to a case class codec via as method" in {
roundtripAll((uint8 :: uint8 :: utf8).as[Foo], Seq(Foo(1, 2, "test")))
}
"support conversion of non-HList codec to a case class codec via as method" in {
roundtripAll(uint8.as[Bar], Seq(Bar(0), Bar(1), Bar(255)))
}
"support converting an hlist of codecs" in {
val a: Codec[Int :: Long :: Boolean :: HNil] = (uint8 :: int64 :: bool :: HNil).toCodec
}
"provide a flatPrepend method" in {
uint8 flatPrepend { n => bits(n.toLong).hlist }
()
}
"provide a flatZipHList method" in {
uint8 flatZipHList { n => bits(n.toLong) }
()
}
"provide ability to append via :+ operator" in {
roundtrip(((uint8 :: uint8) :+ utf8).as[Foo], Foo(1, 2, "test"))
}
"provide ability to concatenate two HList codecs" in {
roundtrip(((uint8 :: uint8) ::: (uint8 :: uint8)).as[Baz], Baz(1, 2, 3, 4))
}
"support HList equivalent of Codec#dropLeft" in {
val codec = (uint8.unit(0) :~>: uint8.hlist).as[Bar]
roundtrip(codec, Bar(1))
// The dropped left codec still contributes 8 bits to the encoded form.
codec.encode(Bar(1)).require should have size(16)
}
"support HList equivalent of Codec#dropLeft on a non-HList codec" in {
uint8.unit(0) :~>: uint8
()
}
"support dropping all Unit values out of an HList codec" in {
def ign(size: Int) = scodec.codecs.ignore(size.toLong)
val codec = (uint8 :: ign(8) :: uint8 :: ign(8) :: utf8).dropUnits.as[Foo]
roundtrip(codec, Foo(1, 2, "test"))
}
"support removing an element of an HList codec by type" in {
val flagsCodec = (bool :: bool :: bool :: ignore(5)).as[Flags]
val valuesWithFlags = flagsCodec flatPrepend { flgs =>
conditional(flgs.x, uint8) ::
conditional(flgs.y, uint8) ::
conditional(flgs.z, uint8)
}
val values = valuesWithFlags.derive[Flags].from { case x :: y :: z :: HNil => Flags(x.isDefined, y.isDefined, z.isDefined) }
values.encode(None :: None :: None :: HNil) shouldBe Attempt.successful(bin"00000000")
values.encode(Some(1) :: Some(2) :: Some(3) :: HNil) shouldBe Attempt.successful(bin"11100000 00000001 00000010 00000011")
values.encode(Some(1) :: None :: Some(3) :: HNil) shouldBe Attempt.successful(bin"10100000 00000001 00000011")
roundtrip(values, Some(1) :: Some(2) :: None :: HNil)
}
"support alternative to flatPrepend+derive pattern that avoids intermediate codec shape" in {
val flagsCodec = (bool :: bool :: bool :: ignore(5)).as[Flags]
val values = flagsCodec.consume {
flgs => conditional(flgs.x, uint8) :: conditional(flgs.y, uint8) :: conditional(flgs.z, uint8)
} {
case x :: y :: z :: HNil => Flags(x.isDefined, y.isDefined, z.isDefined)
}
values.encode(None :: None :: None :: HNil) shouldBe Attempt.successful(bin"00000000")
values.encode(Some(1) :: Some(2) :: Some(3) :: HNil) shouldBe Attempt.successful(bin"11100000 00000001 00000010 00000011")
values.encode(Some(1) :: None :: Some(3) :: HNil) shouldBe Attempt.successful(bin"10100000 00000001 00000011")
roundtrip(values, Some(1) :: Some(2) :: None :: HNil)
}
"support mapping a pair of polymorphic functions over an HList codec" in {
// `double` is applied on encode, `half` on decode (or vice versa — see polyxmap docs).
object double extends Poly1 {
implicit val i = at[Int] { _ * 2 }
implicit val l = at[Long] { _ * 2 }
implicit val b = at[Boolean] { b => b }
}
object half extends Poly1 {
implicit val i = at[Int] { _ / 2 }
implicit val l = at[Long] { _ / 2 }
implicit val b = at[Boolean] { b => b }
}
val codec = (uint8 :: uint32 :: bool(8) :: uint8).polyxmap(half, double)
val value = 1 :: 2L :: true :: 3 :: HNil
val bits = codec.encode(value).require
bits shouldBe hex"0200000004ff06".bits
val decoded = codec.compact.decode(bits).require.value
decoded shouldBe value
}
"support mapping a pair of polymorphic functions over a non-HList codec" in {
object double extends Poly1 {
implicit val i = at[Int] { _ * 2 }
implicit val l = at[Long] { _ * 2 }
implicit val b = at[Boolean] { b => b }
}
object half extends Poly1 {
implicit val i = at[Int] { _ / 2 }
implicit val l = at[Long] { _ / 2 }
implicit val b = at[Boolean] { b => b }
}
val codec = uint8.polyxmap(half, double)
val value = 1
val bits = codec.encode(value).require
bits shouldBe hex"02".bits
val decoded = codec.compact.decode(bits).require.value
decoded shouldBe value
}
"support mapping a single polymorphic function over an HList codec" in {
object negate extends Poly1 {
implicit val i = at[Int] { i => -i }
implicit val l = at[Long] { i => -i }
implicit val b = at[Boolean] { b => b }
}
val codec = (uint8 :: uint32 :: bool(8) :: uint8).polyxmap1(negate)
val value = -1 :: -2L :: true :: -3 :: HNil
val bits = codec.encode(value).require
bits shouldBe hex"0100000002ff03".bits
val decoded = codec.compact.decode(bits).require.value
decoded shouldBe value
}
"support mapping a single polymorphic function over a non-HList codec" in {
object i2d extends Poly1 {
implicit val i = at[Int] { _.toDouble }
implicit val d = at[Double] { _.toInt }
}
val codec: Codec[Double] = uint8 polyxmap1 i2d
val value = 1.0d
val bits = codec.encode(value).require
bits shouldBe hex"01".bits
val decoded = codec.compact.decode(bits).require.value
decoded shouldBe value
}
}
}
| alissapajer/scodec | shared/src/test/scala/scodec/codecs/HListCodecTest.scala | Scala | bsd-3-clause | 6,156 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.client
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.conf.HiveConf
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.expressions.{EmptyRow, Expression, In, InSet}
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
// TODO: Refactor this to `HivePartitionFilteringSuite`
class HiveClientSuite(version: String)
extends HiveVersionSuite(version) with BeforeAndAfterAll {
import CatalystSqlParser._
// Metastore conf key that toggles direct-SQL access for partition pruning.
private val tryDirectSqlKey = HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL.varname
// init() creates 3 days x 24 hours x 4 chunks = 288 partitions.
private val testPartitionCount = 3 * 24 * 4
/** Builds a Hive client with the given direct-SQL setting, creates the partitioned table
 * `default.test`, and registers one partition per (ds, h, chunk) combination
 * (`testPartitionCount` in total).
 *
 * @param tryDirectSql value written to `tryDirectSqlKey` in the Hadoop conf
 */
private def init(tryDirectSql: Boolean): HiveClient = {
// Minimal storage descriptor; the filtering tests only exercise partition keys.
val storageFormat = CatalogStorageFormat(
locationUri = None,
inputFormat = None,
outputFormat = None,
serde = None,
compressed = false,
properties = Map.empty)
val hadoopConf = new Configuration()
hadoopConf.setBoolean(tryDirectSqlKey, tryDirectSql)
val client = buildClient(hadoopConf)
client
.runSqlHive("CREATE TABLE test (value INT) PARTITIONED BY (ds INT, h INT, chunk STRING)")
// Full cross product of the three partition columns.
val partitions =
for {
ds <- 20170101 to 20170103
h <- 0 to 23
chunk <- Seq("aa", "ab", "ba", "bb")
} yield CatalogTablePartition(Map(
"ds" -> ds.toString,
"h" -> h.toString,
"chunk" -> chunk
), storageFormat)
assert(partitions.size == testPartitionCount)
client.createPartitions(
"default", "test", partitions, ignoreIfExists = false)
client
}
/** Creates the shared client (direct SQL enabled) and its test partitions once per suite. */
override def beforeAll() {
super.beforeAll()
client = init(true)
}
// getPartitionsByFilter tests over the (ds, h, chunk) partitions created in init().
// The helper testMetastorePartitionFiltering (defined outside this excerpt) appears to
// assert that the filter keeps exactly the listed ds/h/chunk values — TODO confirm.
test(s"getPartitionsByFilter returns all partitions when $tryDirectSqlKey=false") {
val client = init(false)
val filteredPartitions = client.getPartitionsByFilter(client.getTable("default", "test"),
Seq(parseExpression("ds=20170101")))
assert(filteredPartitions.size == testPartitionCount)
}
test("getPartitionsByFilter: ds<=>20170101") {
// Should return all partitions where <=> is not supported
testMetastorePartitionFiltering(
"ds<=>20170101",
20170101 to 20170103,
0 to 23,
"aa" :: "ab" :: "ba" :: "bb" :: Nil)
}
test("getPartitionsByFilter: ds=20170101") {
testMetastorePartitionFiltering(
"ds=20170101",
20170101 to 20170101,
0 to 23,
"aa" :: "ab" :: "ba" :: "bb" :: Nil)
}
test("getPartitionsByFilter: ds=(20170101 + 1) and h=0") {
// Should return all partitions where h=0 because getPartitionsByFilter does not support
// comparisons to non-literal values
testMetastorePartitionFiltering(
"ds=(20170101 + 1) and h=0",
20170101 to 20170103,
0 to 0,
"aa" :: "ab" :: "ba" :: "bb" :: Nil)
}
test("getPartitionsByFilter: chunk='aa'") {
testMetastorePartitionFiltering(
"chunk='aa'",
20170101 to 20170103,
0 to 23,
"aa" :: Nil)
}
test("getPartitionsByFilter: 20170101=ds") {
// Literal on the left-hand side must prune the same way as ds=20170101.
testMetastorePartitionFiltering(
"20170101=ds",
20170101 to 20170101,
0 to 23,
"aa" :: "ab" :: "ba" :: "bb" :: Nil)
}
test("getPartitionsByFilter: ds=20170101 and h=10") {
testMetastorePartitionFiltering(
"ds=20170101 and h=10",
20170101 to 20170101,
10 to 10,
"aa" :: "ab" :: "ba" :: "bb" :: Nil)
}
test("getPartitionsByFilter: ds=20170101 or ds=20170102") {
testMetastorePartitionFiltering(
"ds=20170101 or ds=20170102",
20170101 to 20170102,
0 to 23,
"aa" :: "ab" :: "ba" :: "bb" :: Nil)
}
test("getPartitionsByFilter: ds in (20170102, 20170103) (using IN expression)") {
testMetastorePartitionFiltering(
"ds in (20170102, 20170103)",
20170102 to 20170103,
0 to 23,
"aa" :: "ab" :: "ba" :: "bb" :: Nil)
}
test("getPartitionsByFilter: ds in (20170102, 20170103) (using INSET expression)") {
// Same filter as above, but rewritten to InSet before hitting the metastore.
testMetastorePartitionFiltering(
"ds in (20170102, 20170103)",
20170102 to 20170103,
0 to 23,
"aa" :: "ab" :: "ba" :: "bb" :: Nil, {
case expr @ In(v, list) if expr.inSetConvertible =>
InSet(v, list.map(_.eval(EmptyRow)).toSet)
})
}
test("getPartitionsByFilter: chunk in ('ab', 'ba') (using IN expression)") {
testMetastorePartitionFiltering(
"chunk in ('ab', 'ba')",
20170101 to 20170103,
0 to 23,
"ab" :: "ba" :: Nil)
}
test("getPartitionsByFilter: chunk in ('ab', 'ba') (using INSET expression)") {
testMetastorePartitionFiltering(
"chunk in ('ab', 'ba')",
20170101 to 20170103,
0 to 23,
"ab" :: "ba" :: Nil, {
case expr @ In(v, list) if expr.inSetConvertible =>
InSet(v, list.map(_.eval(EmptyRow)).toSet)
})
}
test("getPartitionsByFilter: (ds=20170101 and h>=8) or (ds=20170102 and h<8)") {
val day1 = (20170101 to 20170101, 8 to 23, Seq("aa", "ab", "ba", "bb"))
val day2 = (20170102 to 20170102, 0 to 7, Seq("aa", "ab", "ba", "bb"))
testMetastorePartitionFiltering(
"(ds=20170101 and h>=8) or (ds=20170102 and h<8)",
day1 :: day2 :: Nil)
}
test("getPartitionsByFilter: (ds=20170101 and h>=8) or (ds=20170102 and h<(7+1))") {
val day1 = (20170101 to 20170101, 8 to 23, Seq("aa", "ab", "ba", "bb"))
// Day 2 should include all hours because we can't build a filter for h<(7+1)
val day2 = (20170102 to 20170102, 0 to 23, Seq("aa", "ab", "ba", "bb"))
testMetastorePartitionFiltering(
"(ds=20170101 and h>=8) or (ds=20170102 and h<(7+1))",
day1 :: day2 :: Nil)
}
test("getPartitionsByFilter: " +
"chunk in ('ab', 'ba') and ((ds=20170101 and h>=8) or (ds=20170102 and h<8))") {
val day1 = (20170101 to 20170101, 8 to 23, Seq("ab", "ba"))
val day2 = (20170102 to 20170102, 0 to 7, Seq("ab", "ba"))
testMetastorePartitionFiltering(
"chunk in ('ab', 'ba') and ((ds=20170101 and h>=8) or (ds=20170102 and h<8))",
day1 :: day2 :: Nil)
}
test("create client with sharesHadoopClasses = false") {
buildClient(new Configuration(), sharesHadoopClasses = false)
}
private def testMetastorePartitionFiltering(
filterString: String,
expectedDs: Seq[Int],
expectedH: Seq[Int],
expectedChunks: Seq[String]): Unit = {
testMetastorePartitionFiltering(
filterString,
(expectedDs, expectedH, expectedChunks) :: Nil,
identity)
}
private def testMetastorePartitionFiltering(
filterString: String,
expectedDs: Seq[Int],
expectedH: Seq[Int],
expectedChunks: Seq[String],
transform: Expression => Expression): Unit = {
testMetastorePartitionFiltering(
filterString,
(expectedDs, expectedH, expectedChunks) :: Nil,
identity)
}
private def testMetastorePartitionFiltering(
filterString: String,
expectedPartitionCubes: Seq[(Seq[Int], Seq[Int], Seq[String])]): Unit = {
testMetastorePartitionFiltering(filterString, expectedPartitionCubes, identity)
}
private def testMetastorePartitionFiltering(
filterString: String,
expectedPartitionCubes: Seq[(Seq[Int], Seq[Int], Seq[String])],
transform: Expression => Expression): Unit = {
val filteredPartitions = client.getPartitionsByFilter(client.getTable("default", "test"),
Seq(
transform(parseExpression(filterString))
))
val expectedPartitionCount = expectedPartitionCubes.map {
case (expectedDs, expectedH, expectedChunks) =>
expectedDs.size * expectedH.size * expectedChunks.size
}.sum
val expectedPartitions = expectedPartitionCubes.map {
case (expectedDs, expectedH, expectedChunks) =>
for {
ds <- expectedDs
h <- expectedH
chunk <- expectedChunks
} yield Set(
"ds" -> ds.toString,
"h" -> h.toString,
"chunk" -> chunk
)
}.reduce(_ ++ _)
val actualFilteredPartitionCount = filteredPartitions.size
assert(actualFilteredPartitionCount == expectedPartitionCount,
s"Expected $expectedPartitionCount partitions but got $actualFilteredPartitionCount")
assert(filteredPartitions.map(_.spec.toSet).toSet == expectedPartitions.toSet)
}
}
| brad-kaiser/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala | Scala | apache-2.0 | 9,102 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.testsuite.compiler
import org.junit.Test
import org.junit.Assert._
/** Tests the compiler re-patching of native longs to
* scala.scalajs.runtime.Long
* see org.scalajs.testsuite.jsinterop.RuntimeLongTest
* for a test of the implementation itself
*/
class LongTest {
  import LongTest._

  // Arithmetic on Long literals, including values that do not fit in 32 bits,
  // so the compiler's literal handling itself is exercised.
  @Test def `should_correctly_handle_literals`(): Unit = {
    assertEquals(105L, 5L + 100L)
    assertEquals(2147483651L, 2147483649L + 2L)
    assertEquals(-8589934592L, -2147483648L * 4)
    assertEquals(-18014398509482040L, 4503599627370510L * (-4))
  }

  // Unary -, +, and ~ applied to Long values held in vals (not literals).
  @Test def `should_correctly_dispatch_unary_ops_on_Longs`(): Unit = {
    val x = 10L
    assertEquals(-10L, -x)
    val y = 5L
    assertEquals(-5L, -y)
    assertEquals(5L, +y)
    assertEquals(-6L, ~y)
  }

  // Mixed Long/Float binary ops; result type is Float in each case.
  @Test def `should_correctly_dispatch_binary_ops_on_Longs`(): Unit = {
    assertEquals(25F, 5L * 5F, 0F)
    assertEquals(1F, 5L % 4F, 0F)
    assertEquals(20F, 5F * 4L, 0F)
  }

  // Shift operators with shift distances > 63 (masked to the low 6 bits for
  // Long, low 5 bits for Int). `l`/`i` prevent constant folding of the operand.
  @Test def `should_support_shifts_with_Longs_#622`(): Unit = {
    def l(x: Long): Long = x
    def i(x: Int): Int = x
    assertEquals(268435455L, l(-7L) >>> 100L)
    assertEquals(-1L, l(-7L) >> 100L)
    assertEquals(-1L, l(-7L) >> 100)
    assertEquals(268435455L, l(-7L) >>> 100)
    assertEquals(-481036337152L, l(-7L) << 100L)
    assertEquals(-481036337152L, l(-7L) << 100)
    assertEquals(481036337152L, l(7L) << 100L)
    assertEquals(549755813888L, l(8L) << 100L)
    assertEquals(1152921504606846975L, l(-7L) >>> 4)
    assertEquals(112, i(7) << 100)
    assertEquals(-1, i(-7) >> 100)
    assertEquals(268435455, i(-7) >>> 100)
    assertEquals(-5, i(-65) >> 100)
    assertEquals(-5, i(-65) >> 4)
  }

  // Widening conversions to Long from every primitive numeric type;
  // Float/Double conversions truncate toward zero.
  @Test def `primitives_should_convert_to_Long`(): Unit = {
    // Byte
    assertEquals(112L, 112.toByte.toLong)
    // Short
    assertEquals(-10L, (-10).toShort.toLong)
    // Char
    assertEquals(65L, 'A'.toLong)
    // Int
    assertEquals(5L, 5.toLong)
    // Long
    assertEquals(10L, 10L.toLong)
    // Float
    assertEquals(100000L, 100000.6f.toLong)
    // Double
    assertEquals(100000L, 100000.6.toLong)
  }

  // Long.hashCode must match the JVM definition: (value ^ (value >>> 32)).toInt.
  @Test def `should_support_hashCode`(): Unit = {
    assertEquals(0, 0L.hashCode())
    assertEquals(55, 55L.hashCode())
    assertEquals(11, (-12L).hashCode())
    assertEquals(10006548, 10006548L.hashCode())
    assertEquals(1098747, (-1098748L).hashCode())
    assertEquals(-825638905, 613354684553L.hashCode())
    assertEquals(1910653900, 9863155567412L.hashCode())
    assertEquals(1735398658, 3632147899696541255L.hashCode())
    assertEquals(-1689438124, 7632147899696541255L.hashCode())
  }

  // The `##` operator uses Scala's cooperative-equality hash, which differs
  // from hashCode for negative Longs that fit in an Int.
  @Test def `should_support_hash_hash`(): Unit = {
    assertEquals(0, 0L.##)
    assertEquals(55, 55L.##)
    assertEquals(-12, (-12L).##)
    assertEquals(10006548, 10006548L.##)
    assertEquals(-1098748, (-1098748L).##)
    assertEquals(1910653900, 9863155567412L.##)
    assertEquals(1735398658, 3632147899696541255L.##)
    // These two (correctly) give different results on 2.10 and 2.11
    //assertEquals(-825638905, 613354684553L.##) // xx06 on 2.10
    //assertEquals(-1689438124, 7632147899696541255L.##) // xx25 on 2.10
  }

  // Case-class hashing must incorporate the Long field's hash consistently
  // with the JVM (expected values were computed on the JVM).
  @Test def `should_have_correct_hash_in_case_classes`(): Unit = {
    assertEquals(-1669410282, HashTestBox(0L).##)
    assertEquals(-1561146018, HashTestBox(55L).##)
    assertEquals(-1266055417, HashTestBox(-12L).##)
    assertEquals(-1383472436, HashTestBox(10006548L).##)
    assertEquals(1748124846, HashTestBox(-1098748L).##)
    assertEquals(1291324266, HashTestBox(9863155567412L).##)
    assertEquals(-450677189, HashTestBox(3632147899696541255L).##)
    assertEquals(259268522, HashTestBox(1461126709984L).##)
    assertEquals(818387364, HashTestBox(1L).##)
  }

  // String concatenation must render Longs identically to the JVM.
  @Test def `should_correctly_concat_to_string`(): Unit = {
    val x = 20L
    assertEquals("asdf520hello", "asdf" + 5L + x + "hello")
    assertEquals("20hello", x + "hello")
  }

  // Parsing a decimal string that exceeds the Int range.
  @Test def `string_should_convert_to_Long`(): Unit = {
    assertEquals(45678901234567890L, "45678901234567890".toLong)
  }

  // Runtime type tests: a boxed Long must answer isInstanceOf[Long] but not
  // isInstanceOf[Int], both statically and through Any.
  @Test def `should_correctly_implement_is/asInstanceOf_Longs`(): Unit = {
    val dyn:  Any  = 5L
    val stat: Long = 5L

    assertEquals(5L, stat.asInstanceOf[Long])
    // models current scala behavior. See SI-1448
    assertEquals(5, stat.asInstanceOf[Int])

    assertTrue(stat.isInstanceOf[Long])
    assertFalse(stat.isInstanceOf[Int])

    assertEquals(5L, dyn.asInstanceOf[Long])

    assertTrue(dyn.isInstanceOf[Long])
    assertFalse(dyn.isInstanceOf[Int])
  }

  // Numeric equality across types follows cooperative equality.
  @Test def `should_correctly_compare_to_other_numeric_types`(): Unit = {
    assertTrue(5L == 5)
    assertTrue(5 == 5L)
    assertTrue(4 != 5L)
    assertTrue('A' == 65L)
  }
}
object LongTest {
  // Wrapper used by the case-class hashing tests above; the expected `##`
  // values depend on this exact class name and single Long field.
  case class HashTestBox(long: Long)
}
| japgolly/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/compiler/LongTest.scala | Scala | bsd-3-clause | 5,279 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util
import java.util.Properties
import kafka.api.ApiVersion
import kafka.cluster.EndPoint
import kafka.consumer.ConsumerConfig
import kafka.message.{BrokerCompressionCodec, CompressionCodec, Message, MessageSet}
import kafka.utils.CoreUtils
import org.apache.kafka.clients.CommonClientConfigs
import org.apache.kafka.common.config.SSLConfigs
import org.apache.kafka.common.config.ConfigDef.Importance._
import org.apache.kafka.common.config.ConfigDef.Range._
import org.apache.kafka.common.config.ConfigDef.Type._
import org.apache.kafka.common.config.{ConfigException, AbstractConfig, ConfigDef}
import org.apache.kafka.common.metrics.MetricsReporter
import org.apache.kafka.common.protocol.SecurityProtocol
import org.apache.kafka.common.security.auth.PrincipalBuilder
import scala.collection.{mutable, immutable, JavaConversions, Map}
/**
 * Default values for the broker configuration entries declared in `KafkaConfig`.
 * Values must stay in sync with the documentation strings defined alongside the
 * corresponding `*Prop` keys.
 */
object Defaults {
  /** ********* Zookeeper Configuration ***********/
  val ZkSessionTimeoutMs = 6000
  val ZkSyncTimeMs = 2000

  /** ********* General Configuration ***********/
  val MaxReservedBrokerId = 1000
  val BrokerId = -1
  // Maximum payload size plus the per-message log overhead.
  val MessageMaxBytes = 1000000 + MessageSet.LogOverhead
  val NumNetworkThreads = 3
  val NumIoThreads = 8
  val BackgroundThreads = 10
  val QueuedMaxRequests = 500

  /** ********* Socket Server Configuration ***********/
  val Port = 9092
  // Empty means no explicit bind address (per HostNameDoc: bind to all interfaces).
  // Was `new String("")`; the extra allocation served no purpose.
  val HostName: String = ""
  val SocketSendBufferBytes: Int = 100 * 1024
  val SocketReceiveBufferBytes: Int = 100 * 1024
  val SocketRequestMaxBytes: Int = 100 * 1024 * 1024
  val MaxConnectionsPerIp: Int = Int.MaxValue
  val MaxConnectionsPerIpOverrides: String = ""
  val ConnectionsMaxIdleMs = 10 * 60 * 1000L

  /** ********* Log Configuration ***********/
  val NumPartitions = 1
  val LogDir = "/tmp/kafka-logs"
  val LogSegmentBytes = 1 * 1024 * 1024 * 1024
  val LogRollHours = 24 * 7
  val LogRollJitterHours = 0
  val LogRetentionHours = 24 * 7

  val LogRetentionBytes = -1L
  val LogCleanupIntervalMs = 5 * 60 * 1000L
  val Delete = "delete"
  val Compact = "compact"
  val LogCleanupPolicy = Delete
  val LogCleanerThreads = 1
  val LogCleanerIoMaxBytesPerSecond = Double.MaxValue
  val LogCleanerDedupeBufferSize = 500 * 1024 * 1024L
  val LogCleanerIoBufferSize = 512 * 1024
  val LogCleanerDedupeBufferLoadFactor = 0.9d
  val LogCleanerBackoffMs = 15 * 1000
  val LogCleanerMinCleanRatio = 0.5d
  val LogCleanerEnable = false
  val LogCleanerDeleteRetentionMs = 24 * 60 * 60 * 1000L
  val LogIndexSizeMaxBytes = 10 * 1024 * 1024
  val LogIndexIntervalBytes = 4096
  val LogFlushIntervalMessages = Long.MaxValue
  val LogDeleteDelayMs = 60000
  val LogFlushSchedulerIntervalMs = Long.MaxValue
  val LogFlushOffsetCheckpointIntervalMs = 60000
  val LogPreAllocateEnable = false
  val NumRecoveryThreadsPerDataDir = 1
  val AutoCreateTopicsEnable = true
  val MinInSyncReplicas = 1

  /** ********* Replication configuration ***********/
  val ControllerSocketTimeoutMs = 30000
  val ControllerMessageQueueSize = Int.MaxValue
  val DefaultReplicationFactor = 1
  val ReplicaLagTimeMaxMs = 10000L
  val ReplicaSocketTimeoutMs = ConsumerConfig.SocketTimeout
  val ReplicaSocketReceiveBufferBytes = ConsumerConfig.SocketBufferSize
  val ReplicaFetchMaxBytes = ConsumerConfig.FetchSize
  val ReplicaFetchWaitMaxMs = 500
  val ReplicaFetchMinBytes = 1
  val NumReplicaFetchers = 1
  val ReplicaFetchBackoffMs = 1000
  val ReplicaHighWatermarkCheckpointIntervalMs = 5000L
  val FetchPurgatoryPurgeIntervalRequests = 1000
  val ProducerPurgatoryPurgeIntervalRequests = 1000
  val AutoLeaderRebalanceEnable = true
  val LeaderImbalancePerBrokerPercentage = 10
  val LeaderImbalanceCheckIntervalSeconds = 300
  val UncleanLeaderElectionEnable = true
  val InterBrokerSecurityProtocol = SecurityProtocol.PLAINTEXT.toString
  val InterBrokerProtocolVersion = ApiVersion.latestVersion.toString

  /** ********* Controlled shutdown configuration ***********/
  val ControlledShutdownMaxRetries = 3
  val ControlledShutdownRetryBackoffMs = 5000
  val ControlledShutdownEnable = true

  /** ********* Consumer coordinator configuration ***********/
  val ConsumerMinSessionTimeoutMs = 6000
  val ConsumerMaxSessionTimeoutMs = 30000

  /** ********* Offset management configuration ***********/
  val OffsetMetadataMaxSize = OffsetManagerConfig.DefaultMaxMetadataSize
  val OffsetsLoadBufferSize = OffsetManagerConfig.DefaultLoadBufferSize
  val OffsetsTopicReplicationFactor = OffsetManagerConfig.DefaultOffsetsTopicReplicationFactor
  val OffsetsTopicPartitions: Int = OffsetManagerConfig.DefaultOffsetsTopicNumPartitions
  val OffsetsTopicSegmentBytes: Int = OffsetManagerConfig.DefaultOffsetsTopicSegmentBytes
  val OffsetsTopicCompressionCodec: Int = OffsetManagerConfig.DefaultOffsetsTopicCompressionCodec.codec
  val OffsetsRetentionMinutes: Int = 24 * 60
  val OffsetsRetentionCheckIntervalMs: Long = OffsetManagerConfig.DefaultOffsetsRetentionCheckIntervalMs
  val OffsetCommitTimeoutMs = OffsetManagerConfig.DefaultOffsetCommitTimeoutMs
  val OffsetCommitRequiredAcks = OffsetManagerConfig.DefaultOffsetCommitRequiredAcks

  /** ********* Quota Configuration ***********/
  val ProducerQuotaBytesPerSecondDefault = ClientQuotaManagerConfig.QuotaBytesPerSecondDefault
  val ConsumerQuotaBytesPerSecondDefault = ClientQuotaManagerConfig.QuotaBytesPerSecondDefault
  val ProducerQuotaBytesPerSecondOverrides = ClientQuotaManagerConfig.QuotaBytesPerSecondOverrides
  val ConsumerQuotaBytesPerSecondOverrides = ClientQuotaManagerConfig.QuotaBytesPerSecondOverrides
  val NumQuotaSamples: Int = ClientQuotaManagerConfig.DefaultNumQuotaSamples
  val QuotaWindowSizeSeconds: Int = ClientQuotaManagerConfig.DefaultQuotaWindowSizeSeconds

  val DeleteTopicEnable = false

  val CompressionType = "producer"

  /** ********* Kafka Metrics Configuration ***********/
  val MetricNumSamples = 2
  val MetricSampleWindowMs = 30000
  val MetricReporterClasses = ""

  /** ********* SSL configuration ***********/
  val PrincipalBuilderClass = SSLConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS
  val SSLProtocol = SSLConfigs.DEFAULT_SSL_PROTOCOL
  val SSLEnabledProtocols = SSLConfigs.DEFAULT_ENABLED_PROTOCOLS
  val SSLKeystoreType = SSLConfigs.DEFAULT_SSL_KEYSTORE_TYPE
  val SSLKeystoreLocation = "/tmp/ssl.keystore.jks"
  val SSLKeystorePassword = "keystore_password"
  val SSLKeyPassword = "key_password"
  val SSLTruststoreType = SSLConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE
  val SSLTruststoreLocation = SSLConfigs.DEFAULT_TRUSTSTORE_LOCATION
  val SSLTruststorePassword = SSLConfigs.DEFAULT_TRUSTSTORE_PASSWORD
  val SSLKeyManagerAlgorithm = SSLConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM
  val SSLTrustManagerAlgorithm = SSLConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM
  // Allowed values for ssl.client.auth; default is "none".
  val SSLClientAuthRequired = "required"
  val SSLClientAuthRequested = "requested"
  val SSLClientAuthNone = "none"
  val SSLClientAuth = SSLClientAuthNone
}
object KafkaConfig {
def main(args: Array[String]) {
System.out.println(configDef.toHtmlTable)
}
/** ********* Zookeeper Configuration ***********/
val ZkConnectProp = "zookeeper.connect"
val ZkSessionTimeoutMsProp = "zookeeper.session.timeout.ms"
val ZkConnectionTimeoutMsProp = "zookeeper.connection.timeout.ms"
val ZkSyncTimeMsProp = "zookeeper.sync.time.ms"
/** ********* General Configuration ***********/
val MaxReservedBrokerIdProp = "reserved.broker.max.id"
val BrokerIdProp = "broker.id"
val MessageMaxBytesProp = "message.max.bytes"
val NumNetworkThreadsProp = "num.network.threads"
val NumIoThreadsProp = "num.io.threads"
val BackgroundThreadsProp = "background.threads"
val QueuedMaxRequestsProp = "queued.max.requests"
/** ********* Socket Server Configuration ***********/
val PortProp = "port"
val HostNameProp = "host.name"
val ListenersProp = "listeners"
val AdvertisedHostNameProp: String = "advertised.host.name"
val AdvertisedPortProp = "advertised.port"
val AdvertisedListenersProp = "advertised.listeners"
val SocketSendBufferBytesProp = "socket.send.buffer.bytes"
val SocketReceiveBufferBytesProp = "socket.receive.buffer.bytes"
val SocketRequestMaxBytesProp = "socket.request.max.bytes"
val MaxConnectionsPerIpProp = "max.connections.per.ip"
val MaxConnectionsPerIpOverridesProp = "max.connections.per.ip.overrides"
val ConnectionsMaxIdleMsProp = "connections.max.idle.ms"
/** ********* Log Configuration ***********/
val NumPartitionsProp = "num.partitions"
val LogDirsProp = "log.dirs"
val LogDirProp = "log.dir"
val LogSegmentBytesProp = "log.segment.bytes"
val LogRollTimeMillisProp = "log.roll.ms"
val LogRollTimeHoursProp = "log.roll.hours"
val LogRollTimeJitterMillisProp = "log.roll.jitter.ms"
val LogRollTimeJitterHoursProp = "log.roll.jitter.hours"
val LogRetentionTimeMillisProp = "log.retention.ms"
val LogRetentionTimeMinutesProp = "log.retention.minutes"
val LogRetentionTimeHoursProp = "log.retention.hours"
val LogRetentionBytesProp = "log.retention.bytes"
val LogCleanupIntervalMsProp = "log.retention.check.interval.ms"
val LogCleanupPolicyProp = "log.cleanup.policy"
val LogCleanerThreadsProp = "log.cleaner.threads"
val LogCleanerIoMaxBytesPerSecondProp = "log.cleaner.io.max.bytes.per.second"
val LogCleanerDedupeBufferSizeProp = "log.cleaner.dedupe.buffer.size"
val LogCleanerIoBufferSizeProp = "log.cleaner.io.buffer.size"
val LogCleanerDedupeBufferLoadFactorProp = "log.cleaner.io.buffer.load.factor"
val LogCleanerBackoffMsProp = "log.cleaner.backoff.ms"
val LogCleanerMinCleanRatioProp = "log.cleaner.min.cleanable.ratio"
val LogCleanerEnableProp = "log.cleaner.enable"
val LogCleanerDeleteRetentionMsProp = "log.cleaner.delete.retention.ms"
val LogIndexSizeMaxBytesProp = "log.index.size.max.bytes"
val LogIndexIntervalBytesProp = "log.index.interval.bytes"
val LogFlushIntervalMessagesProp = "log.flush.interval.messages"
val LogDeleteDelayMsProp = "log.segment.delete.delay.ms"
val LogFlushSchedulerIntervalMsProp = "log.flush.scheduler.interval.ms"
val LogFlushIntervalMsProp = "log.flush.interval.ms"
val LogFlushOffsetCheckpointIntervalMsProp = "log.flush.offset.checkpoint.interval.ms"
val LogPreAllocateProp = "log.preallocate"
val NumRecoveryThreadsPerDataDirProp = "num.recovery.threads.per.data.dir"
val AutoCreateTopicsEnableProp = "auto.create.topics.enable"
val MinInSyncReplicasProp = "min.insync.replicas"
/** ********* Replication configuration ***********/
val ControllerSocketTimeoutMsProp = "controller.socket.timeout.ms"
val DefaultReplicationFactorProp = "default.replication.factor"
val ReplicaLagTimeMaxMsProp = "replica.lag.time.max.ms"
val ReplicaSocketTimeoutMsProp = "replica.socket.timeout.ms"
val ReplicaSocketReceiveBufferBytesProp = "replica.socket.receive.buffer.bytes"
val ReplicaFetchMaxBytesProp = "replica.fetch.max.bytes"
val ReplicaFetchWaitMaxMsProp = "replica.fetch.wait.max.ms"
val ReplicaFetchMinBytesProp = "replica.fetch.min.bytes"
val ReplicaFetchBackoffMsProp = "replica.fetch.backoff.ms"
val NumReplicaFetchersProp = "num.replica.fetchers"
val ReplicaHighWatermarkCheckpointIntervalMsProp = "replica.high.watermark.checkpoint.interval.ms"
val FetchPurgatoryPurgeIntervalRequestsProp = "fetch.purgatory.purge.interval.requests"
val ProducerPurgatoryPurgeIntervalRequestsProp = "producer.purgatory.purge.interval.requests"
val AutoLeaderRebalanceEnableProp = "auto.leader.rebalance.enable"
val LeaderImbalancePerBrokerPercentageProp = "leader.imbalance.per.broker.percentage"
val LeaderImbalanceCheckIntervalSecondsProp = "leader.imbalance.check.interval.seconds"
val UncleanLeaderElectionEnableProp = "unclean.leader.election.enable"
val InterBrokerSecurityProtocolProp = "security.inter.broker.protocol"
val InterBrokerProtocolVersionProp = "inter.broker.protocol.version"
/** ********* Controlled shutdown configuration ***********/
val ControlledShutdownMaxRetriesProp = "controlled.shutdown.max.retries"
val ControlledShutdownRetryBackoffMsProp = "controlled.shutdown.retry.backoff.ms"
val ControlledShutdownEnableProp = "controlled.shutdown.enable"
/** ********* Consumer coordinator configuration ***********/
val ConsumerMinSessionTimeoutMsProp = "consumer.min.session.timeout.ms"
val ConsumerMaxSessionTimeoutMsProp = "consumer.max.session.timeout.ms"
/** ********* Offset management configuration ***********/
val OffsetMetadataMaxSizeProp = "offset.metadata.max.bytes"
val OffsetsLoadBufferSizeProp = "offsets.load.buffer.size"
val OffsetsTopicReplicationFactorProp = "offsets.topic.replication.factor"
val OffsetsTopicPartitionsProp = "offsets.topic.num.partitions"
val OffsetsTopicSegmentBytesProp = "offsets.topic.segment.bytes"
val OffsetsTopicCompressionCodecProp = "offsets.topic.compression.codec"
val OffsetsRetentionMinutesProp = "offsets.retention.minutes"
val OffsetsRetentionCheckIntervalMsProp = "offsets.retention.check.interval.ms"
val OffsetCommitTimeoutMsProp = "offsets.commit.timeout.ms"
val OffsetCommitRequiredAcksProp = "offsets.commit.required.acks"
/** ********* Quota Configuration ***********/
val ProducerQuotaBytesPerSecondDefaultProp = "quota.producer.default"
val ConsumerQuotaBytesPerSecondDefaultProp = "quota.consumer.default"
val ProducerQuotaBytesPerSecondOverridesProp = "quota.producer.bytes.per.second.overrides"
val ConsumerQuotaBytesPerSecondOverridesProp = "quota.consumer.bytes.per.second.overrides"
val NumQuotaSamplesProp = "quota.window.num"
val QuotaWindowSizeSecondsProp = "quota.window.size.seconds"
val DeleteTopicEnableProp = "delete.topic.enable"
val CompressionTypeProp = "compression.type"
/** ********* Kafka Metrics Configuration ***********/
val MetricSampleWindowMsProp = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG
val MetricNumSamplesProp: String = CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG
val MetricReporterClassesProp: String = CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG
/** ********* SSL Configuration ****************/
val PrincipalBuilderClassProp = SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG
val SSLProtocolProp = SSLConfigs.SSL_PROTOCOL_CONFIG
val SSLProviderProp = SSLConfigs.SSL_PROVIDER_CONFIG
val SSLCipherSuitesProp = SSLConfigs.SSL_CIPHER_SUITES_CONFIG
val SSLEnabledProtocolsProp = SSLConfigs.SSL_ENABLED_PROTOCOLS_CONFIG
val SSLKeystoreTypeProp = SSLConfigs.SSL_KEYSTORE_TYPE_CONFIG
val SSLKeystoreLocationProp = SSLConfigs.SSL_KEYSTORE_LOCATION_CONFIG
val SSLKeystorePasswordProp = SSLConfigs.SSL_KEYSTORE_PASSWORD_CONFIG
val SSLKeyPasswordProp = SSLConfigs.SSL_KEY_PASSWORD_CONFIG
val SSLTruststoreTypeProp = SSLConfigs.SSL_TRUSTSTORE_TYPE_CONFIG
val SSLTruststoreLocationProp = SSLConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG
val SSLTruststorePasswordProp = SSLConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG
val SSLKeyManagerAlgorithmProp = SSLConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG
val SSLTrustManagerAlgorithmProp = SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG
val SSLEndpointIdentificationAlgorithmProp = SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG
val SSLClientAuthProp = SSLConfigs.SSL_CLIENT_AUTH_CONFIG
/* Documentation */
/** ********* Zookeeper Configuration ***********/
val ZkConnectDoc = "Zookeeper host string"
val ZkSessionTimeoutMsDoc = "Zookeeper session timeout"
val ZkConnectionTimeoutMsDoc = "The max time that the client waits to establish a connection to zookeeper"
val ZkSyncTimeMsDoc = "How far a ZK follower can be behind a ZK leader"
/** ********* General Configuration ***********/
val MaxReservedBrokerIdDoc = "Max number that can be used for a broker.id"
val BrokerIdDoc = "The broker id for this server. " +
"To avoid conflicts between zookeeper generated brokerId and user's config.brokerId " +
"added MaxReservedBrokerId and zookeeper sequence starts from MaxReservedBrokerId + 1."
val MessageMaxBytesDoc = "The maximum size of message that the server can receive"
val NumNetworkThreadsDoc = "the number of network threads that the server uses for handling network requests"
val NumIoThreadsDoc = "The number of io threads that the server uses for carrying out network requests"
val BackgroundThreadsDoc = "The number of threads to use for various background processing tasks"
val QueuedMaxRequestsDoc = "The number of queued requests allowed before blocking the network threads"
/** ********* Socket Server Configuration ***********/
val PortDoc = "the port to listen and accept connections on"
val HostNameDoc = "hostname of broker. If this is set, it will only bind to this address. If this is not set, it will bind to all interfaces"
val ListenersDoc = "Listener List - Comma-separated list of URIs we will listen on and their protocols.\\n" +
" Specify hostname as 0.0.0.0 to bind to all interfaces.\\n" +
" Leave hostname empty to bind to default interface.\\n" +
" Examples of legal listener lists:\\n" +
" PLAINTEXT://myhost:9092,TRACE://:9091\\n" +
" PLAINTEXT://0.0.0.0:9092, TRACE://localhost:9093\\n"
val AdvertisedHostNameDoc = "Hostname to publish to ZooKeeper for clients to use. In IaaS environments, this may " +
"need to be different from the interface to which the broker binds. If this is not set, " +
"it will use the value for \\"host.name\\" if configured. Otherwise " +
"it will use the value returned from java.net.InetAddress.getCanonicalHostName()."
val AdvertisedPortDoc = "The port to publish to ZooKeeper for clients to use. In IaaS environments, this may " +
"need to be different from the port to which the broker binds. If this is not set, " +
"it will publish the same port that the broker binds to."
val AdvertisedListenersDoc = "Listeners to publish to ZooKeeper for clients to use, if different than the listeners above." +
" In IaaS environments, this may need to be different from the interface to which the broker binds." +
" If this is not set, the value for \\"listeners\\" will be used."
val SocketSendBufferBytesDoc = "The SO_SNDBUF buffer of the socket sever sockets"
val SocketReceiveBufferBytesDoc = "The SO_RCVBUF buffer of the socket sever sockets"
val SocketRequestMaxBytesDoc = "The maximum number of bytes in a socket request"
val MaxConnectionsPerIpDoc = "The maximum number of connections we allow from each ip address"
val MaxConnectionsPerIpOverridesDoc = "Per-ip or hostname overrides to the default maximum number of connections"
val ConnectionsMaxIdleMsDoc = "Idle connections timeout: the server socket processor threads close the connections that idle more than this"
/** ********* Log Configuration ***********/
val NumPartitionsDoc = "The default number of log partitions per topic"
val LogDirDoc = "The directory in which the log data is kept (supplemental for " + LogDirsProp + " property)"
val LogDirsDoc = "The directories in which the log data is kept"
val LogSegmentBytesDoc = "The maximum size of a single log file"
val LogRollTimeMillisDoc = "The maximum time before a new log segment is rolled out (in milliseconds)"
val LogRollTimeHoursDoc = "The maximum time before a new log segment is rolled out (in hours), secondary to " + LogRollTimeMillisProp + " property"
val LogRollTimeJitterMillisDoc = "The maximum jitter to subtract from logRollTimeMillis (in milliseconds)"
val LogRollTimeJitterHoursDoc = "The maximum jitter to subtract from logRollTimeMillis (in hours), secondary to " + LogRollTimeJitterMillisProp + " property"
val LogRetentionTimeMillisDoc = "The number of milliseconds to keep a log file before deleting it (in milliseconds)"
val LogRetentionTimeMinsDoc = "The number of minutes to keep a log file before deleting it (in minutes), secondary to " + LogRetentionTimeMillisProp + " property"
val LogRetentionTimeHoursDoc = "The number of hours to keep a log file before deleting it (in hours), tertiary to " + LogRetentionTimeMillisProp + " property"
val LogRetentionBytesDoc = "The maximum size of the log before deleting it"
// User-facing documentation strings for the per-broker log configuration keys.
// These are rendered into the generated configuration table, so wording matters.
val LogCleanupIntervalMsDoc = "The frequency in milliseconds that the log cleaner checks whether any log is eligible for deletion"
val LogCleanupPolicyDoc = "The default cleanup policy for segments beyond the retention window, must be either \"delete\" or \"compact\""
val LogCleanerThreadsDoc = "The number of background threads to use for log cleaning"
val LogCleanerIoMaxBytesPerSecondDoc = "The log cleaner will be throttled so that the sum of its read and write i/o will be less than this value on average"
val LogCleanerDedupeBufferSizeDoc = "The total memory used for log deduplication across all cleaner threads"
val LogCleanerIoBufferSizeDoc = "The total memory used for log cleaner I/O buffers across all cleaner threads"
val LogCleanerDedupeBufferLoadFactorDoc = "Log cleaner dedupe buffer load factor. The percentage full the dedupe buffer can become. A higher value " +
  "will allow more log to be cleaned at once but will lead to more hash collisions"
val LogCleanerBackoffMsDoc = "The amount of time to sleep when there are no logs to clean"
// Fixed typo: "for a log to eligible" -> "for a log to be eligible"
val LogCleanerMinCleanRatioDoc = "The minimum ratio of dirty log to total log for a log to be eligible for cleaning"
val LogCleanerEnableDoc = "Should we enable log cleaning?"
val LogCleanerDeleteRetentionMsDoc = "How long are delete records retained?"
val LogIndexSizeMaxBytesDoc = "The maximum size in bytes of the offset index"
val LogIndexIntervalBytesDoc = "The interval with which we add an entry to the offset index"
val LogFlushIntervalMessagesDoc = "The number of messages accumulated on a log partition before messages are flushed to disk "
val LogDeleteDelayMsDoc = "The amount of time to wait before deleting a file from the filesystem"
val LogFlushSchedulerIntervalMsDoc = "The frequency in ms that the log flusher checks whether any log needs to be flushed to disk"
val LogFlushIntervalMsDoc = "The maximum time in ms that a message in any topic is kept in memory before flushed to disk"
val LogFlushOffsetCheckpointIntervalMsDoc = "The frequency with which we update the persistent record of the last flush which acts as the log recovery point"
val LogPreAllocateEnableDoc = "Should pre allocate file when create new segment? If you are using Kafka on Windows, you probably need to set it to true."
val NumRecoveryThreadsPerDataDirDoc = "The number of threads per data directory to be used for log recovery at startup and flushing at shutdown"
val AutoCreateTopicsEnableDoc = "Enable auto creation of topic on the server"
val MinInSyncReplicasDoc = "define the minimum number of replicas in ISR needed to satisfy a produce request with required.acks=-1 (or all)"
/** ********* Replication configuration ***********/
// User-facing documentation strings for replication-related configuration keys.
val ControllerSocketTimeoutMsDoc = "The socket timeout for controller-to-broker channels"
val ControllerMessageQueueSizeDoc = "The buffer size for controller-to-broker-channels"
val DefaultReplicationFactorDoc = "default replication factors for automatically created topics"
val ReplicaLagTimeMaxMsDoc = "If a follower hasn't sent any fetch requests or hasn't consumed up to the leaders log end offset for at least this time," +
  " the leader will remove the follower from isr"
val ReplicaSocketTimeoutMsDoc = "The socket timeout for network requests. Its value should be at least replica.fetch.wait.max.ms"
val ReplicaSocketReceiveBufferBytesDoc = "The socket receive buffer for network requests"
// Fixed typo: "byes" -> "bytes"
val ReplicaFetchMaxBytesDoc = "The number of bytes of messages to attempt to fetch"
val ReplicaFetchWaitMaxMsDoc = "max wait time for each fetcher request issued by follower replicas. This value should always be less than the " +
  "replica.lag.time.max.ms at all times to prevent frequent shrinking of ISR for low throughput topics"
val ReplicaFetchMinBytesDoc = "Minimum bytes expected for each fetch response. If not enough bytes, wait up to replicaMaxWaitTimeMs"
val NumReplicaFetchersDoc = "Number of fetcher threads used to replicate messages from a source broker. " +
  "Increasing this value can increase the degree of I/O parallelism in the follower broker."
val ReplicaFetchBackoffMsDoc = "The amount of time to sleep when fetch partition error occurs."
val ReplicaHighWatermarkCheckpointIntervalMsDoc = "The frequency with which the high watermark is saved out to disk"
val FetchPurgatoryPurgeIntervalRequestsDoc = "The purge interval (in number of requests) of the fetch request purgatory"
val ProducerPurgatoryPurgeIntervalRequestsDoc = "The purge interval (in number of requests) of the producer request purgatory"
val AutoLeaderRebalanceEnableDoc = "Enables auto leader balancing. A background thread checks and triggers leader balance if required at regular intervals"
val LeaderImbalancePerBrokerPercentageDoc = "The ratio of leader imbalance allowed per broker. The controller would trigger a leader balance if it goes above this value per broker. The value is specified in percentage."
val LeaderImbalanceCheckIntervalSecondsDoc = "The frequency with which the partition rebalance check is triggered by the controller"
val UncleanLeaderElectionEnableDoc = "Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss"
val InterBrokerSecurityProtocolDoc = "Security protocol used to communicate between brokers. Defaults to plain text."
val InterBrokerProtocolVersionDoc = "Specify which version of the inter-broker protocol will be used.\n" +
  " This is typically bumped after all brokers were upgraded to a new version.\n" +
  " Example of some valid values are: 0.8.0, 0.8.1, 0.8.1.1, 0.8.2, 0.8.2.0, 0.8.2.1, 0.8.3, 0.8.3.0. Check ApiVersion for the full list."
/** ********* Controlled shutdown configuration ***********/
val ControlledShutdownMaxRetriesDoc = "Controlled shutdown can fail for multiple reasons. This determines the number of retries when such failure happens"
val ControlledShutdownRetryBackoffMsDoc = "Before each retry, the system needs time to recover from the state that caused the previous failure (Controller fail over, replica lag etc). This config determines the amount of time to wait before retrying."
val ControlledShutdownEnableDoc = "Enable controlled shutdown of the server"
/** ********* Consumer coordinator configuration ***********/
val ConsumerMinSessionTimeoutMsDoc = "The minimum allowed session timeout for registered consumers"
val ConsumerMaxSessionTimeoutMsDoc = "The maximum allowed session timeout for registered consumers"
/** ********* Offset management configuration ***********/
val OffsetMetadataMaxSizeDoc = "The maximum size for a metadata entry associated with an offset commit"
val OffsetsLoadBufferSizeDoc = "Batch size for reading from the offsets segments when loading offsets into the cache."
val OffsetsTopicReplicationFactorDoc = "The replication factor for the offsets topic (set higher to ensure availability). " +
  "To ensure that the effective replication factor of the offsets topic is the configured value, " +
  "the number of alive brokers has to be at least the replication factor at the time of the " +
  "first request for the offsets topic. If not, either the offsets topic creation will fail or " +
  "it will get a replication factor of min(alive brokers, configured replication factor)"
val OffsetsTopicPartitionsDoc = "The number of partitions for the offset commit topic (should not change after deployment)"
val OffsetsTopicSegmentBytesDoc = "The offsets topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads"
val OffsetsTopicCompressionCodecDoc = "Compression codec for the offsets topic - compression may be used to achieve \"atomic\" commits"
val OffsetsRetentionMinutesDoc = "Log retention window in minutes for offsets topic"
val OffsetsRetentionCheckIntervalMsDoc = "Frequency at which to check for stale offsets"
val OffsetCommitTimeoutMsDoc = "Offset commit will be delayed until all replicas for the offsets topic receive the commit " +
  "or this timeout is reached. This is similar to the producer request timeout."
val OffsetCommitRequiredAcksDoc = "The required acks before the commit can be accepted. In general, the default (-1) should not be overridden"
/** ********* Quota Configuration ***********/
val ProducerQuotaBytesPerSecondDefaultDoc = "Any producer distinguished by clientId will get throttled if it produces more bytes than this value per-second"
val ConsumerQuotaBytesPerSecondDefaultDoc = "Any consumer distinguished by clientId/consumer group will get throttled if it fetches more bytes than this value per-second"
val ProducerQuotaBytesPerSecondOverridesDoc = "Comma separated list of clientId:quotaBytesPerSecond to override the default producer quota. " +
  "Example: clientIdX=10485760,clientIdY=10485760"
val ConsumerQuotaBytesPerSecondOverridesDoc = "Comma separated list of clientId:quotaBytesPerSecond to override the default consumer quota. " +
  "Example: clientIdX=10485760,clientIdY=10485760"
val NumQuotaSamplesDoc = "The number of samples to retain in memory"
val QuotaWindowSizeSecondsDoc = "The time span of each sample"
val DeleteTopicEnableDoc = "Enables delete topic. Delete topic through the admin tool will have no effect if this config is turned off"
val CompressionTypeDoc = "Specify the final compression type for a given topic. This configuration accepts the standard compression codecs " +
  "('gzip', 'snappy', lz4). It additionally accepts 'uncompressed' which is equivalent to no compression; and " +
  "'producer' which means retain the original compression codec set by the producer."
/** ********* Kafka Metrics Configuration ***********/
// Metrics and SSL doc strings are reused verbatim from the shared client-side
// config constants (CommonClientConfigs / SSLConfigs) so broker and client docs stay in sync.
val MetricSampleWindowMsDoc = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_DOC
val MetricNumSamplesDoc = CommonClientConfigs.METRICS_NUM_SAMPLES_DOC
val MetricReporterClassesDoc = CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC
/** ********* SSL Configuration ****************/
val PrincipalBuilderClassDoc = SSLConfigs.PRINCIPAL_BUILDER_CLASS_DOC
val SSLProtocolDoc = SSLConfigs.SSL_PROTOCOL_DOC
val SSLProviderDoc = SSLConfigs.SSL_PROVIDER_DOC
val SSLCipherSuitesDoc = SSLConfigs.SSL_CIPHER_SUITES_DOC
val SSLEnabledProtocolsDoc = SSLConfigs.SSL_ENABLED_PROTOCOLS_DOC
val SSLKeystoreTypeDoc = SSLConfigs.SSL_KEYSTORE_TYPE_DOC
val SSLKeystoreLocationDoc = SSLConfigs.SSL_KEYSTORE_LOCATION_DOC
val SSLKeystorePasswordDoc = SSLConfigs.SSL_KEYSTORE_PASSWORD_DOC
val SSLKeyPasswordDoc = SSLConfigs.SSL_KEY_PASSWORD_DOC
val SSLTruststoreTypeDoc = SSLConfigs.SSL_TRUSTSTORE_TYPE_DOC
val SSLTruststorePasswordDoc = SSLConfigs.SSL_TRUSTSTORE_PASSWORD_DOC
val SSLTruststoreLocationDoc = SSLConfigs.SSL_TRUSTSTORE_LOCATION_DOC
val SSLKeyManagerAlgorithmDoc = SSLConfigs.SSL_KEYMANAGER_ALGORITHM_DOC
val SSLTrustManagerAlgorithmDoc = SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_DOC
val SSLEndpointIdentificationAlgorithmDoc = SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC
val SSLClientAuthDoc = SSLConfigs.SSL_CLIENT_AUTH_DOC
/**
 * The full definition of every broker configuration key: name, type, default value,
 * optional validator, importance level and user-facing documentation.
 * Section order mirrors the doc-string declarations above.
 */
private val configDef = {
  import ConfigDef.Range._
  import ConfigDef.ValidString._
  import ConfigDef.Type._
  import ConfigDef.Importance._

  new ConfigDef()

    /** ********* Zookeeper Configuration ***********/
    .define(ZkConnectProp, STRING, HIGH, ZkConnectDoc)
    .define(ZkSessionTimeoutMsProp, INT, Defaults.ZkSessionTimeoutMs, HIGH, ZkSessionTimeoutMsDoc)
    // NOTE(review): the trailing `false` in the no-default overloads below appears to mark the
    // key as optional with no default value -- confirm against this ConfigDef's define() overloads.
    .define(ZkConnectionTimeoutMsProp, INT, HIGH, ZkConnectionTimeoutMsDoc, false)
    .define(ZkSyncTimeMsProp, INT, Defaults.ZkSyncTimeMs, LOW, ZkSyncTimeMsDoc)

    /** ********* General Configuration ***********/
    // Fixed: the documentation argument was MaxReservedBrokerIdProp (the key name itself)
    // instead of the MaxReservedBrokerIdDoc doc string.
    .define(MaxReservedBrokerIdProp, INT, Defaults.MaxReservedBrokerId, atLeast(0), MEDIUM, MaxReservedBrokerIdDoc)
    .define(BrokerIdProp, INT, Defaults.BrokerId, HIGH, BrokerIdDoc)
    .define(MessageMaxBytesProp, INT, Defaults.MessageMaxBytes, atLeast(0), HIGH, MessageMaxBytesDoc)
    .define(NumNetworkThreadsProp, INT, Defaults.NumNetworkThreads, atLeast(1), HIGH, NumNetworkThreadsDoc)
    .define(NumIoThreadsProp, INT, Defaults.NumIoThreads, atLeast(1), HIGH, NumIoThreadsDoc)
    .define(BackgroundThreadsProp, INT, Defaults.BackgroundThreads, atLeast(1), HIGH, BackgroundThreadsDoc)
    .define(QueuedMaxRequestsProp, INT, Defaults.QueuedMaxRequests, atLeast(1), HIGH, QueuedMaxRequestsDoc)

    /** ********* Socket Server Configuration ***********/
    .define(PortProp, INT, Defaults.Port, HIGH, PortDoc)
    .define(HostNameProp, STRING, Defaults.HostName, HIGH, HostNameDoc)
    .define(ListenersProp, STRING, HIGH, ListenersDoc, false)
    .define(AdvertisedHostNameProp, STRING, HIGH, AdvertisedHostNameDoc, false)
    .define(AdvertisedPortProp, INT, HIGH, AdvertisedPortDoc, false)
    .define(AdvertisedListenersProp, STRING, HIGH, AdvertisedListenersDoc, false)
    .define(SocketSendBufferBytesProp, INT, Defaults.SocketSendBufferBytes, HIGH, SocketSendBufferBytesDoc)
    .define(SocketReceiveBufferBytesProp, INT, Defaults.SocketReceiveBufferBytes, HIGH, SocketReceiveBufferBytesDoc)
    .define(SocketRequestMaxBytesProp, INT, Defaults.SocketRequestMaxBytes, atLeast(1), HIGH, SocketRequestMaxBytesDoc)
    .define(MaxConnectionsPerIpProp, INT, Defaults.MaxConnectionsPerIp, atLeast(1), MEDIUM, MaxConnectionsPerIpDoc)
    .define(MaxConnectionsPerIpOverridesProp, STRING, Defaults.MaxConnectionsPerIpOverrides, MEDIUM, MaxConnectionsPerIpOverridesDoc)
    .define(ConnectionsMaxIdleMsProp, LONG, Defaults.ConnectionsMaxIdleMs, MEDIUM, ConnectionsMaxIdleMsDoc)

    /** ********* Log Configuration ***********/
    .define(NumPartitionsProp, INT, Defaults.NumPartitions, atLeast(1), MEDIUM, NumPartitionsDoc)
    .define(LogDirProp, STRING, Defaults.LogDir, HIGH, LogDirDoc)
    .define(LogDirsProp, STRING, HIGH, LogDirsDoc, false)
    .define(LogSegmentBytesProp, INT, Defaults.LogSegmentBytes, atLeast(Message.MinHeaderSize), HIGH, LogSegmentBytesDoc)
    .define(LogRollTimeMillisProp, LONG, HIGH, LogRollTimeMillisDoc, false)
    .define(LogRollTimeHoursProp, INT, Defaults.LogRollHours, atLeast(1), HIGH, LogRollTimeHoursDoc)
    .define(LogRollTimeJitterMillisProp, LONG, HIGH, LogRollTimeJitterMillisDoc, false)
    .define(LogRollTimeJitterHoursProp, INT, Defaults.LogRollJitterHours, atLeast(0), HIGH, LogRollTimeJitterHoursDoc)
    .define(LogRetentionTimeMillisProp, LONG, HIGH, LogRetentionTimeMillisDoc, false)
    .define(LogRetentionTimeMinutesProp, INT, HIGH, LogRetentionTimeMinsDoc, false)
    .define(LogRetentionTimeHoursProp, INT, Defaults.LogRetentionHours, HIGH, LogRetentionTimeHoursDoc)
    .define(LogRetentionBytesProp, LONG, Defaults.LogRetentionBytes, HIGH, LogRetentionBytesDoc)
    .define(LogCleanupIntervalMsProp, LONG, Defaults.LogCleanupIntervalMs, atLeast(1), MEDIUM, LogCleanupIntervalMsDoc)
    .define(LogCleanupPolicyProp, STRING, Defaults.LogCleanupPolicy, in(Defaults.Compact, Defaults.Delete), MEDIUM, LogCleanupPolicyDoc)
    .define(LogCleanerThreadsProp, INT, Defaults.LogCleanerThreads, atLeast(0), MEDIUM, LogCleanerThreadsDoc)
    .define(LogCleanerIoMaxBytesPerSecondProp, DOUBLE, Defaults.LogCleanerIoMaxBytesPerSecond, MEDIUM, LogCleanerIoMaxBytesPerSecondDoc)
    .define(LogCleanerDedupeBufferSizeProp, LONG, Defaults.LogCleanerDedupeBufferSize, MEDIUM, LogCleanerDedupeBufferSizeDoc)
    .define(LogCleanerIoBufferSizeProp, INT, Defaults.LogCleanerIoBufferSize, atLeast(0), MEDIUM, LogCleanerIoBufferSizeDoc)
    .define(LogCleanerDedupeBufferLoadFactorProp, DOUBLE, Defaults.LogCleanerDedupeBufferLoadFactor, MEDIUM, LogCleanerDedupeBufferLoadFactorDoc)
    .define(LogCleanerBackoffMsProp, LONG, Defaults.LogCleanerBackoffMs, atLeast(0), MEDIUM, LogCleanerBackoffMsDoc)
    .define(LogCleanerMinCleanRatioProp, DOUBLE, Defaults.LogCleanerMinCleanRatio, MEDIUM, LogCleanerMinCleanRatioDoc)
    .define(LogCleanerEnableProp, BOOLEAN, Defaults.LogCleanerEnable, MEDIUM, LogCleanerEnableDoc)
    .define(LogCleanerDeleteRetentionMsProp, LONG, Defaults.LogCleanerDeleteRetentionMs, MEDIUM, LogCleanerDeleteRetentionMsDoc)
    .define(LogIndexSizeMaxBytesProp, INT, Defaults.LogIndexSizeMaxBytes, atLeast(4), MEDIUM, LogIndexSizeMaxBytesDoc)
    .define(LogIndexIntervalBytesProp, INT, Defaults.LogIndexIntervalBytes, atLeast(0), MEDIUM, LogIndexIntervalBytesDoc)
    .define(LogFlushIntervalMessagesProp, LONG, Defaults.LogFlushIntervalMessages, atLeast(1), HIGH, LogFlushIntervalMessagesDoc)
    .define(LogDeleteDelayMsProp, LONG, Defaults.LogDeleteDelayMs, atLeast(0), HIGH, LogDeleteDelayMsDoc)
    .define(LogFlushSchedulerIntervalMsProp, LONG, Defaults.LogFlushSchedulerIntervalMs, HIGH, LogFlushSchedulerIntervalMsDoc)
    .define(LogFlushIntervalMsProp, LONG, HIGH, LogFlushIntervalMsDoc, false)
    .define(LogFlushOffsetCheckpointIntervalMsProp, INT, Defaults.LogFlushOffsetCheckpointIntervalMs, atLeast(0), HIGH, LogFlushOffsetCheckpointIntervalMsDoc)
    .define(LogPreAllocateProp, BOOLEAN, Defaults.LogPreAllocateEnable, MEDIUM, LogPreAllocateEnableDoc)
    .define(NumRecoveryThreadsPerDataDirProp, INT, Defaults.NumRecoveryThreadsPerDataDir, atLeast(1), HIGH, NumRecoveryThreadsPerDataDirDoc)
    .define(AutoCreateTopicsEnableProp, BOOLEAN, Defaults.AutoCreateTopicsEnable, HIGH, AutoCreateTopicsEnableDoc)
    .define(MinInSyncReplicasProp, INT, Defaults.MinInSyncReplicas, atLeast(1), HIGH, MinInSyncReplicasDoc)

    /** ********* Replication configuration ***********/
    .define(ControllerSocketTimeoutMsProp, INT, Defaults.ControllerSocketTimeoutMs, MEDIUM, ControllerSocketTimeoutMsDoc)
    .define(DefaultReplicationFactorProp, INT, Defaults.DefaultReplicationFactor, MEDIUM, DefaultReplicationFactorDoc)
    .define(ReplicaLagTimeMaxMsProp, LONG, Defaults.ReplicaLagTimeMaxMs, HIGH, ReplicaLagTimeMaxMsDoc)
    .define(ReplicaSocketTimeoutMsProp, INT, Defaults.ReplicaSocketTimeoutMs, HIGH, ReplicaSocketTimeoutMsDoc)
    .define(ReplicaSocketReceiveBufferBytesProp, INT, Defaults.ReplicaSocketReceiveBufferBytes, HIGH, ReplicaSocketReceiveBufferBytesDoc)
    .define(ReplicaFetchMaxBytesProp, INT, Defaults.ReplicaFetchMaxBytes, HIGH, ReplicaFetchMaxBytesDoc)
    .define(ReplicaFetchWaitMaxMsProp, INT, Defaults.ReplicaFetchWaitMaxMs, HIGH, ReplicaFetchWaitMaxMsDoc)
    .define(ReplicaFetchBackoffMsProp, INT, Defaults.ReplicaFetchBackoffMs, atLeast(0), MEDIUM, ReplicaFetchBackoffMsDoc)
    .define(ReplicaFetchMinBytesProp, INT, Defaults.ReplicaFetchMinBytes, HIGH, ReplicaFetchMinBytesDoc)
    .define(NumReplicaFetchersProp, INT, Defaults.NumReplicaFetchers, HIGH, NumReplicaFetchersDoc)
    .define(ReplicaHighWatermarkCheckpointIntervalMsProp, LONG, Defaults.ReplicaHighWatermarkCheckpointIntervalMs, HIGH, ReplicaHighWatermarkCheckpointIntervalMsDoc)
    .define(FetchPurgatoryPurgeIntervalRequestsProp, INT, Defaults.FetchPurgatoryPurgeIntervalRequests, MEDIUM, FetchPurgatoryPurgeIntervalRequestsDoc)
    .define(ProducerPurgatoryPurgeIntervalRequestsProp, INT, Defaults.ProducerPurgatoryPurgeIntervalRequests, MEDIUM, ProducerPurgatoryPurgeIntervalRequestsDoc)
    .define(AutoLeaderRebalanceEnableProp, BOOLEAN, Defaults.AutoLeaderRebalanceEnable, HIGH, AutoLeaderRebalanceEnableDoc)
    .define(LeaderImbalancePerBrokerPercentageProp, INT, Defaults.LeaderImbalancePerBrokerPercentage, HIGH, LeaderImbalancePerBrokerPercentageDoc)
    .define(LeaderImbalanceCheckIntervalSecondsProp, LONG, Defaults.LeaderImbalanceCheckIntervalSeconds, HIGH, LeaderImbalanceCheckIntervalSecondsDoc)
    .define(UncleanLeaderElectionEnableProp, BOOLEAN, Defaults.UncleanLeaderElectionEnable, HIGH, UncleanLeaderElectionEnableDoc)
    .define(InterBrokerSecurityProtocolProp, STRING, Defaults.InterBrokerSecurityProtocol, MEDIUM, InterBrokerSecurityProtocolDoc)
    .define(InterBrokerProtocolVersionProp, STRING, Defaults.InterBrokerProtocolVersion, MEDIUM, InterBrokerProtocolVersionDoc)

    /** ********* Controlled shutdown configuration ***********/
    .define(ControlledShutdownMaxRetriesProp, INT, Defaults.ControlledShutdownMaxRetries, MEDIUM, ControlledShutdownMaxRetriesDoc)
    .define(ControlledShutdownRetryBackoffMsProp, LONG, Defaults.ControlledShutdownRetryBackoffMs, MEDIUM, ControlledShutdownRetryBackoffMsDoc)
    .define(ControlledShutdownEnableProp, BOOLEAN, Defaults.ControlledShutdownEnable, MEDIUM, ControlledShutdownEnableDoc)

    /** ********* Consumer coordinator configuration ***********/
    .define(ConsumerMinSessionTimeoutMsProp, INT, Defaults.ConsumerMinSessionTimeoutMs, MEDIUM, ConsumerMinSessionTimeoutMsDoc)
    .define(ConsumerMaxSessionTimeoutMsProp, INT, Defaults.ConsumerMaxSessionTimeoutMs, MEDIUM, ConsumerMaxSessionTimeoutMsDoc)

    /** ********* Offset management configuration ***********/
    .define(OffsetMetadataMaxSizeProp, INT, Defaults.OffsetMetadataMaxSize, HIGH, OffsetMetadataMaxSizeDoc)
    .define(OffsetsLoadBufferSizeProp, INT, Defaults.OffsetsLoadBufferSize, atLeast(1), HIGH, OffsetsLoadBufferSizeDoc)
    .define(OffsetsTopicReplicationFactorProp, SHORT, Defaults.OffsetsTopicReplicationFactor, atLeast(1), HIGH, OffsetsTopicReplicationFactorDoc)
    .define(OffsetsTopicPartitionsProp, INT, Defaults.OffsetsTopicPartitions, atLeast(1), HIGH, OffsetsTopicPartitionsDoc)
    .define(OffsetsTopicSegmentBytesProp, INT, Defaults.OffsetsTopicSegmentBytes, atLeast(1), HIGH, OffsetsTopicSegmentBytesDoc)
    .define(OffsetsTopicCompressionCodecProp, INT, Defaults.OffsetsTopicCompressionCodec, HIGH, OffsetsTopicCompressionCodecDoc)
    .define(OffsetsRetentionMinutesProp, INT, Defaults.OffsetsRetentionMinutes, atLeast(1), HIGH, OffsetsRetentionMinutesDoc)
    .define(OffsetsRetentionCheckIntervalMsProp, LONG, Defaults.OffsetsRetentionCheckIntervalMs, atLeast(1), HIGH, OffsetsRetentionCheckIntervalMsDoc)
    .define(OffsetCommitTimeoutMsProp, INT, Defaults.OffsetCommitTimeoutMs, atLeast(1), HIGH, OffsetCommitTimeoutMsDoc)
    .define(OffsetCommitRequiredAcksProp, SHORT, Defaults.OffsetCommitRequiredAcks, HIGH, OffsetCommitRequiredAcksDoc)
    .define(DeleteTopicEnableProp, BOOLEAN, Defaults.DeleteTopicEnable, HIGH, DeleteTopicEnableDoc)
    .define(CompressionTypeProp, STRING, Defaults.CompressionType, HIGH, CompressionTypeDoc)

    /** ********* Kafka Metrics Configuration ***********/
    .define(MetricNumSamplesProp, INT, Defaults.MetricNumSamples, atLeast(1), LOW, MetricNumSamplesDoc)
    .define(MetricSampleWindowMsProp, LONG, Defaults.MetricSampleWindowMs, atLeast(1), LOW, MetricSampleWindowMsDoc)
    .define(MetricReporterClassesProp, LIST, Defaults.MetricReporterClasses, LOW, MetricReporterClassesDoc)

    /** ********* Quota configuration ***********/
    .define(ProducerQuotaBytesPerSecondDefaultProp, LONG, Defaults.ProducerQuotaBytesPerSecondDefault, atLeast(1), HIGH, ProducerQuotaBytesPerSecondDefaultDoc)
    .define(ConsumerQuotaBytesPerSecondDefaultProp, LONG, Defaults.ConsumerQuotaBytesPerSecondDefault, atLeast(1), HIGH, ConsumerQuotaBytesPerSecondDefaultDoc)
    .define(ProducerQuotaBytesPerSecondOverridesProp, STRING, Defaults.ProducerQuotaBytesPerSecondOverrides, HIGH, ProducerQuotaBytesPerSecondOverridesDoc)
    .define(ConsumerQuotaBytesPerSecondOverridesProp, STRING, Defaults.ConsumerQuotaBytesPerSecondOverrides, HIGH, ConsumerQuotaBytesPerSecondOverridesDoc)
    .define(NumQuotaSamplesProp, INT, Defaults.NumQuotaSamples, atLeast(1), LOW, NumQuotaSamplesDoc)
    .define(QuotaWindowSizeSecondsProp, INT, Defaults.QuotaWindowSizeSeconds, atLeast(1), LOW, QuotaWindowSizeSecondsDoc)

    /** ********* SSL Configuration ****************/
    .define(PrincipalBuilderClassProp, STRING, Defaults.PrincipalBuilderClass, MEDIUM, PrincipalBuilderClassDoc)
    .define(SSLProtocolProp, STRING, Defaults.SSLProtocol, MEDIUM, SSLProtocolDoc)
    .define(SSLProviderProp, STRING, MEDIUM, SSLProviderDoc, false)
    .define(SSLEnabledProtocolsProp, LIST, Defaults.SSLEnabledProtocols, MEDIUM, SSLEnabledProtocolsDoc)
    .define(SSLKeystoreTypeProp, STRING, Defaults.SSLKeystoreType, MEDIUM, SSLKeystoreTypeDoc)
    .define(SSLKeystoreLocationProp, STRING, Defaults.SSLKeystoreLocation, MEDIUM, SSLKeystoreLocationDoc)
    .define(SSLKeystorePasswordProp, STRING, Defaults.SSLKeystorePassword, MEDIUM, SSLKeystorePasswordDoc)
    .define(SSLKeyPasswordProp, STRING, Defaults.SSLKeyPassword, MEDIUM, SSLKeyPasswordDoc)
    .define(SSLTruststoreTypeProp, STRING, Defaults.SSLTruststoreType, MEDIUM, SSLTruststoreTypeDoc)
    .define(SSLTruststoreLocationProp, STRING, Defaults.SSLTruststoreLocation, MEDIUM, SSLTruststoreLocationDoc)
    .define(SSLTruststorePasswordProp, STRING, Defaults.SSLTruststorePassword, MEDIUM, SSLTruststorePasswordDoc)
    .define(SSLKeyManagerAlgorithmProp, STRING, Defaults.SSLKeyManagerAlgorithm, MEDIUM, SSLKeyManagerAlgorithmDoc)
    .define(SSLTrustManagerAlgorithmProp, STRING, Defaults.SSLTrustManagerAlgorithm, MEDIUM, SSLTrustManagerAlgorithmDoc)
    .define(SSLClientAuthProp, STRING, Defaults.SSLClientAuth, in(Defaults.SSLClientAuthRequired, Defaults.SSLClientAuthRequested, Defaults.SSLClientAuthNone), MEDIUM, SSLClientAuthDoc)
}
/** All known broker configuration names, sorted alphabetically. */
def configNames() = {
  import scala.collection.JavaConversions._
  val knownNames: Iterable[String] = configDef.names()
  knownNames.toList.sorted
}
/**
 * Check that every property name in `props` is a known configuration key.
 *
 * @throws IllegalArgumentException if any unknown key is present
 */
def validateNames(props: Properties) {
  import scala.collection.JavaConversions._
  val knownNames = configDef.names()
  props.keys.foreach { name =>
    require(knownNames.contains(name), "Unknown configuration \"%s\".".format(name))
  }
}
/** Construct a KafkaConfig directly from the supplied properties. */
def fromProps(props: Properties): KafkaConfig = KafkaConfig(props)
/**
 * Construct a KafkaConfig by layering `overrides` on top of `defaults`.
 * The second putAll wins, so keys present in both take their value from `overrides`.
 */
def fromProps(defaults: Properties, overrides: Properties): KafkaConfig = {
  val combined = new Properties()
  combined.putAll(defaults)
  combined.putAll(overrides)
  fromProps(combined)
}
}
case class KafkaConfig (props: java.util.Map[_, _]) extends AbstractConfig(KafkaConfig.configDef, props) {
/** ********* Zookeeper Configuration ***********/
val zkConnect: String = getString(KafkaConfig.ZkConnectProp)
val zkSessionTimeoutMs: Int = getInt(KafkaConfig.ZkSessionTimeoutMsProp)
// Falls back to the session timeout when no explicit connection timeout is configured.
val zkConnectionTimeoutMs: java.lang.Integer =
  Option(getInt(KafkaConfig.ZkConnectionTimeoutMsProp)).getOrElse(getInt(KafkaConfig.ZkSessionTimeoutMsProp))
val zkSyncTimeMs: Int = getInt(KafkaConfig.ZkSyncTimeMsProp)
/** ********* General Configuration ***********/
val maxReservedBrokerId: Int = getInt(KafkaConfig.MaxReservedBrokerIdProp)
// Declared `var` (not `val`) -- presumably reassigned at runtime (e.g. generated broker ids);
// TODO(review): confirm with callers that mutate it.
var brokerId: Int = getInt(KafkaConfig.BrokerIdProp)
val numNetworkThreads = getInt(KafkaConfig.NumNetworkThreadsProp)
val backgroundThreads = getInt(KafkaConfig.BackgroundThreadsProp)
val queuedMaxRequests = getInt(KafkaConfig.QueuedMaxRequestsProp)
val numIoThreads = getInt(KafkaConfig.NumIoThreadsProp)
val messageMaxBytes = getInt(KafkaConfig.MessageMaxBytesProp)
/** ********* Socket Server Configuration ***********/
val hostName = getString(KafkaConfig.HostNameProp)
val port = getInt(KafkaConfig.PortProp)
// Advertised values default to the bound host/port when not explicitly configured.
val advertisedHostName = Option(getString(KafkaConfig.AdvertisedHostNameProp)).getOrElse(hostName)
val advertisedPort: java.lang.Integer = Option(getInt(KafkaConfig.AdvertisedPortProp)).getOrElse(port)
val socketSendBufferBytes = getInt(KafkaConfig.SocketSendBufferBytesProp)
val socketReceiveBufferBytes = getInt(KafkaConfig.SocketReceiveBufferBytesProp)
val socketRequestMaxBytes = getInt(KafkaConfig.SocketRequestMaxBytesProp)
val maxConnectionsPerIp = getInt(KafkaConfig.MaxConnectionsPerIpProp)
// CSV overrides parsed via getMap (which rewraps parse errors as IllegalArgumentException),
// then each value is converted to Int.
val maxConnectionsPerIpOverrides: Map[String, Int] =
  getMap(KafkaConfig.MaxConnectionsPerIpOverridesProp, getString(KafkaConfig.MaxConnectionsPerIpOverridesProp)).map { case (k, v) => (k, v.toInt)}
val connectionsMaxIdleMs = getLong(KafkaConfig.ConnectionsMaxIdleMsProp)
/** ********* Log Configuration ***********/
val autoCreateTopicsEnable = getBoolean(KafkaConfig.AutoCreateTopicsEnableProp)
val numPartitions = getInt(KafkaConfig.NumPartitionsProp)
// The plural log-dirs property takes precedence; falls back to the single log-dir property.
val logDirs = CoreUtils.parseCsvList( Option(getString(KafkaConfig.LogDirsProp)).getOrElse(getString(KafkaConfig.LogDirProp)))
val logSegmentBytes = getInt(KafkaConfig.LogSegmentBytesProp)
val logFlushIntervalMessages = getLong(KafkaConfig.LogFlushIntervalMessagesProp)
val logCleanerThreads = getInt(KafkaConfig.LogCleanerThreadsProp)
val numRecoveryThreadsPerDataDir = getInt(KafkaConfig.NumRecoveryThreadsPerDataDirProp)
val logFlushSchedulerIntervalMs = getLong(KafkaConfig.LogFlushSchedulerIntervalMsProp)
// Config is declared as INT but exposed as Long for callers.
val logFlushOffsetCheckpointIntervalMs = getInt(KafkaConfig.LogFlushOffsetCheckpointIntervalMsProp).toLong
val logCleanupIntervalMs = getLong(KafkaConfig.LogCleanupIntervalMsProp)
val logCleanupPolicy = getString(KafkaConfig.LogCleanupPolicyProp)
val offsetsRetentionMinutes = getInt(KafkaConfig.OffsetsRetentionMinutesProp)
val offsetsRetentionCheckIntervalMs = getLong(KafkaConfig.OffsetsRetentionCheckIntervalMsProp)
val logRetentionBytes = getLong(KafkaConfig.LogRetentionBytesProp)
val logCleanerDedupeBufferSize = getLong(KafkaConfig.LogCleanerDedupeBufferSizeProp)
val logCleanerDedupeBufferLoadFactor = getDouble(KafkaConfig.LogCleanerDedupeBufferLoadFactorProp)
val logCleanerIoBufferSize = getInt(KafkaConfig.LogCleanerIoBufferSizeProp)
val logCleanerIoMaxBytesPerSecond = getDouble(KafkaConfig.LogCleanerIoMaxBytesPerSecondProp)
val logCleanerDeleteRetentionMs = getLong(KafkaConfig.LogCleanerDeleteRetentionMsProp)
val logCleanerBackoffMs = getLong(KafkaConfig.LogCleanerBackoffMsProp)
val logCleanerMinCleanRatio = getDouble(KafkaConfig.LogCleanerMinCleanRatioProp)
val logCleanerEnable = getBoolean(KafkaConfig.LogCleanerEnableProp)
val logIndexSizeMaxBytes = getInt(KafkaConfig.LogIndexSizeMaxBytesProp)
val logIndexIntervalBytes = getInt(KafkaConfig.LogIndexIntervalBytesProp)
val logDeleteDelayMs = getLong(KafkaConfig.LogDeleteDelayMsProp)
// Explicit millisecond settings win; otherwise the hour-granularity settings are converted to ms.
val logRollTimeMillis: java.lang.Long = Option(getLong(KafkaConfig.LogRollTimeMillisProp)).getOrElse(60 * 60 * 1000L * getInt(KafkaConfig.LogRollTimeHoursProp))
val logRollTimeJitterMillis: java.lang.Long = Option(getLong(KafkaConfig.LogRollTimeJitterMillisProp)).getOrElse(60 * 60 * 1000L * getInt(KafkaConfig.LogRollTimeJitterHoursProp))
// Falls back to the flush scheduler interval when no explicit flush interval is configured.
val logFlushIntervalMs: java.lang.Long = Option(getLong(KafkaConfig.LogFlushIntervalMsProp)).getOrElse(getLong(KafkaConfig.LogFlushSchedulerIntervalMsProp))
// Resolved from ms/minutes/hours settings; negative values are normalized to -1 (see getLogRetentionTimeMillis).
val logRetentionTimeMillis = getLogRetentionTimeMillis
val minInSyncReplicas = getInt(KafkaConfig.MinInSyncReplicasProp)
val logPreAllocateEnable: java.lang.Boolean = getBoolean(KafkaConfig.LogPreAllocateProp)
/** ********* Replication configuration ***********/
val controllerSocketTimeoutMs: Int = getInt(KafkaConfig.ControllerSocketTimeoutMsProp)
val defaultReplicationFactor: Int = getInt(KafkaConfig.DefaultReplicationFactorProp)
val replicaLagTimeMaxMs = getLong(KafkaConfig.ReplicaLagTimeMaxMsProp)
val replicaSocketTimeoutMs = getInt(KafkaConfig.ReplicaSocketTimeoutMsProp)
val replicaSocketReceiveBufferBytes = getInt(KafkaConfig.ReplicaSocketReceiveBufferBytesProp)
val replicaFetchMaxBytes = getInt(KafkaConfig.ReplicaFetchMaxBytesProp)
val replicaFetchWaitMaxMs = getInt(KafkaConfig.ReplicaFetchWaitMaxMsProp)
val replicaFetchMinBytes = getInt(KafkaConfig.ReplicaFetchMinBytesProp)
val replicaFetchBackoffMs = getInt(KafkaConfig.ReplicaFetchBackoffMsProp)
val numReplicaFetchers = getInt(KafkaConfig.NumReplicaFetchersProp)
val replicaHighWatermarkCheckpointIntervalMs = getLong(KafkaConfig.ReplicaHighWatermarkCheckpointIntervalMsProp)
val fetchPurgatoryPurgeIntervalRequests = getInt(KafkaConfig.FetchPurgatoryPurgeIntervalRequestsProp)
val producerPurgatoryPurgeIntervalRequests = getInt(KafkaConfig.ProducerPurgatoryPurgeIntervalRequestsProp)
val autoLeaderRebalanceEnable = getBoolean(KafkaConfig.AutoLeaderRebalanceEnableProp)
val leaderImbalancePerBrokerPercentage = getInt(KafkaConfig.LeaderImbalancePerBrokerPercentageProp)
val leaderImbalanceCheckIntervalSeconds = getLong(KafkaConfig.LeaderImbalanceCheckIntervalSecondsProp)
val uncleanLeaderElectionEnable: java.lang.Boolean = getBoolean(KafkaConfig.UncleanLeaderElectionEnableProp)
// Parsed eagerly: an invalid protocol/version string fails construction of this config.
val interBrokerSecurityProtocol = SecurityProtocol.valueOf(getString(KafkaConfig.InterBrokerSecurityProtocolProp))
val interBrokerProtocolVersion = ApiVersion(getString(KafkaConfig.InterBrokerProtocolVersionProp))
/** ********* Controlled shutdown configuration ***********/
val controlledShutdownMaxRetries = getInt(KafkaConfig.ControlledShutdownMaxRetriesProp)
val controlledShutdownRetryBackoffMs = getLong(KafkaConfig.ControlledShutdownRetryBackoffMsProp)
val controlledShutdownEnable = getBoolean(KafkaConfig.ControlledShutdownEnableProp)
/** ********* Consumer coordinator configuration ***********/
val consumerMinSessionTimeoutMs = getInt(KafkaConfig.ConsumerMinSessionTimeoutMsProp)
val consumerMaxSessionTimeoutMs = getInt(KafkaConfig.ConsumerMaxSessionTimeoutMsProp)
/** ********* Offset management configuration ***********/
val offsetMetadataMaxSize = getInt(KafkaConfig.OffsetMetadataMaxSizeProp)
val offsetsLoadBufferSize = getInt(KafkaConfig.OffsetsLoadBufferSizeProp)
val offsetsTopicReplicationFactor = getShort(KafkaConfig.OffsetsTopicReplicationFactorProp)
val offsetsTopicPartitions = getInt(KafkaConfig.OffsetsTopicPartitionsProp)
val offsetCommitTimeoutMs = getInt(KafkaConfig.OffsetCommitTimeoutMsProp)
val offsetCommitRequiredAcks = getShort(KafkaConfig.OffsetCommitRequiredAcksProp)
val offsetsTopicSegmentBytes = getInt(KafkaConfig.OffsetsTopicSegmentBytesProp)
// Option(...) guards against a null config value; the resolved codec is null when the value is absent.
val offsetsTopicCompressionCodec = Option(getInt(KafkaConfig.OffsetsTopicCompressionCodecProp)).map(value => CompressionCodec.getCompressionCodec(value)).orNull
/** ********* Metric Configuration **************/
val metricNumSamples = getInt(KafkaConfig.MetricNumSamplesProp)
val metricSampleWindowMs = getLong(KafkaConfig.MetricSampleWindowMsProp)
val metricReporterClasses: java.util.List[MetricsReporter] = getConfiguredInstances(KafkaConfig.MetricReporterClassesProp, classOf[MetricsReporter])
/** ********* SSL Configuration **************/
val principalBuilderClass = getString(KafkaConfig.PrincipalBuilderClassProp)
val sslProtocol = getString(KafkaConfig.SSLProtocolProp)
val sslProvider = getString(KafkaConfig.SSLProviderProp)
val sslEnabledProtocols = getList(KafkaConfig.SSLEnabledProtocolsProp)
val sslKeystoreType = getString(KafkaConfig.SSLKeystoreTypeProp)
val sslKeystoreLocation = getString(KafkaConfig.SSLKeystoreLocationProp)
val sslKeystorePassword = getString(KafkaConfig.SSLKeystorePasswordProp)
val sslKeyPassword = getString(KafkaConfig.SSLKeyPasswordProp)
val sslTruststoreType = getString(KafkaConfig.SSLTruststoreTypeProp)
val sslTruststoreLocation = getString(KafkaConfig.SSLTruststoreLocationProp)
val sslTruststorePassword = getString(KafkaConfig.SSLTruststorePasswordProp)
val sslKeyManagerAlgorithm = getString(KafkaConfig.SSLKeyManagerAlgorithmProp)
val sslTrustManagerAlgorithm = getString(KafkaConfig.SSLTrustManagerAlgorithmProp)
val sslClientAuth = getString(KafkaConfig.SSLClientAuthProp)
/** ********* Quota Configuration **************/
val producerQuotaBytesPerSecondDefault = getLong(KafkaConfig.ProducerQuotaBytesPerSecondDefaultProp)
val consumerQuotaBytesPerSecondDefault = getLong(KafkaConfig.ConsumerQuotaBytesPerSecondDefaultProp)
val producerQuotaBytesPerSecondOverrides = getString(KafkaConfig.ProducerQuotaBytesPerSecondOverridesProp)
val consumerQuotaBytesPerSecondOverrides = getString(KafkaConfig.ConsumerQuotaBytesPerSecondOverridesProp)
val numQuotaSamples = getInt(KafkaConfig.NumQuotaSamplesProp)
val quotaWindowSizeSeconds = getInt(KafkaConfig.QuotaWindowSizeSecondsProp)
val deleteTopicEnable = getBoolean(KafkaConfig.DeleteTopicEnableProp)
val compressionType = getString(KafkaConfig.CompressionTypeProp)
// Derived endpoint maps; see getListeners/getAdvertisedListeners for the fallback logic.
val listeners = getListeners
val advertisedListeners = getAdvertisedListeners
  /**
   * Resolves the log retention time in milliseconds, honoring the property precedence
   * log.retention.ms > log.retention.minutes > log.retention.hours.
   * Any negative configured value is normalized to -1 (meaning unlimited retention).
   */
  private def getLogRetentionTimeMillis: Long = {
    val millisInMinute = 60L * 1000L
    val millisInHour = 60L * millisInMinute
    // getLong/getInt return boxed values that may be null when the property is unset,
    // so Option(...) is used to fall through to the next-lower-precedence property.
    val millis: java.lang.Long =
      Option(getLong(KafkaConfig.LogRetentionTimeMillisProp)).getOrElse(
        Option(getInt(KafkaConfig.LogRetentionTimeMinutesProp)) match {
          case Some(mins) => millisInMinute * mins
          case None => getInt(KafkaConfig.LogRetentionTimeHoursProp) * millisInHour
        })
    if (millis < 0) return -1
    millis
  }
private def getMap(propName: String, propValue: String): Map[String, String] = {
try {
CoreUtils.parseCsvMap(propValue)
} catch {
case e: Exception => throw new IllegalArgumentException("Error parsing configuration property '%s': %s".format(propName, e.getMessage))
}
}
private def validateUniquePortAndProtocol(listeners: String) {
val endpoints = try {
val listenerList = CoreUtils.parseCsvList(listeners)
listenerList.map(listener => EndPoint.createEndPoint(listener))
} catch {
case e: Exception => throw new IllegalArgumentException("Error creating broker listeners from '%s': %s".format(listeners, e.getMessage))
}
// filter port 0 for unit tests
val endpointsWithoutZeroPort = endpoints.map(ep => ep.port).filter(_ != 0)
val distinctPorts = endpointsWithoutZeroPort.distinct
val distinctProtocols = endpoints.map(ep => ep.protocolType).distinct
require(distinctPorts.size == endpointsWithoutZeroPort.size, "Each listener must have a different port")
require(distinctProtocols.size == endpoints.size, "Each listener must have a different protocol")
}
// If the user did not define listeners but did define host or port, let's use them in backward compatible way
// If none of those are defined, we default to PLAINTEXT://:9092
  /**
   * Builds the map of security protocol to endpoint the broker listens on.
   * Uses the `listeners` property when set (after validating port/protocol uniqueness);
   * otherwise falls back to a single PLAINTEXT endpoint built from host.name/port.
   */
  private def getListeners(): immutable.Map[SecurityProtocol, EndPoint] = {
    if (getString(KafkaConfig.ListenersProp) != null) {
      validateUniquePortAndProtocol(getString(KafkaConfig.ListenersProp))
      CoreUtils.listenerListToEndPoints(getString(KafkaConfig.ListenersProp))
    } else {
      CoreUtils.listenerListToEndPoints("PLAINTEXT://" + hostName + ":" + port)
    }
  }
// If the user defined advertised listeners, we use those
// If he didn't but did define advertised host or port, we'll use those and fill in the missing value from regular host / port or defaults
// If none of these are defined, we'll use the listeners
  /**
   * Builds the map of endpoints advertised to clients, in decreasing precedence:
   * explicit `advertised.listeners`, then a PLAINTEXT endpoint from
   * advertised host/port, then the regular listeners.
   */
  private def getAdvertisedListeners(): immutable.Map[SecurityProtocol, EndPoint] = {
    if (getString(KafkaConfig.AdvertisedListenersProp) != null) {
      validateUniquePortAndProtocol(getString(KafkaConfig.AdvertisedListenersProp))
      CoreUtils.listenerListToEndPoints(getString(KafkaConfig.AdvertisedListenersProp))
    } else if (getString(KafkaConfig.AdvertisedHostNameProp) != null || getInt(KafkaConfig.AdvertisedPortProp) != null) {
      CoreUtils.listenerListToEndPoints("PLAINTEXT://" + advertisedHostName + ":" + advertisedPort)
    } else {
      getListeners()
    }
  }
private def getMetricClasses(metricClasses: java.util.List[String]): java.util.List[MetricsReporter] = {
val reporterList = new util.ArrayList[MetricsReporter]();
val iterator = metricClasses.iterator()
while (iterator.hasNext) {
val reporterName = iterator.next()
if (!reporterName.isEmpty) {
val reporter: MetricsReporter = CoreUtils.createObject[MetricsReporter](reporterName)
reporter.configure(originals)
reporterList.add(reporter)
}
}
reporterList
}
  /** Reflectively instantiates the configured PrincipalBuilder implementation. */
  private def getPrincipalBuilderClass(principalBuilderClass: String): PrincipalBuilder = {
    CoreUtils.createObject[PrincipalBuilder](principalBuilderClass)
  }
  // Run cross-property validation eagerly at construction time so a broker with an
  // inconsistent configuration fails fast instead of misbehaving later.
  validateValues()

  /** Validates invariants that span multiple configuration properties. */
  private def validateValues() {
    require(brokerId >= -1 && brokerId <= maxReservedBrokerId, "broker.id must be equal or greater than -1 and not greater than reserved.broker.max.id")
    require(logRollTimeMillis >= 1, "log.roll.ms must be equal or greater than 1")
    require(logRollTimeJitterMillis >= 0, "log.roll.jitter.ms must be equal or greater than 0")
    require(logRetentionTimeMillis >= 1 || logRetentionTimeMillis == -1, "log.retention.ms must be unlimited (-1) or, equal or greater than 1")
    require(logDirs.size > 0)
    require(logCleanerDedupeBufferSize / logCleanerThreads > 1024 * 1024, "log.cleaner.dedupe.buffer.size must be at least 1MB per cleaner thread.")
    require(replicaFetchWaitMaxMs <= replicaSocketTimeoutMs, "replica.socket.timeout.ms should always be at least replica.fetch.wait.max.ms" +
      " to prevent unnecessary socket timeouts")
    require(replicaFetchMaxBytes >= messageMaxBytes, "replica.fetch.max.bytes should be equal or greater than message.max.bytes")
    require(replicaFetchWaitMaxMs <= replicaLagTimeMaxMs, "replica.fetch.wait.max.ms should always be at least replica.lag.time.max.ms" +
      " to prevent frequent changes in ISR")
    require(offsetCommitRequiredAcks >= -1 && offsetCommitRequiredAcks <= offsetsTopicReplicationFactor,
      "offsets.commit.required.acks must be greater or equal -1 and less or equal to offsets.topic.replication.factor")
    require(BrokerCompressionCodec.isValid(compressionType), "compression.type : " + compressionType + " is not valid." +
      " Valid options are " + BrokerCompressionCodec.brokerCompressionOptions.mkString(","))
  }
  /**
   * Collects the SSL/security-channel related settings into a mutable Java map,
   * keyed by the corresponding KafkaConfig property names, for passing to the
   * network channel layer.
   */
  def channelConfigs: java.util.Map[String, Object] = {
    val channelConfigs = new java.util.HashMap[String, Object]()
    import kafka.server.KafkaConfig._
    // The principal builder is resolved to a Class object here; Class.forName throws
    // if the configured name cannot be loaded.
    channelConfigs.put(PrincipalBuilderClassProp, Class.forName(principalBuilderClass))
    channelConfigs.put(SSLProtocolProp, sslProtocol)
    channelConfigs.put(SSLEnabledProtocolsProp, sslEnabledProtocols)
    channelConfigs.put(SSLKeystoreTypeProp, sslKeystoreType)
    channelConfigs.put(SSLKeystoreLocationProp, sslKeystoreLocation)
    channelConfigs.put(SSLKeystorePasswordProp, sslKeystorePassword)
    channelConfigs.put(SSLKeyPasswordProp, sslKeyPassword)
    channelConfigs.put(SSLTruststoreTypeProp, sslTruststoreType)
    channelConfigs.put(SSLTruststoreLocationProp, sslTruststoreLocation)
    channelConfigs.put(SSLTruststorePasswordProp, sslTruststorePassword)
    channelConfigs.put(SSLKeyManagerAlgorithmProp, sslKeyManagerAlgorithm)
    channelConfigs.put(SSLTrustManagerAlgorithmProp, sslTrustManagerAlgorithm)
    channelConfigs.put(SSLClientAuthProp, sslClientAuth)
    channelConfigs
  }
}
| mpoindexter/kafka | core/src/main/scala/kafka/server/KafkaConfig.scala | Scala | apache-2.0 | 63,618 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.features
import org.apache.spark.annotation.{DeveloperApi, Unstable}
import org.apache.spark.deploy.k8s.KubernetesExecutorConf
/**
 * :: DeveloperApi ::
 *
 * A base interface to help users extend custom feature steps on the executor side.
 *
 * NOTE(review): the original note here said "used only in driver or both in driver and
 * executor" — for an executor-side step that reads like a copy-paste from the driver
 * variant; confirm against KubernetesDriverCustomFeatureConfigStep.
 */
@Unstable
@DeveloperApi
trait KubernetesExecutorCustomFeatureConfigStep extends KubernetesFeatureConfigStep {

  /**
   * Initializes the configuration for a user-supplied executor feature step. This is
   * only applicable when the user specified `spark.kubernetes.executor.pod.featureSteps`;
   * init is called after the feature step has been loaded.
   */
  def init(config: KubernetesExecutorConf): Unit
}
| shaneknapp/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/KubernetesExecutorCustomFeatureConfigStep.scala | Scala | apache-2.0 | 1,583 |
package org.katis.capnproto.runtime
import java.nio.ByteBuffer
import java.nio.channels.WritableByteChannel
/**
 * Channel that applies Cap'n Proto "packing" compression while writing.
 *
 * Each 8-byte word of input is emitted as a tag byte — bit i set iff byte i of the
 * word is nonzero — followed by only the nonzero bytes. A 0x00 tag is followed by a
 * count of additional all-zero words; a 0xff tag is followed by a count of words
 * copied verbatim (words with at most one zero byte are not worth packing).
 */
class PackedOutputStream(output: BufferedOutputStream) extends WritableByteChannel {
  val inner = output

  /**
   * Packs `inBuf` into the inner stream's write buffer and returns the number of
   * input bytes consumed (always `inBuf.remaining()` at entry).
   *
   * NOTE(review): callers appear expected to supply whole 8-byte words; behavior for
   * a buffer whose remaining length is not a multiple of 8 is not guarded here — confirm.
   */
  def write(inBuf: ByteBuffer): Int = {
    val length = inBuf.remaining()
    var out = this.inner.writeBuffer
    // Fallback scratch buffer used when the inner buffer has fewer than the 10 bytes
    // a worst-case word (tag + 8 bytes + run count) may need.
    val slowBuffer = ByteBuffer.allocate(20)
    var inPtr = inBuf.position()
    val inEnd = inPtr + length
    while (inPtr < inEnd) {
      if (out.remaining() < 10) {
        // Not enough room for a worst-case word: flush the slow buffer if we were
        // using it, then switch (back) to the slow buffer for this word.
        if (out == slowBuffer) {
          val oldLimit = out.limit()
          out.limit(out.position())
          out.rewind()
          this.inner.write(out)
          out.limit(oldLimit)
        }
        out = slowBuffer
        out.rewind()
      }
      // Reserve one byte for the tag; it is patched in after the word is scanned.
      val tagPos = out.position()
      out.position(tagPos + 1)
      var curByte: Byte = 0
      // For each of the 8 bytes: write it, then step the position back over it if it
      // was zero (bit - 1 is 0 for nonzero bytes, -1 for zero bytes), so only nonzero
      // bytes remain in the output.
      curByte = inBuf.get(inPtr)
      val bit0 = if (curByte != 0) 1.toByte else 0.toByte
      out.put(curByte)
      out.position(out.position() + bit0 - 1)
      inPtr += 1
      curByte = inBuf.get(inPtr)
      val bit1 = if (curByte != 0) 1.toByte else 0.toByte
      out.put(curByte)
      out.position(out.position() + bit1 - 1)
      inPtr += 1
      curByte = inBuf.get(inPtr)
      val bit2 = if (curByte != 0) 1.toByte else 0.toByte
      out.put(curByte)
      out.position(out.position() + bit2 - 1)
      inPtr += 1
      curByte = inBuf.get(inPtr)
      val bit3 = if (curByte != 0) 1.toByte else 0.toByte
      out.put(curByte)
      out.position(out.position() + bit3 - 1)
      inPtr += 1
      curByte = inBuf.get(inPtr)
      val bit4 = if (curByte != 0) 1.toByte else 0.toByte
      out.put(curByte)
      out.position(out.position() + bit4 - 1)
      inPtr += 1
      curByte = inBuf.get(inPtr)
      val bit5 = if (curByte != 0) 1.toByte else 0.toByte
      out.put(curByte)
      out.position(out.position() + bit5 - 1)
      inPtr += 1
      curByte = inBuf.get(inPtr)
      val bit6 = if (curByte != 0) 1.toByte else 0.toByte
      out.put(curByte)
      out.position(out.position() + bit6 - 1)
      inPtr += 1
      curByte = inBuf.get(inPtr)
      val bit7 = if (curByte != 0) 1.toByte else 0.toByte
      out.put(curByte)
      out.position(out.position() + bit7 - 1)
      inPtr += 1
      // Assemble the tag (bit i set iff byte i of the word was nonzero) and patch it
      // into the slot reserved above.
      val tag = ((bit0 << 0) | (bit1 << 1) | (bit2 << 2) | (bit3 << 3) |
        (bit4 << 4) |
        (bit5 << 5) |
        (bit6 << 6) |
        (bit7 << 7)).toByte
      out.put(tagPos, tag)
      if (tag == 0) {
        // All-zero word: count up to 255 following all-zero words and emit the count.
        val runStart = inPtr
        var limit = inEnd
        if (limit - inPtr > 255 * 8) {
          limit = inPtr + 255 * 8
        }
        while (inPtr < limit && inBuf.getLong(inPtr) == 0) {
          inPtr += 8
        }
        out.put(((inPtr - runStart) / 8).toByte)
      } else if (tag == 0xff.toByte) {
        // Fully nonzero word: scan forward for a run of "incompressible" words
        // (fewer than two zero bytes each), then emit the run length and copy the
        // run verbatim.
        val runStart = inPtr
        var limit = inEnd
        if (limit - inPtr > 255 * 8) {
          limit = inPtr + 255 * 8
        }
        var run = true
        while (run && inPtr < limit) {
          var c: Byte = 0
          // c counts the zero bytes in the next word.
          for (ii <- 0.until(8)) {
            c = (c + (if (inBuf.get(inPtr) == 0) 1 else 0).toByte).toByte
            inPtr += 1
          }
          if (c >= 2) {
            // Word compresses well; back up so it is packed normally and end the run.
            inPtr -= 8
            run = false
          }
        }
        val count = inPtr - runStart
        out.put((count / 8).toByte)
        if (count <= out.remaining()) {
          // Run fits in the current output buffer: copy it in one bulk put.
          inBuf.position(runStart)
          val slice = inBuf.slice()
          slice.limit(count)
          out.put(slice)
        } else {
          // Run does not fit: flush the slow buffer if in use, write the run straight
          // to the inner stream, and resume with the inner write buffer.
          if (out == slowBuffer) {
            val oldLimit = out.limit()
            out.limit(out.position())
            out.rewind()
            this.inner.write(out)
            out.limit(oldLimit)
          }
          inBuf.position(runStart)
          val slice = inBuf.slice()
          slice.limit(count)
          while (slice.hasRemaining) {
            this.inner.write(slice)
          }
          out = this.inner.writeBuffer
        }
      }
    }
    // Flush any leftover bytes sitting in the slow buffer before returning.
    if (out == slowBuffer) {
      out.limit(out.position())
      out.rewind()
      this.inner.write(out)
    }
    inBuf.position(inPtr)
    length
  }

  /** Closes the underlying buffered stream. */
  def close() {
    this.inner.close()
  }

  /** Open/closed state is delegated to the underlying stream. */
  def isOpen(): Boolean = this.inner.isOpen
}
| katis/capnp-scala | runtime/shared/src/main/scala-2.11/org/katis/capnproto/runtime/PackedOutputStream.scala | Scala | mit | 4,232 |
package shapeless.contrib.scalaz
import shapeless.Lazy
import scalaz._
/** Lower-priority instances: Equal for Free, shadowed by the Order-based instances above. */
sealed trait FreeInstances0 {
  // Lazy is used for the recursive F[Free[F, A]] instance to avoid infinite
  // implicit expansion at resolution time.
  implicit def freeEqual[F[_], A](implicit A0: Equal[A], FF0: Lazy[Equal[F[Free[F, A]]]], F0: Functor[F]): Equal[Free[F, A]] =
    new FreeEqual[F, A] {
      def FF = FF0.value
      def F = F0
      def A = A0
    }
}
/** scalaz Order and Show instances for Free monads, derived via `resume`. */
trait FreeInstances extends FreeInstances0 {
  implicit def freeOrder[F[_], A](implicit A0: Order[A], FF0: Lazy[Order[F[Free[F, A]]]], F0: Functor[F]): Order[Free[F, A]] =
    new FreeOrder[F, A] {
      def FF = FF0.value
      def F = F0
      def A = A0
    }

  // Renders one resumption step: Return(..) for a pure value, Suspend(..) for a
  // suspended F layer (shown via the recursively-derived Show instance).
  implicit def freeShow[F[_], A](implicit A: Show[A], FF: Lazy[Show[F[Free[F, A]]]], F0: Functor[F]): Show[Free[F, A]] =
    Show.shows{_.resume match {
      case \/-(a) => "Return(" + A.shows(a) + ")"
      case -\/(a) => "Suspend(" + FF.value.shows(a) + ")"
    }}
}
/**
 * Equal for Free: two values are equal when one `resume` step agrees — both suspend
 * with equal F layers, or both return equal pure values. Mixed shapes are unequal.
 */
private sealed trait FreeEqual[F[_], A] extends Equal[Free[F, A]] {
  def FF: Equal[F[Free[F, A]]]
  def A: Equal[A]
  implicit def F: Functor[F]

  override final def equal(a: Free[F, A], b: Free[F, A]) =
    (a.resume, b.resume) match {
      case (-\/(a), -\/(b)) => FF.equal(a, b)
      case (\/-(a), \/-(b)) => A.equal(a, b)
      case _ => false
    }
}
/**
 * Order for Free: compares one `resume` step; pure values (Return) sort after
 * suspended layers (Suspend) when the shapes differ.
 */
private sealed trait FreeOrder[F[_], A] extends Order[Free[F, A]] with FreeEqual[F, A] {
  def FF: Order[F[Free[F, A]]]
  def A: Order[A]
  implicit def F: Functor[F]

  def order(a: Free[F, A], b: Free[F, A]) =
    (a.resume, b.resume) match {
      case (-\/(a), -\/(b)) => FF.order(a, b)
      case (\/-(a), \/-(b)) => A.order(a, b)
      case (\/-(_), -\/(_)) => Ordering.GT
      case (-\/(_), \/-(_)) => Ordering.LT
    }
}
| milessabin/shapeless-contrib | scalaz/src/main/scala/free.scala | Scala | mit | 1,654 |
package paperdoll.std
import paperdoll.core.effect.GenericBind
import paperdoll.core.effect.GenericHandler
import paperdoll.core.effect.Effects
import paperdoll.core.effect.Arr
import paperdoll.core.effect.Pure
import paperdoll.core.layer.Layers
import shapeless.Coproduct
object OptionLayer {
  /** Options are handled by: if Some, run the continuation, otherwise
   * return Pure(None)
   */
  def handleOption: GenericHandler.Aux[Option_, Option] =
    new GenericBind[Option_] {
      override type O[X] = Option[X]
      // A pure value lifts into Some.
      override def pure[A](a: A) = Some(a)
      // None short-circuits the whole computation to Pure(None); Some feeds the
      // wrapped value to the continuation.
      override def bind[V, RR <: Coproduct, RL <: Layers[RR], A](eff: Option[V], cont: Arr[RR, RL, V, Option[A]]) =
        eff.fold[Effects[RR, RL, Option[A]]](Pure(None))(cont)
    }
}
/*
* Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.servicemanager.ambari.rest
import scala.concurrent.Future
import com.ning.http.client.{RequestBuilder, Request}
import dispatch.{Future => _, _}, Defaults._
import net.liftweb.json._
import net.liftweb.json.JsonAST.JString
import net.liftweb.json.JsonDSL._
import es.tid.cosmos.servicemanager.ServiceError
/**
* Wraps Ambari's host-related REST API calls.
*
* @param hostInfo the Ambari JSON response that describes the host
* @param clusterBaseUrl the base url of the cluster
*/
/**
 * Wraps Ambari's host-related REST API calls.
 */
private[ambari] class Host private[ambari](hostInfo: JValue, clusterBaseUrl: Request) extends RequestProcessor {

  /** Public host name extracted from Ambari's host description; fails fast with a
    * ServiceError when the response lacks the expected Hosts/public_host_name field. */
  val name = hostInfo \\ "Hosts" \\ "public_host_name" match {
    case JString(hostName) => hostName
    case _ =>
      throw new ServiceError("Ambari's host information response doesn't contain a " +
        s"""Hosts/public_host_name element. Context data:
          | Request: ${clusterBaseUrl.toString}
          | Response: ${pretty(render(hostInfo))}""".stripMargin)
  }

  /**
   * Given a sequence of component names, add each component to the host. The services of each of
   * the components must have been added to the cluster previously.
   *
   * An empty sequence is a no-op that completes immediately.
   */
  def addComponents(componentNames: Seq[String]): Future[Unit] = {
    def getJsonForComponent(componentName: String) =
      "HostRoles" -> ("component_name" -> componentName)
    def ignoreResult(result: JValue) {}
    if (componentNames.nonEmpty)
      performRequest(new RequestBuilder(clusterBaseUrl) / "hosts"
        <<? Map("Hosts/host_name" -> name)
        << compact(render("host_components" -> componentNames.map(getJsonForComponent))))
        .map(ignoreResult)
    // Pass () explicitly: the parameterless Future.successful() relied on
    // deprecated auto unit-adaptation.
    else Future.successful(())
  }

  /** Names of the components currently registered on this host. */
  def getComponentNames: Seq[String] = as.FlatValues(hostInfo, "host_components", "component_name")
}
| telefonicaid/fiware-cosmos-platform | ambari-service-manager/src/main/scala/es/tid/cosmos/servicemanager/ambari/rest/Host.scala | Scala | apache-2.0 | 2,439 |
package com.sksamuel.scapegoat.inspections.naming
import scala.reflect.internal.Flags
import com.sksamuel.scapegoat.{Inspection, InspectionContext, Inspector, Levels}
/**
* @author
* Stephen Samuel
*/
/** Inspection flagging method names that do not follow lowerCamelCase conventions. */
class MethodNames
    extends Inspection(
      text = "Method name not recommended",
      defaultLevel = Levels.Info,
      description = "Warns on method names that don't adhere to the Scala style guidelines.",
      explanation =
        "Methods should be in camelCase style with the first letter lower-case. See http://docs.scala-lang.org/style/naming-conventions.html#methods."
    ) {

  def inspector(context: InspectionContext): Inspector =
    new Inspector(context) {
      override def postTyperTraverser: context.Traverser =
        new context.Traverser {

          import context.global._

          // Accepts camelCase names (optionally encoded setters ending in _$eq) and
          // the unary operator methods unary_- / unary_+ / unary_! / unary_~.
          // NOTE(review): the alternation is ungrouped, so '^' binds only to the first
          // branch and '$' only to the second — confirm this anchoring is intentional.
          private val regex = "^([a-z][A-Za-z0-9]*(_\\\\$eq)?)|(unary_\\\\$(minus|plus|bang|tilde))$"

          override def inspect(tree: Tree): Unit = {
            tree match {
              // Skip compiler-synthesized defs, generated accessors, constructors,
              // and purely symbolic (operator) names before applying the regex.
              case dd: DefDef if dd.symbol != null && dd.symbol.isSynthetic =>
              case DefDef(mods, _, _, _, _, _) if mods.hasFlag(Flags.ACCESSOR) =>
              case DefDef(_, nme.CONSTRUCTOR, _, _, _, _) =>
              case DefDef(_, _, _, _, _, _) if tree.symbol != null && tree.symbol.isConstructor =>
              case DefDef(_, name, _, _, _, _) if !name.decode.exists(_.isLetter) =>
              case DefDef(_, name, _, _, _, _) if !name.toString.matches(regex) =>
                context.warn(tree.pos, self)
              case _ => continue(tree)
            }
          }
        }
    }
}
| sksamuel/scapegoat | src/main/scala/com/sksamuel/scapegoat/inspections/naming/MethodNames.scala | Scala | apache-2.0 | 1,699 |
package controllers.resource
import play.api.libs.json.JsValue
import play.api.libs.json.Json
import utils.resource.LinkGenerator
import utils.resource.Jsonable
/** JSON-renderable view of a team resource, exposing its name and a link to its matches. */
class Team(
    teamEntity: models.abstracts.Team
) extends Jsonable {

  /** Hyperlink to the match listing filtered by this team's id. */
  lazy val genuineLinkToMatches: String =
    LinkGenerator.linkToMatchesWithTeamId(teamEntity.id.toString)

  /** JSON representation: the team's name plus a link object pointing at its matches. */
  lazy val toJson: JsValue = {
    val linkToMatches = new Link(MatchOverview.multipleResourceName, genuineLinkToMatches)
    Json.toJson(
      Map(
        Team.nameName -> Json.toJson(teamEntity.name),
        Link.resourceName -> linkToMatches.toJson
      )
    )
  }
}
object Team {
  // JSON field-name constants shared with the Team resource rendering.
  val resourceName: String = "team"
  val nameName: String = "name"
}
/**
* Copyright 2015, 2016 Gianluca Amato <gianluca.amato@unich.it>
*
* This file is part of ScalaFix.
* ScalaFix is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ScalaFix is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of a
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ScalaFix. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.scalafix.finite
import it.unich.scalafix.FixpointSolverTracer
import it.unich.scalafix.assignments.{IOAssignment, InputAssignment}
/**
* A fixpoint solver based on a worklist.
*/
/**
 * A fixpoint solver based on a worklist.
 */
object WorkListSolver {
  /**
   * Solve a finite equation system.
   *
   * @tparam U type of the unknowns for the equation system
   * @tparam V type of values of the equatiom system
   * @param eqs equation system to solve
   * @param start a assignment to start the evaluation (defaults to `eqs.initial`)
   * @param tracer a tracer to track the behaviour of the solver (defaults to the empty tracer)
   * @return the solution of the equation system
   */
  def apply[U, V](eqs: FiniteEquationSystem[U, V])
                 (
                   start: InputAssignment[U, V] = eqs.initial,
                   tracer: FixpointSolverTracer[U, V] = FixpointSolverTracer.empty[U, V]
                 ): IOAssignment[U, V] = {
    val current = start.toIOAssignment
    tracer.initialized(current)
    // is it better to use a Queue for a worklist ?
    // LinkedHashSet deduplicates pending unknowns while keeping insertion order.
    val workList = collection.mutable.LinkedHashSet.empty[U]
    workList ++= eqs.unknowns
    while (workList.nonEmpty) {
      // Pop one unknown and re-evaluate its equation under the current assignment.
      val x = workList.head
      workList.remove(x)
      val newval = eqs.body(current)(x)
      tracer.evaluated(current, x, newval)
      if (newval != current(x)) {
        // Value changed: record it and re-schedule every unknown it influences.
        current(x) = newval
        // variant with Queue
        // for (y <- eqs.infl(x); if !(workList contains y)) workList += y
        workList ++= eqs.infl(x)
      }
    }
    tracer.completed(current)
    current
  }
}
| jandom-devel/ScalaFix | core/src/main/scala/it/unich/scalafix/finite/WorkListSolver.scala | Scala | gpl-3.0 | 2,347 |
//#imports
import com.twitter.finagle.{Httpx, Service}
import com.twitter.finagle.httpx
import com.twitter.util.{Await, Future}
//#imports
/**
 * Minimal Finagle HTTP quickstart server: a service that answers every request
 * with 200 OK, bound on port 8080. The `//#` markers delimit snippets that are
 * extracted into the Sphinx documentation.
 */
object Server extends App {
  //#service
  val service = new Service[httpx.Request, httpx.Response] {
    def apply(req: httpx.Request): Future[httpx.Response] =
      Future.value(
        httpx.Response(req.version, httpx.Status.Ok)
      )
  }
  //#service
  //#builder
  val server = Httpx.serve(":8080", service)
  Await.ready(server)
  //#builder
}
| travisbrown/finagle | doc/src/sphinx/code/quickstart/Server.scala | Scala | apache-2.0 | 487 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.serializer
import java.io._
import java.nio.ByteBuffer
import java.util.Locale
import javax.annotation.Nullable
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
import scala.util.control.NonFatal
import com.esotericsoftware.kryo.{Kryo, KryoException, Serializer => KryoClassSerializer}
import com.esotericsoftware.kryo.io.{Input => KryoInput, Output => KryoOutput}
import com.esotericsoftware.kryo.io.{UnsafeInput => KryoUnsafeInput, UnsafeOutput => KryoUnsafeOutput}
import com.esotericsoftware.kryo.pool.{KryoCallback, KryoFactory, KryoPool}
import com.esotericsoftware.kryo.serializers.{JavaSerializer => KryoJavaSerializer}
import com.twitter.chill.{AllScalaRegistrar, EmptyScalaKryoInstantiator}
import org.apache.avro.generic.{GenericData, GenericRecord}
import org.roaringbitmap.RoaringBitmap
import org.apache.spark._
import org.apache.spark.api.python.PythonBroadcast
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.Kryo._
import org.apache.spark.network.util.ByteUnit
import org.apache.spark.scheduler.{CompressedMapStatus, HighlyCompressedMapStatus}
import org.apache.spark.storage._
import org.apache.spark.util.{BoundedPriorityQueue, ByteBufferInputStream, SerializableConfiguration, SerializableJobConf, Utils}
import org.apache.spark.util.collection.CompactBuffer
/**
* A Spark serializer that uses the <a href="https://code.google.com/p/kryo/">
* Kryo serialization library</a>.
*
* @note This serializer is not guaranteed to be wire-compatible across different versions of
* Spark. It is intended to be used to serialize/de-serialize data within a single
* Spark application.
*/
class KryoSerializer(conf: SparkConf)
  extends org.apache.spark.serializer.Serializer
    with Logging
    with Serializable {

  // Buffer sizes are configured in KiB/MiB but Kryo's buffers are addressed with an
  // Int byte count, so both sizes must stay below 2 GiB.
  private val bufferSizeKb = conf.get(KRYO_SERIALIZER_BUFFER_SIZE)

  if (bufferSizeKb >= ByteUnit.GiB.toKiB(2)) {
    throw new IllegalArgumentException(s"${KRYO_SERIALIZER_BUFFER_SIZE.key} must be less than " +
      s"2048 MiB, got: + ${ByteUnit.KiB.toMiB(bufferSizeKb)} MiB.")
  }
  private val bufferSize = ByteUnit.KiB.toBytes(bufferSizeKb).toInt

  val maxBufferSizeMb = conf.get(KRYO_SERIALIZER_MAX_BUFFER_SIZE).toInt
  if (maxBufferSizeMb >= ByteUnit.GiB.toMiB(2)) {
    throw new IllegalArgumentException(s"${KRYO_SERIALIZER_MAX_BUFFER_SIZE.key} must be less " +
      s"than 2048 MiB, got: $maxBufferSizeMb MiB.")
  }
  private val maxBufferSize = ByteUnit.MiB.toBytes(maxBufferSizeMb).toInt

  private val referenceTracking = conf.get(KRYO_REFERENCE_TRACKING)
  private val registrationRequired = conf.get(KRYO_REGISTRATION_REQUIRED)
  // User-supplied registrator / class-name lists, trimmed and with blanks dropped.
  private val userRegistrators = conf.get(KRYO_USER_REGISTRATORS)
    .map(_.trim)
    .filter(!_.isEmpty)
  private val classesToRegister = conf.get(KRYO_CLASSES_TO_REGISTER)
    .map(_.trim)
    .filter(!_.isEmpty)

  private val avroSchemas = conf.getAvroSchema
  // whether to use unsafe based IO for serialization
  private val useUnsafe = conf.get(KRYO_USE_UNSAFE)
  private val usePool = conf.get(KRYO_USE_POOL)

  /** Creates a Kryo output buffer sized by the configured initial/max buffer sizes. */
  def newKryoOutput(): KryoOutput =
    if (useUnsafe) {
      new KryoUnsafeOutput(bufferSize, math.max(bufferSize, maxBufferSize))
    } else {
      new KryoOutput(bufferSize, math.max(bufferSize, maxBufferSize))
    }

  // Factory handed to the Kryo pool; each pooled instance is built via newKryo().
  @transient
  private lazy val factory: KryoFactory = new KryoFactory() {
    override def create: Kryo = {
      newKryo()
    }
  }

  // Wraps the KryoPool so it can be rebuilt (reset) when the default class loader
  // changes; pooled Kryo instances capture the loader at creation time.
  private class PoolWrapper extends KryoPool {
    private var pool: KryoPool = getPool

    override def borrow(): Kryo = pool.borrow()

    override def release(kryo: Kryo): Unit = pool.release(kryo)

    override def run[T](kryoCallback: KryoCallback[T]): T = pool.run(kryoCallback)

    def reset(): Unit = {
      pool = getPool
    }

    private def getPool: KryoPool = {
      new KryoPool.Builder(factory).softReferences.build
    }
  }

  @transient
  private lazy val internalPool = new PoolWrapper

  def pool: KryoPool = internalPool

  /**
   * Builds a fully-configured Kryo instance: reference tracking, built-in and
   * user-requested class registrations, Chill's Scala registrations, and the
   * appropriate class loader.
   */
  def newKryo(): Kryo = {
    val instantiator = new EmptyScalaKryoInstantiator
    val kryo = instantiator.newKryo()
    kryo.setRegistrationRequired(registrationRequired)

    val oldClassLoader = Thread.currentThread.getContextClassLoader
    val classLoader = defaultClassLoader.getOrElse(Thread.currentThread.getContextClassLoader)

    // Allow disabling Kryo reference tracking if user knows their object graphs don't have loops.
    // Do this before we invoke the user registrator so the user registrator can override this.
    kryo.setReferences(referenceTracking)

    for (cls <- KryoSerializer.toRegister) {
      kryo.register(cls)
    }
    for ((cls, ser) <- KryoSerializer.toRegisterSerializer) {
      kryo.register(cls, ser)
    }

    // For results returned by asJavaIterable. See JavaIterableWrapperSerializer.
    kryo.register(JavaIterableWrapperSerializer.wrapperClass, new JavaIterableWrapperSerializer)

    // Allow sending classes with custom Java serializers
    kryo.register(classOf[SerializableWritable[_]], new KryoJavaSerializer())
    kryo.register(classOf[SerializableConfiguration], new KryoJavaSerializer())
    kryo.register(classOf[SerializableJobConf], new KryoJavaSerializer())
    kryo.register(classOf[PythonBroadcast], new KryoJavaSerializer())

    kryo.register(classOf[GenericRecord], new GenericAvroSerializer(avroSchemas))
    kryo.register(classOf[GenericData.Record], new GenericAvroSerializer(avroSchemas))

    try {
      // scalastyle:off classforname
      // Use the default classloader when calling the user registrator.
      Thread.currentThread.setContextClassLoader(classLoader)
      // Register classes given through spark.kryo.classesToRegister.
      classesToRegister
        .foreach { className => kryo.register(Class.forName(className, true, classLoader)) }
      // Allow the user to register their own classes by setting spark.kryo.registrator.
      userRegistrators
        .map(Class.forName(_, true, classLoader).getConstructor().
          newInstance().asInstanceOf[KryoRegistrator])
        .foreach { reg => reg.registerClasses(kryo) }
      // scalastyle:on classforname
    } catch {
      case e: Exception =>
        throw new SparkException(s"Failed to register classes with Kryo", e)
    } finally {
      Thread.currentThread.setContextClassLoader(oldClassLoader)
    }

    // Register Chill's classes; we do this after our ranges and the user's own classes to let
    // our code override the generic serializers in Chill for things like Seq
    new AllScalaRegistrar().apply(kryo)

    // Register types missed by Chill.
    // scalastyle:off
    kryo.register(classOf[Array[Tuple1[Any]]])
    kryo.register(classOf[Array[Tuple2[Any, Any]]])
    kryo.register(classOf[Array[Tuple3[Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple4[Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple5[Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple6[Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple7[Any, Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple8[Any, Any, Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple9[Any, Any, Any, Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple10[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple11[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple12[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple13[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple14[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple15[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple16[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple17[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple18[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple19[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple20[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple21[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]]])
    kryo.register(classOf[Array[Tuple22[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]]])
    // scalastyle:on

    kryo.register(None.getClass)
    kryo.register(Nil.getClass)
    kryo.register(Utils.classForName("scala.collection.immutable.$colon$colon"))
    kryo.register(Utils.classForName("scala.collection.immutable.Map$EmptyMap$"))
    kryo.register(classOf[ArrayBuffer[Any]])

    // We can't load those class directly in order to avoid unnecessary jar dependencies.
    // We load them safely, ignore it if the class not found.
    Seq(
      "org.apache.spark.ml.attribute.Attribute",
      "org.apache.spark.ml.attribute.AttributeGroup",
      "org.apache.spark.ml.attribute.BinaryAttribute",
      "org.apache.spark.ml.attribute.NominalAttribute",
      "org.apache.spark.ml.attribute.NumericAttribute",

      "org.apache.spark.ml.feature.Instance",
      "org.apache.spark.ml.feature.LabeledPoint",
      "org.apache.spark.ml.feature.OffsetInstance",

      "org.apache.spark.ml.linalg.DenseMatrix",
      "org.apache.spark.ml.linalg.DenseVector",
      "org.apache.spark.ml.linalg.Matrix",
      "org.apache.spark.ml.linalg.SparseMatrix",
      "org.apache.spark.ml.linalg.SparseVector",
      "org.apache.spark.ml.linalg.Vector",

      "org.apache.spark.ml.stat.distribution.MultivariateGaussian",
      "org.apache.spark.ml.tree.impl.TreePoint",

      "org.apache.spark.mllib.clustering.VectorWithNorm",

      "org.apache.spark.mllib.linalg.DenseMatrix",
      "org.apache.spark.mllib.linalg.DenseVector",
      "org.apache.spark.mllib.linalg.Matrix",
      "org.apache.spark.mllib.linalg.SparseMatrix",
      "org.apache.spark.mllib.linalg.SparseVector",
      "org.apache.spark.mllib.linalg.Vector",

      "org.apache.spark.mllib.regression.LabeledPoint",

      "org.apache.spark.mllib.stat.distribution.MultivariateGaussian"
    ).foreach { name =>
      try {
        val clazz = Utils.classForName(name)
        kryo.register(clazz)
      } catch {
        case NonFatal(_) => // do nothing
        case _: NoClassDefFoundError if Utils.isTesting => // See SPARK-23422.
      }
    }

    kryo.setClassLoader(classLoader)
    kryo
  }

  /** Updates the class loader and rebuilds the pool so pooled instances pick it up. */
  override def setDefaultClassLoader(classLoader: ClassLoader): Serializer = {
    super.setDefaultClassLoader(classLoader)
    internalPool.reset()
    this
  }

  override def newInstance(): SerializerInstance = {
    new KryoSerializerInstance(this, useUnsafe, usePool)
  }

  private[spark] override lazy val supportsRelocationOfSerializedObjects: Boolean = {
    // If auto-reset is disabled, then Kryo may store references to duplicate occurrences of objects
    // in the stream rather than writing those objects' serialized bytes, breaking relocation. See
    // https://groups.google.com/d/msg/kryo-users/6ZUSyfjjtdo/FhGG1KHDXPgJ for more details.
    newInstance().asInstanceOf[KryoSerializerInstance].getAutoReset()
  }
}
/**
 * A [[SerializationStream]] that writes objects to `outStream` using Kryo.
 *
 * Borrows a Kryo instance from `serInstance` at construction time and returns it to the
 * pool in `close()`, so every stream must be closed exactly once to avoid leaking the
 * borrowed instance.
 */
private[spark]
class KryoSerializationStream(
    serInstance: KryoSerializerInstance,
    outStream: OutputStream,
    useUnsafe: Boolean) extends SerializationStream {

  // Kryo output buffer wrapping the underlying stream; set to null after close().
  private[this] var output: KryoOutput =
    if (useUnsafe) new KryoUnsafeOutput(outStream) else new KryoOutput(outStream)

  // Borrowed Kryo instance; released back to serInstance in close().
  private[this] var kryo: Kryo = serInstance.borrowKryo()

  /** Writes the class together with the object so readers need no type hint. */
  override def writeObject[T: ClassTag](t: T): SerializationStream = {
    kryo.writeClassAndObject(output, t)
    this
  }

  /** Flushes buffered bytes to the underlying stream; fails if the stream is closed. */
  override def flush() {
    if (output == null) {
      throw new IOException("Stream is closed")
    }
    output.flush()
  }

  /** Closes the output and releases the borrowed Kryo instance. Safe to call twice. */
  override def close() {
    if (output != null) {
      try {
        output.close()
      } finally {
        // Always return the Kryo instance, even if closing the output throws.
        serInstance.releaseKryo(kryo)
        kryo = null
        output = null
      }
    }
  }
}
/**
 * A [[DeserializationStream]] that reads objects from `inStream` using Kryo.
 *
 * Borrows a Kryo instance from `serInstance` at construction time and returns it to the
 * pool in `close()`; every stream must be closed exactly once.
 */
private[spark]
class KryoDeserializationStream(
    serInstance: KryoSerializerInstance,
    inStream: InputStream,
    useUnsafe: Boolean) extends DeserializationStream {

  // Kryo input buffer wrapping the underlying stream; set to null after close().
  private[this] var input: KryoInput =
    if (useUnsafe) new KryoUnsafeInput(inStream) else new KryoInput(inStream)

  // Borrowed Kryo instance; released back to serInstance in close().
  private[this] var kryo: Kryo = serInstance.borrowKryo()

  /**
   * Reads the next object. A Kryo "buffer underflow" error is translated into
   * [[EOFException]], which callers use as the end-of-stream signal.
   */
  override def readObject[T: ClassTag](): T = {
    try {
      kryo.readClassAndObject(input).asInstanceOf[T]
    } catch {
      // DeserializationStream uses the EOF exception to indicate stopping condition.
      case e: KryoException
        if e.getMessage.toLowerCase(Locale.ROOT).contains("buffer underflow") =>
        throw new EOFException
    }
  }

  /** Closes the input (which closes the wrapped stream) and releases the Kryo instance. */
  override def close() {
    if (input != null) {
      try {
        // Kryo's Input automatically closes the input stream it is using.
        input.close()
      } finally {
        // Always return the Kryo instance, even if closing the input throws.
        serInstance.releaseKryo(kryo)
        kryo = null
        input = null
      }
    }
  }
}
/**
 * A single-threaded serializer instance backed by Kryo. Not thread-safe: each instance
 * either caches one Kryo object (`usePool = false`) or borrows from the shared pool owned
 * by the parent [[KryoSerializer]] (`usePool = true`).
 */
private[spark] class KryoSerializerInstance(
    ks: KryoSerializer, useUnsafe: Boolean, usePool: Boolean)
  extends SerializerInstance {

  /**
   * A re-used [[Kryo]] instance. Methods will borrow this instance by calling `borrowKryo()`, do
   * their work, then release the instance by calling `releaseKryo()`. Logically, this is a caching
   * pool of size one. SerializerInstances are not thread-safe, hence accesses to this field are
   * not synchronized.
   */
  @Nullable private[this] var cachedKryo: Kryo = if (usePool) null else borrowKryo()

  /**
   * Borrows a [[Kryo]] instance. If possible, this tries to re-use a cached Kryo instance;
   * otherwise, it allocates a new instance.
   */
  private[serializer] def borrowKryo(): Kryo = {
    if (usePool) {
      val kryo = ks.pool.borrow()
      // Defensive reset: a previous borrower may have left state behind.
      kryo.reset()
      kryo
    } else {
      if (cachedKryo != null) {
        val kryo = cachedKryo
        // As a defensive measure, call reset() to clear any Kryo state that might have
        // been modified by the last operation to borrow this instance
        // (see SPARK-7766 for discussion of this issue)
        kryo.reset()
        cachedKryo = null
        kryo
      } else {
        ks.newKryo()
      }
    }
  }

  /**
   * Release a borrowed [[Kryo]] instance. If this serializer instance already has a cached Kryo
   * instance, then the given Kryo instance is discarded; otherwise, the Kryo is stored for later
   * re-use.
   */
  private[serializer] def releaseKryo(kryo: Kryo): Unit = {
    if (usePool) {
      ks.pool.release(kryo)
    } else {
      if (cachedKryo == null) {
        cachedKryo = kryo
      }
    }
  }

  // Make these lazy vals to avoid creating a buffer unless we use them.
  private lazy val output = ks.newKryoOutput()
  private lazy val input = if (useUnsafe) new KryoUnsafeInput() else new KryoInput()

  /**
   * Serializes `t` into a fresh ByteBuffer. A Kryo "Buffer overflow" is rethrown as a
   * SparkException telling the user to raise the max-buffer-size config.
   */
  override def serialize[T: ClassTag](t: T): ByteBuffer = {
    output.clear()
    val kryo = borrowKryo()
    try {
      kryo.writeClassAndObject(output, t)
    } catch {
      case e: KryoException if e.getMessage.startsWith("Buffer overflow") =>
        throw new SparkException(s"Kryo serialization failed: ${e.getMessage}. To avoid this, " +
          s"increase ${KRYO_SERIALIZER_MAX_BUFFER_SIZE.key} value.", e)
    } finally {
      releaseKryo(kryo)
    }
    ByteBuffer.wrap(output.toBytes)
  }

  /** Deserializes one object from `bytes`, reading in place when the buffer is array-backed. */
  override def deserialize[T: ClassTag](bytes: ByteBuffer): T = {
    val kryo = borrowKryo()
    try {
      if (bytes.hasArray) {
        // Read directly from the backing array without copying.
        input.setBuffer(bytes.array(), bytes.arrayOffset() + bytes.position(), bytes.remaining())
      } else {
        // Direct/off-heap buffer: stream it through a 4 KB staging buffer.
        input.setBuffer(new Array[Byte](4096))
        input.setInputStream(new ByteBufferInputStream(bytes))
      }
      kryo.readClassAndObject(input).asInstanceOf[T]
    } finally {
      releaseKryo(kryo)
    }
  }

  /**
   * Deserializes one object using `loader` to resolve classes, restoring the previous
   * class loader afterwards.
   */
  override def deserialize[T: ClassTag](bytes: ByteBuffer, loader: ClassLoader): T = {
    val kryo = borrowKryo()
    val oldClassLoader = kryo.getClassLoader
    try {
      kryo.setClassLoader(loader)
      if (bytes.hasArray) {
        input.setBuffer(bytes.array(), bytes.arrayOffset() + bytes.position(), bytes.remaining())
      } else {
        input.setBuffer(new Array[Byte](4096))
        input.setInputStream(new ByteBufferInputStream(bytes))
      }
      kryo.readClassAndObject(input).asInstanceOf[T]
    } finally {
      // Restore the loader before returning the instance to the cache/pool.
      kryo.setClassLoader(oldClassLoader)
      releaseKryo(kryo)
    }
  }

  /** Wraps `s` in a Kryo-backed serialization stream. */
  override def serializeStream(s: OutputStream): SerializationStream = {
    new KryoSerializationStream(this, s, useUnsafe)
  }

  /** Wraps `s` in a Kryo-backed deserialization stream. */
  override def deserializeStream(s: InputStream): DeserializationStream = {
    new KryoDeserializationStream(this, s, useUnsafe)
  }

  /**
   * Returns true if auto-reset is on. The only reason this would be false is if the user-supplied
   * registrator explicitly turns auto-reset off.
   */
  def getAutoReset(): Boolean = {
    // Kryo exposes no getter for autoReset, so read the private field reflectively.
    val field = classOf[Kryo].getDeclaredField("autoReset")
    field.setAccessible(true)
    val kryo = borrowKryo()
    try {
      field.get(kryo).asInstanceOf[Boolean]
    } finally {
      releaseKryo(kryo)
    }
  }
}
/**
 * Interface implemented by clients to register their classes with Kryo when using Kryo
 * serialization. Implementations are instantiated reflectively, so they need a
 * zero-argument constructor.
 */
trait KryoRegistrator {
  /** Called once per new Kryo instance; register application classes on `kryo` here. */
  def registerClasses(kryo: Kryo): Unit
}
private[serializer] object KryoSerializer {
  // Commonly used classes. Registered up front so they serialize with a compact
  // numeric id instead of a full class name.
  private val toRegister: Seq[Class[_]] = Seq(
    ByteBuffer.allocate(1).getClass,
    classOf[StorageLevel],
    classOf[CompressedMapStatus],
    classOf[HighlyCompressedMapStatus],
    classOf[CompactBuffer[_]],
    classOf[BlockManagerId],
    classOf[Array[Boolean]],
    classOf[Array[Byte]],
    classOf[Array[Short]],
    classOf[Array[Int]],
    classOf[Array[Long]],
    classOf[Array[Float]],
    classOf[Array[Double]],
    classOf[Array[Char]],
    classOf[Array[String]],
    classOf[Array[Array[String]]],
    classOf[BoundedPriorityQueue[_]],
    classOf[SparkConf]
  )

  // Classes that need a hand-written serializer rather than Kryo's default field-based one.
  private val toRegisterSerializer = Map[Class[_], KryoClassSerializer[_]](
    // RoaringBitmap has its own binary format; bridge Kryo's streams to its
    // ObjectOutput/ObjectInput based (de)serialization methods.
    classOf[RoaringBitmap] -> new KryoClassSerializer[RoaringBitmap]() {
      override def write(kryo: Kryo, output: KryoOutput, bitmap: RoaringBitmap): Unit = {
        bitmap.serialize(new KryoOutputObjectOutputBridge(kryo, output))
      }
      override def read(kryo: Kryo, input: KryoInput, cls: Class[RoaringBitmap]): RoaringBitmap = {
        val ret = new RoaringBitmap
        ret.deserialize(new KryoInputObjectInputBridge(kryo, input))
        ret
      }
    }
  )
}
/**
 * This is a bridge class to wrap KryoInput as an InputStream and ObjectInput. It forwards all
 * methods of InputStream and ObjectInput to KryoInput. It's usually helpful when an API expects
 * an InputStream or ObjectInput but you want to use Kryo.
 */
private[spark] class KryoInputObjectInputBridge(
    kryo: Kryo, input: KryoInput) extends FilterInputStream(input) with ObjectInput {
  override def readLong(): Long = input.readLong()
  override def readChar(): Char = input.readChar()
  override def readFloat(): Float = input.readFloat()
  override def readByte(): Byte = input.readByte()
  override def readShort(): Short = input.readShort()
  override def readUTF(): String = input.readString() // readString in kryo does utf8
  override def readInt(): Int = input.readInt()
  override def readUnsignedShort(): Int = input.readShortUnsigned()

  /** Skips `n` bytes. Assumes the underlying input can always skip that many. */
  override def skipBytes(n: Int): Int = {
    input.skip(n)
    n
  }

  /** Fills all of `b`; see the three-argument overload for the contract. */
  override def readFully(b: Array[Byte]): Unit = readFully(b, 0, b.length)

  /**
   * Reads exactly `len` bytes into `b` starting at `off`, per the DataInput.readFully
   * contract. A single Input.read call may return fewer bytes than requested, so loop
   * until the buffer is filled, failing with EOFException if the input is exhausted first.
   * (Previously a single read() call was issued and its count ignored, which could leave
   * the buffer partially filled.)
   */
  override def readFully(b: Array[Byte], off: Int, len: Int): Unit = {
    var read = 0
    while (read < len) {
      val n = input.read(b, off + read, len - read)
      if (n == -1) {
        throw new EOFException("Reached end of input before reading fully")
      }
      read += n
    }
  }

  override def readLine(): String = throw new UnsupportedOperationException("readLine")
  override def readBoolean(): Boolean = input.readBoolean()
  override def readUnsignedByte(): Int = input.readByteUnsigned()
  override def readDouble(): Double = input.readDouble()
  /** Reads a (class, object) pair written by kryo.writeClassAndObject. */
  override def readObject(): AnyRef = kryo.readClassAndObject(input)
}
/**
 * This is a bridge class to wrap KryoOutput as an OutputStream and ObjectOutput. It forwards all
 * methods of OutputStream and ObjectOutput to KryoOutput. It's usually helpful when an API expects
 * an OutputStream or ObjectOutput but you want to use Kryo.
 */
private[spark] class KryoOutputObjectOutputBridge(
    kryo: Kryo, output: KryoOutput) extends FilterOutputStream(output) with ObjectOutput {
  override def writeFloat(v: Float): Unit = output.writeFloat(v)
  // There is no "readChars" counterpart, except maybe "readLine", which is not supported
  override def writeChars(s: String): Unit = throw new UnsupportedOperationException("writeChars")
  override def writeDouble(v: Double): Unit = output.writeDouble(v)
  override def writeUTF(s: String): Unit = output.writeString(s) // writeString in kryo does UTF8
  override def writeShort(v: Int): Unit = output.writeShort(v)
  override def writeInt(v: Int): Unit = output.writeInt(v)
  override def writeBoolean(v: Boolean): Unit = output.writeBoolean(v)
  override def write(b: Int): Unit = output.write(b)
  override def write(b: Array[Byte]): Unit = output.write(b)
  override def write(b: Array[Byte], off: Int, len: Int): Unit = output.write(b, off, len)
  override def writeBytes(s: String): Unit = output.writeString(s)
  // ObjectOutput.writeChar takes an int; narrow it to a char before delegating.
  override def writeChar(v: Int): Unit = output.writeChar(v.toChar)
  override def writeLong(v: Long): Unit = output.writeLong(v)
  override def writeByte(v: Int): Unit = output.writeByte(v)
  /** Writes the class together with the object, mirroring KryoInputObjectInputBridge.readObject. */
  override def writeObject(obj: AnyRef): Unit = kryo.writeClassAndObject(output, obj)
}
/**
 * A Kryo serializer for serializing results returned by asJavaIterable.
 *
 * The underlying object is scala.collection.convert.Wrappers$IterableWrapper.
 * Kryo deserializes this into an AbstractCollection, which unfortunately doesn't work.
 */
private class JavaIterableWrapperSerializer
  extends com.esotericsoftware.kryo.Serializer[java.lang.Iterable[_]] {

  import JavaIterableWrapperSerializer._

  override def write(kryo: Kryo, out: KryoOutput, obj: java.lang.Iterable[_]): Unit = {
    // If the object is the wrapper, simply serialize the underlying Scala Iterable object.
    // Otherwise, serialize the object itself.
    if (obj.getClass == wrapperClass && underlyingMethodOpt.isDefined) {
      kryo.writeClassAndObject(out, underlyingMethodOpt.get.invoke(obj))
    } else {
      kryo.writeClassAndObject(out, obj)
    }
  }

  override def read(kryo: Kryo, in: KryoInput, clz: Class[java.lang.Iterable[_]])
    : java.lang.Iterable[_] = {
    // The stream holds either the unwrapped Scala Iterable (written above) or a plain
    // java.lang.Iterable. NOTE(review): anything else would be a MatchError -- assumed
    // impossible because write() only ever emits one of these two shapes.
    kryo.readClassAndObject(in) match {
      case scalaIterable: Iterable[_] => scalaIterable.asJava
      case javaIterable: java.lang.Iterable[_] => javaIterable
    }
  }
}
private object JavaIterableWrapperSerializer extends Logging {
  // The class returned by JavaConverters.asJava
  // (scala.collection.convert.Wrappers$IterableWrapper).
  import scala.collection.JavaConverters._

  val wrapperClass = Seq(1).asJava.getClass

  // Get the underlying method so we can use it to get the Scala collection for serialization.
  // NOTE(review): the lookup (and the log message below) says "field" but this actually
  // resolves the `underlying` *method* via getDeclaredMethod.
  private val underlyingMethodOpt = {
    try Some(wrapperClass.getDeclaredMethod("underlying")) catch {
      case e: Exception =>
        logError("Failed to find the underlying field in " + wrapperClass, e)
        None
    }
  }
}
| WindCanDie/spark | core/src/main/scala/org/apache/spark/serializer/KryoSerializer.scala | Scala | apache-2.0 | 24,533 |
package sims.test.gjk
import processing.core.PApplet
import processing.core.PConstants._
/**
 * Interactive Processing sketch for visually exercising GJK collision detection between
 * two rectangles: s1 is fixed, s2 follows the mouse, and contact points/normals are drawn
 * when a collision is reported. Holding space (keyCode 32) swaps the query order.
 */
class GJKTest extends PApplet {
  implicit val top = this
  import sims.dynamics._
  import sims.math._
  import sims.test.gui._
  import sims.test.gui.RichShape._

  // The two shapes under test; initialized in setup() (Processing's init hook).
  var s1: GraphicalShape = _
  var s2: GraphicalShape = _

  override def setup() = {
    size(600, 600, P2D)
    background(255,255,255)
    frameRate(60)
    s1 = (new Rectangle(1, 3) {position = Vector2D(5,5)}).toGraphical
    s2 = (new Rectangle(1, 2) {position = Vector2D(8,8); rotation = 0.2}).toGraphical
  }

  // Presumably pixels-per-meter: 39.37 in/m * 96 px/in. TODO confirm.
  val PPM = 39.37f * 96
  // World-to-screen zoom factor.
  var viewScale: Float = 1.0f / 80
  val GJK = new GJK2[Shape]
  // When true the (s1, s2) query order is swapped.
  var invert = false
  def pair = if (!invert) (s1, s2) else (s2, s1)

  /** Per-frame render loop: run GJK on the current pair and visualize the result. */
  override def draw() = {
    smooth()
    background(255,255,255)
    // Flip the y-axis so world coordinates grow upwards.
    translate(0, height)
    scale(viewScale * PPM, -viewScale * PPM)
    // keyCode 32 == space bar: invert the pair while held.
    if (keyCode == 32) invert = true
    else invert = false
    val collision = GJK.collision(pair._1.shape, pair._2.shape)
    /*if (collision != None) {
    pushMatrix()
    rectMode(CORNER)
    stroke(255, 0, 50)
    strokeWeight(10)
    fill(0, 0, 0, 0)
    rect(0, 0, 600, 600)
    strokeWeight(1)
    popMatrix()
    }*/
    //val separation = GJK.collision(pair._1.shape, pair._2.shape)
    //if (!separation.isEmpty)
    //List(separation.get.point1, separation.get.point2) foreach (p => ellipse(p.x.toFloat, p.y.toFloat, 0.1f, 0.1f))
    label()
    // Move s2 to the mouse position, converting screen pixels back to world coordinates.
    s2.shape.position = Vector2D(mouseX / viewScale / PPM, -(mouseY - height) / viewScale / PPM)
    s1.render()
    s2.render()
    collision match {
      case Some(c) => {
        // Draw each contact point and its collision normal in green.
        stroke(0, 255, 0)
        for (p <- c.points) {
          ellipse(p.x.toFloat, p.y.toFloat, 0.1f, 0.1f)
          val s = p
          val e = p + c.normal
          line(s.x.toFloat, s.y.toFloat, e.x.toFloat, e.y.toFloat)
          println(c.overlap)
        }
      }
      case _ => ()
    }
    /*stroke(255, 0, 255)
    val f = FeatureManifold.farthestFeature(pair._1.shape, Vector2D.j + Vector2D.i)
    f match {
    case Left(p) => ellipse(p.x.toFloat, p.y.toFloat, 0.1f, 0.1f)
    case Right(s) => line(s.point1.x.toFloat, s.point1.y.toFloat, s.point2.x.toFloat, s.point2.y.toFloat)
    }*/
  }

  private val fontSize = 16
  private val f = createFont("Monospaced.plain", fontSize)

  /** Draws the "1"/"2" labels at each shape's position (in screen coordinates). */
  private def label() = {
    val size = 16
    fill(0, 0, 0)
    textMode(SCREEN)
    textFont(f)
    val p1 = pair._1.shape
    val p2 = pair._2.shape
    text("1", (p1.position.x * PPM * viewScale).toFloat, (height - p1.position.y * PPM * viewScale).toFloat)
    text("2", (p2.position.x * PPM * viewScale).toFloat, (height - p2.position.y * PPM * viewScale).toFloat)
  }
}
/** Entry point: hands the sketch class name to Processing's launcher. */
object GJKTest {
  def main(args: Array[String]): Unit = {
    PApplet.main(args ++ Array("sims.test.gjk.GJKTest"))
  }
} | jodersky/sims2 | src/test/scala/sims/test/gjk/GJKTest.scala | Scala | bsd-3-clause | 2,714 |
package effectful.examples.effects.sql
import org.apache.commons.io.IOUtils
/** Character payload for SQL values, backed either by a Reader or an in-memory String. */
sealed trait CharData {
  //todo: can't use Reader -- refactor this to work through EffectIterator w/SqlDriver.readStreamChunk
  def toCharStream() : java.io.Reader
  def toCharString() : String
}

object CharData {
  def apply(reader: java.io.Reader) : IsReader = IsReader(reader)
  def apply(data: String) : IsString = IsString(data)

  /**
   * Streaming variant. NOTE: both accessors hand back / consume the single underlying
   * reader, so each instance is effectively one-shot.
   */
  case class IsReader(reader: java.io.Reader) extends CharData {
    override def toCharStream() = reader
    // Drains the reader into a String via commons-io.
    override def toCharString() = IOUtils.toString(reader)
  }

  /** In-memory variant; toCharStream wraps the string in a fresh reader each call. */
  case class IsString(data: String) extends CharData {
    override def toCharStream() = new java.io.StringReader(data)
    override def toCharString() = data
  }
}
/** Binary payload for SQL values, backed either by an InputStream or an in-memory byte array. */
sealed trait BinData {
  //todo: can't use InputStream -- refactor this to work through EffectIterator w/SqlDriver.readStreamChunk
  def toBinStream() : java.io.InputStream
  def toByteArray() : Array[Byte]
}

object BinData {
  def apply(bin: java.io.InputStream) : IsBinStream = IsBinStream(bin)
  def apply(data: Array[Byte]) : IsByteArray = IsByteArray(data)

  /**
   * Streaming variant. NOTE: both accessors hand back / consume the single underlying
   * stream, so each instance is effectively one-shot.
   */
  case class IsBinStream(bin: java.io.InputStream) extends BinData {
    override def toBinStream() = bin
    // Drains the stream into a byte array via commons-io.
    override def toByteArray() = IOUtils.toByteArray(bin)
  }

  /** In-memory variant; toBinStream wraps the array in a fresh stream each call. */
  case class IsByteArray(data: Array[Byte]) extends BinData {
    override def toBinStream() = new java.io.ByteArrayInputStream(data)
    override def toByteArray() = data
  }
}
/** A typed SQL value: each case pairs a payload with the SqlType it maps to. */
sealed trait SqlVal {
  /** The SQL type of this value (needed e.g. by JDBC when binding parameters). */
  def sqlType: SqlType
}

object SqlVal {
  // Mappings based on: https://docs.oracle.com/javase/1.5.0/docs/guide/jdbc/getstart/mapping.html
  // Note: sqlType is required by JDBC PreparedStatement.setNull
  case class NULL(sqlType: SqlType) extends SqlVal

  // --- Character types (fixed, variable, and large-object; N* variants are national charset) ---
  case class CHAR(
    fixedLength: Long,
    data: CharData
  ) extends SqlVal {
    def sqlType = SqlType.CHAR(fixedLength)
  }
  case class NCHAR(
    fixedLength: Long,
    data: CharData
  ) extends SqlVal {
    def sqlType = SqlType.NCHAR(fixedLength)
  }
  case class VARCHAR(
    maxLength: Long,
    data: CharData
  ) extends SqlVal {
    def sqlType = SqlType.VARCHAR(maxLength)
  }
  case class NVARCHAR(
    maxLength: Long,
    data: CharData
  ) extends SqlVal {
    def sqlType = SqlType.NVARCHAR(maxLength)
  }
  case class CLOB(data: CharData) extends SqlVal {
    def sqlType = SqlType.CLOB
  }
  case class NCLOB(data: CharData) extends SqlVal {
    def sqlType = SqlType.NCLOB
  }

  // --- Binary types ---
  case class BINARY(
    fixedSize: Long,
    data: BinData
  ) extends SqlVal {
    def sqlType = SqlType.BINARY(fixedSize)
  }
  case class VARBINARY(
    maxSize: Long,
    data: BinData
  ) extends SqlVal {
    def sqlType = SqlType.VARBINARY(maxSize)
  }
  case class BLOB(data: BinData) extends SqlVal {
    def sqlType = SqlType.BLOB
  }

  // --- Boolean/bit ---
  case class BOOLEAN(value: Boolean) extends SqlVal {
    def sqlType = SqlType.BOOLEAN
  }
  case class BIT(value: Boolean) extends SqlVal {
    def sqlType = SqlType.BIT
  }

  // --- Integer types (TINYINT carried as Short since Scala has no unsigned byte) ---
  case class TINYINT(value: Short) extends SqlVal { // +/- 0-255
    def sqlType = SqlType.TINYINT
  }
  case class SMALLINT(value: Short) extends SqlVal {
    def sqlType = SqlType.SMALLINT
  }
  case class INTEGER(value: Int) extends SqlVal {
    def sqlType = SqlType.INTEGER
  }
  case class BIGINT(value: Long) extends SqlVal {
    def sqlType = SqlType.BIGINT
  }

  // --- Floating point and exact decimal types ---
  case class REAL(value: Float) extends SqlVal {
    def sqlType = SqlType.REAL
  }
  case class DOUBLE(value: Double) extends SqlVal {
    def sqlType = SqlType.DOUBLE
  }
  case class NUMERIC(
    value: BigDecimal,
    precision: Int = 0,
    scale: Int = 0
  ) extends SqlVal {
    def sqlType = SqlType.NUMERIC(precision,scale)
  }
  case class DECIMAL(
    value: BigDecimal,
    precision: Int = 0,
    scale: Int = 0
  ) extends SqlVal {
    def sqlType = SqlType.DECIMAL(precision,scale)
  }

  // --- Date/time types (java.time; TIMESTAMP is an absolute Instant) ---
  case class DATE(date: java.time.LocalDate) extends SqlVal {
    def sqlType = SqlType.DATE
  }
  case class TIME(time: java.time.LocalTime) extends SqlVal {
    def sqlType = SqlType.TIME
  }
  case class TIMESTAMP(timestamp: java.time.Instant) extends SqlVal {
    def sqlType = SqlType.TIMESTAMP
  }

  // Copy/paste template for exhaustively matching all SqlVal cases:
  /*
  case NULL(_ =>
  case CHAR(_,data) =>
  case NCHAR(_,data) =>
  case VARCHAR(_,data) =>
  case NVARCHAR(_,data) =>
  case CLOB(data) =>
  case NCLOB(data) =>
  case BINARY(_,data) =>
  case VARBINARY(_,data) =>
  case BLOB(data) =>
  case BOOLEAN(value) =>
  case BIT(value) =>
  case TINYINT(value) =>
  case SMALLINT(value) =>
  case INTEGER(value) =>
  case BIGINT(value) =>
  case REAL(value) =>
  case DOUBLE(value) =>
  case NUMERIC(value,_,_) =>
  case DECIMAL(value,_,_) =>
  case DATE(date) =>
  case TIME(time) =>
  case TIMESTAMP(timestamp) =>
  */
}
| lancegatlin/effectful-demo | src/test/scala/effectful/examples/effects/sql/SqlVal.scala | Scala | mit | 4,761 |
package com.goyeau.kubernetes.client.api
import cats.effect.Async
import com.goyeau.kubernetes.client.KubeConfig
import com.goyeau.kubernetes.client.operation._
import io.circe.{Decoder, Encoder}
import io.k8s.apiextensionsapiserver.pkg.apis.apiextensions.v1.{CustomResourceDefinition, CustomResourceDefinitionList}
import org.http4s.Uri
import org.http4s.client.Client
import org.http4s.implicits._
/**
 * Client API for the cluster-scoped `customresourcedefinitions` resource
 * (`apiextensions.k8s.io/v1`). CRUD, listing, deletion, and watch behaviour all come
 * from the mixed-in operation traits; this class only supplies the resource URIs.
 */
private[client] class CustomResourceDefinitionsApi[F[_]](val httpClient: Client[F], val config: KubeConfig)(implicit
  val F: Async[F],
  val listDecoder: Decoder[CustomResourceDefinitionList],
  val resourceEncoder: Encoder[CustomResourceDefinition],
  val resourceDecoder: Decoder[CustomResourceDefinition]
) extends Creatable[F, CustomResourceDefinition]
  with Replaceable[F, CustomResourceDefinition]
  with Gettable[F, CustomResourceDefinition]
  with Listable[F, CustomResourceDefinitionList]
  with Deletable[F]
  with DeletableTerminated[F]
  with GroupDeletable[F]
  with Watchable[F, CustomResourceDefinition] { self =>

  // Base path for CRUD/list operations.
  val resourceUri: Uri = uri"/apis" / "apiextensions.k8s.io" / "v1" / "customresourcedefinitions"

  // Watch uses the legacy "/watch" path segment.
  override val watchResourceUri: Uri =
    uri"/apis" / "apiextensions.k8s.io" / "v1" / "watch" / "customresourcedefinitions"
}
| joan38/kubernetes-client | kubernetes-client/src/com/goyeau/kubernetes/client/api/CustomResourceDefinitionsApi.scala | Scala | apache-2.0 | 1,281 |
package bindings
import com.google.inject.AbstractModule
import javax.inject.{ Inject, Singleton }
import net.sf.ehcache.CacheManager
import play.api.inject.ApplicationLifecycle
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
/** Works around the problem of the already-initialized EHCache instance. */
class CacheFix extends AbstractModule {
  def configure() = {
    // Eager singleton so the shutdown hook is registered at application start.
    bind(classOf[CacheFixInstance]).asEagerSingleton()
  }
}
/** Registers a stop hook that shuts down the shared EHCache CacheManager on app stop. */
@Singleton
class CacheFixInstance @Inject() (
  lifecycle: ApplicationLifecycle
) {
  lifecycle.addStopHook { () ⇒
    Future(CacheManager.getInstance.shutdown())
  }
}
| kdoomsday/kaminalapp | app/bindings/CacheFix.scala | Scala | mit | 638 |
package functions.implementations.core
import dbpedia.dataparsers.StringParser
import dbpedia.dataparsers.util.wikiparser.impl.simple.SimpleWikiParser
/**
* Created by wmaroy on 22.04.17.
*/
/**
 * Extracts the string value of a single wiki-markup property.
 */
class ExtractStringFunction {

  // Parses raw wiki markup into a property node tree.
  val wikiparser = new SimpleWikiParser

  /**
   * Parses `property` as wiki markup and returns its string value as a
   * single-element sequence. Throws if no string value can be extracted.
   */
  def execute(property: String): Seq[String] = {
    val parsed = StringParser.parse(wikiparser.parseProperty(property))
    List(parsed.get)
  }
}
| FnOio/dbpedia-parsing-functions-scala | src/main/scala/functions/implementations/core/ExtractStringFunction.scala | Scala | gpl-2.0 | 432 |
package com.seanshubin.duration.format
import com.seanshubin.duration.format.DurationFormat.NanosecondsFormat
import org.scalatest.FunSuite
/**
 * Tests for NanosecondsFormat: parsing human-readable duration strings into nanosecond
 * counts, formatting them back, and error reporting for malformed input.
 */
class NanosecondsFormatTest extends FunSuite {
  test("parse") {
    assertParse("0", "0")
    assertParse("1 day", "86,400,000,000,000")
    assertParse("5 seconds", "5,000,000,000")
    assertParse("2 days", "172,800,000,000,000")
    assertParse("5 minutes", "300,000,000,000")
    assertParse("10 hours", "36,000,000,000,000")
    assertParse("1 second", "1,000,000,000")
    assertParse("1 millisecond", "1,000,000")
    assertParse("500 milliseconds", "500,000,000")
    assertParse("55 minutes", "3,300,000,000,000")
    assertParse("22", "22")
    assertParse("1 day 5 hours 2 minutes 1 second 123 milliseconds", "104,521,123,000,000")
    assertParse("2 Days 1 Hour 1 Minute 53 Seconds 1 Millisecond", "176,513,001,000,000")
    assertParse("32 days 5 hours", "2,782,800,000,000,000")
    assertParse("1 day 2 hours 1 day", "180,000,000,000,000")
    assertParse("1 hour 2 days 1 hours", "180,000,000,000,000")
    assertParse("25 days", "2,160,000,000,000,000")
    assertParse("9223372036854775807", "9,223,372,036,854,775,807")
    assertParse("9223372036854775807 nanoseconds", "9,223,372,036,854,775,807")
    assertParse("106751 days 23 hours 47 minutes 16 seconds 854 milliseconds 775 microseconds 807 nanoseconds", "9,223,372,036,854,775,807")
  }
  test("back and forth") {
    assertBackAndForth("1428 days 21 hours 33 minutes 9 seconds 123 milliseconds 456 microseconds 789 nanoseconds")
    assertBackAndForth("1 day 10 hours 36 seconds 789 milliseconds")
    assertBackAndForth("10 hours 17 minutes 36 seconds 789 milliseconds")
    assertBackAndForth("1 day 10 hours 17 minutes 36 seconds")
    assertBackAndForth("17 minutes")
    assertBackAndForth("789 milliseconds")
    assertBackAndForth("1 day 5 hours 2 minutes 1 second 123 milliseconds")
    assertBackAndForth("2 days 1 hour 1 minute 53 seconds 1 millisecond")
    assertBackAndForth("25 days")
    assertBackAndForth("0 nanoseconds")
  }
  test("error message") {
    assertErrorMessage("1 foo", """'foo' does not match a valid time unit (nanoseconds, microseconds, milliseconds, seconds, minutes, hours, days)""")
    assertErrorMessage("1 SecondsA", """'SecondsA' does not match a valid time unit (nanoseconds, microseconds, milliseconds, seconds, minutes, hours, days)""")
    assertErrorMessage("a 1 foo", """'a 1 foo' does not match a valid pattern: \\d+\\s+[a-zA-Z]+(?:\\s+\\d+\\s+[a-zA-Z]+)*""")
    assertErrorMessage("1 foo 3", """'1 foo 3' does not match a valid pattern: \\d+\\s+[a-zA-Z]+(?:\\s+\\d+\\s+[a-zA-Z]+)*""")
    assertErrorMessage("seconds", """'seconds' does not match a valid pattern: \\d+\\s+[a-zA-Z]+(?:\\s+\\d+\\s+[a-zA-Z]+)*""")
    assertErrorMessage("1 foo 2 bar", """'foo' does not match a valid time unit (nanoseconds, microseconds, milliseconds, seconds, minutes, hours, days)""")
  }

  /** Parses `verbose` and checks the comma-grouped nanosecond count equals `expected`. */
  def assertParse(verbose: String, expected: String) {
    val parsed: Long = NanosecondsFormat.parse(verbose)
    // %,d renders the long with locale-style thousands separators.
    val actual = f"$parsed%,d"
    assert(expected === actual)
  }

  /** Checks that parse and format are inverses for canonical input. */
  def assertBackAndForth(verbose: String) {
    val parsed: Long = NanosecondsFormat.parse(verbose)
    val formatted = NanosecondsFormat.format(parsed)
    assert(verbose === formatted)
  }

  /** Checks that parsing `verbose` fails with exactly the `expected` message. */
  def assertErrorMessage(verbose: String, expected: String) {
    try {
      NanosecondsFormat.parse(verbose)
      fail(s"Expected '$verbose' to throw an exception during parsing")
    } catch {
      case ex: Exception =>
        assert(ex.getMessage === expected)
    }
  }
}
| SeanShubin/duration-format | format/src/test/scala/com/seanshubin/duration/format/NanosecondsFormatTest.scala | Scala | unlicense | 3,583 |
package fi.allacca
import android.provider.CalendarContract.Events
import android.content.ContentValues
import android.content.Context
import org.joda.time.{Interval, DateTime}
/**
 * An immutable calendar event. `startTime`/`endTime` are epoch milliseconds.
 */
class CalendarEvent(val title: String, val startTime: Long, val endTime: Long, val description: String = "", val location: String = "", val allDay: Boolean = false) {
  /** True if this event overlaps any part of the calendar day containing `day`. */
  def isDuring(day: DateTime): Boolean = {
    // An end before the start is treated as "no end": extend the event to Long.MaxValue.
    val effectiveEnd = if (endTime < startTime) java.lang.Long.MAX_VALUE else endTime
    val intervalOfEvent = new Interval(startTime, effectiveEnd)
    // The day interval is [midnight, next midnight).
    val intervalOfDay = new Interval(day.withTimeAtStartOfDay, day.withTimeAtStartOfDay.plusDays(1))
    intervalOfDay.overlaps(intervalOfEvent)
  }
  override def toString = s"$title ($description) $startTime - $endTime"
}
/** Persists CalendarEvents into the Android calendar provider. */
class CalendarEventService(context: Context) {
  /**
   * Inserts `event` into the calendar identified by `calendarId` via the content
   * resolver and returns the id of the newly created event row.
   */
  def createEvent(calendarId: Long, event: CalendarEvent): Long = {
    val values = new ContentValues()
    values.put("dtstart", Long.box(event.startTime))
    values.put("dtend", Long.box(event.endTime))
    //values.put("rrule", "FREQ=DAILY;COUNT=20;BYDAY=MO,TU,WE,TH,FR;WKST=MO")
    // values.put("rrule", "FREQ=DAILY;COUNT=1;BYDAY=MO,TU,WE,TH,FR;WKST=MO")
    values.put("title", event.title)
    values.put("eventLocation", event.location)
    values.put("calendar_id", Long.box(calendarId))
    // NOTE(review): timezone is hardcoded rather than taken from the device/event.
    values.put("eventTimezone", "Europe/Berlin")
    values.put("description", event.description)
    values.put("selfAttendeeStatus", Int.box(1))
    // The provider expects allDay as an int flag (1 = all-day event).
    val allDay = if (event.allDay) 1 else 0
    values.put("allDay", Int.box(allDay))
    // NOTE(review): placeholder organizer address.
    values.put("organizer", "some.mail@some.address.com")
    values.put("guestsCanInviteOthers", Int.box(1))
    values.put("guestsCanModify", Int.box(1))
    values.put("availability", Int.box(0))
    val uri = context.getContentResolver.insert(Events.CONTENT_URI, values)
    // The insert URI ends with the numeric id of the new event row.
    val eventId = uri.getLastPathSegment.toLong
    eventId
  }
}
| timorantalaiho/allacca | src/main/scala/fi/allacca/calendarEvents.scala | Scala | gpl-3.0 | 1,884 |
package org.littlewings.hazelcast.mapreduce
import com.hazelcast.core.ICompletableFuture
import com.hazelcast.mapreduce.{JobTracker, KeyValueSource}
import org.scalatest.{FunSpec, Entry}
import org.scalatest.Matchers._
/**
 * Integration-style tests running the SimpleAllKeys map/combine/reduce pipeline on a
 * two-node Hazelcast cluster, once over an IMap source and once over an IList source.
 */
class SimpleAllKeysMapReduceSpec extends FunSpec
  with HazelcastSpecSupport {
  describe("simple all mapreduce") {
    it("test map") {
      withHazelcast(2) { hazelcast =>
        val map = hazelcast.getMap[String, String]("simple-map")
        (1 to 100).foreach(i => map.put(s"key$i", s"value$i"))
        val source = KeyValueSource.fromMap(map)
        val jobTracker = hazelcast.getJobTracker("default")
        val job = jobTracker.newJob(source)
        val future: ICompletableFuture[java.util.Map[String, Int]] =
          job
            .mapper(new SimpleAllKeysMapper)
            .combiner(new SimpleAllKeysCombinerFactory)
            .reducer(new SimpleAllKeysReducerFactory)
            .submit
        val result: java.util.Map[String, Int] = future.get
        // Map sources emit one count per distinct key.
        result.get("key1") should be (1)
        result should contain (Entry("key1", 1))
        result should contain (Entry("key2", 1))
        result should have size 100
      }
    }
    it("test list") {
      withHazelcast(2) { hazelcast =>
        val list = hazelcast.getList[String]("simple-list")
        (1 to 100).foreach(i => list.add(s"entry$i"))
        val source = KeyValueSource.fromList(list)
        val jobTracker = hazelcast.getJobTracker("default")
        val job = jobTracker.newJob(source)
        val future: ICompletableFuture[java.util.Map[String, Int]] =
          job
            .mapper(new SimpleAllKeysMapper)
            .combiner(new SimpleAllKeysCombinerFactory)
            .reducer(new SimpleAllKeysReducerFactory)
            .submit
        val result: java.util.Map[String, Int] = future.get
        // List sources use the list name as the single key, so all 100 entries collapse.
        result should contain (Entry("simple-list", 100))
        result should have size 1
      }
    }
  }
}
| kazuhira-r/hazelcast-examples | hazelcast-mapreduce-trial/src/test/scala/org/littlewings/hazelcast/mapreduce/SimpleAllKeysMapReduceSpec.scala | Scala | mit | 1,988 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2
import uk.gov.hmrc.ct.CATO04
import uk.gov.hmrc.ct.box.{CtBigDecimal, CtBoxIdentifier, Linked}
/** CT600 v2 box 64: Marginal Rate Relief. */
case class B64(value: BigDecimal) extends CtBoxIdentifier("Marginal Rate Relief") with CtBigDecimal

/** B64 is populated directly from the calculated CATO04 value. */
object B64 extends Linked[CATO04, B64] {
  override def apply(source: CATO04): B64 = B64(source.value)
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v2/B64.scala | Scala | apache-2.0 | 941 |
package com.crockeo.clasp
// The declaration of all of the implicits for the project.
/** Implicit enrichments shared across the project. */
object Implicits {
  /**
   * Adds right-biased `map` and `flatMap` to [[Either]]: `Right` is the success
   * channel and is transformed, while `Left` values pass through untouched.
   */
  implicit class EitherCollection[A, B](e: Either[A, B]) {
    /** Applies `f` to a `Right` value; returns a `Left` unchanged. */
    def map[C](f: B => C): Either[A, C] =
      e.fold(left => Left(left), right => Right(f(right)))

    /** Chains a computation that may itself fail; a `Left` short-circuits. */
    def flatMap[C](f: B => Either[A, C]): Either[A, C] =
      e.fold(left => Left(left), right => f(right))
  }
}
| crockeo/clasp | src/main/scala/Implicits.scala | Scala | mit | 477 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2007-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala.swing
/**
 * A component that can contain other components.
 *
 * Wraps a [[javax.swing.JPanel]]; the container behaviour comes from
 * [[Container.Wrapper]], which delegates child management to `peer`.
 *
 * @see javax.swing.JPanel
 */
abstract class Panel extends Component with Container.Wrapper {
  // Lazy so subclasses can influence construction before the peer is created.
  override lazy val peer: javax.swing.JPanel = new javax.swing.JPanel with SuperMixin
}
| SethTisue/scala-swing | src/main/scala/scala/swing/Panel.scala | Scala | bsd-3-clause | 789 |
package org.awong.searching
/** Minimal key/value symbol-table abstraction (after Sedgewick & Wayne). */
trait SymbolTable[Key,Value] {
  /** Inserts, or overwrites, the value associated with `key`. */
  def put(key: Key, value: Value): Unit
  /** Returns the value for `key`, or None when absent. */
  def get(key: Key): Option[Value]
  /**
   * NOTE(review): this default delegates to `put(key, ???)`, and `???` throws
   * NotImplementedError as soon as it is evaluated — so calling `delete` on any
   * implementation that does not override it always throws. Presumably a placeholder
   * for a lazy-deletion scheme; concrete tables must override this.
   */
  def delete(key: Key): Unit = {
    put(key, ???)
  }
  /** True if `key` has an associated value. */
  def contains(key: Key): Boolean = {
    get(key).nonEmpty
  }
  /** True if the table holds no keys. */
  def isEmpty(): Boolean = {
    size() == 0
  }
  /** Number of keys in the table. */
  def size(): Int
  /** All keys currently in the table. */
  def keys(): Iterable[Key]
  /** Helper for implementations to signal invalid arguments. */
  def fail(msg: String) = throw new IllegalArgumentException(msg)
}
/**
 * A symbol table over ordered keys, adding rank/select, floor/ceiling, min/max and
 * range queries on top of [[SymbolTable]].
 */
trait OrderedSymbolTable[Key <: Ordered[Key], Value] extends SymbolTable[Key,Value] {
  /**
   * smallest key
   */
  def min(): Option[Key]
  /**
   * largest key
   */
  def max(): Option[Key]
  /**
   * largest key less than or equal to given key
   */
  def floor(key: Key): Option[Key]
  /**
   * smallest key greater than or equal to given key
   */
  def ceiling(key: Key): Option[Key]
  /**
   * number of keys less than given key
   */
  def rank(key: Key): Int
  /**
   * the key given the rank
   */
  def select(rank: Int): Option[Key]
  /**
   * delete the smallest key (no-op on an empty table)
   */
  def deleteMin(): Unit = {
    min() match {
      case Some(m) => delete(m)
      case _ =>
    }
  }
  /**
   * delete the greatest key (no-op on an empty table)
   */
  def deleteMax(): Unit = {
    max() match {
      case Some(m) => delete(m)
      case _ =>
    }
  }
  /**
   * Searches for the successor of given 'key'.
   * NOTE(review): the view bound `<%` used here is deprecated Scala syntax.
   */
  def successor[K1 >: Key <% Ordered[K1]](key: K1): Key
  /**
   * Searches for the predecessor of given 'key'.
   */
  def predecessor[K1 >: Key <% Ordered[K1]](key: K1): Key
  /**
   * number of keys from [lo ... high]; the +1 accounts for `high` itself when present
   */
  def size(low: Key, high: Key): Int = {
    if (high < low) {
      0
    } else if (contains(high)){
      rank(high) - rank(low) + 1
    } else {
      rank(high) - rank(low)
    }
  }
  /**
   * keys in [lo ... high] in sorted order
   */
  def keys(low: Key, high: Key): Iterable[Key]
  /** All keys, derived from the min/max range when both bounds exist. */
  override def keys(): Iterable[Key] = {
    (min(), max()) match {
      case (None, None) => Seq[Key]()
      case (None, Some(m)) => Seq[Key](m)
      case (Some(m), None) => Seq[Key](m)
      case (Some(aMin), Some(aMax)) => keys(aMin, aMax)
    }
  }
}
/** A bare-bones mutable set abstraction (named `MySet` to avoid clashing
 * with `scala.collection.Set`). All operations are abstract.
 */
trait MySet[Key] {
  /** Adds `key` to the set. */
  def add(key: Key): Unit
  /** Removes `key` from the set if present. */
  def delete(key: Key): Unit
  /** True when `key` is a member. */
  def contains(key: Key): Boolean
  /** True when the set has no members. */
  def isEmpty(): Boolean
  /** Number of members. */
  def size(): Int
} | alanktwong/algorithms-scala | searching/src/main/scala/org/awong/searching/SymbolTable.scala | Scala | mit | 2,257 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.storage.common.partitions
import java.time.Instant
import java.time.temporal.ChronoUnit
import java.util.Date
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.filter.expression.AttributeExpression.FunctionLiteral
import org.locationtech.geomesa.filter.visitor.BoundsFilterVisitor
import org.locationtech.geomesa.fs.storage.api.PartitionScheme.SimplifiedFilter
import org.locationtech.geomesa.fs.storage.api.{NamedOptions, PartitionSchemeFactory}
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.text.DateParsing
import org.opengis.filter.{Filter, PropertyIsLessThan}
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import org.specs2.specification.AllExpectations
@RunWith(classOf[JUnitRunner])
/** Exercises the filesystem-storage partition schemes (attribute, date/time,
 * z2, xz2 and composites): partition naming, filter simplification, and
 * covering-filter generation.
 */
class PartitionSchemeTest extends Specification with AllExpectations {
  import org.locationtech.geomesa.filter.{checkOrder, decomposeAnd}
  // specs2: run examples in declaration order
  sequential
  // shared immutable fixtures: a simple point feature type and one feature
  val sft = SimpleFeatureTypes.createType("test", "name:String,age:Int,dtg:Date,*geom:Point:srid=4326")
  val sf = ScalaSimpleFeature.create(sft, "1", "test", 10, "2017-02-03T10:15:30Z", "POINT (10 10)")
  "PartitionScheme" should {
    "partition based on attribute" >> {
      val ps = PartitionSchemeFactory.load(sft, NamedOptions("attribute", Map("partitioned-attribute" -> "name")))
      ps.getPartitionName(sf) mustEqual "test"
      ps.getSimplifiedFilters(ECQL.toFilter("name IN ('foo', 'bar')")) must
          beSome(Seq(SimplifiedFilter(Filter.INCLUDE, Seq("foo", "bar"), partial = false)))
      ps.getSimplifiedFilters(ECQL.toFilter("name IN ('foo', 'bar')"), Some("foo")) must
          beSome(Seq(SimplifiedFilter(Filter.INCLUDE, Seq("foo"), partial = false)))
      // range predicates and non-attribute filters cannot be simplified
      ps.getSimplifiedFilters(ECQL.toFilter("name < 'foo' and name > 'bar'")) must beNone
      ps.getSimplifiedFilters(ECQL.toFilter("bbox(geom,-170,-80,170,80)")) must beNone
    }
    "partition based on date" >> {
      val ps = DateTimeScheme("yyyy-MM-dd", ChronoUnit.DAYS, 1, "dtg", 2)
      ps.getPartitionName(sf) mustEqual "2017-02-03"
    }
    "partition based on date with slash delimiter" >> {
      val ps = DateTimeScheme("yyyy/DDD/HH", ChronoUnit.DAYS, 1, "dtg", 2)
      ps.getPartitionName(sf) mustEqual "2017/034/10"
    }
    // NOTE(review): exact duplicate of the previous example (same name, same
    // body) — consider removing one copy.
    "partition based on date with slash delimiter" >> {
      val ps = DateTimeScheme("yyyy/DDD/HH", ChronoUnit.DAYS, 1, "dtg", 2)
      ps.getPartitionName(sf) mustEqual "2017/034/10"
    }
    "weekly partitions" >> {
      val ps = PartitionSchemeFactory.load(sft, NamedOptions("weekly"))
      ps must beAnInstanceOf[DateTimeScheme]
      ps.getPartitionName(sf) mustEqual "2017/W05"
      val tenWeeksOut = ScalaSimpleFeature.create(sft, "1", "test", 10,
        Date.from(Instant.parse("2017-01-01T00:00:00Z").plus(9*7 + 1, ChronoUnit.DAYS)), "POINT (10 10)")
      ps.getPartitionName(tenWeeksOut) mustEqual "2017/W10"
    }
    // NOTE(review): the "yyy" (3-letter year) patterns below presumably
    // render identically to "yyyy" for 4-digit years — verify intent.
    "10 bit datetime z2 partition" >> {
      val sf = ScalaSimpleFeature.create(sft, "1", "test", 10, "2017-01-03T10:15:30Z", "POINT (10 10)")
      val sf2 = ScalaSimpleFeature.create(sft, "1", "test", 10, "2017-01-03T10:15:30Z", "POINT (-75 38)")
      val ps = CompositeScheme(Seq(
        DateTimeScheme("yyy/DDD", ChronoUnit.DAYS, 1, "dtg", 2),
        Z2Scheme(10, "geom", 3)
      ))
      ps.getPartitionName(sf) mustEqual "2017/003/0770"
      ps.getPartitionName(sf2) mustEqual "2017/003/0617"
    }
    "10 bit datetime xz2 partition" >> {
      val sf = ScalaSimpleFeature.create(sft, "1", "test", 10, "2017-01-03T10:15:30Z", "POINT (10 10)")
      val sf2 = ScalaSimpleFeature.create(sft, "1", "test", 10, "2017-01-03T10:15:30Z", "POINT (-75 38)")
      val ps = CompositeScheme(Seq(
        DateTimeScheme("yyy/DDD", ChronoUnit.DAYS, 1, "dtg", 2),
        XZ2Scheme(10, "geom", 3)
      ))
      ps.getPartitionName(sf) mustEqual "2017/003/1030"
      ps.getPartitionName(sf2) mustEqual "2017/003/0825"
    }
    "20 bit datetime z2 partition" >> {
      val sf = ScalaSimpleFeature.create(sft, "1", "test", 10, "2017-01-03T10:15:30Z", "POINT (10 10)")
      val sf2 = ScalaSimpleFeature.create(sft, "1", "test", 10, "2017-01-03T10:15:30Z", "POINT (-75 38)")
      val ps = CompositeScheme(Seq(
        DateTimeScheme("yyy/DDD", ChronoUnit.DAYS, 1, "dtg", 2),
        Z2Scheme(20, "geom", 3)
      ))
      ps.getPartitionName(sf) mustEqual "2017/003/0789456"
      ps.getPartitionName(sf2) mustEqual "2017/003/0632516"
    }
    "20 bit datetime xz2 partition" >> {
      val sf = ScalaSimpleFeature.create(sft, "1", "test", 10, "2017-01-03T10:15:30Z", "POINT (10 10)")
      val sf2 = ScalaSimpleFeature.create(sft, "1", "test", 10, "2017-01-03T10:15:30Z", "POINT (-75 38)")
      val ps = CompositeScheme(Seq(
        DateTimeScheme("yyy/DDD", ChronoUnit.DAYS, 1, "dtg", 2),
        XZ2Scheme(20, "geom", 3)
      ))
      ps.getPartitionName(sf) mustEqual "2017/003/1052614"
      ps.getPartitionName(sf2) mustEqual "2017/003/0843360"
    }
    "return correct date partitions" >> {
      val ps = DateTimeScheme("yyyy/DDD/HH", ChronoUnit.HOURS, 1, "dtg", 2)
      val filter = ECQL.toFilter("dtg >= '2016-08-03T00:00:00.000Z' and dtg < '2016-08-03T01:55:00.000Z'")
      val covering = ps.getSimplifiedFilters(filter)
      covering must beSome
      covering.get must haveSize(2)
      // the fully-covered hour gets INCLUDE; the partial hour keeps the filter
      covering.get.map(_.filter) must containTheSameElementsAs(Seq(Filter.INCLUDE, filter))
      foreach(covering.get)(_.partial must beFalse)
      covering.get.find(_.filter == Filter.INCLUDE).map(_.partitions.size) must beSome(1)
      covering.get.find(_.filter != Filter.INCLUDE).map(_.partitions.size) must beSome(1)
    }
    "2 bit datetime z2 partition" >> {
      val ps = Z2Scheme(2, "geom", 3)
      val spatial = ps.getSimplifiedFilters(ECQL.toFilter("bbox(geom,-179,-89,179,89)"))
      spatial must beSome
      spatial.get must haveSize(1)
      spatial.get.head.partial must beFalse
      spatial.get.head.partitions must haveSize(4)
      // a purely temporal filter cannot be simplified by a spatial scheme
      val temporal = ps.getSimplifiedFilters(
        ECQL.toFilter("dtg >= '2016-08-03T00:00:00.000Z' and dtg < '2016-08-04T00:00:00.000Z'"))
      temporal must beNone
    }
    "2 bit z2 with date" >> {
      val filter = ECQL.toFilter("dtg >= '2016-08-03T00:00:00.000Z' and dtg < '2016-08-04T00:00:00.000Z'")
      // named option string and explicit composite construction must agree
      foreach(Seq(
        PartitionSchemeFactory.load(sft, NamedOptions("hourly,z2-2bit")),
        CompositeScheme(Seq(DateTimeScheme("yyy/DDD/HH", ChronoUnit.HOURS, 1, "dtg", 2), Z2Scheme(2, "geom", 3)))
      )) { ps =>
        val covering = ps.getSimplifiedFilters(filter)
        covering must beSome
        covering.get must haveSize(1)
        covering.get.head.filter mustEqual Filter.INCLUDE
        covering.get.head.partial must beTrue
        covering.get.head.partitions must haveSize(24)
      }
    }
    "2 bit with filter" >> {
      val ps = Z2Scheme(2, "geom", 3)
      val filters = Seq(
        ("bbox(geom, -180, -90, 180, 90)", 4),
        ("bbox(geom, -1, -1, 1, 1)", 4),
        ("bbox(geom, -10, 5, 10, 6)", 2)
      )
      foreach(filters) { case (filter, count) =>
        val covering = ps.getSimplifiedFilters(ECQL.toFilter(filter))
        covering must beSome
        covering.get must haveSize(1)
        covering.get.head.partial must beFalse
        covering.get.head.partitions must haveSize(count)
      }
    }
    "calculate covering filters for z2" >> {
      foreach(Seq(2, 4, 8)) { bits =>
        val ps = Z2Scheme(bits, "geom", 3)
        val partitions = (0 until math.pow(2, bits).toInt).map(_.toString)
        val filters = partitions.map(ps.getCoveringFilter)
        val envelopes = filters.map(BoundsFilterVisitor.visit(_))
        // verify none of the envelopes overlap (common borders are ok)
        foreach(envelopes.tails.toSeq.dropRight(1)) { tails =>
          foreach(tails.tail) { t =>
            val i = t.intersection(tails.head)
            i.isEmpty || i.getWidth == 0 || i.getHeight == 0 must beTrue
          }
        }
        // verify the envelopes cover the entire world
        envelopes.map(_.getArea).sum mustEqual 360d * 180
      }
    }
    "exclude endpoints in covering z2 filters" >> {
      val ps = Z2Scheme(4, "geom", 3)
      val partitions = (0 until 16).map(_.toString)
      val checks = partitions.map { p =>
        val filter = ps.getCoveringFilter(p)
        val decomposed = decomposeAnd(filter)
        val envelope = BoundsFilterVisitor.visit(filter)
        val xInclusive = envelope.getMaxX == 180d
        val yInclusive = envelope.getMaxY == 90d
        (decomposed, xInclusive, yInclusive)
      }
      // exactly one partition touches the +x/+y corner; one row/column each edge
      checks.count { case (_, xInclusive, yInclusive) => xInclusive && yInclusive } mustEqual 1
      checks.count { case (_, xInclusive, _) => xInclusive } mustEqual 4
      checks.count { case (_, _, yInclusive) => yInclusive } mustEqual 4
      foreach(checks) { case (decomposed, xInclusive, yInclusive) =>
        // interior partitions need strict '<' bounds, expressed via getX/getY
        val functions = decomposed.collect { case lt: PropertyIsLessThan =>
          checkOrder(lt.getExpression2, lt.getExpression1) match {
            case Some(f: FunctionLiteral) => f.function.getName
            case _ => null
          }
        }
        if (xInclusive && yInclusive) {
          decomposed must haveLength(1)
        } else if (xInclusive) {
          decomposed must haveLength(2)
          functions mustEqual Seq("getY")
        } else if (yInclusive) {
          decomposed must haveLength(2)
          functions mustEqual Seq("getX")
        } else {
          decomposed must haveLength(3)
          functions must containTheSameElementsAs(Seq("getX", "getY"))
        }
      }
    }
    "calculate covering filters for composite datetime z2" >> {
      val ps = CompositeScheme(Seq(DateTimeScheme("yyyy/MM/dd", ChronoUnit.DAYS, 1, "dtg", 2), Z2Scheme(2, "geom", 3)))
      val expected =
        ECQL.toFilter("bbox(geom,0,0,180,90) AND dtg >= '2018-01-01T00:00:00.000Z' AND dtg < '2018-01-02T00:00:00.000Z'")
      // compare toString to get around crs comparison failures in bbox
      decomposeAnd(ps.getCoveringFilter("2018/01/01/3")).map(_.toString) must
          containTheSameElementsAs(decomposeAnd(expected).map(_.toString))
    }
    "calculate covering filters for monthly datetime" >> {
      import DateTimeScheme.Formats._
      forall(Seq(Minute, Hourly, Daily, Weekly, Monthly, JulianMinute, JulianHourly, JulianDaily)) { format =>
        val ps = DateTimeScheme(format.formatter, format.unit, 1, "dtg", 2)
        val partition = ps.getPartitionName(sf)
        val start = DateParsing.parse(partition, format.formatter)
        val end = start.plus(1, format.unit)
        val expected = ECQL.toFilter(s"dtg >= '${DateParsing.format(start)}' AND dtg < '${DateParsing.format(end)}'")
        decomposeAnd(ps.getCoveringFilter(partition)) must containTheSameElementsAs(decomposeAnd(expected))
      }
    }
    "calculate covering filters for cql" >> {
      import DateTimeScheme.Formats._
      foreach(Seq(Minute, Hourly, Daily, Weekly, Monthly, JulianMinute, JulianHourly, JulianDaily)) { format =>
        val ps = DateTimeScheme(format.formatter, format.unit, 1, "dtg", 2)
        val partition = ps.getPartitionName(sf)
        val start = DateParsing.parse(partition, format.formatter)
        val end = start.plus(1, format.unit)
        val filter = ECQL.toFilter(s"dtg >= '${DateParsing.format(start)}' AND dtg < '${DateParsing.format(end)}'")
        val partitions = ps.getIntersectingPartitions(filter)
        partitions must beSome(Seq(partition))
      }
    }
    "4 bit with filter" >> {
      val ps = Z2Scheme(4, "geom", 3)
      val filters = Seq(
        ("bbox(geom, -180, -90, 180, 90)", 16),
        ("bbox(geom, -1, -1, 1, 1)", 4),
        ("bbox(geom, -10, 5, 10, 6)", 2),
        ("bbox(geom, -90, 5, 90, 6)", 3),
        ("bbox(geom, -90.000000001, 5, 90, 6)", 4),
        ("bbox(geom, -90.000000001, 5, 180, 6)", 4)
      )
      foreach(filters) { case (filter, count) =>
        val covering = ps.getSimplifiedFilters(ECQL.toFilter(filter))
        covering must beSome
        covering.get must haveSize(1)
        covering.get.head.partial must beFalse
        covering.get.head.partitions must haveSize(count)
      }
    }
    "handle edge boundaries" >> {
      val dtScheme = DateTimeScheme("yyyy/yyyyMMdd", ChronoUnit.DAYS, 1, "dtg", 2)
      // exclusive lower bound: the first day is only partially covered
      val exclusive = ECQL.toFilter("dtg > '2017-01-02' and dtg < '2017-01-04T00:00:00.000Z'")
      val twoDays = dtScheme.getSimplifiedFilters(exclusive)
      twoDays must beSome
      twoDays.get must haveSize(2)
      twoDays.get.map(_.filter) must containTheSameElementsAs(Seq(Filter.INCLUDE, exclusive))
      foreach(twoDays.get)(_.partial must beFalse)
      twoDays.get.find(_.filter == Filter.INCLUDE).map(_.partitions) must beSome(Seq("2017/20170103"))
      twoDays.get.find(_.filter != Filter.INCLUDE).map(_.partitions) must beSome(Seq("2017/20170102"))
      // inclusive bounds that just spill into a third day
      val inclusive = ECQL.toFilter("dtg >= '2017-01-02' and dtg <= '2017-01-04T00:00:00.001Z'")
      val threeDays = dtScheme.getSimplifiedFilters(inclusive)
      threeDays must beSome
      threeDays.get must haveSize(2)
      threeDays.get.map(_.filter) must containTheSameElementsAs(Seq(Filter.INCLUDE, inclusive))
      foreach(threeDays.get)(_.partial must beFalse)
      threeDays.get.find(_.filter == Filter.INCLUDE).map(_.partitions) must beSome(containTheSameElementsAs(Seq("2017/20170102", "2017/20170103")))
      threeDays.get.find(_.filter != Filter.INCLUDE).map(_.partitions) must beSome(Seq("2017/20170104"))
    }
  }
}
| locationtech/geomesa | geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-common/src/test/scala/org/locationtech/geomesa/fs/storage/common/partitions/PartitionSchemeTest.scala | Scala | apache-2.0 | 14,154 |
package learnscalaforgreatgood
import akka.actor._
import akka.routing.RoundRobinPool
import org.json4s.DefaultFormats
import spray.httpx.Json4sSupport
import spray.routing._
import scala.util.{Failure, Success}
/** Factory for the [[Resource]] route actor's underlying instance. */
object Resource {
  /** Builds a fresh `Resource`; the explicit empty parameter list keeps
   *  the `Resource()` call shape used by `Props`.
   */
  def apply(): Resource = new Resource
}
/** Spray HTTP route actor exposing the Domain CRUD API under `/api`
 * plus a static index page under `/static`.
 */
class Resource() extends HttpServiceActor with ActorLogging with Json4sSupport {
  import DomainConverter.formats
  import DomainSchema._
  implicit val system = context.system
  // required by Json4sSupport for (de)serialization — TODO confirm it picks
  // this member up rather than the imported DomainConverter.formats
  val json4sFormats = DefaultFormats
  // round-robin pool of 5 Service workers; NOTE(review): only referenced from
  // the commented-out line below, so currently unused at runtime
  val service = context.actorOf(Props(Service()).withRouter(RoundRobinPool(nrOfInstances = 5)))
  def receive = runRoute(api)
  // route-structure reference: http://stackoverflow.com/a/22099919
  lazy val api = pathPrefix("api") {
    // GET /api  -> all domains; PUT /api -> upsert one domain
    pathEndOrSingleSlash {
      get {
        complete(DomainSchema.all)
      } ~
      put {
        entity(as[Domain]) { domain =>
          //service ! (ctx, domain)
          complete(DomainSchema.put(domain))
        }
      }
    } ~
    // GET /api/dropcreate -> health-style stub (does not actually drop/create)
    pathPrefix("dropcreate") {
      pathEndOrSingleSlash {
        get {
          complete("dropcreate:ok")
        }
      }
    } ~
    // GET /api/<id> -> single domain by integer key
    pathPrefix(IntNumber) { k =>
      pathEndOrSingleSlash {
        get {
          complete(DomainSchema.get(k))
        }
      }
    }
  } ~
  // GET /static -> serve the SPA entry point from disk
  pathPrefix("static") {
    pathEnd {
      getFromFile("web/index.html")
    }
  }
}
| mateuszjancy/scala-spray-akka | skeleton/src/main/scala/learnscalaforgreatgood/Resource.scala | Scala | apache-2.0 | 1,351 |
/*
* Copyright (C) 2009-2011 Mathias Doenitz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.parboiled.scala.rules
import org.parboiled.matchers._
/**
 * The base class of all rules that simply remove a certain number of elements
 * off the top of the value stack without pushing anything back.
 */
abstract class PopRule extends Rule
/**
 * A rule removing the top value stack element with a given type.
 * The `~` overloads encode, at the type level, how sequencing this rule with
 * another pop/reduction rule accumulates the total number of popped elements.
 */
class PopRule1[-Z](val matcher: Matcher) extends PopRule {
  // sequencing with other pop rules: popped arities add up (1 + 2 = 3, 1 + 1 = 2)
  def ~[X, Y](other: PopRule2[X, Y]) = new PopRule3[X, Y, Z](append(other))
  def ~[Y](other: PopRule1[Y]) = new PopRule2[Y, Z](append(other))
  // sequencing with reduction rules widens the reduction by the one element popped here
  def ~[X, Y, A](other: ReductionRule2[X, Y, A]) = new ReductionRule3[X, Y, Z, A](append(other))
  def ~[X, Y, A, B](other: ReductionRule2_2[X, Y, A, B]) = new ReductionRule3_2[X, Y, Z, A, B](append(other))
  def ~[X, Y, A, B, C](other: ReductionRule2_3[X, Y, A, B, C]) = new ReductionRule3_3[X, Y, Z, A, B, C](append(other))
  def ~[X, Y, A, B, C, D](other: ReductionRule2_4[X, Y, A, B, C, D]) = new ReductionRule3_4[X, Y, Z, A, B, C, D](append(other))
  def ~[X, Y, A, B, C, D, E](other: ReductionRule2_5[X, Y, A, B, C, D, E]) = new ReductionRule3_5[X, Y, Z, A, B, C, D, E](append(other))
  def ~[X, Y, A, B, C, D, E, F](other: ReductionRule2_6[X, Y, A, B, C, D, E, F]) = new ReductionRule3_6[X, Y, Z, A, B, C, D, E, F](append(other))
  def ~[X, Y, A, B, C, D, E, F, G](other: ReductionRule2_7[X, Y, A, B, C, D, E, F, G]) = new ReductionRule3_7[X, Y, Z, A, B, C, D, E, F, G](append(other))
  def ~[Y, A](other: ReductionRule1[Y, A]) = new ReductionRule2[Y, Z, A](append(other))
  def ~[Y, A, B](other: ReductionRule1_2[Y, A, B]) = new ReductionRule2_2[Y, Z, A, B](append(other))
  def ~[Y, A, B, C](other: ReductionRule1_3[Y, A, B, C]) = new ReductionRule2_3[Y, Z, A, B, C](append(other))
  def ~[Y, A, B, C, D](other: ReductionRule1_4[Y, A, B, C, D]) = new ReductionRule2_4[Y, Z, A, B, C, D](append(other))
  def ~[Y, A, B, C, D, E](other: ReductionRule1_5[Y, A, B, C, D, E]) = new ReductionRule2_5[Y, Z, A, B, C, D, E](append(other))
  def ~[Y, A, B, C, D, E, F](other: ReductionRule1_6[Y, A, B, C, D, E, F]) = new ReductionRule2_6[Y, Z, A, B, C, D, E, F](append(other))
  def ~[Y, A, B, C, D, E, F, G](other: ReductionRule1_7[Y, A, B, C, D, E, F, G]) = new ReductionRule2_7[Y, Z, A, B, C, D, E, F, G](append(other))
  // sequencing with a Rule1 turns this into a reduction of the popped element
  def ~[A](other: Rule1[A]) = new ReductionRule1[Z, A](append(other))
  // ordered choice keeps the more specific popped type
  def |[ZZ <: Z](other: PopRule1[ZZ]) = new PopRule1[ZZ](appendChoice(other))
  protected def withMatcher(matcher: Matcher) = new PopRule1[Z](matcher).asInstanceOf[this.type]
}
/**
 * A rule removing the top two value stack elements with given types.
 * Overloads mirror [[PopRule1]] but start from an arity of two.
 */
class PopRule2[-Y, -Z](val matcher: Matcher) extends PopRule {
  def ~[X](other: PopRule1[X]) = new PopRule3[X, Y, Z](append(other))
  def ~[X, A](other: ReductionRule1[X, A]) = new ReductionRule3[X, Y, Z, A](append(other))
  def ~[X, A, B](other: ReductionRule1_2[X, A, B]) = new ReductionRule3_2[X, Y, Z, A, B](append(other))
  def ~[X, A, B, C](other: ReductionRule1_3[X, A, B, C]) = new ReductionRule3_3[X, Y, Z, A, B, C](append(other))
  def ~[X, A, B, C, D](other: ReductionRule1_4[X, A, B, C, D]) = new ReductionRule3_4[X, Y, Z, A, B, C, D](append(other))
  def ~[X, A, B, C, D, E](other: ReductionRule1_5[X, A, B, C, D, E]) = new ReductionRule3_5[X, Y, Z, A, B, C, D, E](append(other))
  def ~[X, A, B, C, D, E, F](other: ReductionRule1_6[X, A, B, C, D, E, F]) = new ReductionRule3_6[X, Y, Z, A, B, C, D, E, F](append(other))
  def ~[X, A, B, C, D, E, F, G](other: ReductionRule1_7[X, A, B, C, D, E, F, G]) = new ReductionRule3_7[X, Y, Z, A, B, C, D, E, F, G](append(other))
  def ~[A](other: Rule1[A]) = new ReductionRule2[Y, Z, A](append(other))
  def |[YY <: Y, ZZ <: Z](other: PopRule2[YY, ZZ]) = new PopRule2[YY, ZZ](appendChoice(other))
  protected def withMatcher(matcher: Matcher) = new PopRule2[Y, Z](matcher).asInstanceOf[this.type]
}
/**
 * A rule removing the top three value stack elements with given types.
 * Three is the maximum pop arity the library models, so only `Rule1`
 * sequencing and choice remain.
 */
class PopRule3[-X, -Y, -Z](val matcher: Matcher) extends PopRule {
  def ~[A](other: Rule1[A]) = new ReductionRule3[X, Y, Z, A](append(other))
  def |[XX <: X, YY <: Y, ZZ <: Z](other: PopRule3[XX, YY, ZZ]) = new PopRule3[XX, YY, ZZ](appendChoice(other))
  protected def withMatcher(matcher: Matcher) = new PopRule3[X, Y, Z](matcher).asInstanceOf[this.type]
}
| sirthias/parboiled | parboiled-scala/src/main/scala/org/parboiled/scala/rules/PopRule.scala | Scala | apache-2.0 | 4,842 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.torch
import com.intel.analytics.bigdl.nn.PairwiseDistance
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T
import scala.util.Random
@com.intel.analytics.bigdl.tags.Serial
/** Cross-checks the Scala PairwiseDistance layer against the reference Torch
 * (Lua) implementation, for 1-D and batched 2-D inputs. Both forward output
 * and backward gradients must match. Requires a local Torch install
 * (guarded by `torchCheck()`).
 */
class PairwiseDistanceSpec extends TorchSpec {
  "A PairwiseDistance with one dimension input" should "generate correct output and grad" in {
    torchCheck()
    // fixed seed so the random inputs are reproducible on both sides
    val seed = 100
    Random.setSeed(seed)
    val module = new PairwiseDistance[Double](1)
    val input1 = Tensor[Double](10).apply1(_ => Random.nextDouble())
    val input2 = Tensor[Double](10).apply1(_ => Random.nextDouble())
    val input = T(1.0 -> input1, 2.0 -> input2)
    val gradOutput = Tensor[Double](1).randn()
    val start = System.nanoTime()
    val output = module.forward(input)
    val gradInput = module.backward(input, gradOutput)
    val scalaTime = System.nanoTime() - start
    val code =
      s"""
      torch.manualSeed($seed)
      module = nn.PairwiseDistance(1)
      output = module:forward(input)
      gradInput = module:backward(input, gradOutput)
      gradInput1 = gradInput[1]
      gradInput2 = gradInput[2]
      """.stripMargin
    val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
      Array("output", "gradInput1", "gradInput2"))
    val torchOutput = torchResult("output").asInstanceOf[Tensor[Double]]
    val torchgradInput1 = torchResult("gradInput1").asInstanceOf[Tensor[Double]]
    val torchgradInput2 = torchResult("gradInput2").asInstanceOf[Tensor[Double]]
    val torchgradInput = T(torchgradInput1, torchgradInput2)
    torchOutput should be (output)
    torchgradInput should be (gradInput)
    println("Test case : PairwiseDistance, Torch : " + luaTime +
      " s, Scala : " + scalaTime / 1e9 +
      " s")
  }
  // NOTE(review): near-duplicate of the 1-D case above (size 5 batches of
  // 10-dim vectors) — could be factored into a shared helper.
  "A PairwiseDistance with two dimension input" should "generate correct output and grad" in {
    torchCheck()
    val seed = 100
    Random.setSeed(seed)
    val module = new PairwiseDistance[Double](5)
    val input1 = Tensor[Double](5, 10).apply1(_ => Random.nextDouble())
    val input2 = Tensor[Double](5, 10).apply1(_ => Random.nextDouble())
    val input = T(1.0 -> input1, 2.0 -> input2)
    val gradOutput = Tensor[Double](5).randn()
    val start = System.nanoTime()
    val output = module.forward(input)
    val gradInput = module.backward(input, gradOutput)
    val scalaTime = System.nanoTime() - start
    val code =
      s"""
      torch.manualSeed($seed)
      module = nn.PairwiseDistance(5)
      output = module:forward(input)
      gradInput = module:backward(input, gradOutput)
      gradInput1 = gradInput[1]
      gradInput2 = gradInput[2]
      """.stripMargin
    val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
      Array("output", "gradInput1", "gradInput2"))
    val torchOutput = torchResult("output").asInstanceOf[Tensor[Double]]
    val torchgradInput1 = torchResult("gradInput1").asInstanceOf[Tensor[Double]]
    val torchgradInput2 = torchResult("gradInput2").asInstanceOf[Tensor[Double]]
    val torchgradInput = T(torchgradInput1, torchgradInput2)
    torchOutput should be (output)
    torchgradInput should be (gradInput)
    println("Test case : PairwiseDistance, Torch : " + luaTime +
      " s, Scala : " + scalaTime / 1e9 +
      " s")
  }
}
| jenniew/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/PairwiseDistanceSpec.scala | Scala | apache-2.0 | 3,984 |
package com.twitter.scalding.db.macros.impl.handler
import scala.language.experimental.macros
import scala.reflect.macros.Context
import scala.reflect.runtime.universe._
import scala.util.Success
import com.twitter.scalding.db.macros.impl.FieldName
/** Macro-time handler mapping a case-class date field to its SQL column type:
 * fields carrying the "date" annotation become DATE columns, all others
 * become DATETIME.
 */
object DateTypeHandler {
  // NOTE(review): the implicit `defaultValue` and `nullable` parameters are
  // accepted for signature uniformity with the other handlers but unused here.
  def apply[T](c: Context)(implicit accessorTree: List[c.universe.MethodSymbol],
    fieldName: FieldName,
    defaultValue: Option[c.Expr[String]],
    annotationInfo: List[(c.universe.Type, Option[Int])],
    nullable: Boolean): scala.util.Try[List[ColumnFormat[c.type]]] = {
    import c.universe._
    // early-definition anonymous subclass wires the macro context and field
    // metadata into the shared AnnotationHelper before its body runs
    val helper = new {
      val ctx: c.type = c
      val cfieldName = fieldName
      val cannotationInfo = annotationInfo
    } with AnnotationHelper
    // consume the date annotation, then require that no annotations remain
    val extracted = for {
      (nextHelper, dateAnno) <- helper.dateAnnotation
      _ <- nextHelper.validateFinished
    } yield (dateAnno)
    // both branches succeed; flatMap (rather than map) keeps any earlier
    // Failure from the extraction step
    extracted.flatMap {
      case WithDate => Success(List(ColumnFormat(c)(accessorTree, "DATE", None)))
      case WithoutDate => Success(List(ColumnFormat(c)(accessorTree, "DATETIME", None)))
    }
  }
}
| tdyas/scalding | scalding-db/src/main/scala/com/twitter/scalding/db/macros/impl/handler/DateTypeHandler.scala | Scala | apache-2.0 | 1,086 |
package lila.security
import scala.concurrent.duration._
import java.net.InetAddress
import org.joda.time.DateTime
import ornicar.scalalib.Random
import play.api.libs.json._
import play.api.mvc.Results.Redirect
import play.api.mvc.{ RequestHeader, Handler, Action, Cookies }
import play.modules.reactivemongo.json.ImplicitBSONHandlers._
import spray.caching.{ LruCache, Cache }
import lila.common.LilaCookie
import lila.common.PimpedJson._
import lila.db.api._
import tube.firewallTube
/** IP/cookie based request firewall backed by a mongo collection of banned
 * IPs, with an in-memory cache of the banned set.
 *
 * @param cookieName optional name of the "infection" marker cookie
 * @param enabled when false, `blocks` always answers false
 * @param cachedIpsTtl how long the cached banned-IP set is kept before refetch
 */
final class Firewall(
    cookieName: Option[String],
    enabled: Boolean,
    cachedIpsTtl: Duration) {
  // def requestHandler(req: RequestHeader): Fu[Option[Handler]] =
  //   cookieName.filter(_ => enabled) ?? { cn =>
  //     blocksIp(req.remoteAddress) map { bIp =>
  //       val bCs = blocksCookies(req.cookies, cn)
  //       if (bIp && !bCs) infectCookie(cn)(req).some
  //       else if (bCs && !bIp) { blockIp(req.remoteAddress); None }
  //       else None
  //     }
  //   }
  // a request is blocked when its IP is banned, or (if configured) when it
  // carries the infection cookie
  def blocks(req: RequestHeader): Fu[Boolean] = if (enabled) {
    cookieName.fold(blocksIp(req.remoteAddress)) { cn =>
      blocksIp(req.remoteAddress) map (_ || blocksCookies(req.cookies, cn))
    }
  }
  else fuccess(false)
  def accepts(req: RequestHeader): Fu[Boolean] = blocks(req) map (!_)
  // upserts the IP into the ban collection, then drops the cache;
  // NOTE(review): silently a no-op for strings that fail `validIp`
  def blockIp(ip: String): Funit = validIp(ip) ?? {
    $update(Json.obj("_id" -> ip), Json.obj("_id" -> ip, "date" -> $date(DateTime.now)), upsert = true) >>- refresh
  }
  def unblockIps(ips: Iterable[String]): Funit =
    $remove($select.byIds(ips filter validIp)) >>- refresh
  // plants the marker cookie on a blocked client and bounces it to the home page
  private def infectCookie(name: String)(implicit req: RequestHeader) = Action {
    log("Infect cookie " + formatReq(req))
    val cookie = LilaCookie.cookie(name, Random nextStringUppercase 32)
    Redirect("/") withCookies cookie
  }
  def blocksIp(ip: String): Fu[Boolean] = ips contains ip
  // procedure syntax (no '='): deprecated style, kept as-is
  private def refresh {
    ips.clear
  }
  private def log(msg: Any) {
    loginfo("[%s] %s".format("firewall", msg.toString))
  }
  private def formatReq(req: RequestHeader) =
    "%s %s %s".format(req.remoteAddress, req.uri, req.headers.get("User-Agent") | "?")
  private def blocksCookies(cookies: Cookies, name: String) =
    (cookies get name).isDefined
  // http://stackoverflow.com/questions/106179/regular-expression-to-match-hostname-or-ip-address
  private val ipRegex = """^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$""".r
  // IPv4 only; localhost and the all-zero address are never bannable
  private def validIp(ip: String) =
    (ipRegex matches ip) && ip != "127.0.0.1" && ip != "0.0.0.0"
  private type IP = Vector[Byte]
  // anonymous structural object caching the banned-IP set; NOTE(review):
  // structural-type member access goes through reflection — consider a
  // private class instead
  private lazy val ips = new {
    private val cache: Cache[Set[IP]] = LruCache(timeToLive = cachedIpsTtl)
    private def strToIp(ip: String) = InetAddress.getByName(ip).getAddress.toVector
    def apply: Fu[Set[IP]] = cache(true)(fetch)
    def clear { cache.clear }
    def contains(ip: String) = apply map (_ contains strToIp(ip))
    def fetch: Fu[Set[IP]] =
      $primitive($select.all, "_id")(_.asOpt[String]) map { _.map(strToIp).toSet }
  }
}
| bjhaid/lila | modules/security/src/main/Firewall.scala | Scala | mit | 3,052 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.enhancement
/** Marker trait grouping Henshin Mystic enhancements under the monk
 * enhancement hierarchy. */
trait HenshinMystic extends MonkEnhancement
| adarro/ddo-calc | subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/enhancement/HenshinMystic.scala | Scala | apache-2.0 | 754 |
package s99
/** P11: modified run-length encoding.
 *
 * Runs of repeated elements become `(count, element)` pairs, while elements
 * occurring only once are copied into the result bare — hence the
 * heterogeneous `List[Any]` result type.
 */
object P11 {
  def encodeModified[E](list: List[E]): List[Any] =
    P09.pack(list).map {
      case single :: Nil => single
      case run => (run.length, run.head)
    }
}
| qilab-/algorithm-problems | s-99/src/main/scala/s99/P11.scala | Scala | unlicense | 198 |
package fr.iscpif.doors.client
/*
* Copyright (C) 25/10/16 // mathieu.leclaire@openmole.org
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/** Miscellaneous client-side helpers. */
object Utils {
  /** Returns a freshly generated random (type-4) UUID rendered in its
   *  canonical 36-character string form.
   */
  def uuid: String = java.util.UUID.randomUUID().toString
}
| ISCPIF/doors | client/src/main/scala/fr/iscpif/doors/client/Utils.scala | Scala | agpl-3.0 | 827 |
package sangria.ast
import sangria.execution.InputDocumentMaterializer
import sangria.marshalling.{FromInput, InputUnmarshaller}
import sangria.parser.{AggregateSourceMapper, DeliveryScheme, SourceMapper}
import sangria.renderer.QueryRenderer
import sangria.validation.DocumentAnalyzer
import sangria.schema.{InputType, Schema}
import sangria.validation.TypeInfo
import sangria.visitor._
import scala.collection.immutable.ListMap
/** A complete GraphQL request operated on by a GraphQL service.
  *
  * @param definitions
  *   The definitions, which primarily constitute the document.
  * @param trailingComments
  *   Comments trailing the last definition.
  * @param location
  *   Source location of the document start, when parsed.
  * @param sourceMapper
  *   Optional mapping back to the original source text (for error reporting).
  * @see
  *   [[https://spec.graphql.org/June2018/#Document]]
  */
case class Document(
    definitions: Vector[Definition],
    override val trailingComments: Vector[Comment] = Vector.empty,
    override val location: Option[AstLocation] = None,
    sourceMapper: Option[SourceMapper] = None)
    extends AstNode
    with WithTrailingComments {
  /** Map of operation name to its definition. */
  lazy val operations: Map[Option[String], OperationDefinition] = Map(definitions.collect {
    case op: OperationDefinition =>
      op.name -> op
  }: _*)
  /** Map of fragment name to its definition. */
  lazy val fragments: Map[String, FragmentDefinition] = Map(definitions.collect {
    case fragment: FragmentDefinition =>
      fragment.name -> fragment
  }: _*)
  lazy val source: Option[String] = sourceMapper.map(_.source)
  def operationType(operationName: Option[String] = None): Option[OperationType] =
    operation(operationName).map(_.operationType)
  /** Return the operation for the given name.
    *
    * @return
    *   `None`, if no operations are defined or if the given name is ambiguous
    */
  def operation(operationName: Option[String] = None): Option[OperationDefinition] =
    if (operationName.isEmpty && operations.size != 1)
      None
    else if (operationName.isEmpty && operations.size == 1)
      Some(operations.head._2)
    else
      operationName
        .flatMap(opName => operations.get(Some(opName)))
        .orElse(
          operations.values.headOption
        ) // FIXME: falls back to the *first* operation when the requested name
          // does not exist — surprising, but callers may depend on it
  /** Drops the source mapper, e.g. before caching the AST. */
  def withoutSourceMapper: Document = copy(sourceMapper = None)
  override def canEqual(other: Any): Boolean = other.isInstanceOf[Document]
  /** Merges two documents. The `sourceMapper`s are combined. */
  def merge(other: Document): Document = Document.merge(Vector(this, other))
  /** An alias for `merge`
    */
  def +(other: Document): Document = merge(other)
  lazy val analyzer: DocumentAnalyzer = DocumentAnalyzer(this)
  lazy val separateOperations: Map[Option[String], Document] = analyzer.separateOperations
  def separateOperation(definition: OperationDefinition): Document =
    analyzer.separateOperation(definition)
  def separateOperation(operationName: Option[String]): Option[Document] =
    analyzer.separateOperation(operationName)
  // equality deliberately ignores sourceMapper and trailingComments: two
  // documents with the same definitions and location compare equal
  override def equals(other: Any): Boolean = other match {
    case that: Document =>
      that.canEqual(this) &&
      definitions == that.definitions &&
      location == that.location
    case _ => false
  }
  // cached since Document is immutable and hashing all definitions is costly
  private[this] lazy val hash =
    Seq(definitions, location).map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
  override def hashCode(): Int = hash
}
object Document {
  /** Provided a collection of ASTs, presumably each from different files, concatenate the ASTs
    * together into batched AST, useful for validating many GraphQL source files which together
    * represent one conceptual application.
    *
    * The result `Document` will retain correlation to the original `sourceMapper`s.
    */
  def merge(documents: Traversable[Document]): Document = {
    val originalSourceMappers = documents.flatMap(_.sourceMapper).toVector
    // only build an aggregate mapper when at least one input had one
    val sourceMapper =
      if (originalSourceMappers.nonEmpty) Some(AggregateSourceMapper.merge(originalSourceMappers))
      else None
    Document(documents.toVector.flatMap(_.definitions), sourceMapper = sourceMapper)
  }
  /** The most basic, but valid document with a stub `Query` type
    * (a single `stub: String` field) — handy as a starting point for
    * schema extension.
    */
  val emptyStub: Document =
    Document(
      Vector(
        ObjectTypeDefinition(
          "Query",
          Vector.empty,
          Vector(FieldDefinition("stub", NamedType("String"), Vector.empty)))))
}
/** A parsed document that contains only input [[Value values]] (as opposed to operations). */
case class InputDocument(
    values: Vector[Value],
    trailingComments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None,
    sourceMapper: Option[SourceMapper] = None)
    extends AstNode
    with WithTrailingComments {

  /** The original source text, if a `sourceMapper` is available. */
  lazy val source: Option[String] = sourceMapper.map(_.source)

  /** Merges two documents. The `sourceMapper` is lost along the way.
    */
  def merge(other: InputDocument): InputDocument = InputDocument.merge(Vector(this, other))

  /** An alias for `merge`
    */
  def +(other: InputDocument): InputDocument = merge(other)

  /** Materializes the document's values as instances of `T`, validated against `inputType` of
    * the given schema.
    */
  def to[T](
      schema: Schema[_, _],
      inputType: InputType[T]
  )(implicit fromInput: FromInput[T], scheme: DeliveryScheme[Vector[T]]): scheme.Result =
    InputDocumentMaterializer.to(schema, this, inputType)

  /** As above, with variable values substituted from `variables`. */
  def to[T, Vars](
      schema: Schema[_, _],
      inputType: InputType[T],
      variables: Vars
  )(implicit
      iu: InputUnmarshaller[Vars],
      fromInput: FromInput[T],
      scheme: DeliveryScheme[Vector[T]]): scheme.Result =
    InputDocumentMaterializer.to(schema, this, inputType, variables)

  /** Schema-less variant: materializes against the input type alone. */
  def to[T](inputType: InputType[T])(implicit
      fromInput: FromInput[T],
      scheme: DeliveryScheme[Vector[T]]): scheme.Result =
    InputDocumentMaterializer.to(this, inputType)

  def to[T, Vars](
      inputType: InputType[T],
      variables: Vars = InputUnmarshaller.emptyMapVars
  )(implicit
      iu: InputUnmarshaller[Vars],
      fromInput: FromInput[T],
      scheme: DeliveryScheme[Vector[T]]): scheme.Result =
    InputDocumentMaterializer.to(this, inputType, variables)

  // Like Document.equals, this ignores `sourceMapper` (and `trailingComments`).
  override def equals(other: Any): Boolean = other match {
    case that: InputDocument =>
      (that.canEqual(this)) &&
      values == that.values &&
      location == that.location
    case _ => false
  }

  // NOTE(review): unlike `Document`, the hash is not cached here — presumably fine because
  // input documents are small; confirm if they are ever used as hot map keys.
  override def hashCode(): Int =
    Seq(values, location).map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
}
object InputDocument {

  /** Concatenates the values of all given input documents. Any `sourceMapper`s are discarded. */
  def merge(documents: Traversable[InputDocument]): InputDocument =
    InputDocument(documents.foldLeft(Vector.empty[Value])(_ ++ _.values))
}
/** A fragment-like node that may carry a type condition ([[FragmentDefinition]] always does,
  * [[InlineFragment]] optionally).
  */
sealed trait ConditionalFragment extends AstNode {
  def typeConditionOpt: Option[NamedType]
}

/** An AST node that can carry leading comments. */
sealed trait WithComments extends AstNode {
  def comments: Vector[Comment]
}

/** An AST node that can carry an SDL description string. */
sealed trait WithDescription extends AstNode {
  def description: Option[StringValue]
}

/** A node that can carry comments appearing after its children. */
sealed trait WithTrailingComments {
  def trailingComments: Vector[Comment]
}

/** A node that contains a selection set: an operation, a fragment, or a field. */
sealed trait SelectionContainer extends AstNode with WithComments with WithTrailingComments {
  def selections: Vector[Selection]

  def location: Option[AstLocation]
}

/** A definition in a [[Document GraphQL document]].
  *
  * A GraphQL document consists primarily of definitions, which are either executable or
  * representative of a GraphQL type system. The executable definitions are
  * [[OperationDefinition operation]] and [[FragmentDefinition fragment definitions]]; those that
  * represent a type system fall into [[TypeSystemDefinition definition]] or
  * [[TypeSystemExtensionDefinition extension]] categories.
  *
  * @see
  *   [[https://spec.graphql.org/June2018/#Definition]]
  */
sealed trait Definition extends AstNode
/** A definition of a GraphQL operation.
  *
  * Every GraphQL request invokes a specific operation, possibly with values to substitute into
  * the operation's variables.
  *
  * @param name
  *   The name of the operation. Optional only if there is only one operation in the
  *   [[Document document]]. Used for selecting the specific operation to invoke in a GraphQL
  *   request.
  * @param variables
  *   The variables that must be substituted into the operation. Values for these must be
  *   provided either by their defaults or with the GraphQL request.
  *
  * @see
  *   [[https://spec.graphql.org/June2018/#OperationDefinition]]
  */
case class OperationDefinition(
    operationType: OperationType = OperationType.Query,
    name: Option[String] = None,
    variables: Vector[VariableDefinition] = Vector.empty,
    override val directives: Vector[Directive] = Vector.empty,
    override val selections: Vector[Selection],
    override val comments: Vector[Comment] = Vector.empty,
    override val trailingComments: Vector[Comment] = Vector.empty,
    override val location: Option[AstLocation] = None)
    extends Definition
    with WithDirectives
    with SelectionContainer

/** A reusable, named selection set with a type condition, applied via fragment spreads.
  *
  * NOTE(review): `variables` on fragment definitions is not part of the June 2018 spec —
  * presumably an experimental extension; confirm before relying on it.
  *
  * @see
  *   [[https://spec.graphql.org/June2018/#FragmentDefinition]]
  */
case class FragmentDefinition(
    name: String,
    typeCondition: NamedType,
    directives: Vector[Directive],
    selections: Vector[Selection],
    variables: Vector[VariableDefinition] = Vector.empty,
    comments: Vector[Comment] = Vector.empty,
    trailingComments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends Definition
    with ConditionalFragment
    with WithDirectives
    with SelectionContainer {

  /** Always defined: fragment definitions require a type condition. */
  lazy val typeConditionOpt: Option[NamedType] = Some(typeCondition)
}
/** A definition of a variable to an [[OperationDefinition operation]].
  *
  * @param name
  *   Name of the variable being defined.
  * @param tpe
  *   The declared type of the variable.
  * @param defaultValue
  *   Value that the variable should assume in an operation if none was provided with the GraphQL
  *   request.
  *
  * @see
  *   [[https://spec.graphql.org/June2018/#VariableDefinition]]
  */
case class VariableDefinition(
    name: String,
    tpe: Type,
    defaultValue: Option[Value],
    override val directives: Vector[Directive] = Vector.empty,
    override val comments: Vector[Comment] = Vector.empty,
    override val location: Option[AstLocation] = None)
    extends AstNode
    with WithComments
    with WithDirectives
/** A type reference as written in a GraphQL document, e.g. `String`, `String!` or `[String]`. */
sealed trait Type extends AstNode {

  /** The innermost [[NamedType]], obtained by unwrapping any number of list/not-null wrappers. */
  def namedType: NamedType = {
    @annotation.tailrec
    def unwrap(t: Type): NamedType = t match {
      case NotNullType(inner, _) => unwrap(inner)
      case ListType(inner, _) => unwrap(inner)
      case named: NamedType => named
    }

    unwrap(this)
  }
}
/** A reference to a type by name, e.g. `String` or `Droid`. */
case class NamedType(name: String, location: Option[AstLocation] = None) extends Type

/** A non-null wrapper around a type, written with a trailing `!`. */
case class NotNullType(ofType: Type, location: Option[AstLocation] = None) extends Type

/** A list wrapper around a type, written as `[T]`. */
case class ListType(ofType: Type, location: Option[AstLocation] = None) extends Type

/** An AST node that carries arguments ([[Field]] and [[Directive]]). */
sealed trait WithArguments extends AstNode {
  def arguments: Vector[Argument]
}

/** An entry of a selection set: a [[Field]], [[FragmentSpread]] or [[InlineFragment]]. */
sealed trait Selection extends AstNode with WithDirectives with WithComments
/** A field in a selection set.
  *
  * @see
  *   [[https://spec.graphql.org/June2018/#Field]]
  */
case class Field(
    alias: Option[String],
    name: String,
    arguments: Vector[Argument],
    directives: Vector[Directive],
    selections: Vector[Selection],
    comments: Vector[Comment] = Vector.empty,
    trailingComments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends Selection
    with SelectionContainer
    with WithArguments {

  /** The response key for this field: the alias if present, otherwise the field name. */
  lazy val outputName: String = alias.getOrElse(name)
}

/** A usage of a named [[FragmentDefinition fragment]] inside a selection set. */
case class FragmentSpread(
    name: String,
    directives: Vector[Directive],
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends Selection

/** An anonymous fragment defined inline in a selection set, with an optional type condition. */
case class InlineFragment(
    typeCondition: Option[NamedType],
    directives: Vector[Directive],
    selections: Vector[Selection],
    comments: Vector[Comment] = Vector.empty,
    trailingComments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends Selection
    with ConditionalFragment
    with SelectionContainer {

  /** May be `None`: the type condition is optional for inline fragments. */
  def typeConditionOpt: Option[NamedType] = typeCondition
}
/** A name paired with a [[Value]]: an [[Argument]] or an [[ObjectField]]. */
sealed trait NameValue extends AstNode with WithComments {
  def name: String
  def value: Value
}

/** An AST node that can carry directives. */
sealed trait WithDirectives extends AstNode {
  def directives: Vector[Directive]
}

/** An application of a directive, e.g. `@include(if: $flag)`. */
case class Directive(
    name: String,
    arguments: Vector[Argument],
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends AstNode
    with WithArguments

/** A named argument passed to a field or a directive. */
case class Argument(
    name: String,
    value: Value,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends NameValue
/** A value that can be substituted into a GraphQL operation [[VariableDefinition variable]].
  *
  * Called "input values" in the GraphQL spec. Input values can be [[ScalarValue scalars]],
  * [[EnumValue enumeration values]], [[ListValue lists]], [[ObjectValue objects]], or
  * [[NullValue null values]].
  *
  * @see
  *   [[https://spec.graphql.org/June2018/#Value]]
  * @group value
  */
sealed trait Value extends AstNode with WithComments {

  // Values render with the input-specific pretty format instead of the default `Pretty`.
  override def renderPretty: String = QueryRenderer.render(this, QueryRenderer.PrettyInput)
}
/** Base trait of all scalar input values.
  * @group scalar
  */
sealed trait ScalarValue extends Value

/** An integer value that fits into an `Int`; see [[BigIntValue]] for larger numbers.
  * @group scalar
  */
case class IntValue(
    value: Int,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends ScalarValue

/** An integer value too large for an `Int`.
  * @group scalar
  */
case class BigIntValue(
    value: BigInt,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends ScalarValue

/** A floating-point value that fits into a `Double`; see [[BigDecimalValue]] otherwise.
  * @group scalar
  */
case class FloatValue(
    value: Double,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends ScalarValue

/** An arbitrary-precision decimal value.
  * @group scalar
  */
case class BigDecimalValue(
    value: BigDecimal,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends ScalarValue

/** A string value.
  *
  * @param block
  *   `true` if the value was written as a triple-quoted block string.
  * @param blockRawValue
  *   NOTE(review): presumably the raw block-string text before indentation processing —
  *   confirm against the parser.
  * @group scalar
  */
case class StringValue(
    value: String,
    block: Boolean = false,
    blockRawValue: Option[String] = None,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends ScalarValue

/** A boolean value (`true` or `false`).
  * @group scalar
  */
case class BooleanValue(
    value: Boolean,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends ScalarValue
/** A reference to an enum value by name (unquoted, e.g. `NEWHOPE`).
  * @group value
  */
case class EnumValue(
    value: String,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends Value

/** A list of input values.
  * @group value
  */
case class ListValue(
    values: Vector[Value],
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends Value

/** A reference to a variable, e.g. `$id`.
  * @group value
  */
case class VariableValue(
    name: String,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends Value

/** The literal `null` value.
  * @group value
  */
case class NullValue(comments: Vector[Comment] = Vector.empty, location: Option[AstLocation] = None)
    extends Value

/** An input object value, e.g. `{name: "foo", size: 4}`.
  * @group value
  */
case class ObjectValue(
    fields: Vector[ObjectField],
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends Value {

  /** Field values indexed by name. `ListMap` preserves insertion order; for duplicate field
    * names the last value wins.
    */
  lazy val fieldsByName: Map[String, Value] =
    fields.foldLeft(ListMap.empty[String, Value]) { case (acc, field) =>
      acc + (field.name -> field.value)
    }
}
/** @group value
  */
object ObjectValue {

  /** Convenience constructor that builds an [[ObjectValue]] from name/value pairs. */
  def apply(fields: (String, Value)*): ObjectValue =
    ObjectValue(fields.iterator.map { case (name, value) => ObjectField(name, value) }.toVector)
}
/** A single named field inside an [[ObjectValue]]. */
case class ObjectField(
    name: String,
    value: Value,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends NameValue

/** A single `# ...` comment line from the source document. */
case class Comment(text: String, location: Option[AstLocation] = None) extends AstNode
// Schema Definition (SDL) nodes

/** An SDL scalar type definition, e.g. `scalar DateTime`. */
case class ScalarTypeDefinition(
    name: String,
    directives: Vector[Directive] = Vector.empty,
    description: Option[StringValue] = None,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends TypeDefinition
    with WithDescription {

  /** A copy of this definition under a new name (part of the [[TypeDefinition]] contract). */
  def rename(newName: String): ScalarTypeDefinition = copy(name = newName)
}

/** A field declaration inside an object or interface type definition. */
case class FieldDefinition(
    name: String,
    fieldType: Type,
    arguments: Vector[InputValueDefinition],
    directives: Vector[Directive] = Vector.empty,
    description: Option[StringValue] = None,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends SchemaAstNode
    with WithDirectives
    with WithDescription

/** An argument or input-object field declaration, with an optional default value. */
case class InputValueDefinition(
    name: String,
    valueType: Type,
    defaultValue: Option[Value],
    directives: Vector[Directive] = Vector.empty,
    description: Option[StringValue] = None,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends SchemaAstNode
    with WithDirectives
    with WithDescription
/** An SDL object type definition, e.g. `type Droid implements Character { ... }`. */
case class ObjectTypeDefinition(
    name: String,
    interfaces: Vector[NamedType],
    fields: Vector[FieldDefinition],
    directives: Vector[Directive] = Vector.empty,
    description: Option[StringValue] = None,
    comments: Vector[Comment] = Vector.empty,
    trailingComments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends TypeDefinition
    with WithTrailingComments
    with WithDescription {
  def rename(newName: String): ObjectTypeDefinition = copy(name = newName)
}

/** An SDL interface type definition, e.g. `interface Character { ... }`. */
case class InterfaceTypeDefinition(
    name: String,
    fields: Vector[FieldDefinition],
    directives: Vector[Directive] = Vector.empty,
    description: Option[StringValue] = None,
    comments: Vector[Comment] = Vector.empty,
    trailingComments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends TypeDefinition
    with WithTrailingComments
    with WithDescription {
  def rename(newName: String): InterfaceTypeDefinition = copy(name = newName)
}

/** An SDL union type definition, e.g. `union SearchResult = Human | Droid`. */
case class UnionTypeDefinition(
    name: String,
    types: Vector[NamedType],
    directives: Vector[Directive] = Vector.empty,
    description: Option[StringValue] = None,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends TypeDefinition
    with WithDescription {
  def rename(newName: String): UnionTypeDefinition = copy(name = newName)
}
/** An SDL enum type definition, e.g. `enum Episode { NEWHOPE EMPIRE JEDI }`. */
case class EnumTypeDefinition(
    name: String,
    values: Vector[EnumValueDefinition],
    directives: Vector[Directive] = Vector.empty,
    description: Option[StringValue] = None,
    comments: Vector[Comment] = Vector.empty,
    trailingComments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends TypeDefinition
    with WithTrailingComments
    with WithDescription {
  def rename(newName: String): EnumTypeDefinition = copy(name = newName)
}

/** A single value declaration inside an [[EnumTypeDefinition]]. */
case class EnumValueDefinition(
    name: String,
    directives: Vector[Directive] = Vector.empty,
    description: Option[StringValue] = None,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends SchemaAstNode
    with WithDirectives
    with WithDescription

/** An SDL input object type definition, e.g. `input ReviewInput { ... }`. */
case class InputObjectTypeDefinition(
    name: String,
    fields: Vector[InputValueDefinition],
    directives: Vector[Directive] = Vector.empty,
    description: Option[StringValue] = None,
    comments: Vector[Comment] = Vector.empty,
    trailingComments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends TypeDefinition
    with WithTrailingComments
    with WithDescription {
  def rename(newName: String): InputObjectTypeDefinition = copy(name = newName)
}
/** An SDL `extend type ...` definition. */
case class ObjectTypeExtensionDefinition(
    name: String,
    interfaces: Vector[NamedType],
    fields: Vector[FieldDefinition],
    directives: Vector[Directive] = Vector.empty,
    comments: Vector[Comment] = Vector.empty,
    trailingComments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends ObjectLikeTypeExtensionDefinition
    with WithTrailingComments {
  def rename(newName: String): ObjectTypeExtensionDefinition = copy(name = newName)
}

/** An SDL `extend interface ...` definition. */
case class InterfaceTypeExtensionDefinition(
    name: String,
    fields: Vector[FieldDefinition],
    directives: Vector[Directive] = Vector.empty,
    comments: Vector[Comment] = Vector.empty,
    trailingComments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends ObjectLikeTypeExtensionDefinition
    with WithTrailingComments {
  def rename(newName: String): InterfaceTypeExtensionDefinition = copy(name = newName)
}

/** An SDL `extend input ...` definition. */
case class InputObjectTypeExtensionDefinition(
    name: String,
    fields: Vector[InputValueDefinition],
    directives: Vector[Directive] = Vector.empty,
    comments: Vector[Comment] = Vector.empty,
    trailingComments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends TypeExtensionDefinition
    with WithTrailingComments {
  def rename(newName: String): InputObjectTypeExtensionDefinition = copy(name = newName)
}

/** An SDL `extend enum ...` definition. */
case class EnumTypeExtensionDefinition(
    name: String,
    values: Vector[EnumValueDefinition],
    directives: Vector[Directive] = Vector.empty,
    comments: Vector[Comment] = Vector.empty,
    trailingComments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends TypeExtensionDefinition
    with WithTrailingComments {
  def rename(newName: String): EnumTypeExtensionDefinition = copy(name = newName)
}

/** An SDL `extend union ...` definition. */
case class UnionTypeExtensionDefinition(
    name: String,
    types: Vector[NamedType],
    directives: Vector[Directive] = Vector.empty,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends TypeExtensionDefinition {
  def rename(newName: String): UnionTypeExtensionDefinition = copy(name = newName)
}

/** An SDL `extend scalar ...` definition. */
case class ScalarTypeExtensionDefinition(
    name: String,
    directives: Vector[Directive] = Vector.empty,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends TypeExtensionDefinition {
  def rename(newName: String): ScalarTypeExtensionDefinition = copy(name = newName)
}
/** An SDL `extend schema ...` definition. */
case class SchemaExtensionDefinition(
    operationTypes: Vector[OperationTypeDefinition],
    directives: Vector[Directive] = Vector.empty,
    comments: Vector[Comment] = Vector.empty,
    trailingComments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends TypeSystemExtensionDefinition
    with WithDirectives
    with WithTrailingComments

/** An SDL directive definition, e.g. `directive @skip(if: Boolean!) on FIELD`. */
case class DirectiveDefinition(
    name: String,
    arguments: Vector[InputValueDefinition],
    locations: Vector[DirectiveLocation],
    description: Option[StringValue] = None,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends TypeSystemDefinition
    with WithDescription

/** A single location name in a [[DirectiveDefinition]], e.g. `FIELD` or `OBJECT`. */
case class DirectiveLocation(
    name: String,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends SchemaAstNode

/** The SDL `schema { query: ..., mutation: ... }` definition. */
case class SchemaDefinition(
    operationTypes: Vector[OperationTypeDefinition],
    directives: Vector[Directive] = Vector.empty,
    description: Option[StringValue] = None,
    comments: Vector[Comment] = Vector.empty,
    trailingComments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends TypeSystemDefinition
    with WithDescription
    with WithTrailingComments
    with WithDirectives

/** Maps an operation type (query/mutation/subscription) to its root type in a schema. */
case class OperationTypeDefinition(
    operation: OperationType,
    tpe: NamedType,
    comments: Vector[Comment] = Vector.empty,
    location: Option[AstLocation] = None)
    extends SchemaAstNode
/** A node in the AST of a parsed GraphQL request document. */
sealed trait AstNode {

  /** Location at which this node lexically begins in the GraphQL request source code. */
  def location: Option[AstLocation]

  // Identity-based, so caches keyed by this are per node *instance*, not per equal value.
  def cacheKeyHash: Int = System.identityHashCode(this)

  /** Renders this node as formatted, human-friendly GraphQL source. */
  def renderPretty: String = QueryRenderer.render(this, QueryRenderer.Pretty)

  /** Renders this node as compact GraphQL source. */
  def renderCompact: String = QueryRenderer.render(this, QueryRenderer.Compact)

  /** Traverses this node and its children with the given visitor. */
  def visit(visitor: AstVisitor): this.type =
    AstVisitor.visit(this, visitor)

  /** Traverses this node with enter/leave callbacks. */
  def visit(onEnter: AstNode => VisitorCommand, onLeave: AstNode => VisitorCommand): this.type =
    AstVisitor.visit(this, onEnter, onLeave)

  /** Traverses with a visitor that has access to schema-driven [[TypeInfo]]. */
  def visitAstWithTypeInfo(schema: Schema[_, _])(visitorFn: TypeInfo => AstVisitor): this.type =
    AstVisitor.visitAstWithTypeInfo[this.type](schema, this)(visitorFn)

  /** Traverses while threading a mutable-by-convention state value `state` through the visitor. */
  def visitAstWithState[S](schema: Schema[_, _], state: S)(
      visitorFn: (TypeInfo, S) => AstVisitor): S =
    AstVisitor.visitAstWithState(schema, this, state)(visitorFn)
}
/** An AST node belonging to the schema definition language (SDL). */
sealed trait SchemaAstNode extends AstNode with WithComments

/** A definition that introduces type-system elements (types, directives, schema). */
sealed trait TypeSystemDefinition extends SchemaAstNode with Definition

/** A definition that extends existing type-system elements. */
sealed trait TypeSystemExtensionDefinition extends SchemaAstNode with Definition

/** A named SDL type definition that supports renaming. */
sealed trait TypeDefinition extends TypeSystemDefinition with WithDirectives with WithDescription {
  def name: String

  /** A copy of this definition under `newName`; implementations preserve the concrete subtype. */
  def rename(newName: String): TypeDefinition
}

/** A named SDL type extension that supports renaming. */
sealed trait TypeExtensionDefinition extends TypeSystemExtensionDefinition with WithDirectives {
  def name: String
  def rename(newName: String): TypeExtensionDefinition
}

/** A type extension that carries field definitions (object and interface extensions). */
sealed trait ObjectLikeTypeExtensionDefinition extends TypeExtensionDefinition {
  def fields: Vector[FieldDefinition]
}
object AstNode {

  /** Returns a copy of `node` with every `AstLocation` removed and, optionally, all comments
    * stripped — useful for structural comparison of ASTs parsed from different sources.
    */
  def withoutAstLocations[T <: AstNode](node: T, stripComments: Boolean = false): T = {
    val commentVisitor = Visit[Comment] { _ =>
      if (stripComments) VisitorCommand.Delete else VisitorCommand.Continue
    }
    val locationVisitor =
      VisitAnyField[AstNode, Option[AstLocation]]((_, _) => VisitorCommand.Transform(None))

    visit[AstNode](node, commentVisitor, locationVisitor).asInstanceOf[T]
  }
}
| OlegIlyenko/sangria | modules/core/src/main/scala/sangria/ast/QueryAst.scala | Scala | apache-2.0 | 25,408 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.streams
import akka.actor.ActorSystem
import akka.stream.OverflowStrategy
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl._
import akka.testkit.TestKit
import org.scalatest.flatspec.AsyncFlatSpecLike
import org.scalatest.matchers.should.Matchers
import org.squbs.metrics.MetricsExtension
import java.lang.management.ManagementFactory
import javax.management.ObjectName
import scala.concurrent.duration._
/** Verifies that `DemandSupplyMetrics` publishes accurate upstream/downstream element counters
  * to JMX under various throttling and buffering configurations.
  *
  * NOTE(review): based on these assertions, "<name>-upstream-counter" appears to count elements
  * arriving from upstream and "<name>-downstream-counter" demand signals from downstream —
  * confirm against the `DemandSupplyMetrics` stage itself.
  */
class DemandSupplyMetricsSpec extends TestKit(ActorSystem("DemandSupplyMetricsSpec")) with AsyncFlatSpecLike
  with Matchers {

  it should "throttle demand" in {
    val name = "test1"
    val testFlow = DemandSupplyMetrics[Int](name)(system)
    val result = Source(1 to 10).via(testFlow).throttle(1, 1.second, 1, Shaping).runWith(Sink.ignore)
    result map { _ =>
      // All 10 elements are eventually demanded despite the 1/sec throttle.
      assert(jmxValue[Long](s"$name-downstream-counter", "Count").get == 10)
    }
  }

  it should "require demand >= supply" in {
    val name = "test2"
    val testFlow = DemandSupplyMetrics[Int](name)(system)
    val result = Source(1 to 100000).via(testFlow).throttle(10000, 1.second, 10000, Shaping).runWith(Sink.ignore)
    result map { _ =>
      assert(jmxValue[Long](s"$name-upstream-counter", "Count").get == 100000)
      // Downstream may signal one extra demand after the final element — hence the +1 tolerance.
      val downstreamCount = jmxValue[Long](s"$name-downstream-counter", "Count").get
      assert(downstreamCount == 100000 || downstreamCount == 100001)
    }
  }

  it should "report metrics correctly with buffering and throttling" in {
    val name = "test3"
    // Two metric stages bracket the throttle/buffer section to compare counts before and after.
    val preFlow = DemandSupplyMetrics[Int](s"$name-pre")(system)
    val postFlow = DemandSupplyMetrics[Int](s"$name-post")(system)
    val result = Source(1 to 100)
      .via(preFlow)
      .throttle(10, 1.second, 10, Shaping)
      .buffer(2, OverflowStrategy.backpressure)
      .throttle(20, 1.second, 20, Shaping)
      .via(postFlow)
      .runWith(Sink.ignore)
    result map { _ =>
      assert(jmxValue[Long](s"$name-pre-upstream-counter", "Count").get == 100)
      assert(jmxValue[Long](s"$name-pre-downstream-counter", "Count").get == 100)
      assert(jmxValue[Long](s"$name-post-upstream-counter", "Count").get == 100)
      // The post stage observes one extra demand signal after the last element.
      assert(jmxValue[Long](s"$name-post-downstream-counter", "Count").get == 101)
    }
  }

  it should "report metrics correctly with buffering and upstream throttle" in {
    val name = "test4"
    val preFlow = DemandSupplyMetrics[Int](s"$name-pre")(system)
    val postFlow = DemandSupplyMetrics[Int](s"$name-post")(system)
    val result = Source(1 to 100)
      .via(preFlow)
      .buffer(2, OverflowStrategy.backpressure)
      .throttle(20, 1.second, 20, Shaping)
      .via(postFlow)
      .runWith(Sink.ignore)
    result map { _ =>
      assert(jmxValue[Long](s"$name-pre-upstream-counter", "Count").get == 100)
      assert(jmxValue[Long](s"$name-pre-downstream-counter", "Count").get == 100)
      assert(jmxValue[Long](s"$name-post-upstream-counter", "Count").get == 100)
      assert(jmxValue[Long](s"$name-post-downstream-counter", "Count").get == 101)
    }
  }

  it should "report metrics correctly with multiple materializations" in {
    val name = "test5"
    val dsMetric = DemandSupplyMetrics[Int](s"$name")(system)
    val flow = Source(1 to 10).via(dsMetric)
    // Both materializations of the same blueprint feed the same JMX counters, so the totals
    // accumulate across runs (>= rather than ==, since extra demand signals may be counted).
    val f1 = flow.runWith(Sink.ignore)
    val f2 = flow.runWith(Sink.ignore)
    val lf = for(f1Result <- f1 ; f2Result <- f2) yield(f1Result, f2Result)
    lf map { _ =>
      assert(jmxValue[Long](s"$name-upstream-counter", "Count").get >= 20)
      assert(jmxValue[Long](s"$name-downstream-counter", "Count").get >= 20)
    }
  }

  /** Reads attribute `key` of the JMX MBean named `beanName` in this system's metrics domain. */
  def jmxValue[T](beanName: String, key: String): Option[T] = {
    val oName =
      ObjectName.getInstance(s"${MetricsExtension(system).Domain}:name=${MetricsExtension(system).Domain}.$beanName")
    Option(ManagementFactory.getPlatformMBeanServer.getAttribute(oName, key)).map(_.asInstanceOf[T])
  }
}
| akara/squbs | squbs-ext/src/test/scala/org/squbs/streams/DemandSupplyMetricsSpec.scala | Scala | apache-2.0 | 4,464 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without deeper analysis.