code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.rocketfuel.sdbc.base.jdbc
/**
 * Aggregates the stock JDBC `Updater` mixins for the common SQL types:
 * numeric primitives, byte arrays, big decimals (Java and Scala),
 * java.sql temporal types, booleans, strings, UUIDs and input streams.
 * Mix this in to get updatable-result-set support for all of them at once.
 *
 * NOTE(review): trait linearization order can matter if any of these
 * traits declare overlapping implicits, so the mixin order is preserved.
 */
trait DefaultUpdaters
extends AnyRefUpdater
with LongUpdater
with IntUpdater
with ShortUpdater
with ByteUpdater
with BytesUpdater
with DoubleUpdater
with FloatUpdater
with JavaBigDecimalUpdater
with ScalaBigDecimalUpdater
with TimestampUpdater
with DateUpdater
with TimeUpdater
with BooleanUpdater
with StringUpdater
with UUIDUpdater
with InputStreamUpdater
with UpdateReader
/**
 * All of `DefaultUpdaters` plus updaters for the `java.time` types
 * introduced in Java 8: LocalDateTime, Instant, LocalDate and LocalTime.
 */
trait Java8DefaultUpdaters
extends DefaultUpdaters
with LocalDateTimeUpdater
with InstantUpdater
with LocalDateUpdater
with LocalTimeUpdater
| wdacom/sdbc | jdbc/src/main/scala/com/rocketfuel/sdbc/base/jdbc/DefaultUpdaters.scala | Scala | bsd-3-clause | 603 |
import sbt._
import sbt.Keys._
import com.typesafe.sbt.SbtScalariform._
// sbt build definition for the sbt2nix plugin (sbt 0.12/0.13 `Build`-trait style).
object Build extends Build {
  // The plugin project itself, rooted in the "plugin" directory.
  lazy val root = Project(
    "sbt2nix",
    file("plugin"),
    settings = commonSettings ++ Seq(
      name := "sbt2nix",
      libraryDependencies ++= Seq(
        "commons-codec" % "commons-codec" % "1.6",
        "org.scalaz" %% "scalaz-core" % "7.0.2",
        "org.scalaz" %% "scalaz-effect" % "7.0.2")
    )
  )
  // Settings shared by every project in this build: scalariform formatting,
  // plugin packaging, and sbt/Scala version wiring for cross-building.
  def commonSettings =
    Defaults.defaultSettings ++
      scalariformSettings ++
      Seq(
        organization := "sbt2nix",
        scalacOptions ++= Seq("-unchecked", "-deprecation"),
        sbtPlugin := true,
        publishMavenStyle := false,
        // Allow overriding the targeted sbt version via -Dsbt.build.version
        // (falls back to the version sbt itself is running on).
        sbtVersion in GlobalScope <<= (sbtVersion in GlobalScope) { sbtVersion =>
          System.getProperty("sbt.build.version", sbtVersion)
        },
        // Pick the Scala version matching the targeted sbt series
        // (sbt 0.13.x is built on 2.10, sbt 0.12.x on 2.9).
        scalaVersion <<= (sbtVersion in GlobalScope) {
          case sbt013 if sbt013.startsWith("0.13.") => "2.10.4"
          case sbt012 if sbt012.startsWith("0.12.") => "2.9.3"
          case _ => "2.9.3"
        },
        // Pin the sbt dependency to the (possibly overridden) sbt version above.
        sbtDependency in GlobalScope <<= (sbtDependency in GlobalScope, sbtVersion in GlobalScope) { (dep, sbtVersion) =>
          dep.copy(revision = sbtVersion)
        },
        // Plugins are published without doc/source jars.
        publishArtifact in (Compile, packageDoc) := false,
        publishArtifact in (Compile, packageSrc) := false,
        resolvers := Seq(Resolver.sonatypeRepo("releases"))
      )
}
| charleso/sbt2nix | project/Build.scala | Scala | mit | 1,378 |
package scalding
import sbt._
import Keys._
import sbtassembly.Plugin._
import AssemblyKeys._
import com.typesafe.tools.mima.plugin.MimaPlugin.mimaDefaultSettings
import com.typesafe.tools.mima.plugin.MimaKeys._
import scala.collection.JavaConverters._
// sbt build definition for Scalding (sbt 0.13-era `Build`-trait style).
// Declares the shared settings, the aggregating root project, and one
// sub-project per Scalding module plus the Java-only "maple" project.
object ScaldingBuild extends Build {
  // Diagnostic task: prints every resolved compile-scope dependency jar.
  val printDependencyClasspath = taskKey[Unit]("Prints location of the dependencies")
  // Settings mixed into every project: compiler/JVM target levels, common
  // test dependencies, resolvers, publishing, and sbt-assembly configuration.
  val sharedSettings = Project.defaultSettings ++ assemblySettings ++ Seq(
    organization := "com.twitter",
    //TODO: Change to 2.10.* when Twitter moves to Scala 2.10 internally
    scalaVersion := "2.9.3",
    crossScalaVersions := Seq("2.9.3", "2.10.3"),
    javacOptions ++= Seq("-source", "1.6", "-target", "1.6"),
    javacOptions in doc := Seq("-source", "1.6"),
    libraryDependencies ++= Seq(
      "org.scalacheck" %% "scalacheck" % "1.10.0" % "test",
      "org.scala-tools.testing" %% "specs" % "1.6.9" % "test",
      "org.mockito" % "mockito-all" % "1.8.5" % "test"
    ),
    resolvers ++= Seq(
      "snapshots" at "http://oss.sonatype.org/content/repositories/snapshots",
      "releases" at "http://oss.sonatype.org/content/repositories/releases",
      "Concurrent Maven Repo" at "http://conjars.org/repo",
      "Clojars Repository" at "http://clojars.org/repo",
      "Twitter Maven" at "http://maven.twttr.com"
    ),
    printDependencyClasspath := {
      val cp = (dependencyClasspath in Compile).value
      cp.foreach(f => println(s"${f.metadata.get(moduleID.key)} => ${f.data}"))
    },
    parallelExecution in Test := false,
    scalacOptions ++= Seq("-unchecked", "-deprecation"),
    // Uncomment if you don't want to run all the tests before building assembly
    // test in assembly := {},
    // Publishing options:
    publishMavenStyle := true,
    publishArtifact in Test := false,
    pomIncludeRepository := {
      x => false
    },
    // SNAPSHOT versions go to Sonatype snapshots; releases go to staging.
    publishTo <<= version { v =>
      Some(
        if (v.trim.endsWith("SNAPSHOT"))
          Opts.resolver.sonatypeSnapshots
        else
          Opts.resolver.sonatypeStaging
        //"twttr" at "http://artifactory.local.twitter.com/libs-releases-local"
      )
    },
    // Janino includes a broken signature, and is not needed:
    excludedJars in assembly <<= (fullClasspath in assembly) map {
      cp =>
        val excludes = Set("jsp-api-2.1-6.1.14.jar", "jsp-2.1-6.1.14.jar",
          "jasper-compiler-5.5.12.jar", "janino-2.5.16.jar")
        cp filter {
          jar => excludes(jar.data.getName)
        }
    },
    // Some of these files have duplicates, let's ignore:
    mergeStrategy in assembly <<= (mergeStrategy in assembly) {
      (old) => {
        case s if s.endsWith(".class") => MergeStrategy.last
        case s if s.endsWith("project.clj") => MergeStrategy.concat
        case s if s.endsWith(".html") => MergeStrategy.last
        case s if s.endsWith(".dtd") => MergeStrategy.last
        case s if s.endsWith(".xsd") => MergeStrategy.last
        case s if s.endsWith(".jnilib") => MergeStrategy.rename
        case s if s.endsWith("jansi.dll") => MergeStrategy.rename
        case x => old(x)
      }
    },
    // POM metadata required for publishing to Maven Central.
    pomExtra := (
      <url>https://github.com/twitter/scalding</url>
      <licenses>
        <license>
          <name>Apache 2</name>
          <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
          <distribution>repo</distribution>
          <comments>A business-friendly OSS license</comments>
        </license>
      </licenses>
      <scm>
        <url>git@github.com:twitter/scalding.git</url>
        <connection>scm:git:git@github.com:twitter/scalding.git</connection>
      </scm>
      <developers>
        <developer>
          <id>posco</id>
          <name>Oscar Boykin</name>
          <url>http://twitter.com/posco</url>
        </developer>
        <developer>
          <id>avibryant</id>
          <name>Avi Bryant</name>
          <url>http://twitter.com/avibryant</url>
        </developer>
        <developer>
          <id>argyris</id>
          <name>Argyris Zymnis</name>
          <url>http://twitter.com/argyris</url>
        </developer>
      </developers>)
  ) ++ mimaDefaultSettings
  // Root project: compiles nothing itself, just aggregates the modules.
  lazy val scalding = Project(
    id = "scalding",
    base = file("."),
    settings = sharedSettings ++ DocGen.publishSettings
  ).settings(
    test := {},
    publish := {}, // skip publishing for this root project.
    publishLocal := {}
  ).aggregate(
    scaldingArgs,
    scaldingDate,
    scaldingCore,
    scaldingCommons,
    scaldingAvro,
    scaldingParquet,
    scaldingRepl,
    scaldingJson,
    scaldingJdbc,
    maple
  )
  /**
   * This returns the youngest jar we released that is compatible with
   * the current.
   */
  // Modules named here are excluded from MiMa binary-compatibility checks.
  val unreleasedModules = Set[String]()
  def youngestForwardCompatible(subProj: String) =
    Some(subProj)
      .filterNot(unreleasedModules.contains(_))
      .map {
      s => "com.twitter" % ("scalding-" + s + "_2.9.2") % "0.8.5"
    }
  // Creates a standard "scalding-<name>" sub-project wired up for MiMa.
  def module(name: String) = {
    val id = "scalding-%s".format(name)
    Project(id = id, base = file(id), settings = sharedSettings ++ Seq(
      Keys.name := id,
      previousArtifact := youngestForwardCompatible(name))
    )
  }
  lazy val scaldingArgs = module("args")
  lazy val scaldingDate = module("date")
  // Cascading versions are overridable via environment variables (e.g. CI).
  lazy val cascadingVersion =
    System.getenv.asScala.getOrElse("SCALDING_CASCADING_VERSION", "2.5.4")
  lazy val cascadingJDBCVersion =
    System.getenv.asScala.getOrElse("SCALDING_CASCADING_JDBC_VERSION", "2.5.2")
  // Shared dependency versions, referenced by the module definitions below.
  val hadoopVersion = "1.1.2"
  val algebirdVersion = "0.5.0"
  val bijectionVersion = "0.6.2"
  val chillVersion = "0.3.6"
  val slf4jVersion = "1.6.6"
  lazy val scaldingCore = module("core").settings(
    libraryDependencies ++= Seq(
      "cascading" % "cascading-core" % cascadingVersion,
      "cascading" % "cascading-local" % cascadingVersion,
      "cascading" % "cascading-hadoop" % cascadingVersion,
      "com.twitter" %% "chill" % chillVersion,
      "com.twitter" % "chill-hadoop" % chillVersion,
      "com.twitter" % "chill-java" % chillVersion,
      "com.twitter" %% "bijection-core" % bijectionVersion,
      "com.twitter" %% "algebird-core" % algebirdVersion,
      "org.apache.hadoop" % "hadoop-core" % hadoopVersion % "provided",
      "org.slf4j" % "slf4j-api" % slf4jVersion,
      "org.slf4j" % "slf4j-log4j12" % slf4jVersion % "provided"
    )
  ).dependsOn(scaldingArgs, scaldingDate, maple)
  lazy val scaldingCommons = Project(
    id = "scalding-commons",
    base = file("scalding-commons"),
    settings = sharedSettings
  ).settings(
    name := "scalding-commons",
    previousArtifact := Some("com.twitter" % "scalding-commons_2.9.2" % "0.2.0"),
    libraryDependencies ++= Seq(
      "com.backtype" % "dfs-datastores-cascading" % "1.3.4",
      "com.backtype" % "dfs-datastores" % "1.3.4",
      // TODO: split into scalding-protobuf
      "com.google.protobuf" % "protobuf-java" % "2.4.1",
      "com.twitter" %% "bijection-core" % bijectionVersion,
      "com.twitter" %% "algebird-core" % algebirdVersion,
      "com.twitter" %% "chill" % chillVersion,
      "com.twitter.elephantbird" % "elephant-bird-cascading2" % "4.4",
      "com.hadoop.gplcompression" % "hadoop-lzo" % "0.4.16",
      // TODO: split this out into scalding-thrift
      "org.apache.thrift" % "libthrift" % "0.5.0",
      "org.slf4j" % "slf4j-api" % slf4jVersion,
      "org.slf4j" % "slf4j-log4j12" % slf4jVersion % "provided",
      "org.scalacheck" %% "scalacheck" % "1.10.0" % "test",
      "org.scala-tools.testing" %% "specs" % "1.6.9" % "test"
    )
  ).dependsOn(scaldingArgs, scaldingDate, scaldingCore)
  lazy val scaldingAvro = Project(
    id = "scalding-avro",
    base = file("scalding-avro"),
    settings = sharedSettings
  ).settings(
    name := "scalding-avro",
    previousArtifact := Some("com.twitter" % "scalding-avro_2.9.2" % "0.1.0"),
    libraryDependencies ++= Seq(
      "cascading.avro" % "avro-scheme" % "2.1.2",
      "org.apache.avro" % "avro" % "1.7.4",
      "org.slf4j" % "slf4j-api" % slf4jVersion,
      "org.apache.hadoop" % "hadoop-core" % hadoopVersion % "provided",
      "org.slf4j" % "slf4j-log4j12" % slf4jVersion % "test",
      "org.scalacheck" %% "scalacheck" % "1.10.0" % "test",
      "org.scala-tools.testing" %% "specs" % "1.6.9" % "test"
    )
  ).dependsOn(scaldingCore)
  lazy val scaldingParquet = Project(
    id = "scalding-parquet",
    base = file("scalding-parquet"),
    settings = sharedSettings
  ).settings(
    name := "scalding-parquet",
    //previousArtifact := Some("com.twitter" % "scalding-parquet_2.9.2" % "0.1.0"),
    previousArtifact := None,
    libraryDependencies ++= Seq(
      "com.twitter" % "parquet-cascading" % "1.4.0",
      "org.slf4j" % "slf4j-api" % slf4jVersion,
      "org.apache.hadoop" % "hadoop-core" % hadoopVersion % "provided",
      "org.slf4j" % "slf4j-log4j12" % slf4jVersion % "test",
      "org.scalacheck" %% "scalacheck" % "1.10.0" % "test",
      "org.scala-tools.testing" %% "specs" % "1.6.9" % "test"
    )
  ).dependsOn(scaldingCore)
  lazy val scaldingRepl = Project(
    id = "scalding-repl",
    base = file("scalding-repl"),
    settings = sharedSettings
  ).settings(
    name := "scalding-repl",
    previousArtifact := None,
    // jline and scala-compiler must match the project's scalaVersion exactly.
    libraryDependencies <++= (scalaVersion) { scalaVersion => Seq(
      "org.scala-lang" % "jline" % scalaVersion,
      "org.scala-lang" % "scala-compiler" % scalaVersion,
      "org.apache.hadoop" % "hadoop-core" % hadoopVersion % "provided"
    )
    }
  ).dependsOn(scaldingCore)
  lazy val scaldingJson = Project(
    id = "scalding-json",
    base = file("scalding-json"),
    settings = sharedSettings
  ).settings(
    name := "scalding-json",
    previousArtifact := None,
    libraryDependencies <++= (scalaVersion) { scalaVersion => Seq(
      "org.apache.hadoop" % "hadoop-core" % hadoopVersion % "provided",
      "com.fasterxml.jackson.module" %% "jackson-module-scala" % "2.2.3"
    )
    }
  ).dependsOn(scaldingCore)
  lazy val scaldingJdbc = Project(
    id = "scalding-jdbc",
    base = file("scalding-jdbc"),
    settings = sharedSettings
  ).settings(
    name := "scalding-jdbc",
    previousArtifact := None,
    libraryDependencies <++= (scalaVersion) { scalaVersion => Seq(
      "org.apache.hadoop" % "hadoop-core" % hadoopVersion % "provided",
      "cascading" % "cascading-jdbc-core" % cascadingJDBCVersion
    )
    }
  ).dependsOn(scaldingCore)
  // maple is pure Java: no Scala cross-versioning and no scala-library dep.
  lazy val maple = Project(
    id = "maple",
    base = file("maple"),
    settings = sharedSettings
  ).settings(
    name := "maple",
    previousArtifact := None,
    crossPaths := false,
    autoScalaLibrary := false,
    libraryDependencies <++= (scalaVersion) { scalaVersion => Seq(
      "org.apache.hadoop" % "hadoop-core" % hadoopVersion % "provided",
      "org.apache.hbase" % "hbase" % "0.94.5" % "provided",
      "cascading" % "cascading-hadoop" % cascadingVersion
    )
    }
  )
}
| afsalthaj/scalding | project/Build.scala | Scala | apache-2.0 | 10,977 |
package mesosphere.marathon
package api
import com.fasterxml.jackson.core.JsonParseException
import com.fasterxml.jackson.databind.JsonMappingException
import mesosphere.UnitTest
import mesosphere.marathon.api.v2.Validation._
import mesosphere.marathon.api.v2.ValidationHelper
import mesosphere.marathon.core.plugin.PluginManager
import mesosphere.marathon.raml.App
import mesosphere.marathon.state.{AbsolutePathId, AppDefinition}
import play.api.libs.json.{JsObject, JsResultException, Json}
/**
 * Verifies that MarathonExceptionMapper translates JSON parsing/mapping
 * failures and validation errors into HTTP responses carrying the expected
 * status code and JSON error payload.
 */
class MarathonExceptionMapperTest extends UnitTest {
  implicit lazy val validAppDefinition =
    AppDefinition.validAppDefinition(Set.empty[String], ValidationHelper.roleSettings())(PluginManager.None)
  "MarathonExceptionMapper" should {
    // A structurally invalid field (numeric id) must yield 422 with per-path details.
    "Render js result exception correctly" in {
      Given("A JsResultException, from an invalid json to object Reads")
      val thrown = intercept[JsResultException] { Json.parse("""{"id":123}""").as[App] }
      val exceptionMapper = new MarathonExceptionMapper()
      When("The mapper creates a response from this exception")
      val result = exceptionMapper.toResponse(thrown)
      Then("The correct response is created")
      result.getStatus should be(422)
      val body = Json.parse(result.getEntity.asInstanceOf[String])
      (body \\ "message").as[String] should be("Invalid JSON")
      val detailObjects = (body \\ "details").as[Seq[JsObject]]
      detailObjects should have size 1
      val detail = detailObjects.head
      (detail \\ "path").as[String] should be("/id")
      val detailErrors = (detail \\ "errors").as[Seq[String]]
      detailErrors should have size 1
      detailErrors.head should be("error.expected.jsstring")
    }
    // Syntactically broken JSON must yield 400 with the parser's message verbatim.
    "Render json parse exception correctly" in {
      Given("A JsonParseException, from an invalid json to object Reads")
      val thrown = intercept[JsonParseException] { Json.parse("""{"id":"/test"""").as[App] }
      val exceptionMapper = new MarathonExceptionMapper()
      When("The mapper creates a response from this exception")
      val result = exceptionMapper.toResponse(thrown)
      Then("The correct response is created")
      result.getStatus should be(400)
      val body = Json.parse(result.getEntity.asInstanceOf[String])
      (body \\ "message").as[String] should be("Invalid JSON")
      (body \\ "details").as[String] should be(
        """Unexpected end-of-input: expected close marker for Object (start marker at [Source: (String)"{"id":"/test""; line: 1, column: 1])"""
      )
    }
    // Completely empty input must yield 400 with a "specify JSON" hint.
    "Render json mapping exception correctly" in {
      Given("A JsonMappingException, from an invalid json to object Reads")
      val thrown = intercept[JsonMappingException] { Json.parse("").as[App] }
      val exceptionMapper = new MarathonExceptionMapper()
      When("The mapper creates a response from this exception")
      val result = exceptionMapper.toResponse(thrown)
      Then("The correct response is created")
      result.getStatus should be(400)
      val body = Json.parse(result.getEntity.asInstanceOf[String])
      (body \\ "message").as[String] should be("Please specify data in JSON format")
      (body \\ "details").as[String] should be("No content to map due to end-of-input\\n at [Source: (String)\\"\\"; line: 1, column: 0]")
    }
    // Domain-validation failures must yield 422 with per-path error messages.
    "Render ConstraintValidationException correctly" in {
      Given("A ConstraintValidationException from an invalid app")
      val thrown = intercept[ValidationFailedException] { validateOrThrow(AppDefinition(id = AbsolutePathId("/test"), role = "*")) }
      val exceptionMapper = new MarathonExceptionMapper()
      When("The mapper creates a response from this exception")
      val result = exceptionMapper.toResponse(thrown)
      Then("The correct response is created")
      result.getStatus should be(422)
      val body = Json.parse(result.getEntity.asInstanceOf[String])
      (body \\ "message").as[String] should be("Object is not valid")
      val validationErrors = (body \\ "details").as[Seq[JsObject]]
      validationErrors should have size 1
      val firstValidationError = validationErrors.head
      (firstValidationError \\ "path").as[String] should be("/")
      val messages = (firstValidationError \\ "errors").as[Seq[String]]
      messages.head should be("AppDefinition must either contain one of 'cmd' or 'args', and/or a 'container'.")
    }
  }
}
| mesosphere/marathon | src/test/scala/mesosphere/marathon/api/MarathonExceptionMapperTest.scala | Scala | apache-2.0 | 4,365 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.{Equality, NormalizingEquality, Every}
import org.scalactic.StringNormalizations._
import SharedHelpers._
import FailureMessages.decorateToStringValue
import Matchers._
class EveryShouldContainSpec extends Spec {
object `a List` {
val xs: Every[String] = Every("hi", "hi", "hi")
val caseLists: Every[String] = Every("tell", "them", "Hi")
object `when used with contain (value) syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
xs should contain ("hi")
val e1 = intercept[TestFailedException] {
xs should contain ("ho")
}
e1.message.get should be (Resources("didNotContainExpectedElement", decorateToStringValue(xs), "\"ho\""))
e1.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 4)
}
def `should use the implicit Equality in scope` {
xs should contain ("hi")
intercept[TestFailedException] {
xs should contain ("ho")
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
xs should contain ("ho")
intercept[TestFailedException] {
xs should contain ("hi")
}
}
def `should use an explicitly provided Equality` {
intercept[TestFailedException] {
caseLists should contain ("HI")
}
(caseLists should contain ("HI")) (decided by defaultEquality afterBeing lowerCased)
(caseLists should contain ("HI")) (after being lowerCased)
(caseLists should contain ("HI ")) (after being lowerCased and trimmed)
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
(xs should contain ("hi")) (decided by defaultEquality[String])
}
@Ignore def `should minimize normalization if an implicit NormalizingEquality is in scope` {
intercept[TestFailedException] {
caseLists should contain ("HI")
}
var normalizedInvokedCount = 0
implicit val e = new NormalizingEquality[String] {
def normalized(s: String): String = {
normalizedInvokedCount += 1
s.toLowerCase
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any) =
b match {
case s: String => normalized(s)
case _ => b
}
}
caseLists should contain ("HI")
normalizedInvokedCount should be (4)
}
}
object `when used with not contain value syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
xs should not contain "ho"
val e3 = intercept[TestFailedException] {
xs should not contain "hi"
}
e3.message.get should be (Resources("containedExpectedElement", decorateToStringValue(xs), "\"hi\""))
e3.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e3.failedCodeLineNumber.get should be (thisLineNumber - 4)
}
def `should use the implicit Equality in scope` {
xs should not contain "ho"
intercept[TestFailedException] {
xs should not contain "hi"
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
xs should not contain "hi"
intercept[TestFailedException] {
xs should not contain "ho"
}
}
def `should use an explicitly provided Equality` {
caseLists should not contain "HI"
caseLists should not contain "HI "
(caseLists should not contain "HI ") (decided by defaultEquality afterBeing lowerCased)
(caseLists should not contain "HI ") (after being lowerCased)
intercept[TestFailedException] {
(caseLists should not contain "HI") (decided by defaultEquality afterBeing lowerCased)
}
intercept[TestFailedException] {
(caseLists should not contain "HI ") (after being lowerCased and trimmed)
}
}
@Ignore def `should minimize normalization if an implicit NormalizingEquality is in scope` {
caseLists should not contain "HI"
var normalizedInvokedCount = 0
implicit val e = new NormalizingEquality[String] {
def normalized(s: String): String = {
normalizedInvokedCount += 1
s.toLowerCase
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any) =
b match {
case s: String => normalized(s)
case _ => b
}
}
intercept[TestFailedException] {
caseLists should not contain "HI"
}
normalizedInvokedCount should be (4)
}
}
object `when used with not (contain (value)) syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
xs should not (contain ("ho"))
val e3 = intercept[TestFailedException] {
xs should not (contain ("hi"))
}
e3.message.get should be (Resources("containedExpectedElement", decorateToStringValue(xs), "\"hi\""))
e3.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e3.failedCodeLineNumber.get should be (thisLineNumber - 4)
}
def `should use the implicit Equality in scope` {
xs should not (contain ("ho"))
intercept[TestFailedException] {
xs should not (contain ("hi"))
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
xs should not (contain ("hi"))
intercept[TestFailedException] {
xs should not (contain ("ho"))
}
}
def `should use an explicitly provided Equality` {
caseLists should not (contain ("HI"))
caseLists should not (contain ("HI "))
(caseLists should not (contain ("HI "))) (decided by defaultEquality afterBeing lowerCased)
(caseLists should not (contain ("HI "))) (after being lowerCased)
intercept[TestFailedException] {
(caseLists should not (contain ("HI"))) (decided by defaultEquality afterBeing lowerCased)
}
intercept[TestFailedException] {
(caseLists should not (contain ("HI"))) (after being lowerCased)
}
intercept[TestFailedException] {
(caseLists should not (contain ("HI "))) (after being lowerCased and trimmed)
}
}
@Ignore def `should minimize normalization if an implicit NormalizingEquality is in scope` {
caseLists should not (contain ("HI"))
var normalizedInvokedCount = 0
implicit val e = new NormalizingEquality[String] {
def normalized(s: String): String = {
normalizedInvokedCount += 1
s.toLowerCase
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any) =
b match {
case s: String => normalized(s)
case _ => b
}
}
intercept[TestFailedException] {
caseLists should not (contain ("HI"))
}
normalizedInvokedCount should be (4)
}
}
object `when used with (not contain value) syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
xs should (not contain "ho")
val e3 = intercept[TestFailedException] {
xs should (not contain "hi")
}
e3.message.get should be (Resources("containedExpectedElement", decorateToStringValue(xs), "\"hi\""))
e3.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e3.failedCodeLineNumber.get should be (thisLineNumber - 4)
}
def `should use the implicit Equality in scope` {
xs should (not contain "ho")
intercept[TestFailedException] {
xs should (not contain "hi")
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
xs should (not contain "hi")
intercept[TestFailedException] {
xs should (not contain "ho")
}
}
def `should use an explicitly provided Equality` {
caseLists should (not contain "HI")
caseLists should (not contain "HI ")
(caseLists should (not contain "HI ")) (decided by defaultEquality afterBeing lowerCased)
(caseLists should (not contain "HI ")) (after being lowerCased)
intercept[TestFailedException] {
(caseLists should (not contain "HI")) (decided by defaultEquality afterBeing lowerCased)
}
intercept[TestFailedException] {
(caseLists should (not contain "HI")) (after being lowerCased)
}
intercept[TestFailedException] {
(caseLists should (not contain "HI ")) (after being lowerCased and trimmed)
}
}
@Ignore def `should minimize normalization if an implicit NormalizingEquality is in scope` {
caseLists should (not contain "HI")
var normalizedInvokedCount = 0
implicit val e = new NormalizingEquality[String] {
def normalized(s: String): String = {
normalizedInvokedCount += 1
s.toLowerCase
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any) =
b match {
case s: String => normalized(s)
case _ => b
}
}
intercept[TestFailedException] {
caseLists should (not contain "HI")
}
normalizedInvokedCount should be (4)
}
}
object `when used with shouldNot contain value syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
xs shouldNot contain ("ho")
val e3 = intercept[TestFailedException] {
xs shouldNot contain ("hi")
}
e3.message.get should be (Resources("containedExpectedElement", decorateToStringValue(xs), "\"hi\""))
e3.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e3.failedCodeLineNumber.get should be (thisLineNumber - 4)
}
def `should use the implicit Equality in scope` {
xs shouldNot contain ("ho")
intercept[TestFailedException] {
xs shouldNot contain ("hi")
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
xs shouldNot contain ("hi")
intercept[TestFailedException] {
xs shouldNot contain ("ho")
}
}
def `should use an explicitly provided Equality` {
caseLists shouldNot contain ("HI")
caseLists shouldNot contain ("HI ")
(caseLists shouldNot contain ("HI ")) (decided by defaultEquality afterBeing lowerCased)
(caseLists shouldNot contain ("HI ")) (after being lowerCased)
intercept[TestFailedException] {
(caseLists shouldNot contain ("HI")) (decided by defaultEquality afterBeing lowerCased)
}
intercept[TestFailedException] {
(caseLists shouldNot contain ("HI ")) (after being lowerCased and trimmed)
}
}
@Ignore def `should minimize normalization if an implicit NormalizingEquality is in scope` {
caseLists shouldNot contain ("HI")
var normalizedInvokedCount = 0
implicit val e = new NormalizingEquality[String] {
def normalized(s: String): String = {
normalizedInvokedCount += 1
s.toLowerCase
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any) =
b match {
case s: String => normalized(s)
case _ => b
}
}
intercept[TestFailedException] {
caseLists shouldNot contain ("HI")
}
normalizedInvokedCount should be (4)
}
}
object `when used with shouldNot (contain (value)) syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
xs shouldNot (contain ("ho"))
val e3 = intercept[TestFailedException] {
xs shouldNot (contain ("hi"))
}
e3.message.get should be (Resources("containedExpectedElement", decorateToStringValue(xs), "\"hi\""))
e3.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e3.failedCodeLineNumber.get should be (thisLineNumber - 4)
}
def `should use the implicit Equality in scope` {
xs shouldNot (contain ("ho"))
intercept[TestFailedException] {
xs shouldNot (contain ("hi"))
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
xs shouldNot (contain ("hi"))
intercept[TestFailedException] {
xs shouldNot (contain ("ho"))
}
}
def `should use an explicitly provided Equality` {
caseLists shouldNot (contain ("HI"))
caseLists shouldNot (contain ("HI "))
(caseLists shouldNot (contain ("HI "))) (decided by defaultEquality afterBeing lowerCased)
(caseLists shouldNot (contain ("HI "))) (after being lowerCased)
intercept[TestFailedException] {
(caseLists shouldNot (contain ("HI"))) (decided by defaultEquality afterBeing lowerCased)
}
intercept[TestFailedException] {
(caseLists shouldNot (contain ("HI"))) (after being lowerCased)
}
intercept[TestFailedException] {
(caseLists shouldNot (contain ("HI "))) (after being lowerCased and trimmed)
}
}
@Ignore def `should minimize normalization if an implicit NormalizingEquality is in scope` {
caseLists shouldNot (contain ("HI"))
var normalizedInvokedCount = 0
implicit val e = new NormalizingEquality[String] {
def normalized(s: String): String = {
normalizedInvokedCount += 1
s.toLowerCase
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any) =
b match {
case s: String => normalized(s)
case _ => b
}
}
intercept[TestFailedException] {
caseLists shouldNot (contain ("HI"))
}
normalizedInvokedCount should be (4)
}
}
}
object `a collection of Lists` {
val list123s: Every[Every[Int]] = Every(Every(1, 2, 3), Every(1, 2, 3), Every(1, 2, 3))
val lists: Every[Every[Int]] = Every(Every(1, 2, 3), Every(1, 2, 3), Every(4, 5, 6))
val hiLists: Every[Every[String]] = Every(Every("hi"), Every("hi"), Every("hi"))
object `when used with contain (value) syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list123s) should contain (1)
atLeast (2, lists) should contain (1)
atMost (2, lists) should contain (4)
no (lists) should contain (7)
val e1 = intercept[TestFailedException] {
all (lists) should contain (1)
}
e1.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \n" +
" at index 2, Many(4, 5, 6) did not contain element 1 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
"in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
val e2 = intercept[TestFailedException] {
all (lists) should not contain (4)
}
e2.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some("'all' inspection failed, because: \n" +
" at index 2, Many(4, 5, 6) contained element 4 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
"in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
val e3 = intercept[TestFailedException] {
all (lists) should contain (1)
}
e3.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e3.failedCodeLineNumber.get should be (thisLineNumber - 3)
e3.message should be (Some("'all' inspection failed, because: \n" +
" at index 2, Many(4, 5, 6) did not contain element 1 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
"in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
}
def `should use the implicit Equality in scope` {
intercept[TestFailedException] {
all (hiLists) should contain ("ho")
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
all (hiLists) should contain ("ho")
intercept[TestFailedException] {
all (hiLists) should contain ("hi")
}
}
def `should use an explicitly provided Equality` {
intercept[TestFailedException] {
all (hiLists) should contain ("HI")
}
intercept[TestFailedException] {
all (hiLists) should contain ("HI ")
}
(all (hiLists) should contain ("HI")) (decided by defaultEquality afterBeing lowerCased)
(all (hiLists) should contain ("HI ")) (after being trimmed and lowerCased)
}
@Ignore def `should minimize normalization if an implicit NormalizingEquality is in scope` {
val hiHeHoLists: List[List[String]] = List(List("hi", "he", "ho"), List("hi", "he", "ho"), List("hi", "he", "ho"))
intercept[TestFailedException] {
all (hiHeHoLists) should contain ("HO")
}
var normalizedInvokedCount = 0
implicit val e = new NormalizingEquality[String] {
def normalized(s: String): String = {
normalizedInvokedCount += 1
s.toLowerCase
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any) =
b match {
case s: String => normalized(s)
case _ => b
}
}
all (hiHeHoLists) should contain ("HO")
normalizedInvokedCount should be (12)
}
}
    // Negated containment without parentheses: `xs should not contain 4`.
    // NOTE: the `thisLineNumber - N` offsets below count source lines inside each method;
    // do not insert lines between an `intercept` and its follow-up assertions.
    object `when used with not contain value syntax` {
      // Happy paths for all/atLeast/atMost/no inspectors, then one failing case whose
      // TestFailedException message and source position are checked exactly.
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        all (list123s) should not contain 4
        atLeast (2, lists) should not contain 4
        atMost (2, lists) should not contain 4
        no (list123s) should not contain 1 // I will recommend against double negatives, but we should test it
        val e1 = intercept[TestFailedException] {
          all (lists) should not contain 6
        }
        e1.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some("'all' inspection failed, because: \n" +
                                   "  at index 2, Many(4, 5, 6) contained element 6 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
                                   "in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
      }
      // The inverted implicit Equality (a != b) must flip which values count as contained.
      def `should use the implicit Equality in scope` {
        all (hiLists) should not contain "ho"
        intercept[TestFailedException] {
          all (hiLists) should not contain "hi"
        }
        implicit val e = new Equality[String] {
          def areEqual(a: String, b: Any): Boolean = a != b
        }
        all (hiLists) should not contain "hi"
        intercept[TestFailedException] {
          all (hiLists) should not contain "ho"
        }
      }
      // Explicit Equality (lowerCased / trimmed) makes the negation fail where default equality passed.
      def `should use an explicitly provided Equality` {
        all (hiLists) should not contain "HI"
        all (hiLists) should not contain "HI "
        intercept[TestFailedException] {
          (all (hiLists) should not contain "HI") (decided by defaultEquality afterBeing lowerCased)
        }
        intercept[TestFailedException] {
          (all (hiLists) should not contain "HI ") (after being trimmed and lowerCased)
        }
      }
    }
    // Same negated-containment contract as above, exercised through the fully
    // parenthesized form `xs should not (contain (4))`.
    // NOTE: `thisLineNumber - N` offsets count lines inside the method; keep them intact.
    object `when used with not (contain (value)) syntax` {
      // Happy paths for the inspectors, then an exact check of the failure message/position.
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        all (list123s) should not (contain (4))
        atLeast (2, lists) should not (contain (4))
        atMost (2, lists) should not (contain (4))
        no (list123s) should not (contain (1)) // I will recommend against double negatives, but we should test it
        val e1 = intercept[TestFailedException] {
          all (lists) should not (contain (6))
        }
        e1.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some("'all' inspection failed, because: \n" +
                                   "  at index 2, Many(4, 5, 6) contained element 6 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
                                   "in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
      }
      // Inverted implicit Equality flips containment for this syntax too.
      def `should use the implicit Equality in scope` {
        all (hiLists) should not (contain ("ho"))
        intercept[TestFailedException] {
          all (hiLists) should not (contain ("hi"))
        }
        implicit val e = new Equality[String] {
          def areEqual(a: String, b: Any): Boolean = a != b
        }
        all (hiLists) should not (contain ("hi"))
        intercept[TestFailedException] {
          all (hiLists) should not (contain ("ho"))
        }
      }
      // Explicit Equality variants of the same negated assertions.
      def `should use an explicitly provided Equality` {
        all (hiLists) should not (contain ("HI"))
        all (hiLists) should not (contain ("HI "))
        intercept[TestFailedException] {
          (all (hiLists) should not (contain ("HI"))) (decided by defaultEquality afterBeing lowerCased)
        }
        intercept[TestFailedException] {
          (all (hiLists) should not (contain ("HI "))) (after being trimmed and lowerCased)
        }
      }
    }
    // Same contract again via `xs should (not contain 4)` — negation inside the parentheses.
    // NOTE: `thisLineNumber - N` offsets count lines inside the method; keep them intact.
    object `when used with (not contain value) syntax` {
      // Happy paths, then exact failure message/position for a violating element.
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        all (list123s) should (not contain 4)
        atLeast (2, lists) should (not contain 4)
        atMost (2, lists) should (not contain 4)
        no (list123s) should (not contain 1) // I will recommend against double negatives, but we should test it
        val e1 = intercept[TestFailedException] {
          all (lists) should (not contain 6)
        }
        e1.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some("'all' inspection failed, because: \n" +
                                   "  at index 2, Many(4, 5, 6) contained element 6 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
                                   "in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
      }
      // Inverted implicit Equality flips containment for this syntax too.
      def `should use the implicit Equality in scope` {
        all (hiLists) should (not contain "ho")
        intercept[TestFailedException] {
          all (hiLists) should (not contain "hi")
        }
        implicit val e = new Equality[String] {
          def areEqual(a: String, b: Any): Boolean = a != b
        }
        all (hiLists) should (not contain "hi")
        intercept[TestFailedException] {
          all (hiLists) should (not contain "ho")
        }
      }
      // Explicit Equality variants of the same negated assertions.
      def `should use an explicitly provided Equality` {
        all (hiLists) should (not contain "HI")
        all (hiLists) should (not contain "HI ")
        intercept[TestFailedException] {
          (all (hiLists) should (not contain "HI")) (decided by defaultEquality afterBeing lowerCased)
        }
        intercept[TestFailedException] {
          (all (hiLists) should (not contain "HI ")) (after being trimmed and lowerCased)
        }
      }
    }
    // Same contract through the `shouldNot contain (value)` spelling.
    // NOTE: `thisLineNumber - N` offsets count lines inside the method; keep them intact.
    object `when used with shouldNot contain value syntax` {
      // Happy paths, then exact failure message/position for a violating element.
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        all (list123s) shouldNot contain (4)
        atLeast (2, lists) shouldNot contain (4)
        atMost (2, lists) shouldNot contain (4)
        no (list123s) shouldNot contain (1) // I will recommend against double negatives, but we should test it
        val e1 = intercept[TestFailedException] {
          all (lists) shouldNot contain (6)
        }
        e1.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some("'all' inspection failed, because: \n" +
                                   "  at index 2, Many(4, 5, 6) contained element 6 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
                                   "in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
      }
      // Inverted implicit Equality flips containment for this syntax too.
      def `should use the implicit Equality in scope` {
        all (hiLists) shouldNot contain ("ho")
        intercept[TestFailedException] {
          all (hiLists) shouldNot contain ("hi")
        }
        implicit val e = new Equality[String] {
          def areEqual(a: String, b: Any): Boolean = a != b
        }
        all (hiLists) shouldNot contain ("hi")
        intercept[TestFailedException] {
          all (hiLists) shouldNot contain ("ho")
        }
      }
      // Explicit Equality variants of the same negated assertions.
      def `should use an explicitly provided Equality` {
        all (hiLists) shouldNot contain ("HI")
        all (hiLists) shouldNot contain ("HI ")
        intercept[TestFailedException] {
          (all (hiLists) shouldNot contain ("HI")) (decided by defaultEquality afterBeing lowerCased)
        }
        intercept[TestFailedException] {
          (all (hiLists) shouldNot contain ("HI ")) (after being trimmed and lowerCased)
        }
      }
    }
    // Same contract through the fully parenthesized `shouldNot (contain (value))` spelling.
    // NOTE: `thisLineNumber - N` offsets count lines inside the method; keep them intact.
    object `when used with shouldNot (contain (value)) syntax` {
      // Happy paths, then exact failure message/position for a violating element.
      def `should do nothing if valid, else throw a TFE with an appropriate error message` {
        all (list123s) shouldNot (contain (4))
        atLeast (2, lists) shouldNot (contain (4))
        atMost (2, lists) shouldNot (contain (4))
        no (list123s) shouldNot (contain (1)) // I will recommend against double negatives, but we should test it
        val e1 = intercept[TestFailedException] {
          all (lists) shouldNot (contain (6))
        }
        e1.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
        e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
        e1.message should be (Some("'all' inspection failed, because: \n" +
                                   "  at index 2, Many(4, 5, 6) contained element 6 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
                                   "in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
      }
      // Inverted implicit Equality flips containment for this syntax too.
      def `should use the implicit Equality in scope` {
        all (hiLists) shouldNot (contain ("ho"))
        intercept[TestFailedException] {
          all (hiLists) shouldNot (contain ("hi"))
        }
        implicit val e = new Equality[String] {
          def areEqual(a: String, b: Any): Boolean = a != b
        }
        all (hiLists) shouldNot (contain ("hi"))
        intercept[TestFailedException] {
          all (hiLists) shouldNot (contain ("ho"))
        }
      }
      // Explicit Equality variants of the same negated assertions.
      def `should use an explicitly provided Equality` {
        all (hiLists) shouldNot (contain ("HI"))
        all (hiLists) shouldNot (contain ("HI "))
        intercept[TestFailedException] {
          (all (hiLists) shouldNot (contain ("HI"))) (decided by defaultEquality afterBeing lowerCased)
        }
        intercept[TestFailedException] {
          (all (hiLists) shouldNot (contain ("HI "))) (after being trimmed and lowerCased)
        }
      }
    }
}
}
| travisbrown/scalatest | src/test/scala/org/scalatest/EveryShouldContainSpec.scala | Scala | apache-2.0 | 28,830 |
package io.github.shogowada.scalajs.reactjs.example.todoappredux
/** Redux-style application state: the todo list plus the currently selected visibility filter. */
// visibilityFilter is a plain String; the set of valid filter names is defined elsewhere — TODO confirm.
case class State(todos: Seq[TodoItem], visibilityFilter: String)
/** One todo entry: numeric id, completion flag, and display text. */
case class TodoItem(id: Int, completed: Boolean, text: String)
| shogowada/scalajs-reactjs | example/todo-app-redux/src/main/scala/io/github/shogowada/scalajs/reactjs/example/todoappredux/Models.scala | Scala | mit | 195 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import scala.collection.mutable
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}
import org.apache.spark.{LocalSparkContext, SparkContext, SparkFunSuite, TestUtils}
import org.apache.spark.scheduler.cluster.ExecutorInfo
/**
* Unit tests for SparkListener that require a local cluster.
*/
class SparkListenerWithClusterSuite extends SparkFunSuite with LocalSparkContext
  with BeforeAndAfter with BeforeAndAfterAll {
  /** Length of time (ms) to wait while draining listener events. */
  val WAIT_TIMEOUT_MILLIS = 10000
  // "local-cluster[2,1,1024]": two executors — matches the two-executor expectations below.
  before {
    sc = new SparkContext("local-cluster[2,1,1024]", "SparkListenerSuite")
  }
  // Registers a listener, runs a trivial job, and verifies one ExecutorAdded
  // event (with totalCores == 1) was delivered for each of the two executors.
  test("SparkListener sends executor added message") {
    val listener = new SaveExecutorInfo
    sc.addSparkListener(listener)
    // This test will check if the number of executors received by "SparkListener" is same as the
    // number of all executors, so we need to wait until all executors are up
    TestUtils.waitUntilExecutorsUp(sc, 2, 60000)
    val rdd1 = sc.parallelize(1 to 100, 4)
    val rdd2 = rdd1.map(_.toString)
    rdd2.setName("Target RDD")
    rdd2.count()
    // Drain the listener bus so all events are delivered before asserting.
    sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
    assert(listener.addedExecutorInfo.size == 2)
    assert(listener.addedExecutorInfo("0").totalCores == 1)
    assert(listener.addedExecutorInfo("1").totalCores == 1)
  }
  /** Test listener that records the ExecutorInfo of every executor-added event, keyed by executor id. */
  private class SaveExecutorInfo extends SparkListener {
    val addedExecutorInfo = mutable.Map[String, ExecutorInfo]()
    override def onExecutorAdded(executor: SparkListenerExecutorAdded) {
      addedExecutorInfo(executor.executorId) = executor.executorInfo
    }
  }
}
| esi-mineset/spark | core/src/test/scala/org/apache/spark/scheduler/SparkListenerWithClusterSuite.scala | Scala | apache-2.0 | 2,447 |
package jigg.pipeline
/*
Copyright 2013-2015 Hiroshi Noji
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import java.io.BufferedReader
import java.io.BufferedWriter
import java.io.InputStreamReader
import java.io.OutputStreamWriter
import java.util.Properties
import scala.util.matching.Regex
import scala.collection.mutable.ArrayBuffer
import scala.xml._
import jigg.util.XMLUtil
/**
 * Annotator that pipes Juman-tokenized sentences through an external KNP
 * process (launched with `-tab -anaphora`) and converts its tab-format output
 * into XML: tokens, basic phrases, chunks, both dependency layers, case
 * relations, coreference chains, predicate-argument relations, and named
 * entities.
 *
 * NOTE(review): the single long-lived KNP process and the mutable index
 * counters make this class stateful and not thread-safe — confirm callers
 * serialize access.
 */
class KNPAnnotator(override val name: String, override val props: Properties) extends SentencesAnnotator {
  @Prop(gloss = "Use this command to launch KNP (-tab and -anaphora are mandatory and automatically added). Version >= 4.12 is assumed.") var command = "knp"
  readProps()
  // For KNP 4.12 the -ne option is not needed (NE tags are present in -tab output).
  lazy private[this] val knpProcess = new java.lang.ProcessBuilder(command, "-tab", "-anaphora").start
  lazy private[this] val knpIn = new BufferedReader(new InputStreamReader(knpProcess.getInputStream, "UTF-8"))
  lazy private[this] val knpOut = new BufferedWriter(new OutputStreamWriter(knpProcess.getOutputStream, "UTF-8"))
  /**
   * Close the external process and the interface
   */
  override def close() {
    knpOut.close()
    knpIn.close()
    knpProcess.destroy()
  }
  // KNP output line classification by leading character: '+' basic phrase, '*' chunk,
  // '#' document/sentence info, "EOS" terminator; anything else is a morpheme (token) line.
  def isBasicPhrase(knpStr:String) : Boolean = knpStr(0) == '+'
  def isChunk(knpStr:String) : Boolean = knpStr(0) == '*'
  def isDocInfo(knpStr:String) : Boolean = knpStr(0) == '#'
  def isEOS(knpStr:String) : Boolean = knpStr == "EOS"
  def isToken(knpStr:String) : Boolean = ! isBasicPhrase(knpStr) && ! isChunk(knpStr) && ! isDocInfo(knpStr) && ! isEOS(knpStr)
  // XML id builders: each annotation id is the sentence id plus a typed suffix and an index.
  private def tid(sindex: String, tindex: Int) = sindex + "_tok" + tindex.toString
  private def cid(sindex: String, cindex: Int) = sindex + "_chu" + cindex
  private def bpid(sindex: String, bpindex: Int) = sindex + "_bp" + bpindex.toString
  private def bpdid(sindex: String, bpdindex: Int) = sindex + "_bpdep" + bpdindex.toString
  private def depid(sindex: String, depindex: Int) = sindex + "_dep" + depindex.toString
  private def crid(sindex: String, crindex:Int) = sindex + "_cr" + crindex.toString
  private def corefid(sindex: String, corefindex:Int) = sindex + "_coref" + corefindex.toString
  private def parid(sindex: String, parindex:Int) = sindex + "_par" + parindex.toString
  private def neid(sindex: String, neindex:Int) = sindex + "_ne" + neindex.toString
  /**
   * Builds the <tokens> element from the morpheme lines of one KNP sentence.
   * Each space-separated morpheme line is mapped onto the token attributes in
   * fixed column order (surface, reading, base, pos, posId, ...).
   */
  def getTokens(knpResult:Seq[String], sid:String) : Node = {
    var tokenIndex = 0
    val nodes = knpResult.filter(s => s(0) != '#' && s(0) != '*' && s(0) != '+' && s != "EOS").map{
      s =>
      val tok = s.split(' ')
      val surf = tok(0)
      val reading = tok(1)
      val base = tok(2)
      val pos = tok(3)
      val posId = tok(4)
      val pos1 = tok(5)
      val pos1Id = tok(6)
      val inflectionType = tok(7)
      val inflectionTypeId = tok(8)
      val inflectionForm = tok(9)
      val inflectionFormId = tok(10)
      val features = tok.drop(11).mkString(" ")
      // KNP output carries no finer pos2/pos3/pronounce fields; None omits the attribute.
      val pos2 = None
      val pos3 = None
      val pronounce = None
      val node = <token
        id={ tid(sid, tokenIndex) }
        surf={ surf }
        pos={ pos }
        pos1={ pos1 }
        pos2={ pos2 }
        pos3={ pos3 }
        inflectionType={ inflectionType }
        inflectionForm={ inflectionForm }
        base={ base }
        reading={ reading }
        pronounce={ pronounce }
        posId={ posId }
        pos1Id={ pos1Id }
        inflectionTypeId={ inflectionTypeId }
        inflectionFormId={ inflectionFormId }
        features={ features }/>
      tokenIndex += 1
      node
    }
    <tokens>{ nodes }</tokens>
  }
  /**
   * Groups token lines between consecutive '+' marker lines into <basicPhrase>
   * elements; each phrase references its member token ids.
   */
  def getBasicPhrases(knpResult:Seq[String], sid:String) = {
    var tokIdx = -1
    val basicPhraseBoundaries = knpResult.zipWithIndex.filter(x=>isBasicPhrase(x._1)).map(_._2) :+ knpResult.size
    val basicPhrases = basicPhraseBoundaries.sliding(2).toSeq.zipWithIndex map { case (Seq(b, e), bpIdx) =>
      val knpStr = knpResult(b)
      val tokenIDs = (b + 1 until e).filter(i=>isToken(knpResult(i))) map { _ =>
        tokIdx += 1
        tid(sid, tokIdx)
      }
      <basicPhrase id={ bpid(sid, bpIdx) } tokens={ tokenIDs.mkString(" ") } features={ knpStr.split(" ")(2) } />
    }
    <basicPhrases>{ basicPhrases }</basicPhrases>
  }
  /**
   * Groups token lines between consecutive '*' marker lines into <chunk>
   * (bunsetsu) elements, mirroring getBasicPhrases.
   */
  def getChunks(knpResult:Seq[String], sid:String) = {
    var tokIdx = -1
    val chunkBoundaries = knpResult.zipWithIndex.filter(x=>isChunk(x._1)).map(_._2) :+ knpResult.size
    val chunks = chunkBoundaries.sliding(2).toSeq.zipWithIndex map { case (Seq(b, e), chunkIdx) =>
      val knpStr = knpResult(b)
      val tokenIDs = (b + 1 until e).filter(i=>isToken(knpResult(i))) map { _ =>
        tokIdx += 1
        tid(sid, tokIdx)
      }
      <chunk id={ cid(sid, chunkIdx) } tokens={ tokenIDs.mkString(" ") } features={ knpStr.split(" ")(2) }/>
    }
    <chunks>{ chunks }</chunks>
  }
  /**
   * Extracts basic-phrase level dependencies from '+' lines whose second field
   * encodes "<head index><label>"; the last phrase ("+ -1D") is the root.
   */
  def getBasicPhraseDependencies(knpResult:Seq[String], sid:String) = {
    val bpdepStrs = knpResult.filter(knpStr => isBasicPhrase(knpStr))
    val bpdepNum = bpdepStrs.length
    var bpdInd = 0
    // init: drop the last entry — it is the root dependency (+ -1D ...)
    val dpdXml = bpdepStrs.init.map{
      bpdepStr =>
      val hd = bpid(sid, bpdepStr.split(" ")(1).init.toInt)
      val dep = bpid(sid, bpdInd)
      val lab = bpdepStr.split(" ")(1).last.toString
      val ans = <basicPhraseDependency id={bpdid(sid, bpdInd)} head={hd} dependent={dep} label={lab} />
      bpdInd += 1
      ans
    }
    <basicPhraseDependencies root={bpid(sid, bpdepNum-1)} >{ dpdXml }</basicPhraseDependencies>
  }
  /**
   * Extracts chunk-level dependencies from '*' lines, analogous to
   * getBasicPhraseDependencies; the last chunk ("* -1D") is the root.
   */
  def getDependencies(knpResult:Seq[String], sid:String) = {
    val depStrs = knpResult.filter(knpStr => isChunk(knpStr))
    val depNum = depStrs.length
    var depInd = 0
    // init: drop the last entry — it is the root dependency (* -1D ...)
    val depXml = depStrs.init.map{
      depStr =>
      val hd = cid(sid, depStr.split(" ")(1).init.toInt)
      val dep = cid(sid, depInd)
      val lab = depStr.split(" ")(1).last.toString
      val ans = <dependency id={depid(sid, depInd)} head={hd} dependent={dep} label={lab} />
      depInd += 1
      ans
    }
    <dependencies root={cid(sid, depNum-1)} >{ depXml }</dependencies>
  }
  // Example of the case-analysis feature emitted by KNP on a basic-phrase line:
  // "格解析結果:走る/はしる:動13:ガ/C/太郎/0/0/1;ヲ/U/-/-/-/-;ニ/U/-/-/-/-;ト/U/-/-/-/-;デ/U/-/-/-/-;カラ/U/-/-/-/-;ヨリ/U/-/-/-/-;マデ/U/-/-/-/-;時間/U/-/-/-/-;外の関係/U/-/-/-/-;ノ/U/-/-/-/-;修飾/U/-/-/-/-;トスル/U/-/-/-/-;ニオク/U/-/-/-/-;ニカンスル/U/-/-/-/-;ニヨル/U/-/-/-/-;ヲフクメル/U/-/-/-/-;ヲハジメル/U/-/-/-/-;ヲノゾク/U/-/-/-/-;ヲツウジル/U/-/-/-/-
  /**
   * Parses the case-analysis feature of each predicate basic phrase into
   * <caseRelation> elements, resolving each filler to the token whose surface
   * form matches the reported filler string.
   */
  def getCaseRelations(knpResult:Seq[String], tokensXml:NodeSeq, bpsXml:NodeSeq, sid:String) = {
    var crInd = 0
    val ans = knpResult.filter(str => isBasicPhrase(str)).zipWithIndex.filter(tpl => tpl._1.contains("<格解析結果:")).map{
      tpl =>
      val str = tpl._1
      val bpInd = tpl._2
      val pattern1 = "<格解析結果:[^>]+>".r
      val sp = pattern1.findFirstIn(str).getOrElse("<>").init.tail.split(":")
      val caseResults = sp(3) // e.g. "ガ/C/太郎/0/0/1;ヲ/..." — one entry per case slot
      val hd = bpid(sid, bpInd)
      caseResults.split(";").map{
        str =>
        val caseResult = str.split("/")
        val lab = caseResult(0)
        val fl = caseResult(1)
        // assumes that sentence ids look like "s0" — the numeric tail is offset by the
        // sentence distance in field 4 to address fillers in preceding sentences.
        val dependBpid = if (caseResult(3) == "-") None else Some(bpid("s" + (sid.tail.toInt - caseResult(4).toInt), caseResult(3).toInt))
        val dependTok : Option[String]= dependBpid.map{
          bpid =>
          // find the token in that basic phrase whose surface form equals caseResult(2)
          val dependBp : Option[NodeSeq] = (bpsXml \\\\ "basicPhrase").find(bp => (bp \\ "@id").toString == bpid)
          val tokenIds : List[String] = dependBp.map(bp => (bp \\ "@tokens").toString.split(' ').toList).getOrElse(List() : List[String])
          tokenIds.find(tokId => ((tokensXml \\ "token").find(tok => (tok \\ "@id").toString == tokId).getOrElse(<error/>) \\ "@surf").toString == caseResult(2))
        }.flatten
        val ansXml = <caseRelation id={crid(sid, crInd)} head={hd} depend={ dependTok.getOrElse("unk") } label={lab} flag={fl} />
        crInd += 1
        ansXml
      }
    }.flatten
    <caseRelations>{ ans }</caseRelations>
  }
  /**
   * Groups basic phrases by the entity id (<EID:n>) in their features into
   * <coreference> chains. LinkedHashMap preserves first-seen entity order.
   */
  def getCoreferences(bpXml:NodeSeq, sid:String) = {
    val eidHash = scala.collection.mutable.LinkedHashMap[Int, String]()
    (bpXml \\ "basicPhrase").map{
      bp =>
      val bpid = (bp \\ "@id").toString
      val feature : String = (bp \\ "@features").text
      val pattern = new Regex("""\\<EID:(\\d+)\\>""", "eid")
      // -1 buckets phrases that carry no EID feature.
      val eid = pattern.findFirstMatchIn(feature).map(m => m.group("eid").toInt).getOrElse(-1)
      if (eidHash.contains(eid)){
        eidHash(eid) = eidHash(eid) + " " + bpid
      }
      else{
        eidHash(eid) = bpid
      }
    }
    val ans = eidHash.map{
      case (eid, bps) =>
        <coreference id={corefid(sid, eid)} basicPhrases={bps} />
    }
    <coreferences>{ ans }</coreferences>
  }
  /**
   * Parses predicate-argument features like
   * <述語項構造:飲む/のむ:動1:ガ/N/麻生太郎/1;ヲ/C/コーヒー/2> into
   * <predicateArgumentRelation> elements linking predicates to coreference entities.
   */
  def getPredicateArgumentRelations(knpResult:Seq[String], sid:String) = {
    var parInd = 0
    // Feature shape: <述語項構造:lemma:type:ガ/N/麻生太郎/1;ヲ/C/コーヒー/2>
    val pattern = new Regex("""\\<述語項構造:[^:]+:[^:]+:(.+)\\>""", "args")
    val ans = knpResult.filter(knpStr => isBasicPhrase(knpStr)).zipWithIndex.filter(tpl => tpl._1.contains("<述語項構造:")).map{
      tpl =>
      val knpStr = tpl._1
      val bpInd = tpl._2
      val argsOpt = pattern.findFirstMatchIn(knpStr).map(m => m.group("args"))
      argsOpt.map{
        args =>
        args.split(";").map{
          arg =>
          val sp = arg.split("/")
          val label = sp(0)
          val flag = sp(1)
          //val name = sp(2)
          val eid = sp(3).toInt
          val ans = <predicateArgumentRelation id={parid(sid, parInd)} predicate={bpid(sid, bpInd)} argument={corefid(sid, eid)} label={label} flag={flag} />
          parInd += 1
          ans
        }
      }.getOrElse(NodeSeq.Empty)
    }
    <predicateArgumentRelations>{ ans }</predicateArgumentRelations>
  }
  /**
   * Reassembles <NE:LABEL:TAG> token features (B/I/E/S chunking tags) into
   * <namedEntity> spans; "N" stands in for "no tag" to simplify the transitions.
   */
  def getNamedEntities(knpResult:Seq[String], sid:String) = {
    var neInd = 0
    var lastTag = "N" //for convenience, use "N" as non-tag of "B/I/E/S"
    val tempTokens = new ArrayBuffer[String]
    var tempLabel = ""
    val pattern = new Regex("""\\<NE:([A-Z]+):([BIES])\\>""", "reLabel", "reTag")
    val namedEntities = new ArrayBuffer[Node]
    for (tpl <- knpResult.filter(knpStr => isToken(knpStr)).zipWithIndex){
      val knpStr = tpl._1
      val tokInd = tpl._2
      val (reLabel, reTag) = pattern.findFirstMatchIn(knpStr).map(m => (m.group("reLabel"), m.group("reTag"))).getOrElse(("", "N"))
      // entity opens (B or S after no entity)
      if ((lastTag == "N" && reTag == "B") || (lastTag == "N" && reTag == "S")){
        lastTag = reTag
        tempTokens += tid(sid, tokInd)
        tempLabel = reLabel
      }
      // entity closed on the previous token — flush it
      else if((lastTag == "S" && reTag == "N") || (lastTag == "B" && reTag == "N") || (lastTag == "E" && reTag == "N")){
        namedEntities += <namedEntity id={neid(sid, neInd)} tokens={tempTokens.mkString(" ")} label={tempLabel} />
        lastTag = reTag
        neInd += 1
        tempTokens.clear
        tempLabel = ""
      }
      // entity continues (I or E inside a span)
      else if((lastTag == "B" && reTag == "I") || (lastTag == "B" && reTag == "E") || (lastTag == "I" && reTag == "E")){
        lastTag = reTag
        tempTokens += tid(sid, tokInd)
      }
    }
    // flush an entity that ends on the last token of the sentence
    if(lastTag == "S" || (lastTag == "E")){
      namedEntities += <namedEntity id={neid(sid, neInd)} tokens={tempTokens.mkString(" ")} label={tempLabel} />
    }
    <namedEntities>{ namedEntities }</namedEntities>
  }
  /**
   * Replaces the sentence's <tokens> with KNP's version and appends all other
   * annotation layers as new children of the sentence node.
   */
  def makeXml(sentence:Node, knpResult:Seq[String], sid:String): Node = {
    val knpTokens = getTokens(knpResult, sid)
    val sentenceWithTokens = XMLUtil.replaceAll(sentence, "tokens")(node => knpTokens)
    val basicPhrases = getBasicPhrases(knpResult, sid)
    XMLUtil.addChild(sentenceWithTokens, Seq[Node](
      basicPhrases,
      getChunks(knpResult, sid),
      getBasicPhraseDependencies(knpResult, sid),
      getDependencies(knpResult, sid),
      getCaseRelations(knpResult, knpTokens, basicPhrases, sid),
      getCoreferences(basicPhrases, sid),
      getPredicateArgumentRelations(knpResult, sid),
      getNamedEntities(knpResult, sid)
    ))
  }
  // Serializes one token node back into a Juman output line; alternatives are prefixed "@ ".
  private[this] def recoverTokenStr(tokenNode: Node, alt: Boolean) : String = (if (alt) "@ " else "") +
    Seq("@surf", "@reading", "@base", "@pos", "@posId", "@pos1", "@pos1Id", "@inflectionType", "@inflectionTypeId", "@inflectionForm", "@inflectionFormId").map(tokenNode \\ _).mkString(" ") +
    " " + (tokenNode \\ "@features").text + "\\n"
  /** Reconstructs the full Juman output (including "@"-alternatives and EOS) to feed into KNP. */
  def recovJumanOutput(jumanTokens:Node) : Seq[String] = {
    val ans = ArrayBuffer.empty[String]
    (jumanTokens \\\\ "token").map{
      tok =>
      ans += recoverTokenStr(tok, false)
      val tokenAltSeq = (tok \\ "tokenAlt")
      if (tokenAltSeq.nonEmpty){
        tokenAltSeq.map{
          tokAlt =>
          ans += recoverTokenStr(tokAlt, true)
        }
      }
    }
    ans += "EOS\\n"
    ans.toSeq
  }
  /** Runs KNP on one sentence's Juman tokens and merges the result into the sentence node. */
  override def newSentenceAnnotation(sentence: Node): Node = {
    // Writes the recovered Juman output to the process and reads back one
    // sentence block, delimited by a "# S-ID" header and an "EOS" line.
    def runKNP(jumanTokens:Node): Seq[String] = {
      knpOut.write(recovJumanOutput(jumanTokens).mkString)
      knpOut.flush()
      Stream.continually(knpIn.readLine()) match {
        case strm @ (begin #:: _) if begin.startsWith("# S-ID") => strm.takeWhile(_ != "EOS").toSeq :+ "EOS"
        case other #:: _ => argumentError("command", s"Something wrong in $name\\n$other\\n...")
      }
    }
    val sindex = (sentence \\ "@id").toString
    val jumanTokens = (sentence \\ "tokens").head
    val knpResult = runKNP(jumanTokens)
    makeXml(sentence, knpResult, sindex)
  }
  override def requires = Set(Requirement.TokenizeWithJuman)
  override def requirementsSatisfied = {
    import Requirement._
    Set(Chunk, Dependency, BasicPhrase, BasicPhraseDependency, Coreference, PredArg, NamedEntity)
  }
}
| sakabar/jigg | src/main/scala/jigg/pipeline/KNPAnnotator.scala | Scala | apache-2.0 | 14,490 |
package epic.logo;
import breeze.linalg._
import breeze.optimize.FirstOrderMinimizer.OptParams
import epic.framework.Example
import org.scalatest.FunSuite
import breeze.math._
/** Smoke test for the L1 max-margin multiclass classifier on a tiny linearly separable problem. */
class ClassifierTest extends FunSuite {
  /** Wraps a sparse feature map and boolean label into a training Example. */
  private def example(label: Boolean, features: Map[Int, Double]) = {
    Example(label, Counter(features))
  }
  test("simple example") {
    val trainingData = Array (
      example(true, Map(0 -> 1.0, 1 -> 1.0, 2 -> 1.0)),
      example(false,Map(0 -> 1.0, 1 -> 1.0, 3 -> 1.0)),
      example(true, Map(1 -> 1.0, 4 -> 1.0))
    )
    val testData = Array(
      example(true, Map(1 -> 1.0,2 -> 1.0))
    )
    // labelConjoiner pairs each feature key with the candidate label so one
    // weight vector covers both classes; large C forces (near-)separation.
    val c = Trainer.trainL1MaxMarginMulticlassClassifier(
      IndexedSeq(false, true), trainingData,
      labelConjoiner = ((label: Boolean, f: Counter[Int, Double]) =>
        Counter(f.toMap.map { case (k, v) => ((label, k), v) })),
      C = 1000.0,
      oneSlackFormulation = false,
      initialConstraint = Counter[(Boolean, Int), Double]())
    // The model must fit the training set exactly and generalize to the held-out point.
    for(x <- trainingData) {
      assert(x.label == c(x.features), x)
    }
    val r = c(testData(0).features)
    assert(r == testData(0).label)
  }
}
| langkilde/epic | src/test/scala/epic/logo/ClassifierTest.scala | Scala | apache-2.0 | 1,141 |
object P14 {
  /**
   * P14 (S-99): duplicate every element of a list.
   *
   * Example: duplicate(List(1, 2, 3)) == List(1, 1, 2, 2, 3, 3)
   *
   * Implemented with flatMap instead of the previous hand-rolled recursion:
   * that recursion was not tail-recursive and overflowed the stack on long
   * inputs, while flatMap is stack-safe and idiomatic. Behavior for all
   * inputs (including Nil) is unchanged.
   *
   * @param l the input list
   * @return a list with each element of `l` repeated twice, in order
   */
  def duplicate[A](l: List[A]): List[A] = l.flatMap(x => List(x, x))
}
| liefswanson/S-99 | src/main/scala/14.scala | Scala | gpl-2.0 | 136 |
package org.apache.solr.handler.tesserae.pos
/** A part-of-speech tag paired with the text it labels; renders as "pos:text". */
case class TaggedPartOfSpeech(pos: String, text: String) {
  // String.valueOf keeps rendering null-safe ("null") for either field.
  override def toString = String.valueOf(pos) + ":" + String.valueOf(text)
}
/** Encodes and decodes tagged strings of the form "p:text" (single-character tag). */
object PartOfSpeech {
  val VERB = "v"
  val NOUN = "n"
  val KEYWORD = "k"
  val UNKNOWN = "u"
  /** Serialize a tag/text pair into its "pos:text" string form. */
  def apply(pos: String, text: String): String =
    TaggedPartOfSpeech(pos, text).toString
  /** Serialize an existing tagged value. */
  def apply(tps: TaggedPartOfSpeech): String =
    tps.toString
  /** Parse a "p:text" string; null and unparsable inputs become UNKNOWN tags. */
  def unapply(str: String): TaggedPartOfSpeech =
    if (str == null)
      TaggedPartOfSpeech(UNKNOWN, null)
    else if (str.length > 1 && str.charAt(1) == ':')
      TaggedPartOfSpeech(str.substring(0, 1), str.substring(2))
    else
      TaggedPartOfSpeech(UNKNOWN, str)
  // String overloads parse first, then delegate to the tagged-value predicates.
  def isVerb(str: String): Boolean =
    isVerb(unapply(str))
  def isVerb(tps: TaggedPartOfSpeech): Boolean =
    tps.pos == VERB
  def isNoun(str: String): Boolean =
    isNoun(unapply(str))
  def isNoun(tps: TaggedPartOfSpeech): Boolean =
    tps.pos == NOUN
  def isKeyword(str: String): Boolean =
    isKeyword(unapply(str))
  def isKeyword(tps: TaggedPartOfSpeech): Boolean =
    tps.pos == KEYWORD
  def isUnknown(str: String): Boolean =
    isUnknown(unapply(str))
  // "Unknown" means not one of the three recognized tags — not only the literal "u".
  def isUnknown(tps: TaggedPartOfSpeech): Boolean =
    tps.pos != VERB && tps.pos != NOUN && tps.pos != KEYWORD
}
| eberle1080/tesserae-ng | text-analysis/src/main/scala/pos/PartOfSpeech.scala | Scala | bsd-2-clause | 1,323 |
package test.roundeights.hasher
import org.specs2.mutable._
import com.roundeights.hasher.{Algo, Hasher}
import scala.io.{Codec, Source}
/**
 * Exercises Algo.tap: a pass-through wrapper over a Stream/Reader/Source that
 * hashes the bytes as they are read, so the consumer sees the original data
 * and the tap yields the digest afterwards.
 */
class TapTest extends Specification {
  /** Reads all data through a tap and checks both the pass-through content and the exact hex hash. */
  def hashTest ( algo: Algo, data: TestData, hash: String ) = {
    "Using " + algo + ", the Tap.hash method" in {
      "Hash a Stream" in {
        val tap = algo.tap( data.stream )
        val string = Source.fromInputStream(tap)(Codec.UTF8).mkString
        string must_== data.str
        tap.hash.hex must_== hash
      }
      "Hash a Reader" in {
        val tap = algo.tap( data.reader )
        val string = tap.mkString
        string must_== data.str
        tap.hash.hex must_== hash
      }
      "Hash a Source" in {
        val tap = algo.tap( data.source )
        val string = tap.mkString
        string must_== data.str
        tap.hash.hex must_== hash
      }
    }
  }
  /** Same as hashTest but verifies via the `hash=` comparison operator instead of exact hex. */
  def compareTest ( algo: Algo, data: TestData, hash: String ) = {
    "Using " + algo + ", the Tap.hash= method" in {
      "match a Stream" in {
        val tap = algo.tap( data.stream )
        val string = Source.fromInputStream(tap)(Codec.UTF8).mkString
        string must_== data.str
        ( tap hash= hash ) must_== true
      }
      "match a Reader" in {
        val tap = algo.tap( data.reader )
        val string = tap.mkString
        string must_== data.str
        ( tap hash= hash ) must_== true
      }
      "match a Source" in {
        val tap = algo.tap( data.source )
        val string = tap.mkString
        string must_== data.str
        ( tap hash= hash ) must_== true
      }
    }
  }
  // BCrypt produces salted hashes, so we can only assert against the format
  // of the resulting hashes
  def testBCrypt( data: TestData ) {
    "Using BCrypt, the Tap.hash method" in {
      "Hash a Stream" in {
        val tap = Algo.bcrypt.tap( data.stream )
        val string = Source.fromInputStream(tap)(Codec.UTF8).mkString
        string must_== data.str
        tap.hash.hex must beMatching("^[a-zA-Z0-9]{120}$")
      }
      "Hash a Reader" in {
        val tap = Algo.bcrypt.tap( data.reader )
        val string = tap.mkString
        string must_== data.str
        tap.hash.hex must beMatching("^[a-zA-Z0-9]{120}$")
      }
      "Hash a Source" in {
        val tap = Algo.bcrypt.tap( data.source )
        val string = tap.mkString
        string must_== data.str
        tap.hash.hex must beMatching("^[a-zA-Z0-9]{120}$")
      }
    }
  }
  // A simple test
  "Simple plain text data" should {
    testBCrypt( TestData.test )
    TestData.test.runAgainstUnsalted(hashTest)
    TestData.test.runAgainstAll(compareTest)
  }
  // Test an internationalized, multi-byte string
  "Internationalized plain text values" should {
    testBCrypt( TestData.international )
    TestData.international.runAgainstUnsalted(hashTest)
    TestData.international.runAgainstAll(compareTest)
  }
  // Test a string large enough that it blows out the buffers
  "Large values" should {
    testBCrypt( TestData.large )
    TestData.large.runAgainstUnsalted(hashTest)
    TestData.large.runAgainstAll(compareTest)
  }
  // Test hashing a blank string
  "Blank string" should {
    testBCrypt( TestData.blank )
    TestData.blank.runAgainstUnsalted(hashTest)
    TestData.blank.runAgainstAll(compareTest)
  }
}
| Nycto/Hasher | src/test/scala/hasher/TapTest.scala | Scala | mit | 3,814 |
package gitbucket.core.service
import javax.servlet.http.HttpServletRequest
import com.nimbusds.jose.JWSAlgorithm
import com.nimbusds.oauth2.sdk.auth.Secret
import com.nimbusds.oauth2.sdk.id.{ClientID, Issuer}
import gitbucket.core.service.SystemSettingsService.{getOptionValue, _}
import gitbucket.core.util.ConfigUtil._
import gitbucket.core.util.Directory._
import scala.util.Using
trait SystemSettingsService {
  /** Resolves the effective base URL for the current request from the persisted system settings. */
  def baseUrl(implicit request: HttpServletRequest): String = loadSystemSettings().baseUrl(request)
  /**
   * Serializes the given settings into a java.util.Properties object and
   * writes it to the GitBucket configuration file (GitBucketConf).
   * Optional values are written only when present; SMTP/LDAP/OIDC sub-settings
   * are written only when their corresponding feature flag is enabled.
   */
  def saveSystemSettings(settings: SystemSettings): Unit = {
    val props = new java.util.Properties()
    // Trailing slash is stripped so URL concatenation elsewhere stays consistent.
    settings.baseUrl.foreach(x => props.setProperty(BaseURL, x.replaceFirst("/\\\\Z", "")))
    settings.information.foreach(x => props.setProperty(Information, x))
    props.setProperty(AllowAccountRegistration, settings.allowAccountRegistration.toString)
    props.setProperty(AllowAnonymousAccess, settings.allowAnonymousAccess.toString)
    props.setProperty(IsCreateRepoOptionPublic, settings.isCreateRepoOptionPublic.toString)
    props.setProperty(RepositoryOperationCreate, settings.repositoryOperation.create.toString)
    props.setProperty(RepositoryOperationDelete, settings.repositoryOperation.delete.toString)
    props.setProperty(RepositoryOperationRename, settings.repositoryOperation.rename.toString)
    props.setProperty(RepositoryOperationTransfer, settings.repositoryOperation.transfer.toString)
    props.setProperty(RepositoryOperationFork, settings.repositoryOperation.fork.toString)
    props.setProperty(Gravatar, settings.gravatar.toString)
    props.setProperty(Notification, settings.notification.toString)
    props.setProperty(LimitVisibleRepositories, settings.limitVisibleRepositories.toString)
    props.setProperty(SshEnabled, settings.ssh.enabled.toString)
    settings.ssh.bindAddress.foreach { bindAddress =>
      props.setProperty(SshBindAddressHost, bindAddress.host.trim())
      props.setProperty(SshBindAddressPort, bindAddress.port.toString)
    }
    settings.ssh.publicAddress.foreach { publicAddress =>
      props.setProperty(SshPublicAddressHost, publicAddress.host.trim())
      props.setProperty(SshPublicAddressPort, publicAddress.port.toString)
    }
    props.setProperty(UseSMTP, settings.useSMTP.toString)
    // SMTP details are persisted only when the SMTP feature is on.
    if (settings.useSMTP) {
      settings.smtp.foreach { smtp =>
        props.setProperty(SmtpHost, smtp.host)
        smtp.port.foreach(x => props.setProperty(SmtpPort, x.toString))
        smtp.user.foreach(props.setProperty(SmtpUser, _))
        smtp.password.foreach(props.setProperty(SmtpPassword, _))
        smtp.ssl.foreach(x => props.setProperty(SmtpSsl, x.toString))
        smtp.starttls.foreach(x => props.setProperty(SmtpStarttls, x.toString))
        smtp.fromAddress.foreach(props.setProperty(SmtpFromAddress, _))
        smtp.fromName.foreach(props.setProperty(SmtpFromName, _))
      }
    }
    props.setProperty(LdapAuthentication, settings.ldapAuthentication.toString)
    // LDAP details are persisted only when LDAP authentication is on.
    if (settings.ldapAuthentication) {
      settings.ldap.foreach { ldap =>
        props.setProperty(LdapHost, ldap.host)
        ldap.port.foreach(x => props.setProperty(LdapPort, x.toString))
        ldap.bindDN.foreach(x => props.setProperty(LdapBindDN, x))
        ldap.bindPassword.foreach(x => props.setProperty(LdapBindPassword, x))
        props.setProperty(LdapBaseDN, ldap.baseDN)
        props.setProperty(LdapUserNameAttribute, ldap.userNameAttribute)
        ldap.additionalFilterCondition.foreach(x => props.setProperty(LdapAdditionalFilterCondition, x))
        ldap.fullNameAttribute.foreach(x => props.setProperty(LdapFullNameAttribute, x))
        ldap.mailAttribute.foreach(x => props.setProperty(LdapMailAddressAttribute, x))
        ldap.tls.foreach(x => props.setProperty(LdapTls, x.toString))
        ldap.ssl.foreach(x => props.setProperty(LdapSsl, x.toString))
        ldap.keystore.foreach(x => props.setProperty(LdapKeystore, x))
      }
    }
    props.setProperty(OidcAuthentication, settings.oidcAuthentication.toString)
    // OIDC details are persisted only when OIDC authentication is on.
    if (settings.oidcAuthentication) {
      settings.oidc.foreach { oidc =>
        props.setProperty(OidcIssuer, oidc.issuer.getValue)
        props.setProperty(OidcClientId, oidc.clientID.getValue)
        props.setProperty(OidcClientSecret, oidc.clientSecret.getValue)
        oidc.jwsAlgorithm.foreach { x =>
          props.setProperty(OidcJwsAlgorithm, x.getName)
        }
      }
    }
    props.setProperty(SkinName, settings.skinName)
    settings.userDefinedCss.foreach(x => props.setProperty(UserDefinedCss, x))
    props.setProperty(ShowMailAddress, settings.showMailAddress.toString)
    props.setProperty(WebHookBlockPrivateAddress, settings.webHook.blockPrivateAddress.toString)
    props.setProperty(WebHookWhitelist, settings.webHook.whitelist.mkString("\\n"))
    props.setProperty(UploadMaxFileSize, settings.upload.maxFileSize.toString)
    props.setProperty(UploadTimeout, settings.upload.timeout.toString)
    props.setProperty(UploadLargeMaxFileSize, settings.upload.largeMaxFileSize.toString)
    props.setProperty(UploadLargeTimeout, settings.upload.largeTimeout.toString)
    props.setProperty(RepositoryViewerMaxFiles, settings.repositoryViewer.maxFiles.toString)
    // Using.resource closes the stream even if props.store throws.
    Using.resource(new java.io.FileOutputStream(GitBucketConf)) { out =>
      props.store(out, null)
    }
  }
/** Loads the system settings from the `GitBucketConf` file.
  *
  * When the file does not exist, an empty property set is used, so every
  * setting falls back to the defaults applied by `loadSystemSettings(props)`.
  */
def loadSystemSettings(): SystemSettings = {
  val properties = new java.util.Properties()
  if (GitBucketConf.exists) {
    Using.resource(new java.io.FileInputStream(GitBucketConf)) { stream =>
      properties.load(stream)
    }
  }
  loadSystemSettings(properties)
}
/** Builds a [[SystemSettings]] instance from the given properties.
  *
  * Every value is resolved through `getValue`/`getOptionValue`, so entries may
  * come from the properties file or from `getConfigValue` overrides (defined
  * elsewhere), with the literal defaults below used as a last resort.
  */
def loadSystemSettings(props: java.util.Properties): SystemSettings = {
  SystemSettings(
    // strip a trailing slash from the configured base URL
    getOptionValue[String](props, BaseURL, None).map(x => x.replaceFirst("/\\\\Z", "")),
    getOptionValue(props, Information, None),
    getValue(props, AllowAccountRegistration, false),
    getValue(props, AllowAnonymousAccess, true),
    getValue(props, IsCreateRepoOptionPublic, true),
    RepositoryOperation(
      create = getValue(props, RepositoryOperationCreate, true),
      delete = getValue(props, RepositoryOperationDelete, true),
      rename = getValue(props, RepositoryOperationRename, true),
      transfer = getValue(props, RepositoryOperationTransfer, true),
      fork = getValue(props, RepositoryOperationFork, true)
    ),
    getValue(props, Gravatar, false),
    getValue(props, Notification, false),
    getValue(props, LimitVisibleRepositories, false),
    Ssh(
      enabled = getValue(props, SshEnabled, false),
      bindAddress = {
        // try the new-style configuration first
        getOptionValue[String](props, SshBindAddressHost, None)
          .map(h => SshAddress(h, getValue(props, SshBindAddressPort, DefaultSshPort), GenericSshUser))
          .orElse(
            // otherwise try to get old-style configuration
            getOptionValue[String](props, SshHost, None)
              .map(_.trim)
              .map(h => SshAddress(h, getValue(props, SshPort, DefaultSshPort), GenericSshUser))
          )
      },
      publicAddress = getOptionValue[String](props, SshPublicAddressHost, None)
        .map(h => SshAddress(h, getValue(props, SshPublicAddressPort, PublicSshPort), GenericSshUser))
    ),
    getValue(
      props,
      UseSMTP,
      getValue(props, Notification, false)
    ), // handle migration scenario from only notification to useSMTP
    if (getValue(props, UseSMTP, getValue(props, Notification, false))) {
      Some(
        Smtp(
          getValue(props, SmtpHost, ""),
          getOptionValue(props, SmtpPort, Some(DefaultSmtpPort)),
          getOptionValue(props, SmtpUser, None),
          getOptionValue(props, SmtpPassword, None),
          getOptionValue[Boolean](props, SmtpSsl, None),
          getOptionValue[Boolean](props, SmtpStarttls, None),
          getOptionValue(props, SmtpFromAddress, None),
          getOptionValue(props, SmtpFromName, None)
        )
      )
    } else None,
    getValue(props, LdapAuthentication, false),
    // LDAP settings are only materialized when LDAP authentication is enabled
    if (getValue(props, LdapAuthentication, false)) {
      Some(
        Ldap(
          getValue(props, LdapHost, ""),
          getOptionValue(props, LdapPort, Some(DefaultLdapPort)),
          getOptionValue(props, LdapBindDN, None),
          getOptionValue(props, LdapBindPassword, None),
          getValue(props, LdapBaseDN, ""),
          getValue(props, LdapUserNameAttribute, ""),
          getOptionValue(props, LdapAdditionalFilterCondition, None),
          getOptionValue(props, LdapFullNameAttribute, None),
          getOptionValue(props, LdapMailAddressAttribute, None),
          getOptionValue[Boolean](props, LdapTls, None),
          getOptionValue[Boolean](props, LdapSsl, None),
          getOptionValue(props, LdapKeystore, None)
        )
      )
    } else None,
    getValue(props, OidcAuthentication, false),
    // likewise, OIDC settings only when OIDC authentication is enabled
    if (getValue(props, OidcAuthentication, false)) {
      Some(
        OIDC(
          getValue(props, OidcIssuer, ""),
          getValue(props, OidcClientId, ""),
          getValue(props, OidcClientSecret, ""),
          getOptionValue(props, OidcJwsAlgorithm, None)
        )
      )
    } else {
      None
    },
    getValue(props, SkinName, "skin-blue"),
    getOptionValue(props, UserDefinedCss, None),
    getValue(props, ShowMailAddress, false),
    WebHook(getValue(props, WebHookBlockPrivateAddress, false), getSeqValue(props, WebHookWhitelist, "")),
    Upload(
      getValue(props, UploadMaxFileSize, 3 * 1024 * 1024),
      getValue(props, UploadTimeout, 3 * 10000),
      getValue(props, UploadLargeMaxFileSize, 3 * 1024 * 1024),
      getValue(props, UploadLargeTimeout, 3 * 10000)
    ),
    RepositoryViewerSettings(
      getValue(props, RepositoryViewerMaxFiles, 0)
    )
  )
}
}
object SystemSettingsService {
import scala.reflect.ClassTag
// Protocols accepted from the X-Forwarded-Proto header.
private val HttpProtocols = Vector("http", "https")

/** Aggregated system configuration loaded from gitbucket.conf.
  * Grouped sub-settings (SSH, SMTP, LDAP, OIDC, webhook, upload, viewer)
  * live in dedicated case classes below.
  */
case class SystemSettings(
  baseUrl: Option[String],
  information: Option[String],
  allowAccountRegistration: Boolean,
  allowAnonymousAccess: Boolean,
  isCreateRepoOptionPublic: Boolean,
  repositoryOperation: RepositoryOperation,
  gravatar: Boolean,
  notification: Boolean,
  limitVisibleRepositories: Boolean,
  ssh: Ssh,
  useSMTP: Boolean,
  smtp: Option[Smtp],
  ldapAuthentication: Boolean,
  ldap: Option[Ldap],
  oidcAuthentication: Boolean,
  oidc: Option[OIDC],
  skinName: String,
  userDefinedCss: Option[String],
  showMailAddress: Boolean,
  webHook: WebHook,
  upload: Upload,
  repositoryViewer: RepositoryViewerSettings
) {

  /** The effective base URL: the configured value when present, otherwise one
    * derived from the incoming request; any trailing '/' is removed. */
  def baseUrl(request: HttpServletRequest): String =
    baseUrl.getOrElse(parseBaseUrl(request)).stripSuffix("/")

  /** Derives the application base URL from a request, keeping the context path
    * but dropping the servlet path. A valid X-Forwarded-Proto header (http or
    * https) overrides the scheme of the reconstructed URL. */
  def parseBaseUrl(req: HttpServletRequest): String = {
    val requestUrl = req.getRequestURL.toString
    // length of "scheme://host[:port]/context" within the full request URL
    val prefixLength = requestUrl.length - req.getRequestURI.length + req.getContextPath.length
    val base = requestUrl.substring(0, prefixLength).stripSuffix("/")
    val forwardedProto = Option(req.getHeader("X-Forwarded-Proto"))
      .map(_.toLowerCase())
      .filter(HttpProtocols.contains)
    forwardedProto match {
      case Some(proto) => proto + base.dropWhile(_ != ':')
      case None        => base
    }
  }

  /** Address the SSH daemon binds to, if configured. */
  def sshBindAddress: Option[SshAddress] = ssh.bindAddress

  /** Externally advertised SSH address, falling back to the bind address. */
  def sshPublicAddress: Option[SshAddress] = ssh.publicAddress.orElse(ssh.bindAddress)

  /** SSH URL prefix, or None when SSH access is disabled. */
  def sshUrl: Option[String] = ssh.getUrl

  /** SSH clone URL for a repository, or None when SSH access is disabled. */
  def sshUrl(owner: String, name: String): Option[String] = ssh.getUrl(owner, name)
}
/** Per-operation switches controlling which repository operations
  * (create/delete/rename/transfer/fork) are permitted. */
case class RepositoryOperation(
  create: Boolean,
  delete: Boolean,
  rename: Boolean,
  transfer: Boolean,
  fork: Boolean
)
/** SSH access configuration: whether it is enabled, the address the daemon
  * binds to, and the address advertised to users (may differ behind NAT/proxy).
  */
case class Ssh(
  enabled: Boolean,
  bindAddress: Option[SshAddress],
  publicAddress: Option[SshAddress]
) {

  /** SSH URL prefix, preferring the public address over the bind address;
    * None when SSH access is disabled or no address is configured. */
  def getUrl: Option[String] =
    if (!enabled) None
    else publicAddress.orElse(bindAddress).map(_.getUrl)

  /** SSH clone URL for the given repository, with the same address preference;
    * None when SSH access is disabled or no address is configured. */
  def getUrl(owner: String, name: String): Option[String] =
    if (!enabled) None
    else publicAddress.orElse(bindAddress).map(_.getUrl(owner, name))
}
object Ssh {
  /** Factory that defaults the public address to the bind address when no
    * explicit public address is configured. */
  def apply(
    enabled: Boolean,
    bindAddress: Option[SshAddress],
    publicAddress: Option[SshAddress]
  ): Ssh =
    new Ssh(enabled, bindAddress, publicAddress.orElse(bindAddress))
}
/** LDAP authentication settings; optional fields are absent when not
  * configured (defaults are applied where values are read). */
case class Ldap(
  host: String,
  port: Option[Int],
  bindDN: Option[String],
  bindPassword: Option[String],
  baseDN: String,
  userNameAttribute: String,
  additionalFilterCondition: Option[String],
  fullNameAttribute: Option[String],
  mailAttribute: Option[String],
  tls: Option[Boolean],
  ssl: Option[Boolean],
  keystore: Option[String]
)
/** OpenID Connect relying-party configuration (typed wrapper values). */
case class OIDC(issuer: Issuer, clientID: ClientID, clientSecret: Secret, jwsAlgorithm: Option[JWSAlgorithm])
object OIDC {
  /** Builds a typed [[OIDC]] from the raw strings stored in the properties
    * file; the JWS algorithm name is parsed only when present. */
  def apply(issuer: String, clientID: String, clientSecret: String, jwsAlgorithm: Option[String]): OIDC =
    new OIDC(
      new Issuer(issuer),
      new ClientID(clientID),
      new Secret(clientSecret),
      jwsAlgorithm.map(JWSAlgorithm.parse)
    )
}
/** SMTP server settings used for outgoing mail; optional fields are absent
  * when not configured. */
case class Smtp(
  host: String,
  port: Option[Int],
  user: Option[String],
  password: Option[String],
  ssl: Option[Boolean],
  starttls: Option[Boolean],
  fromAddress: Option[String],
  fromName: Option[String]
)
/** Outbound proxy settings with optional credentials.
  * NOTE(review): not referenced elsewhere in this file — confirm callers. */
case class Proxy(
  host: String,
  port: Int,
  user: Option[String],
  password: Option[String]
)
/** An SSH endpoint (host/port) plus the generic ssh user used in clone URLs. */
case class SshAddress(host: String, port: Int, genericUser: String) {

  // True when this address uses the standard public SSH port (22), which can
  // then be omitted from URLs.
  def isDefaultPort: Boolean =
    port == PublicSshPort

  /** URL prefix: "user@host" on the default port, "user@host:port" otherwise. */
  def getUrl: String =
    if (isDefaultPort) {
      s"${genericUser}@${host}"
    } else {
      s"${genericUser}@${host}:${port}"
    }

  /** Repository clone URL: scp-like syntax on the default port, a full
    * ssh:// URL (which can carry a port) otherwise. */
  def getUrl(owner: String, name: String): String =
    if (isDefaultPort) {
      s"${genericUser}@${host}:${owner}/${name}.git"
    } else {
      s"ssh://${genericUser}@${host}:${port}/${owner}/${name}.git"
    }
}
// Web hook delivery restrictions: optionally block private addresses, with a whitelist of exceptions.
case class WebHook(blockPrivateAddress: Boolean, whitelist: Seq[String])
// Size and timeout limits for normal and large uploads (units as interpreted by the upload layer).
case class Upload(maxFileSize: Long, timeout: Long, largeMaxFileSize: Long, largeTimeout: Long)
// Repository viewer limits; maxFiles of 0 is the stored default.
case class RepositoryViewerSettings(maxFiles: Int)
// --- shared defaults -------------------------------------------------------
val GenericSshUser = "git"
val PublicSshPort = 22
val DefaultSshPort = 29418
val DefaultSmtpPort = 25
val DefaultLdapPort = 389

// --- property keys used in gitbucket.conf ----------------------------------
// general
private val BaseURL = "base_url"
private val Information = "information"
private val AllowAccountRegistration = "allow_account_registration"
private val AllowAnonymousAccess = "allow_anonymous_access"
private val IsCreateRepoOptionPublic = "is_create_repository_option_public"
// repository operation switches
private val RepositoryOperationCreate = "repository_operation_create"
private val RepositoryOperationDelete = "repository_operation_delete"
private val RepositoryOperationRename = "repository_operation_rename"
private val RepositoryOperationTransfer = "repository_operation_transfer"
private val RepositoryOperationFork = "repository_operation_fork"
private val Gravatar = "gravatar"
private val Notification = "notification"
// NOTE(review): ActivityLogLimit is not referenced by loadSystemSettings in
// this file — possibly a legacy key.
private val ActivityLogLimit = "activity_log_limit"
private val LimitVisibleRepositories = "limitVisibleRepositories"
// SSH (both new-style bindAddress/publicAddress keys and old-style host/port)
private val SshEnabled = "ssh"
private val SshHost = "ssh.host"
private val SshPort = "ssh.port"
private val SshBindAddressHost = "ssh.bindAddress.host"
private val SshBindAddressPort = "ssh.bindAddress.port"
private val SshPublicAddressHost = "ssh.publicAddress.host"
private val SshPublicAddressPort = "ssh.publicAddress.port"
// SMTP
private val UseSMTP = "useSMTP"
private val SmtpHost = "smtp.host"
private val SmtpPort = "smtp.port"
private val SmtpUser = "smtp.user"
private val SmtpPassword = "smtp.password"
private val SmtpSsl = "smtp.ssl"
private val SmtpStarttls = "smtp.starttls"
private val SmtpFromAddress = "smtp.from_address"
private val SmtpFromName = "smtp.from_name"
// LDAP
private val LdapAuthentication = "ldap_authentication"
private val LdapHost = "ldap.host"
private val LdapPort = "ldap.port"
private val LdapBindDN = "ldap.bindDN"
private val LdapBindPassword = "ldap.bind_password"
private val LdapBaseDN = "ldap.baseDN"
private val LdapUserNameAttribute = "ldap.username_attribute"
private val LdapAdditionalFilterCondition = "ldap.additional_filter_condition"
private val LdapFullNameAttribute = "ldap.fullname_attribute"
private val LdapMailAddressAttribute = "ldap.mail_attribute"
private val LdapTls = "ldap.tls"
private val LdapSsl = "ldap.ssl"
private val LdapKeystore = "ldap.keystore"
// OpenID Connect
private val OidcAuthentication = "oidc_authentication"
private val OidcIssuer = "oidc.issuer"
private val OidcClientId = "oidc.client_id"
private val OidcClientSecret = "oidc.client_secret"
private val OidcJwsAlgorithm = "oidc.jws_algorithm"
// UI
private val SkinName = "skinName"
private val UserDefinedCss = "userDefinedCss"
private val ShowMailAddress = "showMailAddress"
// web hooks / uploads / repository viewer
private val WebHookBlockPrivateAddress = "webhook.block_private_address"
private val WebHookWhitelist = "webhook.whitelist"
private val UploadMaxFileSize = "upload.maxFileSize"
private val UploadTimeout = "upload.timeout"
private val UploadLargeMaxFileSize = "upload.largeMaxFileSize"
private val UploadLargeTimeout = "upload.largeTimeout"
private val RepositoryViewerMaxFiles = "repository_viewer_max_files"
/** Resolves `key` to a typed value: an external override from `getConfigValue`
  * wins; otherwise the properties entry is converted via `convertType`, with
  * missing or blank entries falling back to `default`. */
private def getValue[A: ClassTag](props: java.util.Properties, key: String, default: A): A =
  getConfigValue(key).getOrElse {
    Option(props.getProperty(key)).filter(_.nonEmpty) match {
      case Some(raw) => convertType(raw).asInstanceOf[A]
      case None      => default
    }
  }
/** Resolves `key` as a newline-separated list: each segment is converted via
  * `convertType`, with blank segments replaced by `default`. An absent key
  * yields a single-element sequence containing `default`. */
private def getSeqValue[A: ClassTag](props: java.util.Properties, key: String, default: A): Seq[A] =
  getValue[String](props, key, "").split("\\n").toIndexedSeq.map {
    case null | "" => default
    case raw       => convertType(raw).asInstanceOf[A]
  }
/** Resolves `key` to an optional typed value: an external override from
  * `getConfigValue` wins; otherwise the properties entry is converted, with
  * missing or blank entries falling back to `default`. */
private def getOptionValue[A: ClassTag](props: java.util.Properties, key: String, default: Option[A]): Option[A] = {
  getConfigValue(key).orElse {
    val value = props.getProperty(key)
    if (value == null || value.isEmpty) {
      default
    } else {
      // NOTE(review): the cast is applied to the Option rather than the value;
      // due to erasure this behaves like Some(convertType(value).asInstanceOf[A]).
      Some(convertType(value)).asInstanceOf[Option[A]]
    }
  }
}
}
| takezoe/gitbucket | src/main/scala/gitbucket/core/service/SystemSettingsService.scala | Scala | apache-2.0 | 18,611 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.data.dao
import org.beangle.commons.collection.page._
/** 基于查询的分页
* 当使用或导出大批量数据时,使用者仍以List的方式进行迭代。<br>
* 该实现则是内部采用分页方式。
*
* @author chaostone
*/
/** Query-backed page sequence: consumers iterate the whole result set while
  * pages are fetched lazily underneath (see [[PageIterator]]).
  *
  * @param query the limit-carrying query that produces each page
  */
abstract class AbstractQueryPage[T](val query: LimitQuery[T]) extends Page[T] {

  // the page currently loaded; populated via updatePage by subclasses
  var page: Page[T] = _
  // current page index, seeded from the query's limit (minus one) when present
  var pageIndex: Int = if (null != query.limit) query.limit.pageIndex - 1 else 0
  var totalPages = 0

  // ensure the query always carries a limit; note pageIndex above was already
  // derived from the (possibly null) original limit
  if (null == query.limit) query.limit(PageLimit(Page.DefaultPageNo, Page.DefaultPageSize))

  /** Records a freshly fetched page and syncs the index/total counters.
    *
    * @param page a { @link org.beangle.commons.collection.page.SinglePage} object.
    */
  protected def updatePage(page: SinglePage[T]): Unit = {
    this.page = page
    this.pageIndex = page.pageIndex
    this.totalPages = page.totalPages
  }

  // navigation delegates to the subclass-provided moveTo
  override def next(): Page[T] = moveTo(pageIndex + 1)
  override def previous(): Page[T] = moveTo(pageIndex - 1)
  override def hasNext: Boolean = totalPages > pageIndex
  override def hasPrevious: Boolean = pageIndex > 1
  override def pageSize: Int = query.limit.pageSize
  // the remaining accessors expose the currently loaded page
  override def totalItems: Int = page.totalItems
  override def items: collection.Seq[T] = page.items
  override def length: Int = page.length
  override def apply(index: Int): T = page(index)
  override def iterator: Iterator[T] = new PageIterator[T](this)
}
/** Iterator over all items of an [[AbstractQueryPage]]: it walks the current
  * page and transparently advances to the next page when the current one is
  * exhausted, so callers can traverse the whole paged result as one stream. */
class PageIterator[T](val queryPage: AbstractQueryPage[T]) extends Iterator[T] {
  // number of items already returned from the current page
  private var dataIndex: Int = 0
  private var innerIter = queryPage.page.iterator

  // more data iff the current page has unread items or further pages exist
  override def hasNext: Boolean = (dataIndex < queryPage.page.items.size) || queryPage.hasNext

  override def next(): T = {
    if (dataIndex < queryPage.page.size) {
      dataIndex += 1
      innerIter.next()
    } else {
      // current page exhausted: fetch the next page and restart from its head
      queryPage.next()
      dataIndex = 1
      innerIter = queryPage.page.iterator
      innerIter.next()
    }
  }
}
| beangle/data | orm/src/main/scala/org/beangle/data/dao/AbstractQueryPage.scala | Scala | lgpl-3.0 | 2,647 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spot.proxy
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spot.proxy.ProxySchema._
import org.apache.spot.utilities.data.InputOutputDataHandler.getFeedbackRDD
object ProxyFeedback {

  /**
    * Load the feedback file for proxy data.
    *
    * @param sparkSession Spark Session
    * @param feedbackFile Local machine path to the proxy feedback file.
    * @param duplicationFactor Number of words to create per flagged feedback entry.
    * @return DataFrame of the feedback events (empty when no feedback exists).
    */
  def loadFeedbackDF(sparkSession: SparkSession,
                     feedbackFile: String,
                     duplicationFactor: Int): DataFrame = {

    // All retained feedback columns are read as nullable strings.
    val retainedColumns =
      List(Date, Time, ClientIP, Host, ReqMethod, UserAgent, ResponseContentType, RespCode, FullURI)
    val feedbackSchema = StructType(retainedColumns.map(StructField(_, StringType, nullable = true)))

    val feedback: RDD[String] = getFeedbackRDD(sparkSession, feedbackFile)

    if (feedback.isEmpty()) {
      // No feedback recorded: return an empty frame with the expected schema.
      sparkSession.createDataFrame(sparkSession.sparkContext.emptyRDD[Row], feedbackSchema)
    } else {
      // Positions, within a tab-separated feedback line, of the columns kept:
      // date, time, clientIp, host, reqMethod, userAgent, resContType,
      // respCode (11) and fullURI (18).
      val retainedIndices = Seq(0, 1, 2, 3, 4, 5, 6, 11, 18)
      val fullURISeverityIndex = 22

      val flaggedRows = feedback
        .map(_.split("\\t"))
        .filter(fields => fields(fullURISeverityIndex).trim.toInt == 3) // keep severity-3 entries only
        .map(fields => Row.fromSeq(retainedIndices.map(fields(_))))
        .flatMap(row => List.fill(duplicationFactor)(row)) // over-sample flagged entries

      sparkSession.createDataFrame(flaggedRows, feedbackSchema)
        .select(Date, Time, ClientIP, Host, ReqMethod, UserAgent, ResponseContentType, RespCode, FullURI)
    }
  }
}
| brandon-edwards/incubator-spot | spot-ml/src/main/scala/org/apache/spot/proxy/ProxyFeedback.scala | Scala | apache-2.0 | 3,276 |
package io.iohk.ethereum.ledger
import akka.util.ByteString
import io.iohk.ethereum.ObjectGenerators
import io.iohk.ethereum.domain.{Block, BlockBody, BlockchainImpl, ChainWeight}
import io.iohk.ethereum.Fixtures
import io.iohk.ethereum.ledger.BlockQueue.Leaf
import io.iohk.ethereum.utils.Config
import io.iohk.ethereum.utils.Config.SyncConfig
import org.scalamock.scalatest.MockFactory
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
/** Unit tests for [[BlockQueue]]: duplicate handling, number-range limits,
  * chain-weight propagation through queued ancestors, and branch/subtree
  * removal. Blockchain access is mocked via [[TestConfig]]. */
class BlockQueueSpec extends AnyFlatSpec with Matchers with MockFactory {

  "BlockQueue" should "ignore block if it's already in the queue" in new TestConfig {
    val block = getBlock(1)
    val parentWeight = ChainWeight.zero
    // best-block number is consulted once per enqueue attempt
    setBestBlockNumber(1).twice()
    setChainWeightForParent(block, Some(parentWeight))

    blockQueue.enqueueBlock(block) shouldEqual Some(Leaf(block.header.hash, parentWeight.increase(block.header)))
    blockQueue.enqueueBlock(block) shouldEqual None
    blockQueue.isQueued(block.header.hash) shouldBe true
  }

  it should "ignore blocks outside of range" in new TestConfig {
    // with best block 15 and a window of +/-10, blocks 1 and 30 are rejected
    val block1 = getBlock(1)
    val block30 = getBlock(30)
    setBestBlockNumber(15).twice()

    blockQueue.enqueueBlock(block1)
    blockQueue.isQueued(block1.header.hash) shouldBe false

    blockQueue.enqueueBlock(block30)
    blockQueue.isQueued(block30.header.hash) shouldBe false
  }

  it should "remove the blocks that fall out of range" in new TestConfig {
    val block1 = getBlock(1)
    setBestBlockNumber(1)
    setChainWeightForParent(block1)

    blockQueue.enqueueBlock(block1)
    blockQueue.isQueued(block1.header.hash) shouldBe true

    // once the best block advances to 20, block 1 is evicted
    val block20 = getBlock(20)
    setBestBlockNumber(20)
    setChainWeightForParent(block20)

    blockQueue.enqueueBlock(block20)
    blockQueue.isQueued(block20.header.hash) shouldBe true
    blockQueue.isQueued(block1.header.hash) shouldBe false
  }

  it should "enqueue a block with parent on the main chain updating its total difficulty" in new TestConfig {
    val block1 = getBlock(1, 13)
    val parentWeight = ChainWeight.totalDifficultyOnly(42)
    setBestBlockNumber(1)
    setChainWeightForParent(block1, Some(parentWeight))

    blockQueue.enqueueBlock(block1) shouldEqual Some(Leaf(block1.header.hash, parentWeight.increase(block1.header)))
  }

  it should "enqueue a block with queued ancestors rooted to the main chain updating its total difficulty" in new TestConfig {
    val block1 = getBlock(1, 101)
    val block2a = getBlock(2, 102, block1.header.hash)
    val block2b = getBlock(2, 99, block1.header.hash)
    val block3 = getBlock(3, 103, block2a.header.hash)

    val parentWeight = ChainWeight.totalDifficultyOnly(42)

    setBestBlockNumber(1).anyNumberOfTimes()
    setChainWeightForParent(block1, Some(parentWeight))
    // only block1's parent is on the main chain; the rest resolve via the queue
    setChainWeightForParent(block2a, None)
    setChainWeightForParent(block2b, None)
    setChainWeightForParent(block3, None)

    blockQueue.enqueueBlock(block1)
    blockQueue.enqueueBlock(block2a)
    blockQueue.enqueueBlock(block2b)

    // weight accumulates along the best branch: block1 -> block2a -> block3
    val expectedWeight = List(block1, block2a, block3).map(_.header).foldLeft(parentWeight)(_ increase _)
    blockQueue.enqueueBlock(block3) shouldEqual Some(Leaf(block3.header.hash, expectedWeight))
  }

  it should "enqueue an orphaned block" in new TestConfig {
    val block1 = getBlock(1)
    setBestBlockNumber(1)
    setChainWeightForParent(block1)

    // no known parent: queued, but no new best leaf is reported
    blockQueue.enqueueBlock(block1) shouldBe None
    blockQueue.isQueued(block1.header.hash) shouldBe true
  }

  it should "remove a branch from a leaf up to the first shared ancestor" in new TestConfig {
    val block1 = getBlock(1)
    val block2a = getBlock(2, parent = block1.header.hash)
    val block2b = getBlock(2, parent = block1.header.hash)
    val block3 = getBlock(3, parent = block2a.header.hash)

    setBestBlockNumber(1).anyNumberOfTimes()
    setChainWeightForParent(block1)
    setChainWeightForParent(block2a)
    setChainWeightForParent(block2b)
    setChainWeightForParent(block3)

    blockQueue.enqueueBlock(block1)
    blockQueue.enqueueBlock(block2a)
    blockQueue.enqueueBlock(block2b)
    blockQueue.enqueueBlock(block3)

    // dequeuing the branch ending at block3 removes it and block2a, but keeps
    // the sibling branch (block2b) and the shared ancestor (block1)
    blockQueue.getBranch(block3.header.hash, dequeue = true) shouldEqual List(block1, block2a, block3)
    blockQueue.isQueued(block3.header.hash) shouldBe false
    blockQueue.isQueued(block2a.header.hash) shouldBe false
    blockQueue.isQueued(block2b.header.hash) shouldBe true
    blockQueue.isQueued(block1.header.hash) shouldBe true
  }

  it should "remove a whole subtree down from an ancestor to all its leaves" in new TestConfig {
    val block1a = getBlock(1)
    val block1b = getBlock(1)
    val block2a = getBlock(2, parent = block1a.header.hash)
    val block2b = getBlock(2, parent = block1a.header.hash)
    val block3 = getBlock(3, parent = block2a.header.hash)

    setBestBlockNumber(1).anyNumberOfTimes()
    setChainWeightForParent(block1a)
    setChainWeightForParent(block1b)
    setChainWeightForParent(block2a)
    setChainWeightForParent(block2b)
    setChainWeightForParent(block3)

    blockQueue.enqueueBlock(block1a)
    blockQueue.enqueueBlock(block1b)
    blockQueue.enqueueBlock(block2a)
    blockQueue.enqueueBlock(block2b)
    blockQueue.enqueueBlock(block3)

    blockQueue.isQueued(block3.header.hash) shouldBe true
    blockQueue.isQueued(block2a.header.hash) shouldBe true
    blockQueue.isQueued(block2b.header.hash) shouldBe true
    blockQueue.isQueued(block1a.header.hash) shouldBe true
    blockQueue.isQueued(block1b.header.hash) shouldBe true

    // removing block1a's subtree leaves only the unrelated block1b queued
    blockQueue.removeSubtree(block1a.header.hash)
    blockQueue.isQueued(block3.header.hash) shouldBe false
    blockQueue.isQueued(block2a.header.hash) shouldBe false
    blockQueue.isQueued(block2b.header.hash) shouldBe false
    blockQueue.isQueued(block1a.header.hash) shouldBe false
    blockQueue.isQueued(block1b.header.hash) shouldBe true
  }

  /** Shared fixture: a BlockQueue over a mocked blockchain with a queueing
    * window of 10 blocks ahead/behind the best block. */
  trait TestConfig {
    val syncConfig = SyncConfig(Config.config).copy(maxQueuedBlockNumberAhead = 10, maxQueuedBlockNumberBehind = 10)
    val blockchain = mock[BlockchainImpl]
    val blockQueue = BlockQueue(blockchain, syncConfig)

    // expectation: the queue will ask for the current best block number
    def setBestBlockNumber(n: BigInt) =
      (blockchain.getBestBlockNumber _).expects().returning(n)

    // expectation: the queue will look up the chain weight of the block's parent
    def setChainWeightForParent(block: Block, weight: Option[ChainWeight] = None) =
      (blockchain.getChainWeightByHash _).expects(block.header.parentHash).returning(weight)

    def randomHash(): ByteString =
      ObjectGenerators.byteStringOfLengthNGen(32).sample.get

    val defaultHeader = Fixtures.Blocks.ValidBlock.header.copy(
      difficulty = 1000000,
      number = 1,
      gasLimit = 1000000,
      gasUsed = 0,
      unixTimestamp = 0
    )

    // builds a block with the given number/difficulty/parent; `salt` makes the
    // header hash unique even for otherwise identical blocks
    def getBlock(
      number: BigInt,
      difficulty: BigInt = 1000000,
      parent: ByteString = randomHash(),
      salt: ByteString = randomHash()
    ): Block =
      Block(
        defaultHeader.copy(parentHash = parent, difficulty = difficulty, number = number, extraData = salt),
        BlockBody.empty
      )
  }
}
| input-output-hk/etc-client | src/test/scala/io/iohk/ethereum/ledger/BlockQueueSpec.scala | Scala | mit | 7,034 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.yggdrasil.vfs
import quasar.contrib.pathy.{ADir, RDir, RFile}
import quasar.contrib.scalaz.stateT, stateT._
import argonaut.{Argonaut, Parse}
import fs2.Stream
import fs2.interop.scalaz.StreamScalazOps
import pathy.Path
import scalaz.{:<:, Free, Monad, StateT}
import scalaz.concurrent.Task
import scalaz.std.list._
import scalaz.std.option._
import scalaz.std.string._
import scalaz.syntax.monad._
import scalaz.syntax.traverse._
import scodec.bits.ByteVector
import java.util.UUID
/** Persistent log of data versions under `baseDir`: `committed` is the commit
  * history (most recent first, as commits are prepended), `versions` is every
  * version directory present on disk (committed or not). */
final case class VersionLog(baseDir: ADir, committed: List[Version], versions: Set[Version]) {
  // most recently committed version, if any
  def head: Option[Version] = committed.headOption
}
// TODO implement VERSION files
// TODO implement VERSION files
object VersionLog {
  import Argonaut._

  // shorthand for the state-threading used throughout: StateT over VersionLog
  private type ST[F[_], A] = StateT[F, VersionLog, A]

  private val Head: RDir = Path.dir("HEAD")
  private val VersionsJson: RFile = Path.file("versions.json")
  private val VersionsJsonNew: RFile = Path.file("versions.json.new")

  // keep the 5 most recent versions, by default
  private val KeepLimit = 5

  /** Initializes a [[VersionLog]] for `baseDir`: reads versions.json when it
    * exists (otherwise writes an empty one via the .new/move dance for
    * atomicity), then scans the directory for UUID-named version dirs. */
  // TODO failure recovery
  def init[S[_]](baseDir: ADir)(implicit IP: POSIXOp :<: S, IT: Task :<: S): Free[S, VersionLog] = {
    for {
      exists <- POSIX.exists[S](baseDir </> VersionsJson)

      committed <- if (exists) {
        for {
          fileStream <- POSIX.openR[S](baseDir </> VersionsJson)

          // TODO character encoding!
          fileString = fileStream.map(_.toArray).map(new String(_)).foldMonoid
          json <- POSIXWithTask.generalize[S](fileString.runLast)
        } yield json.flatMap(Parse.decodeOption[List[Version]](_)).getOrElse(Nil)
      } else {
        for {
          vnew <- POSIX.openW[S](baseDir </> VersionsJsonNew)

          json = List[Version]().asJson.nospaces

          // TODO character encoding!
          writer = Stream.emit(ByteVector(json.getBytes)).to(vnew).run
          _ <- POSIXWithTask.generalize(writer)
          _ <- POSIX.move[S](baseDir </> VersionsJsonNew, baseDir </> VersionsJson)
        } yield Nil
      }

      paths <- POSIX.ls[S](baseDir)

      // a directory counts as a version iff its name parses as a UUID
      versions = for {
        path <- paths
        dir <- Path.maybeDir(path).toList
        dirName <- Path.dirName(dir).toList

        version <- try {
          Version(UUID.fromString(dirName.value)) :: Nil
        } catch {
          case _: IllegalArgumentException => Nil
        }
      } yield version
    } yield VersionLog(baseDir, committed, versions.toSet)
  }

  /** Allocates a fresh (unused) version id, creates its directory, and records
    * it in the log's `versions` set; retries on (unlikely) UUID collision. */
  def fresh[S[_]](implicit I: POSIXOp :<: S): StateT[Free[S, ?], VersionLog, Version] = {
    for {
      log <- StateTContrib.get[Free[S, ?], VersionLog]
      uuid <- POSIX.genUUID[S].liftM[ST]
      v = Version(uuid)

      back <- if (log.versions.contains(v)) {
        fresh[S]
      } else {
        for {
          _ <- StateTContrib.put[Free[S, ?], VersionLog](log.copy(versions = log.versions + v))
          target <- underlyingDir[Free[S, ?]](v)
          _ <- POSIX.mkDir[S](target).liftM[ST]
        } yield v
      }
    } yield back
  }

  /** Directory on disk backing version `v` (baseDir/<uuid>/). */
  // TODO there isn't any error handling here
  def underlyingDir[F[_]: Monad](v: Version): StateT[F, VersionLog, ADir] =
    StateTContrib.get[F, VersionLog].map(_.baseDir </> Path.dir(v.value.toString))

  /** Commits version `v` (must already be in `versions`): prepends it to the
    * committed history, persists versions.json atomically (.new then move),
    * and re-points the HEAD link at the version's directory. Unknown versions
    * are silently ignored. */
  // TODO add symlink
  def commit[S[_]](v: Version)(implicit IP: POSIXOp :<: S, IT: Task :<: S): StateT[Free[S, ?], VersionLog, Unit] = {
    for {
      log <- StateTContrib.get[Free[S, ?], VersionLog]
      log2 = log.copy(committed = v :: log.committed)

      _ <- if (log.versions.contains(v)) {
        for {
          _ <- StateTContrib.put[Free[S, ?], VersionLog](log2)
          vnew <- POSIX.openW[S](log.baseDir </> VersionsJsonNew).liftM[ST]

          json = log2.committed.asJson.nospaces

          // TODO character encoding!
          writer = Stream.emit(ByteVector(json.getBytes)).to(vnew).run
          _ <- POSIXWithTask.generalize(writer).liftM[ST]
          _ <- POSIX.move[S](log.baseDir </> VersionsJsonNew, log.baseDir </> VersionsJson).liftM[ST]

          _ <- POSIX.delete[S](log.baseDir </> Head).liftM[ST]

          _ <- POSIX.linkDir[S](
            log.baseDir </> Path.dir(v.value.toString),
            log.baseDir </> Head).liftM[ST]
        } yield ()
      } else {
        ().point[StateT[Free[S, ?], VersionLog, ?]]
      }
    } yield ()
  }

  /** Directory of the current head commit, when one exists. */
  def underlyingHeadDir[F[_]: Monad]: StateT[F, VersionLog, Option[ADir]] = {
    for {
      log <- StateTContrib.get[F, VersionLog]
      back <- log.head.traverse(underlyingDir[F](_))
    } yield back
  }

  /** Deletes all but the `KeepLimit` most recent committed versions from disk
    * and from the in-memory log. NOTE(review): per the TODO below, the pruned
    * committed list is NOT re-persisted to versions.json here, so on-disk
    * state can lag until the next commit. */
  def purgeOld[S[_]](implicit I: POSIXOp :<: S): StateT[Free[S, ?], VersionLog, Unit] = {
    for {
      log <- StateTContrib.get[Free[S, ?], VersionLog]
      toPurge = log.committed.drop(KeepLimit)

      _ <- toPurge traverse { v =>
        for {
          dir <- underlyingDir[Free[S, ?]](v)
          _ <- POSIX.delete[S](dir).liftM[ST]
        } yield ()
      }

      log2 = log.copy(
        committed = log.committed.take(KeepLimit),
        versions = log.versions -- toPurge)

      // TODO write new versions.json
      _ <- StateTContrib.put[Free[S, ?], VersionLog](log2)
    } yield ()
  }
}
| drostron/quasar | yggdrasil/src/main/scala/quasar/yggdrasil/vfs/VersionLog.scala | Scala | apache-2.0 | 5,710 |
package cn.hjmao.learning.akka.http.demo.model.db
import com.zaxxer.hikari.{HikariConfig, HikariDataSource}
/**
* Created by hjmao on 17-5-10.
*/
/** Wraps a HikariCP connection pool around the given JDBC URL/credentials and
  * exposes a Slick `Database` handle (`db`) over it. */
class DataSource(classname: String, url: String, user: String, password: String) {
  // NOTE(review): `classname` is accepted but never used — presumably intended
  // for HikariConfig.setDriverClassName; confirm before relying on it.
  private val config = new HikariConfig()
  config.setJdbcUrl(url)
  config.setUsername(user)
  config.setPassword(password)
  private val datasource = new HikariDataSource(config)

  // Slick profile for MySQL; the `api` import provides Database
  val driver = slick.jdbc.MySQLProfile
  import driver.api._
  val db = Database.forDataSource(datasource, None)
  // NOTE(review): a session is created eagerly here (likely to validate
  // connectivity at construction) but its result is discarded and never
  // closed — confirm this is intentional.
  db.createSession()
}
| huajianmao/learning | framework/akka-http/demo/src/main/scala/cn/hjmao/learning/akka/http/demo/model/db/DataSource.scala | Scala | mit | 552 |
package com.mentatlabs.example
import java.sql.Connection

import org.postgresql.PGConnection
import org.postgresql.ds.PGSimpleDataSource

import scala.annotation.tailrec
import scala.collection.mutable.ArrayBuffer
/** Listens for PostgreSQL LISTEN/NOTIFY events over plain JDBC.
  *
  * A fresh connection is obtained from `ds` for each operation and is always
  * released, even when waiting or querying fails.
  *
  * @param ds data source providing PostgreSQL connections
  */
class PostgresListener(ds: PGSimpleDataSource) {

  /** Obtains a connection refined to the PostgreSQL-specific interface so that
    * `getNotifications` is available; fails fast on any other driver. */
  def getConnection() = ds.getConnection() match {
    case pg: PGConnection => pg
    case _ => sys.error("Could not retrieve PG connection!")
  }

  /** Subscribes to each '/'-separated channel in `channels` and blocks until
    * the first notification batch arrives, returned as "channel: payload"
    * lines. The connection and statement are closed even on failure. */
  def readNotification(channels: String) = {
    val directive = channels.split('/').map(c => s"""LISTEN "$c";""").mkString("\n")
    println(directive)
    val conn = getConnection()
    try {
      val stmt = conn.createStatement()
      try stmt.execute(directive)
      finally stmt.close()
      awaitNotification(conn)
    } finally conn.close()
  }

  /** Polls for pending notifications until some arrive. An empty statement is
    * executed on each round to make the driver process incoming messages,
    * sleeping briefly between rounds. */
  @tailrec
  private def awaitNotification(conn: Connection with PGConnection): String = {
    val stmt = conn.createStatement()
    try stmt.execute(";")
    finally stmt.close()
    val notifications = conn.getNotifications()
    if (notifications != null) {
      notifications.map { n =>
        n.getName + ": " + n.getParameter
      }.mkString("\n")
    } else {
      Thread.sleep(100) // JDBC does not support async notifications :(
      awaitNotification(conn)
    }
  }

  /** Waits until table `name` is updated (as signalled via NOTIFY on a channel
    * of the same name), then reads the whole table back as a pair of
    * (column names, rows of string values). */
  def waitForTableUpdate(name: String) = {
    println(readNotification(name) + "(table updated)")
    val conn = getConnection()
    try {
      val st = conn.prepareStatement(s"TABLE $name ORDER BY 1;")
      try {
        val rs = st.executeQuery()
        try {
          val md = rs.getMetaData
          val columns = (1 to md.getColumnCount) map {
            md.getColumnName
          }
          val rows = new ArrayBuffer[Seq[String]]
          while (rs.next()) {
            rows += (1 to columns.length) map {
              rs.getString
            }
          }
          (columns, rows)
        } finally rs.close()
      } finally st.close()
    } finally conn.close()
  }
}
| melezov/postgresql-overview-listen-notify | src/main/scala/com/mentatlabs/example/PostgresListener.scala | Scala | unlicense | 1,882 |
package at.logic.gapt.cutintro
import at.logic.gapt.expr._
import at.logic.gapt.expr.hol.CNFp
import at.logic.gapt.proofs.{ FOLClause, Sequent }
/**
 * Schematic extended Herbrand sequent for schematic Pi-2 grammars
 * @param reducedRepresentation The schematic extended Herbrand sequent without placeholder for the cut ( F[x\U_1] |- G[y\U_2] )
 * @param universalEigenvariable The variable that is introduced for the universally quantified variable of the cut formula (alpha)
 * @param existentialEigenvariables The variables that are introduced for the existentially quantified variable of the cut
 * formula (beta_1,...,beta_m)
 * @param substitutionsForAlpha The terms (except from the eigenvariable) that are introduced for the universally quantified variable of
 * the cut formula (r_1,...,r_m)
 * @param substitutionsForBetaWithAlpha The terms (except from the eigenvariables) that are introduced for the existentially quantified variable
 * of the cut formula independent from the existential eigenvariables (t_1(alpha),...,t_p(alpha))
 */
case class Pi2SeHs(
  reducedRepresentation: Sequent[Formula], // F[x\U_1] |- G[y\U_2]
  universalEigenvariable: Var, // alpha
  existentialEigenvariables: List[Var], // beta_1,...,beta_m
  substitutionsForAlpha: List[Expr], // r_1,...,r_m
  substitutionsForBetaWithAlpha: List[Expr] // t_1(alpha),...,t_p(alpha)
) {
  // One r_j per beta_j: the two lists are indexed in parallel throughout this class.
  require( existentialEigenvariables.length == substitutionsForAlpha.length )
  /**
   * Number of substitutions for the eigenvariable of the universally quantified variable (m)
   */
  val multiplicityOfAlpha: Int = substitutionsForAlpha.length
  /**
   * Number of substitutions for the eigenvariables of the existentially quantified variable independent from the substitution of the universal
   * eigenvariable (p)
   */
  val multiplicityOfBeta: Int = substitutionsForBetaWithAlpha.length
  /**
   * Pairs of the universal eigenvariable with the substitutions for the universal eigenvariable ((alpha,r_1),...,(alpha,r_m))
   */
  val substitutionPairsAlpha: List[( Expr, Expr )] = {
    substitutionsForAlpha.map( instance => ( universalEigenvariable, instance ) )
  }
  /**
   * Pairs of a existential eigenvariable with the substitutions for this existential eigenvariable
   * ((beta_i,t_1(alpha)),...,(beta_i,t_p(alpha)) with i=index)
   * @param index Indicates the considered existential eigenvariable (1 <= index <= m)
   * @return
   */
  def substitutionPairsBetaI( index: Int ): List[( Expr, Expr )] = {
    require( 1 <= index && index <= this.multiplicityOfAlpha )
    substitutionsForBetaWithAlpha.map( instanceB => ( existentialEigenvariables( index - 1 ), instanceB ) )
  }
  /**
   * Pairs of the existential eigenvariables with the substitutions for the existential eigenvariables
   * ((beta_1,t_1(alpha)),...,(beta_1,t_p(alpha)),...,(beta_m,t_1(alpha)),...,(beta_m,t_p(alpha)))
   */
  val substitutionPairsBeta: List[( Expr, Expr )] = {
    // Iterates indices downwards (m, m-1, ..., 1) and flattens the per-beta pair lists.
    (
      for ( index <- 1 to multiplicityOfAlpha )
        yield substitutionPairsBetaI( multiplicityOfAlpha - index + 1 )
    ).toList.flatten
  }
  /**
   * List of all substitution pairs (alpha,r_i) and (r_i,alpha)
   */
  val productionRulesXS: List[( Expr, Expr )] = substitutionPairsAlpha ++ substitutionPairsAlpha.map( _.swap )
  /**
   * List of all substitution pairs (beta_j,t_i(alpha)) and (t_i(alpha),beta_j)
   */
  val productionRulesYS: List[( Expr, Expr )] = substitutionPairsBeta ++ substitutionPairsBeta.map( _.swap )
  /**
   * List of substitutions ((alpha->r_1),...,(alpha->r_m))
   */
  val substitutionsAlpha: List[Substitution] = {
    substitutionsForAlpha.map( instanceA => Substitution( universalEigenvariable, instanceA ) )
  }
  /**
   * List of substitutions ((beta_i->t_1(r_i)),...,(beta_i->t_p(r_i)) with i=index)
   * @param index Indicates the considered existential eigenvariable (1 <= index <= m)
   * @return
   */
  def substitutionsBetaI( index: Int ): List[Substitution] = {
    require( 1 <= index && index <= this.multiplicityOfAlpha )
    // First substitute alpha by r_index inside t_i(alpha), then map beta_index to the result.
    val subs: Substitution = Substitution( universalEigenvariable, substitutionsForAlpha( index - 1 ) )
    substitutionsForBetaWithAlpha.map( instanceB => Substitution( existentialEigenvariables( index - 1 ), subs( instanceB ) ) )
  }
  // Expands each succedent formula mentioning beta_index by all p instantiations
  // (beta_index -> t_i(r_index)); formulas without beta_index are copied once.
  private def substituteRightSideOnce( sequent: Sequent[Formula], index: Int ): Sequent[Formula] = {
    var resultingSequent: Sequent[Formula] = Sequent()
    sequent.succedent.foreach( formula => {
      formula.find( existentialEigenvariables( index - 1 ) ) match {
        case List() => resultingSequent = resultingSequent :+ formula
        case _ => substitutionsBetaI( index ).foreach( subs => {
          resultingSequent = resultingSequent :+ subs( formula )
        } )
      }
    } )
    resultingSequent
  }
  // Same expansion as substituteRightSideOnce, but for the antecedent of the sequent.
  private def substituteLeftSideOnce( sequent: Sequent[Formula], index: Int ): Sequent[Formula] = {
    var resultingSequent: Sequent[Formula] = Sequent()
    sequent.antecedent.foreach( formula => {
      formula.find( existentialEigenvariables( index - 1 ) ) match {
        case List() => resultingSequent = formula +: resultingSequent
        case _ => substitutionsBetaI( index ).foreach( subs => {
          resultingSequent = subs( formula ) +: resultingSequent
        } )
      }
    } )
    resultingSequent
  }
  /**
   * Computes the Herbrand sequent that corresponds to the schematic Pi-2 grammar (F[x\T_1] |- G[y\T_2])
   * @return
   */
  def herbrandSequent(): Sequent[Formula] = {
    // Step 1: expand all beta_j occurrences on the right-hand side.
    var herbrandSequent: Sequent[Formula] = Sequent() :++ reducedRepresentation.succedent
    for ( indexM <- 0 until multiplicityOfAlpha ) {
      herbrandSequent = substituteRightSideOnce( herbrandSequent, multiplicityOfAlpha - indexM )
    }
    // Step 2: instantiate every antecedent formula with r_1,...,r_m for alpha.
    reducedRepresentation.antecedent.foreach( formula => {
      substitutionsForAlpha.foreach( instanceA => {
        val subs: Substitution = Substitution( universalEigenvariable, instanceA )
        herbrandSequent = subs( formula ) +: herbrandSequent
      } )
    } )
    // Step 3: expand the beta_j occurrences on the left-hand side, keeping the succedent fixed.
    val sequent: Sequent[Formula] = herbrandSequent
    for ( indexM <- 0 until multiplicityOfAlpha ) {
      herbrandSequent = substituteLeftSideOnce( herbrandSequent.antecedent ++: Sequent(), multiplicityOfAlpha - indexM ) :++ sequent.succedent
    }
    herbrandSequent
  }
  /**
   * Transforms the reduced representation from a sequent to a formula
   */
  val reducedRepresentationToFormula: Formula = reducedRepresentation.toImplication
  /**
   * Computes simultaneously a set of all atoms occurring in the leaves of the reduced representation (the atoms are negated if they
   * occur on the right side of the sequent) and a list of all relevant normalized (everything is shifted to the left side) leaves
   * of the reduced representation
   */
  val literalsInTheDNTAsAndTheDNTAs: ( Set[Formula], List[Sequent[Formula]] ) = {
    val literals = scala.collection.mutable.Set[Formula]()
    val DNTA = scala.collection.mutable.Set[Sequent[Formula]]()
    CNFp( this.reducedRepresentationToFormula ).foreach( clause => if ( !clause.isTaut ) {
      val NTAClause: Sequent[Formula] = clause.succedent.map( literal => Neg( literal ) ) ++: clause.antecedent ++: Sequent()
      // Keep only subsumption-minimal clauses: drop existing supersets, skip subsumed newcomers.
      val DNTABuffer = DNTA.toList
      var dontAdd: Boolean = false
      DNTABuffer.foreach( DNTAClause => {
        if ( !dontAdd ) {
          if ( NTAClause.isSubsetOf( DNTAClause ) ) {
            DNTA -= DNTAClause
          } else if ( DNTAClause.isSubsetOf( NTAClause ) ) {
            dontAdd = true
          }
        }
      } )
      if ( !dontAdd ) {
        DNTA += NTAClause
      }
      clause.antecedent.foreach( atom => literals += atom )
      clause.succedent.foreach( atom => literals += Neg( atom ) )
    } )
    val DNTAList = DNTA.toList
    ( literals.toSet, DNTAList )
  }
  /**
   * The set of all relevant normalized (everything is shifted to the left side) leaves
   */
  val dualNonTautologicalAxioms: List[Sequent[Formula]] = {
    val ( _, dNTAs ) = this.literalsInTheDNTAsAndTheDNTAs
    dNTAs
  }
  /**
   * Three sets A,B,N containing all atoms occurring in the leaves of the reduced representation (the atoms are negated if they occur on
   * the right side of the sequent) such that in all atoms (literals) of N no eigenvariables occur, in all atoms (literals) of A only the
   * universal eigenvariable occur, and in all atoms (literals) of B only the existential eigenvariables occur
   */
  val literalsInTheDNTAs: ( Set[Formula], Set[Formula], Set[Formula] ) = {
    val ( literals, _ ) = this.literalsInTheDNTAsAndTheDNTAs
    val alpha = scala.collection.mutable.Set[Formula]()
    val beta = scala.collection.mutable.Set[Formula]()
    val gamma = scala.collection.mutable.Set[Formula]()
    literals.foreach( literal => {
      // Note: literals containing both alpha and some beta_j fall into no set at all.
      if ( literal.contains( this.universalEigenvariable ) ) {
        if ( !this.existentialEigenvariables.exists( exEi => literal.contains( exEi ) ) ) {
          alpha += literal
        }
      } else if ( this.existentialEigenvariables.exists( exEi => literal.contains( exEi ) ) ) {
        beta += literal
      } else {
        gamma += literal
      }
    } )
    ( alpha.toSet, beta.toSet, gamma.toSet )
  }
  /**
   * List of all relevant normalized (everything is shifted to the left side) leaves of the reduced representation
   * in a reduced signature/language that contains the unified literals (work in progress)
   * @param unifiedLiterals A set of formulas (unified literals) that define the reduced signature/language
   * @return
   */
  def theDNTAsInTheLanguage( unifiedLiterals: Set[Formula] ): ( List[Sequent[Formula]] ) = {
    // Restrict each leaf to literals whose head symbol occurs among the unified literals.
    val newDNTAs = this.dualNonTautologicalAxioms.map( leaf => {
      leaf.antecedent.filter( literal => {
        literal match {
          case Neg( t ) => {
            val isInLanguage: Boolean = unifiedLiterals.exists( opponent => {
              val Apps( nameOfLiteral, _ ) = t
              val Apps( nameOfOpponent, _ ) = opponent
              nameOfLiteral.syntaxEquals( nameOfOpponent )
            } )
            isInLanguage
          }
          case t => {
            val isInLanguage: Boolean = unifiedLiterals.exists( opponent => {
              val Apps( nameOfLiteral, _ ) = t
              val Apps( nameOfOpponent, _ ) = opponent
              nameOfLiteral.syntaxEquals( nameOfOpponent )
            } )
            isInLanguage
          }
        }
      } ) ++: Sequent()
    } )
    // Subsumption-minimize the restricted leaves, as in literalsInTheDNTAsAndTheDNTAs.
    val DNTA = scala.collection.mutable.Set( newDNTAs.head )
    newDNTAs.tail.foreach( DNTAClause => {
      var dontAdd: Boolean = false
      val DNTABuffer = DNTA
      DNTABuffer.foreach( existingClause => {
        if ( !dontAdd ) {
          if ( existingClause.isSubsetOf( DNTAClause ) ) {
            DNTA -= existingClause
          } else if ( DNTAClause.isSubsetOf( existingClause ) ) {
            dontAdd = true
          }
        }
      } )
      if ( !dontAdd ) {
        DNTA += DNTAClause
      }
    } )
    DNTA.toList
  }
  /**
   * Computes two sets of atoms P,N for a given set of literals such that P contains all positive literals and N all atoms of the negative literals
   * @param literals
   * @return
   */
  def sortAndAtomize( literals: Set[Formula] ): ( Set[Formula], Set[Formula] ) = {
    val posLiterals: scala.collection.mutable.Set[Formula] = scala.collection.mutable.Set()
    val negLiterals: scala.collection.mutable.Set[Formula] = scala.collection.mutable.Set()
    for ( literal <- literals ) {
      literal match {
        case Neg( t ) => negLiterals += t
        case _ => posLiterals += literal
      }
    }
    ( posLiterals.toSet, negLiterals.toSet )
  }
}
/**
 * Per-leaf bookkeeping for the cut-formula search: records which
 * instantiations make the corresponding leaf true.
 *
 * @param oneToMList indices (0-based in practice) of the alpha-instantiations r_j; conceptually a subset of {1,...,m}
 * @param oneToPList indices (0-based in practice) of the beta-instantiations t_i; conceptually a subset of {1,...,p}
 */
class LeafIndex(
  val oneToMList: Set[Int],
  val oneToPList: Set[Int] )
/**
 * A unified literal together with, for every non-tautological leaf of the
 * reduced representation, the instantiations under which the literal makes
 * that leaf true.
 *
 * @param literal the unified literal itself
 * @param leafIndexList one [[LeafIndex]] per non-tautological leaf
 * @param numberOfDNTAs number of non-tautological leaves; must equal `leafIndexList.length`
 * @param foundNonEmptyPList true iff some leaf is made true by at least one
 *                           substitution of the form (xCut->alpha, yCut->t_i(alpha))
 * @param foundEmptyMList true iff some leaf is made true by no substitution
 *                        of the form (xCut->r_j, yCut->beta_j)
 */
class LiteralWithIndexLists(
  val literal: Formula,
  val leafIndexList: List[LeafIndex],
  val numberOfDNTAs: Int,
  val foundNonEmptyPList: Boolean,
  val foundEmptyMList: Boolean ) {
  require( leafIndexList.length == numberOfDNTAs )
}
/**
 * A candidate clause (conjunction of unified literals) for the cut formula,
 * together with the per-leaf index data that decides whether the clause may
 * occur in a DNF making every non-tautological leaf true.
 *
 * @param literals the unified literals forming the clause; all must carry
 *                 index lists for the same number of leaves
 */
class ClauseWithIndexLists(
  val literals: List[LiteralWithIndexLists] ) {
  require( literals.tail.forall( _.numberOfDNTAs == literals.head.numberOfDNTAs ) )

  /** Number of non-tautological leaves each literal refers to. */
  def numberOfDNTAs: Int = literals.head.numberOfDNTAs

  /**
   * Per-leaf summary for the whole clause: the M-components of the literals
   * are merged by union, the P-components by intersection.
   */
  val leafIndexListClause: List[LeafIndex] =
    if ( literals.lengthCompare( 1 ) == 0 )
      literals.head.leafIndexList
    else
      ( 0 until numberOfDNTAs ).toList.map { leafNumber =>
        val perLiteral = literals.map( _.leafIndexList( leafNumber ) )
        new LeafIndex(
          perLiteral.map( _.oneToMList ).reduce( _ union _ ),
          perLiteral.map( _.oneToPList ).reduce( _ intersect _ ) )
      }

  /**
   * True iff at least one leaf admits an instantiation of the form
   * (xCut->alpha, yCut->t_i(alpha)), i.e. the clause is potentially a part
   * of the cut formula.
   */
  val isAllowed: Boolean =
    if ( literals.lengthCompare( 1 ) == 0 )
      literals.head.foundNonEmptyPList
    else
      leafIndexListClause.exists( _.oneToPList.nonEmpty )

  /**
   * True iff supersets of this clause still have to be considered by the
   * search.
   */
  val isAllowedAtLeastAsSubformula: Boolean =
    if ( !isAllowed )
      true
    else if ( literals.lengthCompare( 1 ) == 0 )
      !literals.head.foundEmptyMList
    else
      !leafIndexListClause.exists( _.oneToMList.isEmpty )

  /** The conjunction of all literals, associated to the left. */
  def formula: Formula =
    literals.map( _.literal ).reduceLeft( And( _, _ ) )
}
/**
 * A set of candidate clauses together with the index data needed to decide
 * whether their disjunction makes every non-tautological leaf of the reduced
 * representation true.
 *
 * @param clauses the candidate clauses of the prospective DNF cut formula
 */
class ClausesWithIndexLists(
  val clauses: List[ClauseWithIndexLists] ) {

  /**
   * Per-leaf summary for the disjunction: the M-components of the clauses
   * are merged by intersection, the P-components by union.
   */
  private def leafIndexListClauses: List[LeafIndex] =
    if ( clauses.lengthCompare( 1 ) == 0 )
      clauses.head.leafIndexListClause
    else
      ( 0 until clauses.head.numberOfDNTAs ).toList.map { leafNumber =>
        val perClause = clauses.map( _.leafIndexListClause( leafNumber ) )
        new LeafIndex(
          perClause.map( _.oneToMList ).reduce( _ intersect _ ),
          perClause.map( _.oneToPList ).reduce( _ union _ ) )
      }

  /**
   * True iff the disjunction of the clauses makes every non-tautological
   * leaf true, i.e. the clauses yield a valid cut formula in DNF.  A leaf is
   * covered only when both its M- and its P-component are non-empty.
   */
  def isSolution: Boolean = {
    def everyLeafCovered: Boolean =
      leafIndexListClauses.forall( leaf =>
        leaf.oneToPList.nonEmpty && leaf.oneToMList.nonEmpty )
    if ( clauses.lengthCompare( 1 ) == 0 )
      clauses.head.isAllowedAtLeastAsSubformula && everyLeafCovered
    else
      everyLeafCovered
  }

  /** The disjunction of all clause formulas, associated to the left. */
  def formula: Formula =
    clauses.map( _.formula ).reduceLeft( Or( _, _ ) )
}
/**
 * Computes the cut formula for a given schematic extended Herbrand sequent
 */
object introducePi2Cut {
  def apply(
    seHs: Pi2SeHs,
    nameOfExistentialVariable: Var = fov"yCut",
    nameOfUniversalVariable: Var = fov"xCut"
  ): ( Option[Formula], Var, Var ) = {
    // Rename the cut variables away from free variables of the sequent to avoid capture.
    val nameOfExistentialVariableChecked = rename.awayFrom( freeVariables( seHs.reducedRepresentationToFormula ) ).fresh( nameOfExistentialVariable )
    val nameOfUniversalVariableChecked = rename.awayFrom( freeVariables( seHs.reducedRepresentationToFormula ) ).fresh( nameOfUniversalVariable )
    // Step 1: candidate literals from unifying dual literals of the leaves.
    val unifiedLiterals: Set[Formula] = gStarUnify(
      seHs,
      nameOfExistentialVariableChecked,
      nameOfUniversalVariableChecked
    )
    // Step 2: per-literal index lists; may already yield a one-literal solution.
    val literalsWithIndexListsOrAndSolution: ( Set[LiteralWithIndexLists], Option[Formula] ) = computeTheIndexListsForTheLiterals(
      unifiedLiterals,
      seHs.dualNonTautologicalAxioms,
      seHs,
      nameOfExistentialVariableChecked,
      nameOfUniversalVariableChecked
    )
    val ( literalsWithIndexLists, optionSolution1 ) = literalsWithIndexListsOrAndSolution
    optionSolution1 match {
      case Some( t ) => return ( Some( t ), nameOfExistentialVariableChecked, nameOfUniversalVariableChecked )
      case None =>
    }
    /// Only for additional data ///
    ////////////////////////////////
    var numberOfAllowedClauses: Option[Int] = None
    var numberOfCheckedFormulas: Int = literalsWithIndexLists.size
    ////////////////////////////////
    if ( literalsWithIndexLists.size > 1 ) {
      // Step 3: build subsumption-minimal allowed clauses; may yield a one-clause solution.
      val allowedClausesWithIndexListsOrAndSolution: ( Set[ClauseWithIndexLists], Option[Formula] ) = checkAndBuildAllowedClausesHead(
        literalsWithIndexLists,
        seHs
      )
      val ( allowedClausesWithIndexLists, optionSolution2 ) = allowedClausesWithIndexListsOrAndSolution
      optionSolution2 match {
        case Some( t ) => return ( Some( t ), nameOfExistentialVariableChecked, nameOfUniversalVariableChecked )
        case None =>
      }
      /// Only for additional data ///
      ////////////////////////////////
      numberOfAllowedClauses = Option( allowedClausesWithIndexLists.size )
      numberOfCheckedFormulas = allowedClausesWithIndexLists.size
      ////////////////////////////////
      // Step 4: exhaustive search over disjunctions of 2..n allowed clauses.
      for ( numberOfClauses <- 2 to allowedClausesWithIndexLists.size ) {
        for ( subset <- allowedClausesWithIndexLists.subsets( numberOfClauses ) ) {
          val clausesWithIndexLists = new ClausesWithIndexLists( subset.toList )
          if ( clausesWithIndexLists.isSolution ) {
            return ( Option( clausesWithIndexLists.formula ), nameOfExistentialVariableChecked, nameOfUniversalVariableChecked )
          }
          /// Only for additional data ///
          ////////////////////////////////
          numberOfCheckedFormulas += 1
          ////////////////////////////////
        }
      }
    }
    /*
    /// Prints the most interesting data ///
    ////////////////////////////////////////
    println( "Number of non-tautological leaves" )
    println( seHs.dualNonTautologicalAxioms.length )
    println( "Number of unified literals" )
    println( unifiedLiterals.size )
    numberOfAllowedClauses match {
      case Some( t ) => {
        println( "Number of allowed clauses" )
        println( t )
      }
      case None => println( "No 'allowed clauses' were computed" )
    }
    println( "Number of checked Formulas" )
    println( numberOfCheckedFormulas )
    */
    ( None, nameOfExistentialVariableChecked, nameOfUniversalVariableChecked )
  }
  // Seeds the clause search with all single-literal clauses, pruning literals
  // that can never occur (not even as part of a larger clause).
  private def checkAndBuildAllowedClausesHead(
    literalsWithIndexLists: Set[LiteralWithIndexLists],
    seHs: Pi2SeHs
  ): ( ( Set[ClauseWithIndexLists], Option[Formula] ) ) = {
    var allowedClausesWithIndexListsMutable = scala.collection.mutable.Set[ClauseWithIndexLists]()
    val literalsWithIndexListsMutable = scala.collection.mutable.Set( literalsWithIndexLists.toList: _* )
    for ( literalWithIndexLists <- literalsWithIndexLists ) {
      val clause = new ClauseWithIndexLists( List( literalWithIndexLists ) )
      val ( clauseIsUnnecessary, listOfUnnecessaryClauses ) = checkNecessityOfNewAndOldClause( clause, allowedClausesWithIndexListsMutable.toList )
      if ( !clauseIsUnnecessary ) {
        allowedClausesWithIndexListsMutable += clause
        if ( !clause.isAllowedAtLeastAsSubformula && !clause.isAllowed ) {
          literalsWithIndexListsMutable -= literalWithIndexLists
        }
        for ( unnecessaryClause <- listOfUnnecessaryClauses ) {
          allowedClausesWithIndexListsMutable -= unnecessaryClause
        }
      }
    }
    // Continue recursively with clauses built from two or more literals.
    val ( mutable, optionSolution ) = checkAndBuildAllowedClauses(
      literalsWithIndexListsMutable,
      allowedClausesWithIndexListsMutable,
      seHs,
      2
    )
    ( mutable.toSet, optionSolution )
  }
  // Grows clauses literal-subset by literal-subset; returns early with a formula
  // as soon as a single clause already constitutes a solution.
  private def checkAndBuildAllowedClauses(
    literalsWithIndexLists: scala.collection.mutable.Set[LiteralWithIndexLists],
    allowedClausesWithIndexLists: scala.collection.mutable.Set[ClauseWithIndexLists],
    seHs: Pi2SeHs,
    subsetSize: Int
  ): ( ( scala.collection.mutable.Set[ClauseWithIndexLists], Option[Formula] ) ) = {
    for ( subset <- literalsWithIndexLists.subsets( subsetSize ) ) {
      val clauseWithIndexLists = new ClauseWithIndexLists( subset.toList )
      if ( clauseWithIndexLists.isAllowed ) {
        val ( clauseIsUnnecessary, listOfUnnecessaryClauses ) = checkNecessityOfNewAndOldClause( clauseWithIndexLists, allowedClausesWithIndexLists.toList )
        if ( !clauseIsUnnecessary ) {
          allowedClausesWithIndexLists += clauseWithIndexLists
          val clausesWithIndexLists = new ClausesWithIndexLists( List( clauseWithIndexLists ) )
          if ( clausesWithIndexLists.isSolution ) {
            return ( allowedClausesWithIndexLists, Option( clausesWithIndexLists.formula ) )
          }
          for ( unnecessaryClause <- listOfUnnecessaryClauses ) {
            allowedClausesWithIndexLists -= unnecessaryClause
          }
        }
      } else if ( !clauseWithIndexLists.isAllowedAtLeastAsSubformula ) {
        // No superset of this subset can be allowed either: drop its literals.
        for ( literal <- subset ) {
          literalsWithIndexLists -= literal
        }
      }
    }
    if ( literalsWithIndexLists.size > subsetSize ) {
      checkAndBuildAllowedClauses(
        literalsWithIndexLists,
        allowedClausesWithIndexLists,
        seHs,
        subsetSize + 1
      )
    } else {
      ( allowedClausesWithIndexLists, None )
    }
  }
  // For every unified literal, records per leaf which alpha-instantiations (P)
  // and which beta-instantiations (M) make the leaf true; may detect a
  // single-literal solution on the way.
  private def computeTheIndexListsForTheLiterals(
    unifiedLiterals: Set[Formula],
    nonTautologicalLeaves: List[Sequent[Formula]],
    seHs: Pi2SeHs,
    y: Var,
    x: Var
  ): ( ( Set[LiteralWithIndexLists], Option[Formula] ) ) = {
    val literalWithIndexListsSet = scala.collection.mutable.Set[LiteralWithIndexLists]()
    for ( literal <- unifiedLiterals ) {
      var foundEmptyMOrPList: Boolean = false
      var foundNonEmptyPList: Boolean = false
      var foundEmptyMList: Boolean = false
      var leafOfIndexList: List[LeafIndex] = Nil
      val substitutedLiteralAsSequentListAlpha = for ( existsIndex <- 0 until seHs.multiplicityOfBeta )
        yield existsIndex -> ( Substitution( ( x, seHs.universalEigenvariable ), ( y, seHs.substitutionsForBetaWithAlpha( existsIndex ) ) )( literal ) +: Sequent() )
      // NOTE(review): substitutedLiteralAsSequentListBeta is computed but never used below.
      val substitutedLiteralAsSequentListBeta = for ( forallIndex <- 0 until seHs.multiplicityOfAlpha )
        yield forallIndex -> ( Neg( Substitution( ( x, seHs.substitutionsForAlpha( forallIndex ) ), ( y, seHs.existentialEigenvariables( forallIndex ) ) )( literal ) ) +: Sequent() )
      for ( leaf <- nonTautologicalLeaves ) {
        //var leafIndexP = Set[Int]()
        var leafIndexM = Set[Int]()
        /*
        for ( existsIndex <- 0 until seHs.multiplicityOfBeta ) {
          val subs = Substitution( ( x, seHs.universalEigenvariable ), ( y, seHs.substitutionsForBetaWithAlpha( existsIndex ) ) )
          val subsetSequent: Sequent[Formula] = subs( literal ).asInstanceOf[Formula] +: Sequent()
          if ( subsetSequent.isSubsetOf( leaf ) ) {
            leafIndexP += existsIndex
          }
        }
        */
        // -1 marks "does not validate the leaf"; filtered out afterwards.
        val leafIndexP: Set[Int] = substitutedLiteralAsSequentListAlpha.map( subsetSequent => {
          val ( index, sequent ) = subsetSequent
          if ( sequent.isSubsetOf( leaf ) ) {
            index
          } else {
            -1
          }
        } ).toSet.filter( i => i != -1 )
        for ( forallIndex <- 0 until seHs.multiplicityOfAlpha ) {
          val subs: Substitution = Substitution( ( x, seHs.substitutionsForAlpha( forallIndex ) ), ( y, seHs.existentialEigenvariables( forallIndex ) ) )
          val subsetSequent: Sequent[Formula] = Neg( subs( literal ) ) +: Sequent()
          if ( !leaf.intersect( subsetSequent ).isEmpty ) {
            leafIndexM += forallIndex
          }
        }
        if ( leafIndexM.isEmpty ) {
          foundEmptyMList = true
          foundEmptyMOrPList = true
        } else if ( leafIndexP.isEmpty ) {
          foundEmptyMOrPList = true
        }
        if ( leafIndexP.nonEmpty ) {
          foundNonEmptyPList = true
        }
        val leafIndex = new LeafIndex( leafIndexM, leafIndexP )
        leafOfIndexList = leafOfIndexList :+ leafIndex
      }
      val literalWithIndexLists = new LiteralWithIndexLists(
        literal,
        leafOfIndexList,
        nonTautologicalLeaves.length,
        foundNonEmptyPList,
        foundEmptyMList
      )
      if ( foundNonEmptyPList ) {
        literalWithIndexListsSet += literalWithIndexLists
        if ( !foundEmptyMOrPList ) {
          // Every leaf has non-empty M and P lists: the literal alone might already solve it.
          val clauseWithIndexLists = new ClauseWithIndexLists( List( literalWithIndexLists ) )
          val clausesWithIndexLists = new ClausesWithIndexLists( List( clauseWithIndexLists ) )
          if ( clausesWithIndexLists.isSolution ) {
            return ( literalWithIndexListsSet.toSet, Option( clausesWithIndexLists.formula ) )
          }
        }
      }
    }
    ( literalWithIndexListsSet.toSet, None )
  }
  // Subsumption test between the new clause and all previously kept clauses,
  // comparing their per-leaf index lists componentwise: returns whether the
  // new clause is redundant, plus the old clauses it makes redundant.
  private def checkNecessityOfNewAndOldClause(
    newClause: ClauseWithIndexLists,
    oldClauses: List[ClauseWithIndexLists]
  ): ( Boolean, List[ClauseWithIndexLists] ) = {
    if ( oldClauses == Nil ) {
      ( false, Nil )
    } else {
      val clauseIsNotSubsetOfI = new Array[Boolean]( oldClauses.length )
      val iIsNotSubsetOfClause = new Array[Boolean]( oldClauses.length )
      for ( leafNumber <- 0 until newClause.numberOfDNTAs ) {
        for (
          oldClause <- oldClauses; if !clauseIsNotSubsetOfI( oldClauses.indexOf( oldClause ) ) ||
            !iIsNotSubsetOfClause( oldClauses.indexOf( oldClause ) )
        ) {
          if ( !clauseIsNotSubsetOfI( oldClauses.indexOf( oldClause ) ) ) {
            if ( !newClause.leafIndexListClause( leafNumber ).oneToMList.subsetOf( oldClause.leafIndexListClause( leafNumber ).oneToMList ) ||
              !newClause.leafIndexListClause( leafNumber ).oneToPList.subsetOf( oldClause.leafIndexListClause( leafNumber ).oneToPList ) ) {
              clauseIsNotSubsetOfI( oldClauses.indexOf( oldClause ) ) = true
            }
          }
          if ( !iIsNotSubsetOfClause( oldClauses.indexOf( oldClause ) ) ) {
            if ( !oldClause.leafIndexListClause( leafNumber ).oneToMList.subsetOf( newClause.leafIndexListClause( leafNumber ).oneToMList ) ||
              !oldClause.leafIndexListClause( leafNumber ).oneToPList.subsetOf( newClause.leafIndexListClause( leafNumber ).oneToPList ) ) {
              iIsNotSubsetOfClause( oldClauses.indexOf( oldClause ) ) = true
            }
          }
        }
      }
      var clauseIsUnnecessary: Boolean = false
      var listOfUnnecessaryClauses: List[ClauseWithIndexLists] = Nil
      for ( i <- 0 until oldClauses.length; if !clauseIsUnnecessary ) {
        if ( !clauseIsNotSubsetOfI( i ) ) {
          clauseIsUnnecessary = true
        } else if ( !iIsNotSubsetOfClause( i ) ) {
          listOfUnnecessaryClauses = listOfUnnecessaryClauses :+ oldClauses( i )
        }
      }
      ( clauseIsUnnecessary, listOfUnnecessaryClauses )
    }
  }
}
package org.dohrm.toolkit.context
import com.typesafe.config.Config
/**
 * Mixin that supplies the application's Typesafe configuration implicitly.
 *
 * @author michaeldohr
 * @since 14/05/16
 */
trait ConfigContext {

  /** Configuration instance; implicit so dependent components can summon it. */
  implicit val config: Config
}
| dohr-michael/storyline | src/main/scala/org/dohrm/toolkit/context/ConfigContext.scala | Scala | mit | 178 |
/*
Copyright 2015 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.serialization
import java.io._
object JavaStreamEnrichments {
  /** Uniform "unexpected end of stream" failure used by the readers below. */
  def eof: Nothing = throw new EOFException()
  // We use this to avoid allocating a closure to make
  // a lazy parameter to require: call sites pay for building the message
  // string only on the failure path.
  private def illegal(s: String): Nothing =
    throw new IllegalArgumentException(s)
/**
* Note this is only recommended for testing.
* You may want to use ByteArrayInputOutputStream for performance critical concerns
*/
implicit class RichByteArrayOutputStream(val baos: ByteArrayOutputStream) extends AnyVal {
def toInputStream: ByteArrayInputStream = new ByteArrayInputStream(baos.toByteArray)
}
/**
* enrichment to treat an Array like an OutputStream
*/
implicit class RichByteArray(val bytes: Array[Byte]) extends AnyVal {
def wrapAsOutputStream: ArrayWrappingOutputStream = wrapAsOutputStreamAt(0)
def wrapAsOutputStreamAt(pos: Int): ArrayWrappingOutputStream =
new ArrayWrappingOutputStream(bytes, pos)
}
/**
* Wraps an Array so that you can write into it as a stream without reallocations
* or copying at the end. Useful if you know an upper bound on the number of bytes
* you will write
*/
class ArrayWrappingOutputStream(val buffer: Array[Byte], initPos: Int) extends OutputStream {
if (buffer.length < initPos) {
illegal(s"Initial position cannot be more than length: $initPos > ${buffer.length}")
}
private[this] var pos = initPos
def position: Int = pos
override def write(b: Int) { buffer(pos) = b.toByte; pos += 1 }
override def write(b: Array[Byte], off: Int, len: Int) {
Array.copy(b, off, buffer, pos, len)
pos += len
}
}
def posVarIntSize(i: Int): Int = {
if (i < 0) illegal(s"negative numbers not allowed: $i")
if (i < ((1 << 8) - 1)) 1
else {
if (i < ((1 << 16) - 1)) {
3
} else {
7
}
}
}
/**
* This has a lot of methods from DataInputStream without
* having to allocate to get them
* This code is similar to those algorithms
*/
implicit class RichInputStream(val s: InputStream) extends AnyVal {
/**
* If s supports marking, we mark it. Otherwise we read the needed
* bytes out into a ByteArrayStream and return that.
* This is intended for the case where you need possibly
* read size bytes but may stop early, then skip this exact
* number of bytes.
* Intended use is:
* {code}
* val size = 100
* val marked = s.markOrBuffer(size)
* val y = fn(marked)
* marked.reset
* marked.skipFully(size)
* {/code}
*/
def markOrBuffer(size: Int): InputStream = {
val ms = if (s.markSupported) s else {
val buf = new Array[Byte](size)
s.readFully(buf)
new ByteArrayInputStream(buf)
}
// Make sure we can reset after we read this many bytes
ms.mark(size)
ms
}
def readBoolean: Boolean = (readUnsignedByte != 0)
/**
* Like read, but throws eof on error
*/
def readByte: Byte = readUnsignedByte.toByte
def readUnsignedByte: Int = {
// Note that Java, when you read a byte, returns a Int holding an unsigned byte.
// if the value is < 0, you hit EOF.
val c1 = s.read
if (c1 < 0) eof else c1
}
def readUnsignedShort: Int = {
val c1 = s.read
val c2 = s.read
if ((c1 | c2) < 0) eof else ((c1 << 8) | c2)
}
final def readFully(bytes: Array[Byte]): Unit = readFully(bytes, 0, bytes.length)
final def readFully(bytes: Array[Byte], offset: Int, len: Int): Unit = {
if (len < 0) throw new IndexOutOfBoundsException()
@annotation.tailrec
def go(o: Int, l: Int): Unit =
if (l == 0) ()
else {
val count = s.read(bytes, o, l)
if (count < 0) eof
else go(o + count, l - count)
}
go(offset, len)
}
def readDouble: Double = java.lang.Double.longBitsToDouble(readLong)
def readFloat: Float = java.lang.Float.intBitsToFloat(readInt)
/**
* This is the algorithm from DataInputStream
* it was also benchmarked against the approach
* used in readLong and found to be faster
*/
def readInt: Int = {
val c1 = s.read
val c2 = s.read
val c3 = s.read
val c4 = s.read
if ((c1 | c2 | c3 | c4) < 0) eof else ((c1 << 24) | (c2 << 16) | (c3 << 8) | c4)
}
/*
* This is the algorithm from DataInputStream
* it was also benchmarked against the same approach used
* in readInt (buffer-less) and found to be faster.
*/
def readLong: Long = {
val buf = new Array[Byte](8)
readFully(buf)
(buf(0).toLong << 56) +
((buf(1) & 255).toLong << 48) +
((buf(2) & 255).toLong << 40) +
((buf(3) & 255).toLong << 32) +
((buf(4) & 255).toLong << 24) +
((buf(5) & 255) << 16) +
((buf(6) & 255) << 8) +
(buf(7) & 255)
}
/** Reads two big-endian bytes as a UTF-16 code unit. */
def readChar: Char = {
  val hi = s.read
  val lo = s.read
  // A negative OR means one of the reads returned -1 (EOF).
  if ((hi | lo) < 0) eof else ((hi << 8) | lo).toChar
}
/** Reads two big-endian bytes as a signed 16-bit value. */
def readShort: Short = {
  val hi = s.read
  val lo = s.read
  // A negative OR means one of the reads returned -1 (EOF).
  if ((hi | lo) < 0) eof else ((hi << 8) | lo).toShort
}
/**
 * Reads the non-negative varInt encoding produced by `writePosVarInt`:
 *  - 1 byte for values 0 to 254,
 *  - 3 bytes for values 255 to 65534,
 *  - 7 bytes for larger values up to Int.MaxValue.
 * An all-ones slot (0xff / 0xffff) is the escape marker meaning the value
 * is stored in the next wider encoding.
 */
final def readPosVarInt: Int = {
  val first = readUnsignedByte
  if (first != 0xff) first
  else {
    val next = readUnsignedShort
    if (next != 0xffff) next
    else readInt
  }
}
/**
 * Skips exactly `count` bytes, retrying short skips. Throws an IOException
 * if the underlying stream stops making progress before `count` is reached.
 */
final def skipFully(count: Long): Unit = {
  @annotation.tailrec
  def loop(remaining: Long): Unit = {
    val skipped = s.skip(remaining)
    if (skipped != remaining) {
      if (skipped == 0L)
        throw new IOException(s"could not skipFully: count, c, skipped = ${(count, remaining, skipped)}")
      else loop(remaining - skipped)
    }
  }
  if (count != 0L) loop(count)
}
}
implicit class RichOutputStream(val s: OutputStream) extends AnyVal {
  // A boolean is encoded as a single byte: 1 for true, 0 for false.
  def writeBoolean(b: Boolean): Unit = if (b) s.write(1: Byte) else s.write(0: Byte)
  def writeBytes(b: Array[Byte], off: Int, len: Int): Unit = {
    s.write(b, off, len)
  }
  def writeByte(b: Byte): Unit = s.write(b)
  // Writes the whole array.
  def writeBytes(b: Array[Byte]): Unit = writeBytes(b, 0, b.length)
  /**
   * This writes a varInt encoding that only encodes non-negative
   * numbers. It uses:
   * 1 byte for values 0 - 254,
   * 3 bytes for 255 - 65534,
   * 7 bytes for 65535 - Int.MaxValue
   * (an all-ones slot is the escape marker for the next wider encoding,
   * which is why 255 and 65535 cannot be stored in the short forms)
   */
  def writePosVarInt(i: Int): Unit = {
    if (i < 0) illegal(s"must be non-negative: ${i}")
    if (i < ((1 << 8) - 1)) s.write(i)
    else {
      // 0xff marker: the value did not fit in a single byte.
      s.write(-1: Byte)
      if (i < ((1 << 16) - 1)) {
        s.write(i >> 8)
        s.write(i)
      } else {
        // 0xffff marker: escape again and emit a full 4-byte int.
        s.write(-1)
        s.write(-1)
        writeInt(i)
      }
    }
  }
  // Doubles/floats are written as the big-endian bytes of their IEEE-754 bit patterns.
  def writeDouble(d: Double): Unit = writeLong(java.lang.Double.doubleToLongBits(d))
  def writeFloat(f: Float): Unit = writeInt(java.lang.Float.floatToIntBits(f))
  // Writes the 8 bytes of `l`, most significant first (big-endian).
  def writeLong(l: Long): Unit = {
    s.write((l >>> 56).toInt)
    s.write((l >>> 48).toInt)
    s.write((l >>> 40).toInt)
    s.write((l >>> 32).toInt)
    s.write((l >>> 24).toInt)
    s.write((l >>> 16).toInt)
    s.write((l >>> 8).toInt)
    s.write(l.toInt)
  }
  // Writes the 4 bytes of `i`, most significant first (big-endian).
  def writeInt(i: Int): Unit = {
    s.write(i >>> 24)
    s.write(i >>> 16)
    s.write(i >>> 8)
    s.write(i)
  }
  // Writes a UTF-16 code unit as two big-endian bytes.
  def writeChar(sh: Char): Unit = {
    s.write(sh >>> 8)
    s.write(sh.toInt)
  }
  // Writes a short as two big-endian bytes.
  def writeShort(sh: Short): Unit = {
    s.write(sh >>> 8)
    s.write(sh.toInt)
  }
}
}
| sriramkrishnan/scalding | scalding-serialization/src/main/scala/com/twitter/scalding/serialization/JavaStreamEnrichments.scala | Scala | apache-2.0 | 8,467 |
package quizleague.web.site.chat
import quizleague.util.collection._
import quizleague.web.core.{@@, Module}
import quizleague.web.model._
import quizleague.web.service.chat.{ChatGetService, ChatMessageGetService, ChatMessagePutService, ChatPutService}
import quizleague.web.site.user.SiteUserService
import rxscalajs.Observable
import scala.scalajs.js
object ChatModule extends Module {
  // Components this module registers with the application: the chat pane,
  // the login button, and the "hot chats" list.
  override val components = @@(ChatComponent, LoginButton, HotChats)
}
object ChatMessageService extends ChatMessageGetService with ChatMessagePutService {
  val userService = SiteUserService
  val chatService = ChatService
  // Messages of a single chat, sorted by date using the Desc ordering
  // (presumably newest first — confirm against the Desc definition).
  def listMessages(chatKey: Key): Observable[js.Array[ChatMessage]] =
    list(chatKey).map(_.sortBy(_.date)(Desc))
  // The five most recent messages across all chats: a collection-group query
  // over every `typeName` subcollection, ordered by date descending.
  def hotChats() = {
    query(db.collectionGroup(typeName).orderBy("date", "desc").limit(5))
  }
}
object ChatService extends ChatGetService with ChatPutService {
  val chatMessageService = ChatMessageService

  /** Creates a new chat under the given parent and returns its key once saved. */
  def add(parentKey: Key, name: String): Observable[Key] = {
    val newChat = instance(parentKey, name)
    save(newChat).map(_ => newChat.key)
  }
}
| gumdrop/quizleague-maintain | js/src/main/scala/quizleague/web/site/chat/ChatModule.scala | Scala | mit | 1,090 |
package im.tox.antox.wrapper
import im.tox.tox4j.core.enums.ToxUserStatus
/**
 * Conversions between the UI's status strings ("online" / "away" / "busy")
 * and the tox4j [[ToxUserStatus]] enum.
 */
object UserStatus {

  /**
   * Maps a status string to the tox4j enum.
   * Unknown strings fall back to `NONE` (online), preserving the
   * original behavior.
   */
  def getToxUserStatusFromString(status: String): ToxUserStatus =
    status match {
      case "online" => ToxUserStatus.NONE
      case "away"   => ToxUserStatus.AWAY
      case "busy"   => ToxUserStatus.BUSY
      case _        => ToxUserStatus.NONE
    }

  /**
   * Inverse mapping; any enum value other than NONE/AWAY/BUSY yields
   * "invalid", as before.
   */
  def getStringFromToxUserStatus(status: ToxUserStatus): String =
    status match {
      case ToxUserStatus.NONE => "online"
      case ToxUserStatus.AWAY => "away"
      case ToxUserStatus.BUSY => "busy"
      case _                  => "invalid"
    }
}
| mGhassen/Antox | app/src/main/scala/im/tox/antox/wrapper/UserStatus.scala | Scala | gpl-3.0 | 598 |
package com.socrata.tileserver.handlers
/** The file type that this object handles. */
trait FileType {
  // File extension identifying this type (presumably without the leading
  // dot, e.g. "png" — confirm at the implementations).
  def extension: String
}
| socrata-platform/tileserver | src/main/scala/com.socrata.tileserver/handlers/FileType.scala | Scala | apache-2.0 | 131 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.{Locale, Properties}
import scala.collection.JavaConverters._
import com.fasterxml.jackson.databind.ObjectMapper
import com.univocity.parsers.csv.CsvParser
import org.apache.spark.Partition
import org.apache.spark.annotation.InterfaceStability
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.json.{CreateJacksonParser, JacksonParser, JSONOptions}
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.execution.datasources.{DataSource, FailureSafeParser}
import org.apache.spark.sql.execution.datasources.csv._
import org.apache.spark.sql.execution.datasources.jdbc._
import org.apache.spark.sql.execution.datasources.json.TextInputJsonDataSource
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Utils
import org.apache.spark.sql.sources.v2.{DataSourceOptions, DataSourceV2, ReadSupport}
import org.apache.spark.sql.types.{StringType, StructType}
import org.apache.spark.unsafe.types.UTF8String
/**
* Interface used to load a [[Dataset]] from external storage systems (e.g. file systems,
* key-value stores, etc). Use `SparkSession.read` to access this.
*
* @since 1.4.0
*/
@InterfaceStability.Stable
class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
/**
* Specifies the input data source format.
*
* @since 1.4.0
*/
def format(source: String): DataFrameReader = {
  this.source = source
  this // builder pattern: return this reader for call chaining
}
/**
* Specifies the input schema. Some data sources (e.g. JSON) can infer the input schema
* automatically from data. By specifying the schema here, the underlying data source can
* skip the schema inference step, and thus speed up data loading.
*
* @since 1.4.0
*/
def schema(schema: StructType): DataFrameReader = {
  // Option(...) rather than Some(...) so a null schema becomes None.
  this.userSpecifiedSchema = Option(schema)
  this
}
/**
* Specifies the schema by using the input DDL-formatted string. Some data sources (e.g. JSON) can
* infer the input schema automatically from data. By specifying the schema here, the underlying
* data source can skip the schema inference step, and thus speed up data loading.
*
* {{{
* spark.read.schema("a INT, b STRING, c DOUBLE").csv("test.csv")
* }}}
*
* @since 2.3.0
*/
def schema(schemaString: String): DataFrameReader = {
  // Parse the DDL eagerly so a malformed schema string fails immediately.
  val parsed = StructType.fromDDL(schemaString)
  this.userSpecifiedSchema = Option(parsed)
  this
}
/**
* Adds an input option for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to parse timestamps in the JSON/CSV datasources or partition values.</li>
* </ul>
*
* @since 1.4.0
*/
def option(key: String, value: String): DataFrameReader = {
  // Accumulate into the reader's option map; returns this for chaining.
  this.extraOptions += (key -> value)
  this
}
/**
* Adds an input option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Boolean): DataFrameReader = option(key, value.toString)
/**
* Adds an input option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Long): DataFrameReader = option(key, value.toString)
/**
* Adds an input option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Double): DataFrameReader = option(key, value.toString)
/**
* (Scala-specific) Adds input options for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to parse timestamps in the JSON/CSV datasources or partition values.</li>
* </ul>
*
* @since 1.4.0
*/
def options(options: scala.collection.Map[String, String]): DataFrameReader = {
  // Bulk-add all entries to the reader's option map.
  this.extraOptions ++= options
  this
}
/**
* Adds input options for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to parse timestamps in the JSON/CSV datasources or partition values.</li>
* </ul>
*
* @since 1.4.0
*/
def options(options: java.util.Map[String, String]): DataFrameReader = {
  // Convert to a Scala map and reuse the Scala overload, which already
  // returns this reader.
  this.options(options.asScala)
}
/**
* Loads input in as a `DataFrame`, for data sources that don't require a path (e.g. external
* key-value stores).
*
* @since 1.4.0
*/
def load(): DataFrame = {
  // For path-less sources (e.g. key-value stores).
  load(Seq.empty: _*) // force invocation of `load(...varargs...)`
}
/**
* Loads input in as a `DataFrame`, for data sources that require a path (e.g. data backed by
* a local or distributed file system).
*
* @since 1.4.0
*/
def load(path: String): DataFrame = {
  // The single path is carried via the option map rather than the varargs list.
  // force invocation of `load(...varargs...)`
  option(DataSourceOptions.PATH_KEY, path).load(Seq.empty: _*)
}
/**
* Loads input in as a `DataFrame`, for data sources that support multiple paths.
* Only works if the source is a HadoopFsRelationProvider.
*
* @since 1.6.0
*/
@scala.annotation.varargs
def load(paths: String*): DataFrame = {
  // Hive tables must be read through the catalog, never as raw files.
  if (source.toLowerCase(Locale.ROOT) == DDLUtils.HIVE_PROVIDER) {
    throw new AnalysisException("Hive data source can only be used with tables, you can not " +
      "read files of Hive data source directly.")
  }
  val cls = DataSource.lookupDataSource(source, sparkSession.sessionState.conf)
  if (classOf[DataSourceV2].isAssignableFrom(cls)) {
    val ds = cls.newInstance().asInstanceOf[DataSourceV2]
    if (ds.isInstanceOf[ReadSupport]) {
      // Source-specific session configs. NOTE(review): because of
      // `extraOptions.toMap ++ sessionOptions`, a session config wins over an
      // explicitly set option on a key collision — confirm this is intended.
      val sessionOptions = DataSourceV2Utils.extractSessionConfigs(
        ds = ds, conf = sparkSession.sessionState.conf)
      // The (possibly many) paths are passed as one JSON-encoded array option.
      val pathsOption = {
        val objectMapper = new ObjectMapper()
        DataSourceOptions.PATHS_KEY -> objectMapper.writeValueAsString(paths.toArray)
      }
      Dataset.ofRows(sparkSession, DataSourceV2Relation.create(
        ds, extraOptions.toMap ++ sessionOptions + pathsOption,
        userSpecifiedSchema = userSpecifiedSchema))
    } else {
      // A V2 class without read support: fall back to the V1 code path.
      loadV1Source(paths: _*)
    }
  } else {
    loadV1Source(paths: _*)
  }
}
// Resolves and loads the relation through the original (V1) DataSource API.
private def loadV1Source(paths: String*) = {
  // Code path for data source v1.
  sparkSession.baseRelationToDataFrame(
    DataSource.apply(
      sparkSession,
      paths = paths,
      userSpecifiedSchema = userSpecifiedSchema,
      className = source,
      options = extraOptions.toMap).resolveRelation())
}
/**
* Construct a `DataFrame` representing the database table accessible via JDBC URL
* url named table and connection properties.
*
* @since 1.4.0
*/
def jdbc(url: String, table: String, properties: Properties): DataFrame = {
  // JDBC infers its schema from the database; a user schema is not allowed.
  assertNoSpecifiedSchema("jdbc")
  // properties should override settings in extraOptions.
  this.extraOptions ++= properties.asScala
  // explicit url and dbtable should override all
  this.extraOptions += (JDBCOptions.JDBC_URL -> url, JDBCOptions.JDBC_TABLE_NAME -> table)
  format("jdbc").load()
}
/**
* Construct a `DataFrame` representing the database table accessible via JDBC URL
* url named table. Partitions of the table will be retrieved in parallel based on the parameters
* passed to this function.
*
* Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash
* your external database systems.
*
* @param url JDBC database url of the form `jdbc:subprotocol:subname`.
* @param table Name of the table in the external database.
* @param columnName the name of a column of integral type that will be used for partitioning.
* @param lowerBound the minimum value of `columnName` used to decide partition stride.
* @param upperBound the maximum value of `columnName` used to decide partition stride.
* @param numPartitions the number of partitions. This, along with `lowerBound` (inclusive),
* `upperBound` (exclusive), form partition strides for generated WHERE
* clause expressions used to split the column `columnName` evenly. When
* the input is less than 1, the number is set to 1.
* @param connectionProperties JDBC database connection arguments, a list of arbitrary string
* tag/value. Normally at least a "user" and "password" property
* should be included. "fetchsize" can be used to control the
* number of rows per fetch and "queryTimeout" can be used to wait
* for a Statement object to execute to the given number of seconds.
* @since 1.4.0
*/
def jdbc(
    url: String,
    table: String,
    columnName: String,
    lowerBound: Long,
    upperBound: Long,
    numPartitions: Int,
    connectionProperties: Properties): DataFrame = {
  // The partitioning parameters take precedence over anything previously
  // set via option(); the actual load is delegated to the Properties overload.
  this.extraOptions += (JDBCOptions.JDBC_PARTITION_COLUMN -> columnName)
  this.extraOptions += (JDBCOptions.JDBC_LOWER_BOUND -> lowerBound.toString)
  this.extraOptions += (JDBCOptions.JDBC_UPPER_BOUND -> upperBound.toString)
  this.extraOptions += (JDBCOptions.JDBC_NUM_PARTITIONS -> numPartitions.toString)
  jdbc(url, table, connectionProperties)
}
/**
* Construct a `DataFrame` representing the database table accessible via JDBC URL
* url named table using connection properties. The `predicates` parameter gives a list
* expressions suitable for inclusion in WHERE clauses; each one defines one partition
* of the `DataFrame`.
*
* Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash
* your external database systems.
*
* @param url JDBC database url of the form `jdbc:subprotocol:subname`
* @param table Name of the table in the external database.
* @param predicates Condition in the where clause for each partition.
* @param connectionProperties JDBC database connection arguments, a list of arbitrary string
* tag/value. Normally at least a "user" and "password" property
* should be included. "fetchsize" can be used to control the
* number of rows per fetch.
* @since 1.4.0
*/
def jdbc(
    url: String,
    table: String,
    predicates: Array[String],
    connectionProperties: Properties): DataFrame = {
  // JDBC infers its schema from the database; a user schema is not allowed.
  assertNoSpecifiedSchema("jdbc")
  // connectionProperties should override settings in extraOptions.
  val params = extraOptions.toMap ++ connectionProperties.asScala.toMap
  val options = new JDBCOptions(url, table, params)
  // One JDBCPartition per predicate: each WHERE-clause fragment defines a partition.
  val parts: Array[Partition] = predicates.zipWithIndex.map { case (part, i) =>
    JDBCPartition(part, i) : Partition
  }
  val relation = JDBCRelation(parts, options)(sparkSession)
  sparkSession.baseRelationToDataFrame(relation)
}
/**
* Loads a JSON file and returns the results as a `DataFrame`.
*
* See the documentation on the overloaded `json()` method with varargs for more details.
*
* @since 1.4.0
*/
def json(path: String): DataFrame = {
  // This method ensures that calls that explicit need single argument works, see SPARK-16009
  json(Seq(path): _*)
}
/**
* Loads JSON files and returns the results as a `DataFrame`.
*
* <a href="http://jsonlines.org/">JSON Lines</a> (newline-delimited JSON) is supported by
* default. For JSON (one record per file), set the `multiLine` option to true.
*
* This function goes through the input once to determine the input schema. If you know the
* schema in advance, use the version that specifies the schema to avoid the extra scan.
*
* You can set the following JSON-specific options to deal with non-standard JSON files:
* <ul>
* <li>`primitivesAsString` (default `false`): infers all primitive values as a string type</li>
* <li>`prefersDecimal` (default `false`): infers all floating-point values as a decimal
* type. If the values do not fit in decimal, then it infers them as doubles.</li>
* <li>`allowComments` (default `false`): ignores Java/C++ style comment in JSON records</li>
* <li>`allowUnquotedFieldNames` (default `false`): allows unquoted JSON field names</li>
* <li>`allowSingleQuotes` (default `true`): allows single quotes in addition to double quotes
* </li>
* <li>`allowNumericLeadingZeros` (default `false`): allows leading zeros in numbers
* (e.g. 00012)</li>
* <li>`allowBackslashEscapingAnyCharacter` (default `false`): allows accepting quoting of all
* character using backslash quoting mechanism</li>
* <li>`allowUnquotedControlChars` (default `false`): allows JSON Strings to contain unquoted
* control characters (ASCII characters with value less than 32, including tab and line feed
* characters) or not.</li>
* <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
* during parsing.
* <ul>
* <li>`PERMISSIVE` : when it meets a corrupted record, puts the malformed string into a
* field configured by `columnNameOfCorruptRecord`, and sets other fields to `null`. To
* keep corrupt records, an user can set a string type field named
* `columnNameOfCorruptRecord` in an user-defined schema. If a schema does not have the
* field, it drops corrupt records during parsing. When inferring a schema, it implicitly
* adds a `columnNameOfCorruptRecord` field in an output schema.</li>
* <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
* <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
* </ul>
* </li>
* <li>`columnNameOfCorruptRecord` (default is the value specified in
* `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string
* created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.</li>
* <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
* Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to
* date type.</li>
* <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSXXX`): sets the string that
* indicates a timestamp format. Custom date formats follow the formats at
* `java.text.SimpleDateFormat`. This applies to timestamp type.</li>
* <li>`multiLine` (default `false`): parse one record, which may span multiple lines,
* per file</li>
* <li>`encoding` (by default it is not set): allows to forcibly set one of standard basic
* or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If the encoding
* is not specified and `multiLine` is set to `true`, it will be detected automatically.</li>
* <li>`lineSep` (default covers all `\r`, `\r\n` and `\n`): defines the line separator
* that should be used for parsing.</li>
* <li>`samplingRatio` (default is 1.0): defines fraction of input JSON objects used
* for schema inferring.</li>
* <li>`dropFieldIfAllNull` (default `false`): whether to ignore column of all null values or
* empty array/struct during schema inference.</li>
* </ul>
*
* @since 2.0.0
*/
@scala.annotation.varargs
// Delegates to the generic loader with the "json" format.
def json(paths: String*): DataFrame = format("json").load(paths : _*)
/**
* Loads a `JavaRDD[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON
* Lines text format or newline-delimited JSON</a>) and returns the result as
* a `DataFrame`.
*
* Unless the schema is specified using `schema` function, this function goes through the
* input once to determine the input schema.
*
* @param jsonRDD input RDD with one JSON object per record
* @since 1.4.0
*/
@deprecated("Use json(Dataset[String]) instead.", "2.2.0")
def json(jsonRDD: JavaRDD[String]): DataFrame = json(jsonRDD.rdd)
/**
* Loads an `RDD[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON Lines
* text format or newline-delimited JSON</a>) and returns the result as a `DataFrame`.
*
* Unless the schema is specified using `schema` function, this function goes through the
* input once to determine the input schema.
*
* @param jsonRDD input RDD with one JSON object per record
* @since 1.4.0
*/
@deprecated("Use json(Dataset[String]) instead.", "2.2.0")
def json(jsonRDD: RDD[String]): DataFrame = {
json(sparkSession.createDataset(jsonRDD)(Encoders.STRING))
}
/**
* Loads a `Dataset[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON Lines
* text format or newline-delimited JSON</a>) and returns the result as a `DataFrame`.
*
* Unless the schema is specified using `schema` function, this function goes through the
* input once to determine the input schema.
*
* @param jsonDataset input Dataset with one JSON object per record
* @since 2.2.0
*/
def json(jsonDataset: Dataset[String]): DataFrame = {
  val parsedOptions = new JSONOptions(
    extraOptions.toMap,
    sparkSession.sessionState.conf.sessionLocalTimeZone,
    sparkSession.sessionState.conf.columnNameOfCorruptRecord)
  // Infer the schema from the data unless the user supplied one.
  val schema = userSpecifiedSchema.getOrElse {
    TextInputJsonDataSource.inferFromDataset(jsonDataset, parsedOptions)
  }
  verifyColumnNameOfCorruptRecord(schema, parsedOptions.columnNameOfCorruptRecord)
  // The corrupt-record column is filled in by FailureSafeParser, so the raw
  // parser works against the schema without it.
  val actualSchema =
    StructType(schema.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord))
  val createParser = CreateJacksonParser.string _
  val parsed = jsonDataset.rdd.mapPartitions { iter =>
    // One parser per partition (constructed inside the task, not serialized).
    val rawParser = new JacksonParser(actualSchema, parsedOptions)
    val parser = new FailureSafeParser[String](
      input => rawParser.parse(input, createParser, UTF8String.fromString),
      parsedOptions.parseMode,
      schema,
      parsedOptions.columnNameOfCorruptRecord)
    iter.flatMap(parser.parse)
  }
  sparkSession.internalCreateDataFrame(parsed, schema, isStreaming = jsonDataset.isStreaming)
}
/**
* Loads a CSV file and returns the result as a `DataFrame`. See the documentation on the
* other overloaded `csv()` method for more details.
*
* @since 2.0.0
*/
def csv(path: String): DataFrame = {
  // This method ensures that calls that explicit need single argument works, see SPARK-16009
  csv(Seq(path): _*)
}
/**
* Loads an `Dataset[String]` storing CSV rows and returns the result as a `DataFrame`.
*
* If the schema is not specified using `schema` function and `inferSchema` option is enabled,
* this function goes through the input once to determine the input schema.
*
* If the schema is not specified using `schema` function and `inferSchema` option is disabled,
* it determines the columns as string types and it reads only the first line to determine the
* names and the number of fields.
*
* If the enforceSchema is set to `false`, only the CSV header in the first line is checked
* to conform specified or inferred schema.
*
* @param csvDataset input Dataset with one CSV row per record
* @since 2.2.0
*/
def csv(csvDataset: Dataset[String]): DataFrame = {
  val parsedOptions: CSVOptions = new CSVOptions(
    extraOptions.toMap,
    sparkSession.sessionState.conf.csvColumnPruning,
    sparkSession.sessionState.conf.sessionLocalTimeZone)
  // Drop comment lines and blank lines before any header/schema handling.
  val filteredLines: Dataset[String] =
    CSVUtils.filterCommentAndEmpty(csvDataset, parsedOptions)
  // First surviving line, if any: used for header checking and schema inference.
  val maybeFirstLine: Option[String] = filteredLines.take(1).headOption
  val schema = userSpecifiedSchema.getOrElse {
    TextInputCSVDataSource.inferFromDataset(
      sparkSession,
      csvDataset,
      maybeFirstLine,
      parsedOptions)
  }
  verifyColumnNameOfCorruptRecord(schema, parsedOptions.columnNameOfCorruptRecord)
  // The corrupt-record column is filled in by FailureSafeParser, so the raw
  // parser works against the schema without it.
  val actualSchema =
    StructType(schema.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord))
  // If there was a first line, validate it as a header (per enforceSchema)
  // and strip it from every partition; otherwise parse all lines.
  val linesWithoutHeader: RDD[String] = maybeFirstLine.map { firstLine =>
    CSVDataSource.checkHeader(
      firstLine,
      new CsvParser(parsedOptions.asParserSettings),
      actualSchema,
      csvDataset.getClass.getCanonicalName,
      parsedOptions.enforceSchema,
      sparkSession.sessionState.conf.caseSensitiveAnalysis)
    filteredLines.rdd.mapPartitions(CSVUtils.filterHeaderLine(_, firstLine, parsedOptions))
  }.getOrElse(filteredLines.rdd)
  val parsed = linesWithoutHeader.mapPartitions { iter =>
    // One parser per partition (constructed inside the task, not serialized).
    val rawParser = new UnivocityParser(actualSchema, parsedOptions)
    val parser = new FailureSafeParser[String](
      input => Seq(rawParser.parse(input)),
      parsedOptions.parseMode,
      schema,
      parsedOptions.columnNameOfCorruptRecord)
    iter.flatMap(parser.parse)
  }
  sparkSession.internalCreateDataFrame(parsed, schema, isStreaming = csvDataset.isStreaming)
}
/**
* Loads CSV files and returns the result as a `DataFrame`.
*
* This function will go through the input once to determine the input schema if `inferSchema`
* is enabled. To avoid going through the entire data once, disable `inferSchema` option or
* specify the schema explicitly using `schema`.
*
* You can set the following CSV-specific options to deal with CSV files:
* <ul>
* <li>`sep` (default `,`): sets a single character as a separator for each
* field and value.</li>
* <li>`encoding` (default `UTF-8`): decodes the CSV files by the given encoding
* type.</li>
* <li>`quote` (default `"`): sets a single character used for escaping quoted values where
* the separator can be part of the value. If you would like to turn off quotations, you need to
* set not `null` but an empty string. This behaviour is different from
* `com.databricks.spark.csv`.</li>
* <li>`escape` (default `\`): sets a single character used for escaping quotes inside
* an already quoted value.</li>
* <li>`charToEscapeQuoteEscaping` (default `escape` or `\0`): sets a single character used for
* escaping the escape for the quote character. The default value is escape character when escape
* and quote characters are different, `\0` otherwise.</li>
* <li>`comment` (default empty string): sets a single character used for skipping lines
* beginning with this character. By default, it is disabled.</li>
* <li>`header` (default `false`): uses the first line as names of columns.</li>
* <li>`enforceSchema` (default `true`): If it is set to `true`, the specified or inferred schema
* will be forcibly applied to datasource files, and headers in CSV files will be ignored.
* If the option is set to `false`, the schema will be validated against all headers in CSV files
* in the case when the `header` option is set to `true`. Field names in the schema
* and column names in CSV headers are checked by their positions taking into account
* `spark.sql.caseSensitive`. Though the default value is true, it is recommended to disable
* the `enforceSchema` option to avoid incorrect results.</li>
* <li>`inferSchema` (default `false`): infers the input schema automatically from data. It
* requires one extra pass over the data.</li>
* <li>`samplingRatio` (default is 1.0): defines fraction of rows used for schema inferring.</li>
* <li>`ignoreLeadingWhiteSpace` (default `false`): a flag indicating whether or not leading
* whitespaces from values being read should be skipped.</li>
* <li>`ignoreTrailingWhiteSpace` (default `false`): a flag indicating whether or not trailing
* whitespaces from values being read should be skipped.</li>
* <li>`nullValue` (default empty string): sets the string representation of a null value. Since
* 2.0.1, this applies to all supported types including the string type.</li>
* <li>`nanValue` (default `NaN`): sets the string representation of a non-number" value.</li>
* <li>`positiveInf` (default `Inf`): sets the string representation of a positive infinity
* value.</li>
* <li>`negativeInf` (default `-Inf`): sets the string representation of a negative infinity
* value.</li>
* <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
* Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to
* date type.</li>
* <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSXXX`): sets the string that
* indicates a timestamp format. Custom date formats follow the formats at
* `java.text.SimpleDateFormat`. This applies to timestamp type.</li>
* <li>`maxColumns` (default `20480`): defines a hard limit of how many columns
* a record can have.</li>
* <li>`maxCharsPerColumn` (default `-1`): defines the maximum number of characters allowed
* for any given value being read. By default, it is -1 meaning unlimited length</li>
* <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
* during parsing. It supports the following case-insensitive modes.
* <ul>
* <li>`PERMISSIVE` : when it meets a corrupted record, puts the malformed string into a
* field configured by `columnNameOfCorruptRecord`, and sets other fields to `null`. To keep
* corrupt records, an user can set a string type field named `columnNameOfCorruptRecord`
* in an user-defined schema. If a schema does not have the field, it drops corrupt records
* during parsing. A record with less/more tokens than schema is not a corrupted record to
* CSV. When it meets a record having fewer tokens than the length of the schema, sets
* `null` to extra fields. When the record has more tokens than the length of the schema,
* it drops extra tokens.</li>
* <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
* <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
* </ul>
* </li>
* <li>`columnNameOfCorruptRecord` (default is the value specified in
* `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string
* created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.</li>
* <li>`multiLine` (default `false`): parse one record, which may span multiple lines.</li>
* </ul>
*
* @since 2.0.0
*/
@scala.annotation.varargs
// Delegates to the generic loader with the "csv" format.
def csv(paths: String*): DataFrame = format("csv").load(paths : _*)
/**
* Loads a Parquet file, returning the result as a `DataFrame`. See the documentation
* on the other overloaded `parquet()` method for more details.
*
* @since 2.0.0
*/
def parquet(path: String): DataFrame = {
  // This method ensures that calls that explicit need single argument works, see SPARK-16009
  parquet(Seq(path): _*)
}
/**
* Loads a Parquet file, returning the result as a `DataFrame`.
*
* You can set the following Parquet-specific option(s) for reading Parquet files:
* <ul>
* <li>`mergeSchema` (default is the value specified in `spark.sql.parquet.mergeSchema`): sets
* whether we should merge schemas collected from all Parquet part-files. This will override
* `spark.sql.parquet.mergeSchema`.</li>
* </ul>
* @since 1.4.0
*/
@scala.annotation.varargs
// Delegates to the generic loader with the "parquet" format.
def parquet(paths: String*): DataFrame = format("parquet").load(paths: _*)
/**
* Loads an ORC file and returns the result as a `DataFrame`.
*
* @param path input path
* @since 1.5.0
* @note Currently, this method can only be used after enabling Hive support.
*/
def orc(path: String): DataFrame = {
  // This method ensures that calls that explicit need single argument works, see SPARK-16009
  orc(Seq(path): _*)
}
/**
* Loads ORC files and returns the result as a `DataFrame`.
*
* @param paths input paths
* @since 2.0.0
* @note Currently, this method can only be used after enabling Hive support.
*/
@scala.annotation.varargs
// Delegates to the generic loader with the "orc" format.
def orc(paths: String*): DataFrame = format("orc").load(paths: _*)
/**
* Returns the specified table as a `DataFrame`.
*
* @since 1.4.0
*/
def table(tableName: String): DataFrame = {
  // The table's schema comes from the catalog; a user schema is not allowed.
  assertNoSpecifiedSchema("table")
  sparkSession.table(tableName)
}
/**
* Loads text files and returns a `DataFrame` whose schema starts with a string column named
* "value", and followed by partitioned columns if there are any. See the documentation on
* the other overloaded `text()` method for more details.
*
* @since 2.0.0
*/
def text(path: String): DataFrame = {
  // This method ensures that calls that explicit need single argument works, see SPARK-16009
  text(Seq(path): _*)
}
/**
* Loads text files and returns a `DataFrame` whose schema starts with a string column named
* "value", and followed by partitioned columns if there are any.
*
* By default, each line in the text files is a new row in the resulting DataFrame. For example:
* {{{
* // Scala:
* spark.read.text("/path/to/spark/README.md")
*
* // Java:
* spark.read().text("/path/to/spark/README.md")
* }}}
*
* You can set the following text-specific option(s) for reading text files:
* <ul>
* <li>`wholetext` (default `false`): If true, read a file as a single row and not split by "\n".
* </li>
* <li>`lineSep` (default covers all `\r`, `\r\n` and `\n`): defines the line separator
* that should be used for parsing.</li>
* </ul>
*
* @param paths input paths
* @since 1.6.0
*/
@scala.annotation.varargs
def text(paths: String*): DataFrame = format("text").load(paths : _*)
/**
* Loads text files and returns a [[Dataset]] of String. See the documentation on the
* other overloaded `textFile()` method for more details.
* @since 2.0.0
*/
def textFile(path: String): Dataset[String] = {
// This method ensures that calls that explicit need single argument works, see SPARK-16009
textFile(Seq(path): _*)
}
/**
* Loads text files and returns a [[Dataset]] of String. The underlying schema of the Dataset
* contains a single string column named "value".
*
* If the directory structure of the text files contains partitioning information, those are
* ignored in the resulting Dataset. To include partitioning information as columns, use `text`.
*
* By default, each line in the text files is a new row in the resulting DataFrame. For example:
* {{{
* // Scala:
* spark.read.textFile("/path/to/spark/README.md")
*
* // Java:
* spark.read().textFile("/path/to/spark/README.md")
* }}}
*
* You can set the following textFile-specific option(s) for reading text files:
* <ul>
* <li>`wholetext` (default `false`): If true, read a file as a single row and not split by "\n".
* </li>
* <li>`lineSep` (default covers all `\r`, `\r\n` and `\n`): defines the line separator
* that should be used for parsing.</li>
* </ul>
*
* @param paths input path
* @since 2.0.0
*/
@scala.annotation.varargs
def textFile(paths: String*): Dataset[String] = {
assertNoSpecifiedSchema("textFile")
text(paths : _*).select("value").as[String](sparkSession.implicits.newStringEncoder)
}
/**
* A convenient function for schema validation in APIs.
*/
private def assertNoSpecifiedSchema(operation: String): Unit = {
if (userSpecifiedSchema.nonEmpty) {
throw new AnalysisException(s"User specified schema not supported with `$operation`")
}
}
/**
* A convenient function for schema validation in datasources supporting
* `columnNameOfCorruptRecord` as an option.
*/
private def verifyColumnNameOfCorruptRecord(
schema: StructType,
columnNameOfCorruptRecord: String): Unit = {
schema.getFieldIndex(columnNameOfCorruptRecord).foreach { corruptFieldIndex =>
val f = schema(corruptFieldIndex)
if (f.dataType != StringType || !f.nullable) {
throw new AnalysisException(
"The field for corrupt records must be string type and nullable")
}
}
}
  ///////////////////////////////////////////////////////////////////////////////////////
  // Builder pattern config options
  ///////////////////////////////////////////////////////////////////////////////////////
  // The data source format to read from; starts as the session's default data source.
  private var source: String = sparkSession.sessionState.conf.defaultDataSourceName
  // Schema explicitly supplied by the caller, if any; several sources reject this
  // (see assertNoSpecifiedSchema above). Set by code outside this excerpt.
  private var userSpecifiedSchema: Option[StructType] = None
  // Extra string key/value options for the data source; populated by setters outside
  // this excerpt.
  private val extraOptions = new scala.collection.mutable.HashMap[String, String]
}
| eyalfa/spark | sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala | Scala | apache-2.0 | 33,678 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle
import java.io._
import com.google.common.io.ByteStreams
import org.apache.spark.{SparkConf, SparkEnv}
import org.apache.spark.internal.Logging
import org.apache.spark.io.NioBufferedFileInputStream
import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer}
import org.apache.spark.network.netty.SparkTransportConf
import org.apache.spark.shuffle.IndexShuffleBlockResolver.NOOP_REDUCE_ID
import org.apache.spark.storage._
import org.apache.spark.util.Utils
/**
 * Create and maintain the shuffle blocks' mapping between logic block and physical file location.
 * Data of shuffle blocks from the same map task are stored in a single consolidated data file.
 * The offsets of the data blocks in the data file are stored in a separate index file.
 *
 * We use the name of the shuffle data's shuffleBlockId with reduce ID set to 0 and add ".data"
 * as the filename postfix for data file, and ".index" as the filename postfix for index file.
 *
 */
// Note: Changes to the format in this file should be kept in sync with
// org.apache.spark.network.shuffle.ExternalShuffleBlockResolver#getSortBasedShuffleBlockData().
private[spark] class IndexShuffleBlockResolver(
    conf: SparkConf,
    _blockManager: BlockManager = null)
  extends ShuffleBlockResolver
  with Logging {

  // Resolved lazily so tests can inject a BlockManager while production code falls back to
  // the one registered in SparkEnv.
  private lazy val blockManager = Option(_blockManager).getOrElse(SparkEnv.get.blockManager)

  private val transportConf = SparkTransportConf.fromSparkConf(conf, "shuffle")

  /** Returns the consolidated data file holding all shuffle output of one map task. */
  def getDataFile(shuffleId: Int, mapId: Int): File = {
    blockManager.diskBlockManager.getFile(ShuffleDataBlockId(shuffleId, mapId, NOOP_REDUCE_ID))
  }

  /** Returns the index file holding the per-reduce offsets into the data file. */
  private def getIndexFile(shuffleId: Int, mapId: Int): File = {
    blockManager.diskBlockManager.getFile(ShuffleIndexBlockId(shuffleId, mapId, NOOP_REDUCE_ID))
  }

  /**
   * Remove data file and index file that contain the output data from one map.
   * Failures to delete are logged as warnings, not raised.
   */
  def removeDataByMap(shuffleId: Int, mapId: Int): Unit = {
    var file = getDataFile(shuffleId, mapId)
    if (file.exists()) {
      if (!file.delete()) {
        logWarning(s"Error deleting data ${file.getPath()}")
      }
    }

    file = getIndexFile(shuffleId, mapId)
    if (file.exists()) {
      if (!file.delete()) {
        logWarning(s"Error deleting index ${file.getPath()}")
      }
    }
  }

  /**
   * Check whether the given index and data files match each other.
   * If so, return the partition lengths in the data file. Otherwise return null.
   */
  private def checkIndexAndDataFile(index: File, data: File, blocks: Int): Array[Long] = {
    // the index file should have `blocks + 1` longs as offset.
    // Multiply as Long (8L) so the comparison against File.length() (a Long) cannot be
    // affected by Int overflow.
    if (index.length() != (blocks + 1) * 8L) {
      return null
    }
    val lengths = new Array[Long](blocks)
    // Read the lengths of blocks
    val in = try {
      new DataInputStream(new NioBufferedFileInputStream(index))
    } catch {
      case e: IOException =>
        return null
    }
    try {
      // Convert the offsets into lengths of each block
      var offset = in.readLong()
      if (offset != 0L) {
        return null
      }
      var i = 0
      while (i < blocks) {
        val off = in.readLong()
        lengths(i) = off - offset
        offset = off
        i += 1
      }
    } catch {
      case e: IOException =>
        return null
    } finally {
      in.close()
    }

    // the size of data file should match with index file
    if (data.length() == lengths.sum) {
      lengths
    } else {
      null
    }
  }

  /**
   * Write an index file with the offsets of each block, plus a final offset at the end for the
   * end of the output file. This will be used by getBlockData to figure out where each block
   * begins and ends.
   *
   * It will commit the data and index file as an atomic operation, use the existing ones, or
   * replace them with new ones.
   *
   * Note: the `lengths` will be updated to match the existing index file if use the existing ones.
   */
  def writeIndexFileAndCommit(
      shuffleId: Int,
      mapId: Int,
      lengths: Array[Long],
      dataTmp: File): Unit = {
    val indexFile = getIndexFile(shuffleId, mapId)
    val indexTmp = Utils.tempFileWith(indexFile)
    try {
      val out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(indexTmp)))
      Utils.tryWithSafeFinally {
        // We take in lengths of each block, need to convert it to offsets.
        var offset = 0L
        out.writeLong(offset)
        for (length <- lengths) {
          offset += length
          out.writeLong(offset)
        }
      } {
        out.close()
      }

      val dataFile = getDataFile(shuffleId, mapId)
      // There is only one IndexShuffleBlockResolver per executor, this synchronization make sure
      // the following check and rename are atomic.
      synchronized {
        val existingLengths = checkIndexAndDataFile(indexFile, dataFile, lengths.length)
        if (existingLengths != null) {
          // Another attempt for the same task has already written our map outputs successfully,
          // so just use the existing partition lengths and delete our temporary map outputs.
          System.arraycopy(existingLengths, 0, lengths, 0, lengths.length)
          if (dataTmp != null && dataTmp.exists()) {
            dataTmp.delete()
          }
          indexTmp.delete()
        } else {
          // This is the first successful attempt in writing the map outputs for this task,
          // so override any existing index and data files with the ones we wrote.
          if (indexFile.exists()) {
            indexFile.delete()
          }
          if (dataFile.exists()) {
            dataFile.delete()
          }
          if (!indexTmp.renameTo(indexFile)) {
            throw new IOException("fail to rename file " + indexTmp + " to " + indexFile)
          }
          if (dataTmp != null && dataTmp.exists() && !dataTmp.renameTo(dataFile)) {
            throw new IOException("fail to rename file " + dataTmp + " to " + dataFile)
          }
        }
      }
    } finally {
      // Best-effort cleanup of the temp index file when it was not renamed into place.
      if (indexTmp.exists() && !indexTmp.delete()) {
        logError(s"Failed to delete temporary index file at ${indexTmp.getAbsolutePath}")
      }
    }
  }

  override def getBlockData(blockId: ShuffleBlockId): ManagedBuffer = {
    // The block is actually going to be a range of a single map output file for this map, so
    // find out the consolidated file, then the offset within that from our index
    val indexFile = getIndexFile(blockId.shuffleId, blockId.mapId)

    val in = new DataInputStream(new FileInputStream(indexFile))
    try {
      // Each index entry is one Long (8 bytes). Multiply as Long (8L) to avoid Int
      // overflow of the skip distance for very large reduce IDs.
      ByteStreams.skipFully(in, blockId.reduceId * 8L)
      val offset = in.readLong()
      val nextOffset = in.readLong()
      new FileSegmentManagedBuffer(
        transportConf,
        getDataFile(blockId.shuffleId, blockId.mapId),
        offset,
        nextOffset - offset)
    } finally {
      in.close()
    }
  }

  override def stop(): Unit = {}
}
private[spark] object IndexShuffleBlockResolver {
  // No-op reduce ID used in interactions with disk store.
  // The disk store currently expects puts to relate to a (map, reduce) pair, but in the sort
  // shuffle outputs for several reduces are glommed into a single file.
  // This placeholder is embedded in the per-map data/index file names (see getDataFile above).
  val NOOP_REDUCE_ID = 0
}
| wangyixiaohuihui/spark2-annotation | core/src/main/scala/org/apache/spark/shuffle/IndexShuffleBlockResolver.scala | Scala | apache-2.0 | 8,299 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
package internal
import java.util.concurrent.ConcurrentHashMap
import sbt.internal.inc.Stamper
import xsbti.{ FileConverter, VirtualFile, VirtualFileRef }
import xsbti.compile.DefinesClass
import xsbti.compile.analysis.{ Stamp => XStamp }
import sbt.internal.inc.Locate
/**
 * Cache based on path and its stamp: the implementation recomputes a file's value only
 * when its stamp changes (see the companion object's factory methods).
 */
sealed trait VirtualFileValueCache[A] {
  /** Discards all cached entries. */
  def clear(): Unit
  /** Lookup function returning the cached (or freshly computed) value for a file. */
  def get: VirtualFile => A
}
object VirtualFileValueCache {

  /**
   * Builds a cache of `DefinesClass` lookups keyed by file content stamp. Entries for
   * `rt.jar` are answered by a constant lookup that defines no classes.
   */
  def definesClassCache(converter: FileConverter): VirtualFileValueCache[DefinesClass] = {
    apply(converter) { file: VirtualFile =>
      if (file.name.toString == "rt.jar") (_: String) => false
      else Locate.definesClass(file)
    }
  }

  /** Creates a cache for `f` whose entries are invalidated via content-hash stamps. */
  def apply[A](converter: FileConverter)(f: VirtualFile => A): VirtualFileValueCache[A] = {
    import scala.collection.mutable
    // (Long, XStamp) entries per file ref — presumably last-modified time plus stamp;
    // see Stamper.timeWrap for the exact semantics.
    val stampCache: mutable.Map[VirtualFileRef, (Long, XStamp)] = new mutable.HashMap
    val stamper = Stamper.timeWrap(stampCache, converter, {
      case (vf: VirtualFile) => Stamper.forContentHash(vf)
    })
    make(stamper)(f)
  }

  /** Creates a cache that recomputes `f(file)` whenever `stamp(file)` changes. */
  def make[A](stamp: VirtualFile => XStamp)(f: VirtualFile => A): VirtualFileValueCache[A] =
    new VirtualFileValueCache0[A](stamp, f)
}
private[this] final class VirtualFileValueCache0[A](
    getStamp: VirtualFile => XStamp,
    make: VirtualFile => A
)(
    implicit equiv: Equiv[XStamp]
) extends VirtualFileValueCache[A] {
  // One per-file cache cell; ConcurrentHashMap keeps the map itself thread-safe while
  // each cell synchronizes its own recomputation.
  private[this] val backing = new ConcurrentHashMap[VirtualFile, VirtualFileCache]

  def clear(): Unit = backing.clear()

  def get = file => {
    val fresh = new VirtualFileCache(file)
    val previous = backing.putIfAbsent(file, fresh)
    // putIfAbsent returns null when our cell won the race; otherwise reuse the existing cell.
    if (previous eq null) fresh.get() else previous.get()
  }

  /** Holds the last computed value together with the stamp it was computed under. */
  private[this] final class VirtualFileCache(file: VirtualFile) {
    private[this] var cached: Option[(XStamp, A)] = None

    def get(): A = synchronized {
      val current = getStamp(file)
      cached match {
        case Some((seen, value)) if equiv.equiv(current, seen) => value
        case _ =>
          // Stamp changed (or first access): recompute and remember the new stamp.
          val value = make(file)
          cached = Some((current, value))
          value
      }
    }
  }
}
| xuwei-k/xsbt | main/src/main/scala/sbt/internal/VirtualFileValueCache.scala | Scala | apache-2.0 | 2,357 |
/*
* Copyright (C) 2005, The OpenURP Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.edu.grade.course.domain
import org.openurp.edu.grade.course.model.CourseGrade
/** Credit-weighted grade averages. */
object WeightedMean {

  /**
   * Computes the credit-weighted mean score over the given grades.
   *
   * A grade participates when it carries a score or when it was not passed (a missing
   * score on a failed grade counts as 0). Returns 0 when no grade participates.
   */
  def calcGa(grades: collection.Iterable[CourseGrade]): Float = {
    var totalCredits = 0f
    var weightedSum = 0f
    grades.foreach { grade =>
      if (grade.score.isDefined || !grade.passed) {
        val credit = grade.course.credits
        totalCredits += credit
        weightedSum += credit * grade.score.getOrElse(0f)
      }
    }
    if (totalCredits == 0) 0f else weightedSum / totalCredits
  }

  /**
   * Computes the credit-weighted mean grade point (GPA) over the given grades.
   * Grades without a grade point are skipped. Returns 0 when nothing qualifies.
   */
  def calcGpa(grades: collection.Iterable[CourseGrade]): Float = {
    var totalCredits = 0f
    var weightedSum = 0f
    grades.foreach { grade =>
      grade.gp.foreach { gp =>
        val credit = grade.course.credits
        totalCredits += credit
        weightedSum += credit * gp
      }
    }
    if (totalCredits == 0) 0f else weightedSum / totalCredits
  }
}
| openurp/api | edu/src/main/scala/org/openurp/edu/grade/course/domain/WeightedMean.scala | Scala | lgpl-3.0 | 1,616 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal
import org.neo4j.cypher.internal.compatibility.{CompatibilityFor1_9, CompatibilityFor2_2, CompatibilityFor2_2Cost, CompatibilityFor2_2Rule, CompatibilityFor2_3, CompatibilityFor2_3Cost, CompatibilityFor2_3Rule}
import org.neo4j.cypher.internal.compiler.v2_3.CypherCompilerConfiguration
import org.neo4j.cypher.{CypherPlanner, CypherRuntime}
import org.neo4j.graphdb.GraphDatabaseService
import org.neo4j.kernel.api.KernelAPI
import org.neo4j.kernel.monitoring.{Monitors => KernelMonitors}
import org.neo4j.logging.Log
import scala.collection.mutable
// Version-specific planner selection specs: each Cypher compatibility layer is keyed by a
// spec carrying exactly the options that layer supports (see PlannerFactory below).
sealed trait PlannerSpec
// 1.9 exposes no planner or runtime choice.
case object PlannerSpec_v1_9 extends PlannerSpec
// 2.2 supports choosing a planner (rule vs. cost).
final case class PlannerSpec_v2_2(planner: CypherPlanner) extends PlannerSpec
// 2.3 additionally supports choosing a runtime.
final case class PlannerSpec_v2_3(planner: CypherPlanner, runtime: CypherRuntime) extends PlannerSpec
class PlannerFactory(graph: GraphDatabaseService, kernelAPI: KernelAPI, kernelMonitors: KernelMonitors, log: Log,
                     config: CypherCompilerConfiguration) {

  /** Builds the 1.9 compatibility layer; it has no planner/runtime choices. */
  def create(spec: PlannerSpec_v1_9.type) = CompatibilityFor1_9(graph, config.queryCacheSize, kernelMonitors)

  /** Builds a 2.2 compatibility layer, picking the rule or cost pipeline from the spec. */
  def create(spec: PlannerSpec_v2_2) =
    if (spec.planner == CypherPlanner.rule)
      CompatibilityFor2_2Rule(graph, config.queryCacheSize, config.statsDivergenceThreshold,
        config.queryPlanTTL, CypherCompiler.CLOCK, kernelMonitors, kernelAPI)
    else
      CompatibilityFor2_2Cost(graph, config.queryCacheSize, config.statsDivergenceThreshold, config.queryPlanTTL,
        CypherCompiler.CLOCK, kernelMonitors, kernelAPI, log, spec.planner)

  /** Builds a 2.3 compatibility layer, honouring both the planner and runtime choices. */
  def create(spec: PlannerSpec_v2_3) =
    if (spec.planner == CypherPlanner.rule)
      CompatibilityFor2_3Rule(graph, config, CypherCompiler.CLOCK, kernelMonitors, kernelAPI)
    else
      CompatibilityFor2_3Cost(graph, config,
        CypherCompiler.CLOCK, kernelMonitors, kernelAPI, log, spec.planner, spec.runtime)
}
class PlannerCache(factory: PlannerFactory) {
  // One memoized compatibility layer per spec; the spec acts as the cache key.
  private val compat_v1_9 = new CachingValue[CompatibilityFor1_9]
  private val compat_v2_2 = new mutable.HashMap[PlannerSpec_v2_2, CompatibilityFor2_2]
  private val compat_v2_3 = new mutable.HashMap[PlannerSpec_v2_3, CompatibilityFor2_3]

  /** Returns the (possibly cached) compatibility layer for the given spec. */
  def apply(spec: PlannerSpec_v1_9.type) = compat_v1_9.getOrElseUpdate(factory.create(spec))
  def apply(spec: PlannerSpec_v2_2) = compat_v2_2.getOrElseUpdate(spec, factory.create(spec))
  def apply(spec: PlannerSpec_v2_3) = compat_v2_3.getOrElseUpdate(spec, factory.create(spec))
}
class CachingValue[T]() {
  // Holds the memoized value once it has been computed.
  private var cached: Option[T] = None

  /**
   * Returns the stored value, evaluating `op` exactly once to populate the cache on the
   * first call. Later calls never evaluate `op`. Not thread-safe.
   */
  def getOrElseUpdate(op: => T): T = {
    if (cached.isEmpty) {
      cached = Some(op)
    }
    cached.get
  }
}
| HuangLS/neo4j | community/cypher/cypher/src/main/scala/org/neo4j/cypher/internal/PlannerCache.scala | Scala | apache-2.0 | 3,505 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.lang.ref.WeakReference
import java.util.concurrent.TimeUnit
import scala.collection.mutable.HashSet
import scala.util.Random
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually._
import org.scalatest.concurrent.PatienceConfiguration
import org.scalatest.time.SpanSugar._
import org.apache.spark.internal.{config, Logging}
import org.apache.spark.internal.config._
import org.apache.spark.rdd.{RDD, ReliableRDDCheckpointData}
import org.apache.spark.shuffle.sort.SortShuffleManager
import org.apache.spark.storage._
/**
 * An abstract base class for context cleaner tests, which sets up a context with a config
 * suitable for cleaner tests and provides some utility functions. Subclasses can use different
 * config options, in particular, a different shuffle manager class
 */
abstract class ContextCleanerSuiteBase(val shuffleManager: Class[_] = classOf[SortShuffleManager])
  extends SparkFunSuite with BeforeAndAfter with LocalSparkContext
{
  // Default patience used by CleanerTester.assertCleanup() via `eventually`.
  implicit val defaultTimeout = timeout(10.seconds)
  // Blocking cleanup and checkpoint cleaning are enabled so tests can assert deterministically.
  val conf = new SparkConf()
    .setMaster("local[2]")
    .setAppName("ContextCleanerSuite")
    .set(CLEANER_REFERENCE_TRACKING_BLOCKING, true)
    .set(CLEANER_REFERENCE_TRACKING_BLOCKING_SHUFFLE, true)
    .set(CLEANER_REFERENCE_TRACKING_CLEAN_CHECKPOINTS, true)
    .set(config.SHUFFLE_MANAGER, shuffleManager.getName)
  // Fresh SparkContext per test, torn down in `after` so contexts never leak across tests.
  before {
    sc = new SparkContext(conf)
  }
  after {
    if (sc != null) {
      sc.stop()
      sc = null
    }
  }
  // ------ Helper functions ------
  protected def newRDD() = sc.makeRDD(1 to 10)
  protected def newPairRDD() = newRDD().map(_ -> 1)
  protected def newShuffleRDD() = newPairRDD().reduceByKey(_ + _)
  protected def newBroadcast() = sc.broadcast(1 to 100)
  // Builds a shuffle RDD together with every ShuffleDependency in its lineage.
  protected def newRDDWithShuffleDependencies():
      (RDD[(Int, Int)], Seq[ShuffleDependency[Int, Int, Int]]) = {
    def getAllDependencies(rdd: RDD[(Int, Int)]): Seq[Dependency[_]] = {
      rdd.dependencies ++ rdd.dependencies.flatMap { dep =>
        getAllDependencies(dep.rdd.asInstanceOf[RDD[(Int, Int)]])
      }
    }
    val rdd = newShuffleRDD()
    // Get all the shuffle dependencies
    val shuffleDeps = getAllDependencies(rdd)
      .filter(_.isInstanceOf[ShuffleDependency[_, _, _]])
      .map(_.asInstanceOf[ShuffleDependency[Int, Int, Int]])
    (rdd, shuffleDeps)
  }
  // Builds a random RDD shape (plain, shuffled, or joined), possibly persisted, and
  // materializes it with count().
  protected def randomRdd() = {
    val rdd: RDD[_] = Random.nextInt(3) match {
      case 0 => newRDD()
      case 1 => newShuffleRDD()
      case 2 => newPairRDD.join(newPairRDD())
    }
    if (Random.nextBoolean()) rdd.persist()
    rdd.count()
    rdd
  }
  /** Run GC and make sure it actually has run */
  protected def runGC(): Unit = {
    val weakRef = new WeakReference(new Object())
    val startTimeNs = System.nanoTime()
    System.gc() // Make a best effort to run the garbage collection. It *usually* runs GC.
    // Wait until a weak reference object has been GCed
    while (System.nanoTime() - startTimeNs < TimeUnit.SECONDS.toNanos(10) && weakRef.get != null) {
      System.gc()
      Thread.sleep(200)
    }
  }
  // NOTE(review): assumes the cleaner is enabled for this context (Option.get fails otherwise).
  protected def cleaner = sc.cleaner.get
}
/**
 * Basic ContextCleanerSuite, which uses sort-based shuffle.
 *
 * Each test either triggers cleanup explicitly via the cleaner's doCleanup* methods,
 * or drops all strong references and forces GC so the cleaner's reference tracking fires.
 */
class ContextCleanerSuite extends ContextCleanerSuiteBase {
  test("cleanup RDD") {
    val rdd = newRDD().persist()
    val collected = rdd.collect().toList
    val tester = new CleanerTester(sc, rddIds = Seq(rdd.id))
    // Explicit cleanup
    cleaner.doCleanupRDD(rdd.id, blocking = true)
    tester.assertCleanup()
    // Verify that RDDs can be re-executed after cleaning up
    assert(rdd.collect().toList === collected)
  }
  test("cleanup shuffle") {
    val (rdd, shuffleDeps) = newRDDWithShuffleDependencies()
    val collected = rdd.collect().toList
    val tester = new CleanerTester(sc, shuffleIds = shuffleDeps.map(_.shuffleId))
    // Explicit cleanup
    shuffleDeps.foreach(s => cleaner.doCleanupShuffle(s.shuffleId, blocking = true))
    tester.assertCleanup()
    // Verify that shuffles can be re-executed after cleaning up
    assert(rdd.collect().toList.equals(collected))
  }
  test("cleanup broadcast") {
    val broadcast = newBroadcast()
    val tester = new CleanerTester(sc, broadcastIds = Seq(broadcast.id))
    // Explicit cleanup
    cleaner.doCleanupBroadcast(broadcast.id, blocking = true)
    tester.assertCleanup()
  }
  test("automatically cleanup RDD") {
    var rdd = newRDD().persist()
    rdd.count()
    // Test that GC does not cause RDD cleanup due to a strong reference
    val preGCTester = new CleanerTester(sc, rddIds = Seq(rdd.id))
    runGC()
    intercept[Exception] {
      preGCTester.assertCleanup()(timeout(1.second))
    }
    // Test that GC causes RDD cleanup after dereferencing the RDD
    // Note rdd is used after previous GC to avoid early collection by the JVM
    val postGCTester = new CleanerTester(sc, rddIds = Seq(rdd.id))
    rdd = null // Make RDD out of scope
    runGC()
    postGCTester.assertCleanup()
  }
  test("automatically cleanup shuffle") {
    var rdd = newShuffleRDD()
    rdd.count()
    // Test that GC does not cause shuffle cleanup due to a strong reference
    val preGCTester = new CleanerTester(sc, shuffleIds = Seq(0))
    runGC()
    intercept[Exception] {
      preGCTester.assertCleanup()(timeout(1.second))
    }
    rdd.count() // Defeat early collection by the JVM
    // Test that GC causes shuffle cleanup after dereferencing the RDD
    val postGCTester = new CleanerTester(sc, shuffleIds = Seq(0))
    rdd = null // Make RDD out of scope, so that corresponding shuffle goes out of scope
    runGC()
    postGCTester.assertCleanup()
  }
  test("automatically cleanup broadcast") {
    var broadcast = newBroadcast()
    // Test that GC does not cause broadcast cleanup due to a strong reference
    val preGCTester = new CleanerTester(sc, broadcastIds = Seq(broadcast.id))
    runGC()
    intercept[Exception] {
      preGCTester.assertCleanup()(timeout(1.second))
    }
    // Test that GC causes broadcast cleanup after dereferencing the broadcast variable
    // Note broadcast is used after previous GC to avoid early collection by the JVM
    val postGCTester = new CleanerTester(sc, broadcastIds = Seq(broadcast.id))
    broadcast = null // Make broadcast variable out of scope
    runGC()
    postGCTester.assertCleanup()
  }
  test("automatically cleanup normal checkpoint") {
    withTempDir { checkpointDir =>
      checkpointDir.delete()
      var rdd = newPairRDD()
      sc.setCheckpointDir(checkpointDir.toString)
      rdd.checkpoint()
      rdd.cache()
      rdd.collect()
      var rddId = rdd.id
      // Confirm the checkpoint directory exists
      assert(ReliableRDDCheckpointData.checkpointPath(sc, rddId).isDefined)
      val path = ReliableRDDCheckpointData.checkpointPath(sc, rddId).get
      val fs = path.getFileSystem(sc.hadoopConfiguration)
      assert(fs.exists(path))
      // Checkpoint cleaning is enabled in the base conf, so GC of the RDD should remove
      // the checkpoint files (verified by the !fs.exists assertion below).
      var postGCTester = new CleanerTester(sc, Seq(rddId), Nil, Nil, Seq(rddId))
      rdd = null // Make RDD out of scope, ok if collected earlier
      runGC()
      postGCTester.assertCleanup()
      assert(!fs.exists(ReliableRDDCheckpointData.checkpointPath(sc, rddId).get))
      // Verify that checkpoints are NOT cleaned up if the config is not enabled
      sc.stop()
      val conf = new SparkConf()
        .setMaster("local[2]")
        .setAppName("cleanupCheckpoint")
        .set(CLEANER_REFERENCE_TRACKING_CLEAN_CHECKPOINTS, false)
      sc = new SparkContext(conf)
      rdd = newPairRDD()
      sc.setCheckpointDir(checkpointDir.toString)
      rdd.checkpoint()
      rdd.cache()
      rdd.collect()
      rddId = rdd.id
      // Confirm the checkpoint directory exists
      assert(fs.exists(ReliableRDDCheckpointData.checkpointPath(sc, rddId).get))
      // Reference rdd to defeat any early collection by the JVM
      rdd.count()
      // With cleaning disabled, GC of the RDD must leave the checkpoint files intact
      // (verified by the fs.exists assertion below).
      postGCTester = new CleanerTester(sc, Seq(rddId))
      rdd = null // Make RDD out of scope
      runGC()
      postGCTester.assertCleanup()
      assert(fs.exists(ReliableRDDCheckpointData.checkpointPath(sc, rddId).get))
    }
  }
  test("automatically clean up local checkpoint") {
    // Note that this test is similar to the RDD cleanup
    // test because the same underlying mechanism is used!
    var rdd = newPairRDD().localCheckpoint()
    assert(rdd.checkpointData.isDefined)
    assert(rdd.checkpointData.get.checkpointRDD.isEmpty)
    rdd.count()
    assert(rdd.checkpointData.get.checkpointRDD.isDefined)
    // Test that GC does not cause checkpoint cleanup due to a strong reference
    val preGCTester = new CleanerTester(sc, rddIds = Seq(rdd.id))
    runGC()
    intercept[Exception] {
      preGCTester.assertCleanup()(timeout(1.second))
    }
    // Test that RDD going out of scope does cause the checkpoint blocks to be cleaned up
    val postGCTester = new CleanerTester(sc, rddIds = Seq(rdd.id))
    rdd = null
    runGC()
    postGCTester.assertCleanup()
  }
  test("automatically cleanup RDD + shuffle + broadcast") {
    val numRdds = 100
    val numBroadcasts = 4 // Broadcasts are more costly
    val rddBuffer = (1 to numRdds).map(i => randomRdd()).toBuffer
    val broadcastBuffer = (1 to numBroadcasts).map(i => newBroadcast()).toBuffer
    val rddIds = sc.persistentRdds.keys.toSeq
    val shuffleIds = 0 until sc.newShuffleId
    val broadcastIds = broadcastBuffer.map(_.id)
    // While the buffers strongly reference everything, GC must not trigger any cleanup.
    val preGCTester = new CleanerTester(sc, rddIds, shuffleIds, broadcastIds.toSeq)
    runGC()
    intercept[Exception] {
      preGCTester.assertCleanup()(timeout(1.second))
    }
    // Test that GC triggers the cleanup of all variables after the dereferencing them
    val postGCTester = new CleanerTester(sc, rddIds, shuffleIds, broadcastIds.toSeq)
    broadcastBuffer.clear()
    rddBuffer.clear()
    runGC()
    postGCTester.assertCleanup()
    // Make sure the broadcasted task closure no longer exists after GC.
    val taskClosureBroadcastId = broadcastIds.max + 1
    assert(sc.env.blockManager.master.getMatchingBlockIds({
      case BroadcastBlockId(`taskClosureBroadcastId`, _) => true
      case _ => false
    }, askStorageEndpoints = true).isEmpty)
  }
  test("automatically cleanup RDD + shuffle + broadcast in distributed mode") {
    sc.stop()
    // Rebuild the context in local-cluster mode so executors live in separate JVMs.
    val conf2 = new SparkConf()
      .setMaster("local-cluster[2, 1, 1024]")
      .setAppName("ContextCleanerSuite")
      .set(CLEANER_REFERENCE_TRACKING_BLOCKING, true)
      .set(CLEANER_REFERENCE_TRACKING_BLOCKING_SHUFFLE, true)
      .set(config.SHUFFLE_MANAGER, shuffleManager.getName)
    sc = new SparkContext(conf2)
    val numRdds = 10
    val numBroadcasts = 4 // Broadcasts are more costly
    val rddBuffer = (1 to numRdds).map(i => randomRdd()).toBuffer
    val broadcastBuffer = (1 to numBroadcasts).map(i => newBroadcast()).toBuffer
    val rddIds = sc.persistentRdds.keys.toSeq
    val shuffleIds = 0 until sc.newShuffleId
    val broadcastIds = broadcastBuffer.map(_.id)
    // While the buffers strongly reference everything, GC must not trigger any cleanup.
    val preGCTester = new CleanerTester(sc, rddIds, shuffleIds, broadcastIds.toSeq)
    runGC()
    intercept[Exception] {
      preGCTester.assertCleanup()(timeout(1.second))
    }
    // Test that GC triggers the cleanup of all variables after the dereferencing them
    val postGCTester = new CleanerTester(sc, rddIds, shuffleIds, broadcastIds.toSeq)
    broadcastBuffer.clear()
    rddBuffer.clear()
    runGC()
    postGCTester.assertCleanup()
    // Make sure the broadcasted task closure no longer exists after GC.
    val taskClosureBroadcastId = broadcastIds.max + 1
    assert(sc.env.blockManager.master.getMatchingBlockIds({
      case BroadcastBlockId(`taskClosureBroadcastId`, _) => true
      case _ => false
    }, askStorageEndpoints = true).isEmpty)
  }
}
/**
* Class to test whether RDDs, shuffles, etc. have been successfully cleaned.
* The checkpoint here refers only to normal (reliable) checkpoints, not local checkpoints.
*/
class CleanerTester(
    sc: SparkContext,
    rddIds: Seq[Int] = Seq.empty,
    shuffleIds: Seq[Int] = Seq.empty,
    broadcastIds: Seq[Long] = Seq.empty,
    checkpointIds: Seq[Long] = Seq.empty)
  extends Logging {

  // Resources still awaiting cleanup. The cleaner listener callbacks remove
  // entries as the corresponding cleanup events arrive; all access is
  // synchronized on the set itself since callbacks run on the cleaner thread.
  val toBeCleanedRDDIds = new HashSet[Int] ++= rddIds
  val toBeCleanedShuffleIds = new HashSet[Int] ++= shuffleIds
  val toBeCleanedBroadcstIds = new HashSet[Long] ++= broadcastIds
  val toBeCheckpointIds = new HashSet[Long] ++= checkpointIds

  val isDistributed = !sc.isLocal

  // Listener wired into the ContextCleaner; each callback marks the
  // corresponding resource as cleaned.
  val cleanerListener = new CleanerListener {
    def rddCleaned(rddId: Int): Unit = {
      toBeCleanedRDDIds.synchronized { toBeCleanedRDDIds -= rddId }
      logInfo("RDD " + rddId + " cleaned")
    }

    def shuffleCleaned(shuffleId: Int): Unit = {
      toBeCleanedShuffleIds.synchronized { toBeCleanedShuffleIds -= shuffleId }
      logInfo("Shuffle " + shuffleId + " cleaned")
    }

    def broadcastCleaned(broadcastId: Long): Unit = {
      toBeCleanedBroadcstIds.synchronized { toBeCleanedBroadcstIds -= broadcastId }
      logInfo("Broadcast " + broadcastId + " cleaned")
    }

    def accumCleaned(accId: Long): Unit = {
      logInfo("Cleaned accId " + accId + " cleaned")
    }

    def checkpointCleaned(rddId: Long): Unit = {
      toBeCheckpointIds.synchronized { toBeCheckpointIds -= rddId }
      logInfo("checkpoint " + rddId + " cleaned")
    }
  }

  val MAX_VALIDATION_ATTEMPTS = 10
  val VALIDATION_ATTEMPT_INTERVAL = 100

  logInfo("Attempting to validate before cleanup:\\n" + uncleanedResourcesToString)
  preCleanupValidate()
  sc.cleaner.get.attachListener(cleanerListener)

  /** Assert that all the stuff has been cleaned up */
  def assertCleanup()(implicit waitTimeout: PatienceConfiguration.Timeout): Unit = {
    try {
      eventually(waitTimeout, interval(100.milliseconds)) {
        assert(isAllCleanedUp,
          "The following resources were not cleaned up:\\n" + uncleanedResourcesToString)
      }
      postCleanupValidate()
    } finally {
      logInfo("Resources left from cleaning up:\\n" + uncleanedResourcesToString)
    }
  }

  /** Verify that RDDs, shuffles, etc. occupy resources */
  private def preCleanupValidate(): Unit = {
    assert(rddIds.nonEmpty || shuffleIds.nonEmpty || broadcastIds.nonEmpty ||
      checkpointIds.nonEmpty, "Nothing to cleanup")

    // Verify the RDDs have been persisted and blocks are present
    rddIds.foreach { rddId =>
      assert(
        sc.persistentRdds.contains(rddId),
        "RDD " + rddId + " have not been persisted, cannot start cleaner test"
      )

      assert(
        !getRDDBlocks(rddId).isEmpty,
        "Blocks of RDD " + rddId + " cannot be found in block manager, " +
          "cannot start cleaner test"
      )
    }

    // Verify the shuffle ids are registered and blocks are present
    shuffleIds.foreach { shuffleId =>
      assert(
        mapOutputTrackerMaster.containsShuffle(shuffleId),
        "Shuffle " + shuffleId + " have not been registered, cannot start cleaner test"
      )

      assert(
        !getShuffleBlocks(shuffleId).isEmpty,
        "Blocks of shuffle " + shuffleId + " cannot be found in block manager, " +
          "cannot start cleaner test"
      )
    }

    // Verify that the broadcast blocks are present
    broadcastIds.foreach { broadcastId =>
      assert(
        // Fix: the original message was missing the space before "cannot",
        // producing e.g. "Blocks of broadcast 5cannot be found ...".
        !getBroadcastBlocks(broadcastId).isEmpty,
        "Blocks of broadcast " + broadcastId + " cannot be found in block manager, " +
          "cannot start cleaner test"
      )
    }
  }

  /**
   * Verify that RDDs, shuffles, etc. do not occupy resources. Tests multiple times as there is
   * no guarantee on how long it will take to clean up the resources.
   */
  private def postCleanupValidate(): Unit = {
    // Verify the RDDs have been unpersisted and their blocks removed
    rddIds.foreach { rddId =>
      assert(
        !sc.persistentRdds.contains(rddId),
        "RDD " + rddId + " was not cleared from sc.persistentRdds"
      )

      assert(
        getRDDBlocks(rddId).isEmpty,
        "Blocks of RDD " + rddId + " were not cleared from block manager"
      )
    }

    // Verify the shuffles have been deregistered and their blocks removed
    shuffleIds.foreach { shuffleId =>
      assert(
        !mapOutputTrackerMaster.containsShuffle(shuffleId),
        "Shuffle " + shuffleId + " was not deregistered from map output tracker"
      )

      assert(
        getShuffleBlocks(shuffleId).isEmpty,
        "Blocks of shuffle " + shuffleId + " were not cleared from block manager"
      )
    }

    // Verify the broadcast blocks have been removed
    broadcastIds.foreach { broadcastId =>
      assert(
        getBroadcastBlocks(broadcastId).isEmpty,
        "Blocks of broadcast " + broadcastId + " were not cleared from block manager"
      )
    }
  }

  /** Render the not-yet-cleaned resource ids for log and assertion messages. */
  private def uncleanedResourcesToString = {
    val s1 = toBeCleanedRDDIds.synchronized {
      toBeCleanedRDDIds.toSeq.sorted.mkString("[", ", ", "]")
    }
    val s2 = toBeCleanedShuffleIds.synchronized {
      toBeCleanedShuffleIds.toSeq.sorted.mkString("[", ", ", "]")
    }
    val s3 = toBeCleanedBroadcstIds.synchronized {
      toBeCleanedBroadcstIds.toSeq.sorted.mkString("[", ", ", "]")
    }
    s"""
       |\\tRDDs = $s1
       |\\tShuffles = $s2
       |\\tBroadcasts = $s3
    """.stripMargin
  }

  /** True once every tracked resource has been reported cleaned. */
  private def isAllCleanedUp =
    toBeCleanedRDDIds.synchronized { toBeCleanedRDDIds.isEmpty } &&
    toBeCleanedShuffleIds.synchronized { toBeCleanedShuffleIds.isEmpty } &&
    toBeCleanedBroadcstIds.synchronized { toBeCleanedBroadcstIds.isEmpty } &&
    toBeCheckpointIds.synchronized { toBeCheckpointIds.isEmpty }

  /** Block ids of the given RDD currently known to the block manager. */
  private def getRDDBlocks(rddId: Int): Seq[BlockId] = {
    blockManager.master.getMatchingBlockIds( _ match {
      case RDDBlockId(`rddId`, _) => true
      case _ => false
    }, askStorageEndpoints = true)
  }

  /** Data and index block ids of the given shuffle. */
  private def getShuffleBlocks(shuffleId: Int): Seq[BlockId] = {
    blockManager.master.getMatchingBlockIds( _ match {
      case ShuffleBlockId(`shuffleId`, _, _) => true
      case ShuffleIndexBlockId(`shuffleId`, _, _) => true
      case _ => false
    }, askStorageEndpoints = true)
  }

  /** Block ids belonging to the given broadcast variable. */
  private def getBroadcastBlocks(broadcastId: Long): Seq[BlockId] = {
    blockManager.master.getMatchingBlockIds( _ match {
      case BroadcastBlockId(`broadcastId`, _) => true
      case _ => false
    }, askStorageEndpoints = true)
  }

  private def blockManager = sc.env.blockManager

  private def mapOutputTrackerMaster = sc.env.mapOutputTracker.asInstanceOf[MapOutputTrackerMaster]
}
| dbtsai/spark | core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala | Scala | apache-2.0 | 19,427 |
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.wegtam.tensei.agent.processor
import akka.actor.{ Actor, ActorLogging, ActorRef, Props }
import akka.cluster.pubsub.DistributedPubSubMediator.Unsubscribe
import akka.cluster.pubsub.{ DistributedPubSub, DistributedPubSubMediator }
import akka.event.{ DiagnosticLoggingAdapter, Logging }
import com.wegtam.tensei.adt.ElementReference
import com.wegtam.tensei.agent.adt.TenseiForeignKeyValueType
import com.wegtam.tensei.agent.helpers.LoggingHelpers
import com.wegtam.tensei.agent.processor.AutoIncrementValueBuffer.AutoIncrementValueBufferMessages
/**
* This actor buffers values that are changed when written into the target
* because they are represented as auto-increment columns.
*
* @param agentRunIdentifier An optional agent run identifier which is usually an uuid.
*/
class AutoIncrementValueBuffer(agentRunIdentifier: Option[String]) extends Actor with ActorLogging {
  // Create a distributed pub sub mediator.
  import DistributedPubSubMediator.{ Subscribe, SubscribeAck }

  val mediator = DistributedPubSub(context.system).mediator

  override val log
    : DiagnosticLoggingAdapter = Logging(this) // Override the standard logger to be able to add stuff via MDC.
  log.mdc(LoggingHelpers.generateMdcEntryForRunIdentifier(agentRunIdentifier))

  // One worker actor per element reference, created lazily on first store.
  val buffer: scala.collection.mutable.Map[ElementReference, ActorRef] =
    scala.collection.mutable.Map.empty[ElementReference, ActorRef]

  // Subscribe to our event channel.
  mediator ! Subscribe(AutoIncrementValueBuffer.AUTO_INCREMENT_BUFFER_CHANNEL, self) // Subscribe to the pub sub channel.

  @scala.throws[Exception](classOf[Exception])
  override def postStop(): Unit = {
    mediator ! Unsubscribe(AutoIncrementValueBuffer.AUTO_INCREMENT_BUFFER_CHANNEL, self) // Unsubscribe from the pub sub channel.
    super.postStop()
  }

  override def receive: Receive = {
    case SubscribeAck(msg) =>
      // Only the subscription to our own channel is expected to be acknowledged.
      if (msg.topic == AutoIncrementValueBuffer.AUTO_INCREMENT_BUFFER_CHANNEL)
        log.debug("Successfully subscribes to auto increment buffer channel.")
      else
        log.warning("Got subscribe ack for unknown channel topic!")

    case AutoIncrementValueBufferMessages.Store(ref, values) =>
      log.debug("Relaying {} auto increment values to storage for {}.", values.size, ref)
      // Create the worker for this element reference on demand, then relay.
      val worker = buffer.getOrElseUpdate(
        ref,
        context.actorOf(AutoIncrementValueBufferWorker.props(agentRunIdentifier, ref))
      )
      worker forward AutoIncrementValueBufferMessages.Store(ref, values)

    case AutoIncrementValueBufferMessages.Return(ref, value) =>
      log.debug("Received return auto increment value for {} and {}.", ref, value)
      buffer.get(ref) match {
        case Some(worker) =>
          worker forward AutoIncrementValueBufferMessages.Return(ref, value)
        case None =>
          // No worker means nothing was ever stored for this reference.
          sender() ! AutoIncrementValueBufferMessages.ValueNotFound(ref, value)
      }
  }
}
object AutoIncrementValueBuffer {
  /** The topic name used on the distributed pub sub mediator. */
  val AUTO_INCREMENT_BUFFER_CHANNEL = "AutoIncrementBuffer" // The channel name for the pub sub mediator.

  /** The name under which this actor should be created. */
  val AUTO_INCREMENT_BUFFER_NAME = "AutoIncBuffer" // The actor name that should be used for this actor.

  /**
   * Factory for the actor's [[Props]].
   *
   * @param agentRunIdentifier An optional agent run identifier which is usually an uuid.
   * @return Props to create an [[AutoIncrementValueBuffer]] actor.
   */
  def props(agentRunIdentifier: Option[String]): Props =
    Props(new AutoIncrementValueBuffer(agentRunIdentifier))

  /**
   * A wrapper to hold the old and the new value for an auto-increment column.
   *
   * @param oldValue The value from the parser (e.g. the source database or file).
   * @param newValue The value generated by the target database.
   */
  final case class AutoIncrementValuePair(oldValue: TenseiForeignKeyValueType,
                                          newValue: TenseiForeignKeyValueType)

  /**
   * A sealed trait for the messages regarding the storing and retrieving
   * of auto-increment values.
   */
  sealed trait AutoIncrementValueBufferMessages

  /**
   * A companion object for the trait to keep the namespace clean.
   */
  object AutoIncrementValueBufferMessages {

    /**
     * The successful return of a stored auto-increment value.
     *
     * @param ref The reference to the DFASDL element.
     * @param oldValue The "old" e.g. parsed value.
     * @param newValue The "new" e.g. generated value.
     */
    final case class ChangedValue(ref: ElementReference,
                                  oldValue: TenseiForeignKeyValueType,
                                  newValue: TenseiForeignKeyValueType)
        extends AutoIncrementValueBufferMessages

    /**
     * Request the return of the value stored for the given reference and "old" value.
     *
     * @param ref The reference to the DFASDL element.
     * @param value The "old" e.g. parsed value.
     */
    final case class Return(ref: ElementReference, value: TenseiForeignKeyValueType)
        extends AutoIncrementValueBufferMessages

    /**
     * Store a list of auto-increment values for the given element reference.
     *
     * @param ref The reference to the DFASDL element.
     * @param values A list of value pairs (old e.g. parsed value and new value).
     */
    final case class Store(ref: ElementReference, values: Vector[AutoIncrementValuePair])
        extends AutoIncrementValueBufferMessages

    /**
     * Indicates that the requested value specified by the given reference and old
     * value was not found.
     *
     * @param ref The element reference of the DFASDL element.
     * @param value The "old" e.g. parsed value.
     */
    final case class ValueNotFound(ref: ElementReference, value: TenseiForeignKeyValueType)
        extends AutoIncrementValueBufferMessages
  }
}
| Tensei-Data/tensei-agent | src/main/scala/com/wegtam/tensei/agent/processor/AutoIncrementValueBuffer.scala | Scala | agpl-3.0 | 6,335 |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.mandar2812.dynaml.models
import breeze.linalg.DenseVector
import breeze.stats.distributions.{ContinuousDistr, Moments}
import io.github.mandar2812.dynaml.kernels.CovarianceFunction
import io.github.mandar2812.dynaml.models.gp.{AbstractGPRegressionModel, GaussianProcessMixture}
import io.github.mandar2812.dynaml.models.stp.{AbstractSTPRegressionModel, MVStudentsTModel, MVTMixture, StudentTProcessMixture}
import io.github.mandar2812.dynaml.pipes.DataPipe
import io.github.mandar2812.dynaml.probability._
import io.github.mandar2812.dynaml.probability.distributions.HasErrorBars
import org.apache.log4j.Logger
import spire.algebra.{InnerProductSpace, VectorSpace}
import scala.reflect.ClassTag
/**
* High Level description of a stochastic process based predictive model.
*
* @tparam T The underlying data structure storing the training & test data.
* @tparam I The type of the index set (i.e. Double for time series, DenseVector for GP regression)
* @tparam Y The type of the output label
* @tparam W Implementing class of the posterior distribution
* @author mandar2812 date 26/08/16.
*
* */
trait StochasticProcessModel[T, I, Y, W] extends Model[T, I, Y] {

  /** Calculates posterior predictive distribution for
    * a particular set of test data points.
    *
    * @param test A Sequence or Sequence like data structure
    *             storing the values of the input patters.
    * @return The posterior predictive distribution over the test inputs.
    * */
  def predictiveDistribution[U <: Seq[I]](test: U): W

  /**
    * Convert from the underlying data structure to
    * Seq[(I, Y)] where I is the index set of the GP
    * and Y is the value/label type.
    * */
  def dataAsSeq(data: T): Seq[(I,Y)]

  /**
    * Convert from the underlying data structure to
    * Seq[I] where I is the index set of the GP.
    * Default implementation: the first component of [[dataAsSeq]].
    * */
  def dataAsIndexSeq(data: T): Seq[I] = dataAsSeq(data).map(_._1)
}
/**
* Processes which can be specified by upto second order statistics i.e. mean and covariance
* @tparam T The underlying data structure storing the training & test data.
* @tparam I The type of the index set (i.e. Double for time series, DenseVector for GP regression)
* @tparam Y The type of the output label
* @tparam K The type returned by the kernel function.
* @tparam M The data structure holding the kernel/covariance matrix
* @tparam W Implementing class of the posterior distribution
* @author mandar2812
*
* */
trait SecondOrderProcessModel[T, I, Y, K, M, W] extends StochasticProcessModel[T, I, Y, W] {

  /**
    * Mean Function: Takes a member of the index set (input)
    * and returns the corresponding mean of the distribution
    * corresponding to input.
    * */
  val mean: DataPipe[I, Y]

  /**
    * Underlying covariance function of the
    * Gaussian Processes. Returns values of type [[K]] and
    * builds kernel matrices of type [[M]].
    * */
  val covariance: CovarianceFunction[I, K, M]
}
/**
* Blueprint for a continuous valued stochastic process, abstracts away the behavior
* common to sub-classes such as [[io.github.mandar2812.dynaml.models.gp.GPRegression]],
* [[io.github.mandar2812.dynaml.models.stp.StudentTRegression]] and others.
*
* @author mandar2812 date: 11/10/2016
*
* */
abstract class ContinuousProcessModel[T, I, Y, W <: ContinuousRandomVariable[_]]
  extends StochasticProcessModel[T, I, Y, W] {

  private val logger = Logger.getLogger(this.getClass)

  // Number of standard deviations spanned by the error bars; defaults to 1.
  private var errorSigma: Int = 1

  /** Current error bar width in standard deviations. */
  def _errorSigma = errorSigma

  /** Set the error bar width (in standard deviations). */
  def errorSigma_(s: Int) = errorSigma = s

  /**
   * Draw three predictions from the posterior predictive distribution:
   * the mean/MAP estimate plus lower and upper error bars at
   * (mean -/+ sigma * stdDeviation).
   */
  def predictionWithErrorBars[U <: Seq[I]](testData: U, sigma: Int): Seq[(I, Y, Y, Y)]

  /**
   * Returns a prediction with error bars for a test set of indexes and labels.
   * (Index, Actual Value, Prediction, Lower Bar, Higher Bar)
   */
  def test(testData: T): Seq[(I, Y, Y, Y, Y)] = {
    println("Generating predictions for test set")
    // Posterior predictive summaries (index, mean, lower, upper) per test index.
    val predictions = predictionWithErrorBars(dataAsIndexSeq(testData), errorSigma)
    // Pair each labelled test point with its prediction and error bars.
    dataAsSeq(testData).zip(predictions).map {
      case ((index, actual), (_, mean, lower, upper)) =>
        (index, actual, mean, lower, upper)
    }
  }
}
/**
 * A continuous process whose posterior predictive distribution is a finite
 * mixture; training data is held as a plain sequence of (index, label) pairs.
 */
abstract class StochasticProcessMixtureModel[
  I, Y, W <: ContinuousMixtureRV[_, _]] extends
  ContinuousProcessModel[Seq[(I, Y)], I, Y, W]
object StochasticProcessMixtureModel {

  /** Create a mixture of Gaussian process regression models. */
  def apply[T, I: ClassTag](
    component_processes: Seq[AbstractGPRegressionModel[T, I]],
    weights: DenseVector[Double]) =
    new GaussianProcessMixture[T, I](component_processes, weights)

  /** Create a mixture of Student's T process regression models. */
  def apply[T, I: ClassTag](
    component_processes: Seq[AbstractSTPRegressionModel[T, I]],
    weights: DenseVector[Double]) =
    new StudentTProcessMixture[T, I](component_processes, weights)

  /** Create a mixture of multivariate Student's T regression models. */
  def apply[T, I: ClassTag](
    component_processes: Seq[MVStudentsTModel[T, I]],
    weights: DenseVector[Double]) =
    new MVTMixture[T, I](component_processes, weights)
}
/**
* A process which is a multinomial mixture of
* continuous component processes.
* @tparam I The type of the index set (i.e. Double for time series, DenseVector for GP regression)
* @tparam Y The type of the output label
* @tparam W1 Implementing class of the posterior distribution for the base processes
* should inherit from [[ContinuousMixtureRV]]
* @author mandar2812 date 19/06/2017
* */
abstract class ContinuousMixtureModel[
  T, I: ClassTag, Y, YDomain,
  W1 <: ContinuousDistrRV[YDomain],
  BaseProcesses <: ContinuousProcessModel[T, I, Y, W1]](
  val component_processes: Seq[BaseProcesses],
  val weights: DenseVector[Double]) extends
  StochasticProcessMixtureModel[I, Y, ContinuousDistrMixture[YDomain, W1]] {

  /** Calculates posterior predictive distribution for
    * a particular set of test data points. The result is the mixture of
    * the components' predictive distributions, weighted by [[weights]].
    *
    * @param test A Sequence or Sequence like data structure
    *             storing the values of the input patters.
    * */
  override def predictiveDistribution[U <: Seq[I]](test: U) =
    ContinuousDistrMixture[YDomain, W1](component_processes.map(_.predictiveDistribution(test)), weights)
}
/**
* A multinomial mixture of component processes, each
* of which can output predictive distributions which have
* error bars around the mean/mode.
*
* @tparam T The training data type of each component
* @tparam I The input feature type accepted by each component
* @tparam Y The type of the output label
* @tparam YDomain The type of a collection of outputs, e.g. vector
* @tparam YDomainVar The type of the second moment (variance) returned
* by the predictive distribution of each component process
* @tparam BaseDistr The type of the predictive distribution of each process.
* @tparam W1 The random variable type returned by the [[predictiveDistribution()]] method
* of each component.
* @tparam BaseProcesses The type of the stochastic process components
*
*
* @param component_processes The stochastic processes which form the components of the mixture
* @param weights The probability weights assigned to each component.
* @author mandar2812 date 19/06/2017
* */
abstract class GenContinuousMixtureModel[
  T, I: ClassTag, Y, YDomain, YDomainVar,
  BaseDistr <: ContinuousDistr[YDomain] with Moments[YDomain, YDomainVar] with HasErrorBars[YDomain],
  W1 <: ContinuousRVWithDistr[YDomain, BaseDistr],
  BaseProcesses <: ContinuousProcessModel[T, I, Y, W1]](
  val component_processes: Seq[BaseProcesses],
  val weights: DenseVector[Double]) extends
  StochasticProcessMixtureModel[I, Y, ContMixtureRVBars[YDomain, YDomainVar, BaseDistr]] {

  private val logger = Logger.getLogger(this.getClass)

  /**
    * The training data.
    * NOTE(review): empty by design — the mixture itself holds no training
    * data; the component processes were trained individually.
    * */
  override protected val g: Seq[(I, Y)] = Seq()

  /**
    * Convert from the underlying data structure to
    * Seq[(I, Y)] where I is the index set of the GP
    * and Y is the value/label type. Here the identity, as the data
    * is already a sequence of pairs.
    * */
  override def dataAsSeq(data: Seq[(I, Y)]) = data

  /**
    * Predict the value of the
    * target variable given a
    * point. Returns the mixture's mean estimate (sigma fixed at 1).
    * */
  override def predict(point: I) = predictionWithErrorBars(Seq(point), 1).head._2

  // Convert a value of the output domain into a stream of individual labels.
  protected def toStream(y: YDomain): Stream[Y]

  // Vector space instance over the output domain for the given dimensionality;
  // needed to form the weighted mixture of component distributions.
  protected def getVectorSpace(num_dim: Int): VectorSpace[YDomain, Double]

  /** Calculates posterior predictive distribution for
    * a particular set of test data points: the weighted mixture of the
    * components' underlying distributions.
    *
    * @param test A Sequence or Sequence like data structure
    *             storing the values of the input patters.
    * */
  override def predictiveDistribution[U <: Seq[I]](test: U): ContMixtureRVBars[YDomain, YDomainVar, BaseDistr] =
    ContinuousDistrMixture[YDomain, YDomainVar, BaseDistr](
      component_processes.map(_.predictiveDistribution(test).underlyingDist),
      weights)(getVectorSpace(test.length))

  /**
    * Draw three predictions from the posterior predictive distribution
    * 1) Mean or MAP estimate Y
    * 2) Y- : The lower error bar estimate (mean - sigma*stdDeviation)
    * 3) Y+ : The upper error bar. (mean + sigma*stdDeviation)
    * */
  override def predictionWithErrorBars[U <: Seq[I]](testData: U, sigma: Int) = {
    val posterior = predictiveDistribution(testData)
    // Per-point posterior means and sigma-wide confidence bounds.
    val mean = toStream(posterior.underlyingDist.mean)
    val (lower, upper) = posterior.underlyingDist.confidenceInterval(sigma.toDouble)

    val lowerErrorBars = toStream(lower)
    val upperErrorBars = toStream(upper)

    println("Generating error bars")

    val preds = mean.zip(lowerErrorBars.zip(upperErrorBars)).map(t => (t._1, t._2._1, t._2._2))
    (testData zip preds).map(i => (i._1, i._2._1, i._2._2, i._2._3))
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import java.io.File
import java.net.URI
import java.nio.file.Files
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.util.Try
import org.apache.spark.{SparkConf, SparkUserAppException}
import org.apache.spark.api.python.{Py4JServer, PythonUtils}
import org.apache.spark.internal.config._
import org.apache.spark.util.{RedirectThread, Utils}
/**
* A main class used to launch Python applications. It executes python as a
* subprocess and then has it connect back to the JVM to access system properties, etc.
*/
object PythonRunner {

  /**
   * Entry point. Expected args: <python file> <comma-separated py-files> <app args...>.
   * Starts a Py4J gateway, launches the python interpreter as a subprocess with
   * the gateway coordinates in its environment, and propagates its exit code.
   */
  def main(args: Array[String]): Unit = {
    val pythonFile = args(0)
    val pyFiles = args(1)
    val otherArgs = args.slice(2, args.length)
    val sparkConf = new SparkConf()
    // Resolution order for the driver-side python executable: Spark conf
    // entries first, then environment variables, finally plain "python".
    val pythonExec = sparkConf.get(PYSPARK_DRIVER_PYTHON)
      .orElse(sparkConf.get(PYSPARK_PYTHON))
      .orElse(sys.env.get("PYSPARK_DRIVER_PYTHON"))
      .orElse(sys.env.get("PYSPARK_PYTHON"))
      .getOrElse("python")

    // Format python file paths before adding them to the PYTHONPATH
    val formattedPythonFile = formatPath(pythonFile)
    val formattedPyFiles = resolvePyFiles(formatPaths(pyFiles))

    val gatewayServer = new Py4JServer(sparkConf)

    val thread = new Thread(() => Utils.logUncaughtExceptions { gatewayServer.start() })
    thread.setName("py4j-gateway-init")
    thread.setDaemon(true)
    thread.start()

    // Wait until the gateway server has started, so that we know which port is it bound to.
    // `gatewayServer.start()` will start a new thread and run the server code there, after
    // initializing the socket, so the thread started above will end as soon as the server is
    // ready to serve connections.
    thread.join()

    // Build up a PYTHONPATH that includes the Spark assembly (where this class is), the
    // python directories in SPARK_HOME (if set), and any files in the pyFiles argument
    val pathElements = new ArrayBuffer[String]
    pathElements ++= formattedPyFiles
    pathElements += PythonUtils.sparkPythonPath
    pathElements += sys.env.getOrElse("PYTHONPATH", "")
    val pythonPath = PythonUtils.mergePythonPaths(pathElements.toSeq: _*)

    // Launch Python process
    val builder = new ProcessBuilder((Seq(pythonExec, formattedPythonFile) ++ otherArgs).asJava)
    val env = builder.environment()
    env.put("PYTHONPATH", pythonPath)
    // This is equivalent to setting the -u flag; we use it because ipython doesn't support -u:
    env.put("PYTHONUNBUFFERED", "YES") // value is needed to be set to a non-empty string
    env.put("PYSPARK_GATEWAY_PORT", "" + gatewayServer.getListeningPort)
    env.put("PYSPARK_GATEWAY_SECRET", gatewayServer.secret)
    // pass conf spark.pyspark.python to python process, the only way to pass info to
    // python process is through environment variable.
    sparkConf.get(PYSPARK_PYTHON).foreach(env.put("PYSPARK_PYTHON", _))
    sys.env.get("PYTHONHASHSEED").foreach(env.put("PYTHONHASHSEED", _))
    // if OMP_NUM_THREADS is not explicitly set, override it with the number of cores
    if (sparkConf.getOption("spark.yarn.appMasterEnv.OMP_NUM_THREADS").isEmpty &&
        sparkConf.getOption("spark.mesos.driverEnv.OMP_NUM_THREADS").isEmpty &&
        sparkConf.getOption("spark.kubernetes.driverEnv.OMP_NUM_THREADS").isEmpty) {
      // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to the driver
      // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool
      // see https://github.com/numpy/numpy/issues/10455
      sparkConf.getOption("spark.driver.cores").foreach(env.put("OMP_NUM_THREADS", _))
    }
    builder.redirectErrorStream(true) // Ugly but needed for stdout and stderr to synchronize
    try {
      val process = builder.start()

      // Mirror the python process' combined stdout/stderr onto our stdout.
      new RedirectThread(process.getInputStream, System.out, "redirect output").start()

      val exitCode = process.waitFor()
      if (exitCode != 0) {
        throw new SparkUserAppException(exitCode)
      }
    } finally {
      // Always shut the gateway down, even if the python app failed.
      gatewayServer.shutdown()
    }
  }

  /**
   * Format the python file path so that it can be added to the PYTHONPATH correctly.
   *
   * Python does not understand URI schemes in paths. Before adding python files to the
   * PYTHONPATH, we need to extract the path from the URI. This is safe to do because we
   * currently only support local python files.
   */
  def formatPath(path: String, testWindows: Boolean = false): String = {
    if (Utils.nonLocalPaths(path, testWindows).nonEmpty) {
      throw new IllegalArgumentException("Launching Python applications through " +
        s"spark-submit is currently only supported for local files: $path")
    }
    // get path when scheme is file.
    val uri = Try(new URI(path)).getOrElse(new File(path).toURI)
    var formattedPath = uri.getScheme match {
      case null => path
      case "file" | "local" => uri.getPath
      case _ => null
    }

    // Guard against malformed paths potentially throwing NPE
    if (formattedPath == null) {
      throw new IllegalArgumentException(s"Python file path is malformed: $path")
    }

    // In Windows, the drive should not be prefixed with "/"
    // For instance, python does not understand "/C:/path/to/sheep.py"
    if (Utils.isWindows && formattedPath.matches("/[a-zA-Z]:/.*")) {
      formattedPath = formattedPath.stripPrefix("/")
    }
    formattedPath
  }

  /**
   * Format each python file path in the comma-delimited list of paths, so it can be
   * added to the PYTHONPATH correctly.
   */
  def formatPaths(paths: String, testWindows: Boolean = false): Array[String] = {
    Option(paths).getOrElse("")
      .split(",")
      .filter(_.nonEmpty)
      .map { p => formatPath(p, testWindows) }
  }

  /**
   * Resolves the ".py" files. ".py" file should not be added as is because PYTHONPATH does
   * not expect a file. This method creates a temporary directory and puts the ".py" files
   * if exist in the given paths.
   */
  private def resolvePyFiles(pyFiles: Array[String]): Array[String] = {
    // Created lazily so no temp dir appears when there are no ".py" entries.
    lazy val dest = Utils.createTempDir(namePrefix = "localPyFiles")
    pyFiles.flatMap { pyFile =>
      // In case of client with submit, the python paths should be set before context
      // initialization because the context initialization can be done later.
      // We will copy the local ".py" files because ".py" file shouldn't be added
      // alone but its parent directory in PYTHONPATH. See SPARK-24384.
      if (pyFile.endsWith(".py")) {
        val source = new File(pyFile)
        if (source.exists() && source.isFile && source.canRead) {
          Files.copy(source.toPath, new File(dest, source.getName).toPath)
          Some(dest.getAbsolutePath)
        } else {
          // Don't have to add it if it doesn't exist or isn't readable.
          None
        }
      } else {
        Some(pyFile)
      }
    }.distinct
  }
}
| dbtsai/spark | core/src/main/scala/org/apache/spark/deploy/PythonRunner.scala | Scala | apache-2.0 | 7,749 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.io.{File, IOException}
import java.text.SimpleDateFormat
import java.util
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.sql.execution.command.{AlterPartitionModel, DataMapField, Field, PartitionerField}
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datamap.Segment
import org.apache.carbondata.core.datastore.block.{SegmentProperties, TableBlockInfo}
import org.apache.carbondata.core.datastore.filesystem.CarbonFile
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonTableIdentifier, SegmentFileStore}
import org.apache.carbondata.core.metadata.schema.PartitionInfo
import org.apache.carbondata.core.metadata.schema.partition.PartitionType
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.mutate.CarbonUpdateUtil
import org.apache.carbondata.core.readcommitter.TableStatusReadCommittedScope
import org.apache.carbondata.core.statusmanager.SegmentStatusManager
import org.apache.carbondata.core.util.CarbonUtil
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.carbondata.hadoop.CarbonInputSplit
import org.apache.carbondata.hadoop.api.{CarbonInputFormat, CarbonTableInputFormat}
import org.apache.carbondata.hadoop.util.CarbonInputFormatUtil
import org.apache.carbondata.processing.loading.model.CarbonLoadModel
import org.apache.carbondata.spark.util.CommonUtil
object PartitionUtils {
/**
 * Parse a comma separated list-partition definition into groups. Tokens
 * wrapped in parentheses form one multi-value group, e.g.
 * "0, 1, (2, 3)" becomes List(List("0"), List("1"), List("2", "3")).
 */
def getListInfo(originListInfo: String): List[List[String]] = {
  val groups = ListBuffer[List[String]]()
  val current = ListBuffer[String]()
  var insideGroup = false
  originListInfo.split(",").map(_.trim()).foreach { token =>
    if (token.startsWith("(")) {
      // Opening token: strip the parenthesis and start collecting a group.
      current += token.replace("(", "").trim()
      insideGroup = true
    } else if (token.endsWith(")")) {
      // Closing token: strip the parenthesis and flush the collected group.
      current += token.replace(")", "").trim()
      groups += current.toList
      current.clear()
      insideGroup = false
    } else if (insideGroup) {
      // Middle of a parenthesised group.
      current += token
    } else {
      // Stand-alone value: a group of its own.
      groups += List(token)
    }
  }
  groups.toList
}
/**
* verify the add/split information and update the partitionInfo:
* 1. update rangeInfo/listInfo
* 2. update partitionIds
*/
def updatePartitionInfo(partitionInfo: PartitionInfo, partitionIdList: List[Int],
    partitionId: Int, splitInfo: List[String], timestampFormatter: SimpleDateFormat,
    dateFormatter: SimpleDateFormat): Unit = {
  val columnDataType = partitionInfo.getColumnSchemaList.get(0).getDataType
  // Position of the target partition within the current partition id list.
  val index = partitionIdList.indexOf(partitionId)
  if (index < 0) {
    throw new IllegalArgumentException("Invalid Partition Id " + partitionId +
      "\\n Use show partitions table_name to get the list of valid partitions")
  }
  if (partitionInfo.getPartitionType == PartitionType.RANGE) {
    val rangeInfo = partitionInfo.getRangeInfo.asScala.toList
    // partitionId == 0 is the default partition: new ranges are appended.
    // Otherwise the split values replace the target partition's single range.
    // NOTE(review): the (index - 1)/index offsets suggest rangeInfo holds one
    // entry less than partitionIdList (id 0 has no range) — confirm against
    // the PartitionInfo contract.
    val newRangeInfo = partitionId match {
      case 0 => rangeInfo ++ splitInfo
      case _ => rangeInfo.take(index - 1) ++ splitInfo ++
        rangeInfo.takeRight(rangeInfo.size - index)
    }
    CommonUtil.validateRangeInfo(newRangeInfo, columnDataType,
      timestampFormatter, dateFormatter)
    partitionInfo.setRangeInfo(newRangeInfo.asJava)
  } else if (partitionInfo.getPartitionType == PartitionType.LIST) {
    val originList = partitionInfo.getListInfo.asScala.map(_.asScala.toList).toList
    if (partitionId != 0) {
      // Splitting an existing list partition: the split values must match the
      // values currently assigned to that partition.
      val targetListInfo = partitionInfo.getListInfo.get(index - 1)
      CommonUtil.validateSplitListInfo(targetListInfo.asScala.toList, splitInfo, originList)
    } else {
      // Adding to the default partition: the new values must not clash with
      // values already assigned to any existing list partition.
      CommonUtil.validateAddListInfo(splitInfo, originList)
    }
    val addListInfo = PartitionUtils.getListInfo(splitInfo.mkString(","))
    val newListInfo = partitionId match {
      case 0 => originList ++ addListInfo
      case _ => originList.take(index - 1) ++ addListInfo ++
        originList.takeRight(originList.size - index)
    }
    partitionInfo.setListInfo(newListInfo.map(_.asJava).asJava)
  }
  // Update partition id bookkeeping: brand new partitions when operating on the
  // default partition, otherwise record the split of an existing partition.
  if (partitionId == 0) {
    partitionInfo.addPartition(splitInfo.size)
  } else {
    partitionInfo.splitPartition(index, splitInfo.size)
  }
}
/**
 * Used for alter table partition commands to get segmentProperties in spark node.
 *
 * @param identifier absolute identifier of the table being altered
 * @param segmentId id of the segment whose properties are required
 * @param partitionIds partitions whose blocks should be considered
 * @param oldPartitionIdList Task id group before partition info is changed
 * @param partitionInfo current partition information of the table
 * @param carbonTable the table being altered
 * @return segment properties built from the file footer of the segment's first block
 */
def getSegmentProperties(identifier: AbsoluteTableIdentifier, segmentId: String,
    partitionIds: List[String], oldPartitionIdList: List[Int],
    partitionInfo: PartitionInfo,
    carbonTable: CarbonTable): SegmentProperties = {
  // All blocks of this segment for the given partitions.
  val blockInfos = getPartitionBlockList(
    identifier,
    segmentId,
    partitionIds,
    oldPartitionIdList,
    partitionInfo,
    carbonTable)
  // Reading one footer suffices: column schema and cardinality are per-segment values.
  val footer = CarbonUtil.readMetadatFile(blockInfos.get(0))
  new SegmentProperties(footer.getColumnInTable, footer.getSegmentInfo.getColumnCardinality)
}
/**
 * Collects the table block infos of one segment, restricted to the given partitions.
 *
 * @param identifier absolute identifier of the table
 * @param segmentId segment to scan
 * @param partitionIds partitions to include
 * @param oldPartitionIdList task id group before the partition info was changed
 * @param partitionInfo current partition information
 * @param carbonTable the table being read
 * @return the blocks of the segment as a Java list
 */
def getPartitionBlockList(identifier: AbsoluteTableIdentifier, segmentId: String,
    partitionIds: List[String], oldPartitionIdList: List[Int],
    partitionInfo: PartitionInfo,
    carbonTable: CarbonTable): java.util.List[TableBlockInfo] = {
  val job = new Job(new JobConf(FileFactory.getConfiguration))
  val inputFormat = CarbonInputFormatUtil
    .createCarbonTableInputFormat(identifier, partitionIds.asJava, job)
  CarbonInputFormat.setTableInfo(job.getConfiguration, carbonTable.getTableInfo)
  // Box the old partition ids for the Java API.
  val oldIds = oldPartitionIdList.map(_.asInstanceOf[Integer]).asJava
  val splits = inputFormat.getSplitsOfOneSegment(job, segmentId, oldIds, partitionInfo)
  val carbonSplits = splits.asScala.map(_.asInstanceOf[CarbonInputSplit])
  CarbonInputSplit.createBlocks(carbonSplits.asJava)
}
/**
 * After an alter-partition load, deletes the pre-alter data and index files of the
 * segment (those whose file-name timestamp differs from the new load timestamp),
 * then rewrites the segment file and updates the table status.
 *
 * NOTE(review): dbName, tableName and the local `tablePath` below are currently unused
 * in this method body — confirm whether they can be dropped from the signature.
 */
@throws(classOf[IOException])
def deleteOriginalCarbonFile(alterPartitionModel: AlterPartitionModel,
    identifier: AbsoluteTableIdentifier,
    partitionIds: List[String], dbName: String, tableName: String,
    partitionInfo: PartitionInfo): Unit = {
  val carbonLoadModel = alterPartitionModel.carbonLoadModel
  val segmentId = alterPartitionModel.segmentId
  val oldPartitionIds = alterPartitionModel.oldPartitionIds
  // Timestamp of the new load; files stamped differently are leftovers to delete.
  val newTime = carbonLoadModel.getFactTimeStamp
  val tablePath = carbonLoadModel.getTablePath
  val tableBlockInfoList =
    getPartitionBlockList(identifier, segmentId, partitionIds, oldPartitionIds,
      partitionInfo, carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable).asScala
  // Paths of all obsolete data and index files to remove.
  val pathList: util.List[String] = new util.ArrayList[String]()
  tableBlockInfoList.foreach{ tableBlockInfo =>
    val path = tableBlockInfo.getFilePath
    val timestamp = CarbonTablePath.DataFileUtil.getTimeStampFromFileName(path)
    if (timestamp.toLong != newTime) {
      // add carbondata file
      pathList.add(path)
      // add index file — its name is reconstructed from the data file name's parts
      val version = tableBlockInfo.getVersion
      val taskNo = CarbonTablePath.DataFileUtil.getTaskNo(path)
      val batchNo = CarbonTablePath.DataFileUtil.getBatchNoFromTaskNo(taskNo)
      val taskId = CarbonTablePath.DataFileUtil.getTaskIdFromTaskNo(taskNo)
      val bucketNumber = CarbonTablePath.DataFileUtil.getBucketNo(path)
      val indexFilePath =
        new Path(new Path(path).getParent,
          CarbonTablePath.getCarbonIndexFileName(taskId,
            bucketNumber.toInt,
            batchNo,
            timestamp,
            segmentId)).toString
      // indexFilePath could be duplicated when multiple data file related to one index file
      if (indexFilePath != null && !pathList.contains(indexFilePath)) {
        pathList.add(indexFilePath)
      }
    }
  }
  val files: util.List[File] = new util.ArrayList[File]()
  for (path <- pathList.asScala) {
    val file = new File(path)
    files.add(file)
  }
  CarbonUtil.deleteFiles(files.asScala.toArray)
  // Only if something was actually deleted does the segment file / table status change.
  if (!files.isEmpty) {
    val carbonTable = alterPartitionModel.carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
    val updatedSegFile: String = mergeAndUpdateSegmentFile(alterPartitionModel,
      identifier,
      segmentId,
      carbonTable,
      files.asScala)
    val segmentFiles = Seq(new Segment(alterPartitionModel.segmentId, updatedSegFile, null))
      .asJava
    if (!CarbonUpdateUtil.updateTableMetadataStatus(
      new util.HashSet[Segment](Seq(new Segment(alterPartitionModel.segmentId,
        null, null)).asJava),
      carbonTable,
      alterPartitionModel.carbonLoadModel.getFactTimeStamp.toString,
      true,
      new util.ArrayList[Segment](0),
      new util.ArrayList[Segment](segmentFiles), "")) {
      throw new IOException("Data update failed due to failure in table status updation.")
    }
  }
}
/**
 * Used to extract PartitionerFields for aggregation datamaps.
 * This method will keep generating partitionerFields until the sequence of
 * partition column is broken.
 *
 * For example: if x,y,z are partition columns in main table then child tables will be
 * partitioned only if the child table has List("x,y,z", "x,y", "x") as the projection columns.
 *
 * @param allPartitionColumn partition columns of the main table, in declaration order
 * @param fieldRelations mapping from child-table fields to their main-table relations
 * @return partitioner fields for the child table; empty if the column sequence is broken
 */
def getPartitionerFields(allPartitionColumn: Seq[String],
    fieldRelations: mutable.LinkedHashMap[Field, DataMapField]): Seq[PartitionerField] = {

  // Walks the partition columns in order, accumulating one PartitionerField per column
  // that maps cleanly (non-aggregated, first relation's parent column matches).
  def generatePartitionerField(partitionColumn: List[String],
      partitionerFields: Seq[PartitionerField]): Seq[PartitionerField] = {
    partitionColumn match {
      case head :: tail =>
        // Collect the first relation which matched the condition.
        // NOTE(review): the `index` bound by zipWithIndex is unused; the position within
        // allPartitionColumn is used instead — confirm that is intentional.
        val validRelation = fieldRelations.zipWithIndex.collectFirst {
          case ((field, dataMapField), index) if
            dataMapField.columnTableRelationList.getOrElse(Seq()).nonEmpty &&
            head.equals(dataMapField.columnTableRelationList.get.head.parentColumnName) &&
            dataMapField.aggregateFunction.isEmpty =>
            (PartitionerField(field.name.get,
              field.dataType,
              field.columnComment), allPartitionColumn.indexOf(head))
        }
        if (validRelation.isDefined) {
          val (partitionerField, index) = validRelation.get
          // if relation is found then check if the partitionerFields already found are equal
          // to the index of this element.
          // If x with index 1 is found then there should be exactly 1 element already found.
          // If z with index 2 comes directly after x then this check will be false, as 1
          // element is skipped in between and index would be 2 while the number of elements
          // found would be 1. In that case return empty sequence so that the aggregate table
          // is not partitioned on any column.
          if (index == partitionerFields.length) {
            generatePartitionerField(tail, partitionerFields :+ partitionerField)
          } else {
            Seq.empty
          }
        } else {
          // if not found then continue search for the rest of the elements. Because the rest
          // of the elements can also decide if the table has to be partitioned or not.
          generatePartitionerField(tail, partitionerFields)
        }
      case Nil =>
        // if end of list then return fields.
        partitionerFields
    }
  }

  generatePartitionerField(allPartitionColumn.toList, Seq.empty)
}
/**
 * Rewrites the segment file after an alter-partition operation: writes a fresh segment
 * file, merges it with the pre-existing one (if any), removes the entries of the files
 * that were deleted, persists the merged result under a new name, and cleans up the
 * intermediate files.
 *
 * @param filesToBeDelete files removed by the alter operation; their index entries are
 *                        dropped from the merged segment file
 * @return the file name (with extension) of the merged, updated segment file
 */
private def mergeAndUpdateSegmentFile(alterPartitionModel: AlterPartitionModel,
    identifier: AbsoluteTableIdentifier,
    segmentId: String,
    carbonTable: CarbonTable, filesToBeDelete: Seq[File]) = {
  val metadataDetails =
    SegmentStatusManager.readTableStatusFile(
      CarbonTablePath.getTableStatusFilePath(carbonTable.getTablePath))
  // Segment file currently recorded for this segment (may be null for legacy segments).
  val segmentFile =
    metadataDetails.find(_.getLoadName.equals(segmentId)).get.getSegmentFile
  var allSegmentFiles: Seq[CarbonFile] = Seq.empty[CarbonFile]
  // Freshly written segment file reflecting the current on-disk state.
  val file = SegmentFileStore.writeSegmentFile(
    carbonTable,
    alterPartitionModel.segmentId,
    System.currentTimeMillis().toString)
  if (segmentFile != null) {
    allSegmentFiles ++= FileFactory.getCarbonFile(
      SegmentFileStore.getSegmentFilePath(carbonTable.getTablePath, segmentFile)) :: Nil
  }
  val updatedSegFile = {
    val carbonFile = FileFactory.getCarbonFile(
      SegmentFileStore.getSegmentFilePath(carbonTable.getTablePath, file))
    allSegmentFiles ++= carbonFile :: Nil
    // Final name is derived from the load's fact timestamp; merge goes via a _tmp file.
    val mergedSegFileName = SegmentFileStore.genSegmentFileName(
      segmentId,
      alterPartitionModel.carbonLoadModel.getFactTimeStamp.toString)
    val tmpFile = mergedSegFileName + "_tmp"
    val segmentStoreFile = SegmentFileStore.mergeSegmentFiles(
      tmpFile,
      CarbonTablePath.getSegmentFilesLocation(carbonTable.getTablePath),
      allSegmentFiles.toArray)
    // Drop index entries of the files deleted by the alter operation.
    val indexFiles = segmentStoreFile.getLocationMap.values().asScala.head.getFiles
    filesToBeDelete.foreach(f => indexFiles.remove(f.getName))
    SegmentFileStore.writeSegmentFile(
      segmentStoreFile,
      CarbonTablePath.getSegmentFilesLocation(carbonTable.getTablePath) +
        CarbonCommonConstants.FILE_SEPARATOR + mergedSegFileName + CarbonTablePath.SEGMENT_EXT)
    // Clean up the intermediate segment files (the fresh one and the _tmp merge result).
    carbonFile.delete()
    FileFactory.getCarbonFile(
      SegmentFileStore.getSegmentFilePath(
        carbonTable.getTablePath, tmpFile + CarbonTablePath.SEGMENT_EXT)).delete()
    mergedSegFileName + CarbonTablePath.SEGMENT_EXT
  }
  updatedSegFile
}
}
| sgururajshetty/carbondata | integration/spark-common/src/main/scala/org/apache/spark/util/PartitionUtils.scala | Scala | apache-2.0 | 15,107 |
package org.scalatra
import java.security.SecureRandom
/** Generates random, hex-encoded tokens suitable for CSRF protection. */
object GenerateId {

  /** Returns a freshly generated, 40-character lowercase hex token. */
  def apply(): String = {
    generateCsrfToken()
  }

  // Encodes each byte as two lowercase hex digits. Replaces the previous
  // deprecated `/:` fold with the equivalent, idiomatic map + mkString.
  private def hexEncode(bytes: Array[Byte]): String =
    bytes.map(b => f"${b.toInt & 0xff}%02x").mkString

  /** Draws 20 bytes from a cryptographically strong RNG and hex-encodes them. */
  protected def generateCsrfToken() = {
    val tokenVal = new Array[Byte](20)
    (new SecureRandom).nextBytes(tokenVal)
    hexEncode(tokenVal)
  }

  @deprecated("Use generateCsrfToken()")
  protected def generateCSRFToken() = generateCsrfToken()
}
object CsrfTokenSupport {
  /** Session attribute / request parameter name under which the CSRF token is stored.
    * Note: `.intern` was removed — string literals are interned by the JVM already. */
  val DefaultKey = "org.scalatra.CsrfTokenSupport.key"
}
/**
 * Provides cross-site request forgery protection.
 *
 * Adds a before filter. If a request is determined to be forged, the
 * `handleForgery()` hook is invoked. Otherwise, a token for the next
 * request is prepared with `prepareCsrfToken`.
 */
trait CsrfTokenSupport { self: ScalatraKernel =>

  /**
   * The key used to store the token on the session, as well as the parameter
   * of the request.
   */
  protected def csrfKey: String = CsrfTokenSupport.DefaultKey

  /**
   * Returns the token from the session.
   */
  protected def csrfToken: String = session(csrfKey).asInstanceOf[String]

  // Registered when the trait is mixed in: reject forged requests first, then make
  // sure a token exists for the next request.
  before() {
    if (isForged) {
      handleForgery()
    }
    prepareCsrfToken()
  }

  /**
   * Tests whether a request with a unsafe method is a potential cross-site
   * forgery.
   *
   * @return true if the request is an unsafe method (POST, PUT, DELETE, TRACE,
   * CONNECT, PATCH) and the request parameter at `csrfKey` does not match
   * the session key of the same name.
   */
  protected def isForged: Boolean = {
    request.isWrite && session.get(csrfKey) != params.get(csrfKey)
  }

  /**
   * Take an action when a forgery is detected. The default action
   * halts further request processing and returns a 403 HTTP status code.
   */
  protected def handleForgery() {
    halt(403, "Request tampering detected!")
  }

  /**
   * Prepares a CSRF token. The default implementation uses `GenerateId`
   * and stores it on the session. Existing tokens are kept (getOrElseUpdate),
   * so the token is stable for the lifetime of the session.
   */
  protected def prepareCsrfToken() = {
    session.getOrElseUpdate(csrfKey, GenerateId())
  }

  @deprecated("Use prepareCsrfToken()")
  protected def prepareCSRFToken() = prepareCsrfToken()
}
| kuochaoyi/scalatra | core/src/main/scala/org/scalatra/CsrfTokenSupport.scala | Scala | bsd-2-clause | 2,354 |
package sttp.client3
import sttp.capabilities.{Effect, Streams, WebSockets}
import sttp.client3.internal.{SttpFile, _}
import sttp.model._
import sttp.ws.{WebSocket, WebSocketFrame}
import java.io.InputStream
import java.nio.ByteBuffer
import scala.collection.immutable.Seq
import scala.concurrent.duration._
trait SttpApi extends SttpExtensions with UriInterpolator {

  /** Default read timeout applied to all requests unless overridden: one minute. */
  val DefaultReadTimeout: Duration = 1.minute

  /** An empty request with no headers.
    *
    * Reads the response body as an `Either[String, String]`, where `Left` is used if the status code is non-2xx, and
    * `Right` otherwise.
    */
  val emptyRequest: RequestT[Empty, Either[String, String], Any] =
    RequestT[Empty, Either[String, String], Any](
      None,
      None,
      NoBody,
      Vector(),
      asString,
      RequestOptions(
        followRedirects = true,
        DefaultReadTimeout,
        FollowRedirectsBackend.MaxRedirects,
        redirectToGet = false
      ),
      Map()
    )

  /** A starting request, with the following modification comparing to `emptyRequest`: `Accept-Encoding` is set to
    * `gzip, deflate` (compression/decompression is handled automatically by the library).
    *
    * Reads the response body as an `Either[String, String]`, where `Left` is used if the status code is non-2xx, and
    * `Right` otherwise.
    */
  val basicRequest: RequestT[Empty, Either[String, String], Any] =
    emptyRequest.acceptEncoding("gzip, deflate")

  /** A starting request which always reads the response body as a string, regardless of the status code.
    */
  val quickRequest: RequestT[Empty, String, Any] = basicRequest.response(asStringAlways)

  // response specifications

  /** Ignores the response body. */
  def ignore: ResponseAs[Unit, Any] = IgnoreResponse

  /** Use the `utf-8` charset by default, unless specified otherwise in the response headers.
    */
  def asString: ResponseAs[Either[String, String], Any] = asString(Utf8)

  /** Use the `utf-8` charset by default, unless specified otherwise in the response headers.
    */
  def asStringAlways: ResponseAs[String, Any] = asStringAlways(Utf8)

  /** Use the given charset by default, unless specified otherwise in the response headers.
    */
  def asString(charset: String): ResponseAs[Either[String, String], Any] =
    asStringAlways(charset)
      .mapWithMetadata { (s, m) =>
        if (m.isSuccess) Right(s) else Left(s)
      }
      .showAs("either(as string, as string)")

  /** Always reads the body as a string; the charset comes from the response's
    * content-type header when present, falling back to the given charset. */
  def asStringAlways(charset: String): ResponseAs[String, Any] =
    asByteArrayAlways
      .mapWithMetadata { (bytes, metadata) =>
        val charset2 = metadata.contentType.flatMap(charsetFromContentType).getOrElse(charset)
        val charset3 = sanitizeCharset(charset2)
        new String(bytes, charset3)
      }
      .showAs("as string")

  /** Reads the body as bytes; `Left` (as string) on non-2xx responses. */
  def asByteArray: ResponseAs[Either[String, Array[Byte]], Any] = asEither(asStringAlways, asByteArrayAlways)

  /** Reads the body as bytes regardless of the status code. */
  def asByteArrayAlways: ResponseAs[Array[Byte], Any] = ResponseAsByteArray

  /** Use the `utf-8` charset by default, unless specified otherwise in the response headers.
    */
  def asParams: ResponseAs[Either[String, Seq[(String, String)]], Any] = asParams(Utf8)

  /** Use the `utf-8` charset by default, unless specified otherwise in the response headers.
    */
  def asParamsAlways: ResponseAs[Seq[(String, String)], Any] = asParamsAlways(Utf8)

  /** Use the given charset by default, unless specified otherwise in the response headers.
    */
  def asParams(charset: String): ResponseAs[Either[String, Seq[(String, String)]], Any] = {
    asEither(asStringAlways, asParamsAlways(charset)).showAs("either(as string, as params)")
  }

  /** Use the given charset by default, unless specified otherwise in the response headers.
    */
  def asParamsAlways(charset: String): ResponseAs[Seq[(String, String)], Any] = {
    val charset2 = sanitizeCharset(charset)
    asStringAlways(charset2).map(ResponseAs.parseParams(_, charset2)).showAs("as params")
  }

  /** Handles the body as a stream via `f`; `Left` (as string) on non-2xx responses. */
  def asStream[F[_], T, S](s: Streams[S])(f: s.BinaryStream => F[T]): ResponseAs[Either[String, T], Effect[F] with S] =
    asEither(asStringAlways, asStreamAlways(s)(f))

  /** As [[asStream]], additionally passing the response metadata to `f`. */
  def asStreamWithMetadata[F[_], T, S](s: Streams[S])(
      f: (s.BinaryStream, ResponseMetadata) => F[T]
  ): ResponseAs[Either[String, T], Effect[F] with S] =
    asEither(asStringAlways, asStreamAlwaysWithMetadata(s)(f))

  /** Handles the body as a stream via `f`, regardless of the status code. */
  def asStreamAlways[F[_], T, S](s: Streams[S])(f: s.BinaryStream => F[T]): ResponseAs[T, Effect[F] with S] =
    asStreamAlwaysWithMetadata(s)((s, _) => f(s))

  /** As [[asStreamAlways]], additionally passing the response metadata to `f`. */
  def asStreamAlwaysWithMetadata[F[_], T, S](s: Streams[S])(
      f: (s.BinaryStream, ResponseMetadata) => F[T]
  ): ResponseAs[T, Effect[F] with S] = ResponseAsStream(s)(f)

  /** Exposes the raw stream; the caller is responsible for consuming/closing it. */
  def asStreamUnsafe[S](s: Streams[S]): ResponseAs[Either[String, s.BinaryStream], S] =
    asEither(asStringAlways, asStreamAlwaysUnsafe(s))

  /** Exposes the raw stream regardless of status code; caller must consume it. */
  def asStreamAlwaysUnsafe[S](s: Streams[S]): ResponseAs[s.BinaryStream, S] = ResponseAsStreamUnsafe(s)

  private[client3] def asSttpFile(file: SttpFile): ResponseAs[SttpFile, Any] =
    ResponseAsFile(file)

  /** Handles the response as a web socket via `f`; `Left` (as string) when the
    * server does not switch protocols. */
  def asWebSocket[F[_], T](f: WebSocket[F] => F[T]): ResponseAs[Either[String, T], Effect[F] with WebSockets] =
    asWebSocketEither(asStringAlways, asWebSocketAlways(f))

  /** As [[asWebSocket]], additionally passing the response metadata to `f`. */
  def asWebSocketWithMetadata[F[_], T](
      f: (WebSocket[F], ResponseMetadata) => F[T]
  ): ResponseAs[Either[String, T], Effect[F] with WebSockets] =
    asWebSocketEither(asStringAlways, asWebSocketAlwaysWithMetadata(f))

  /** Handles the response as a web socket via `f`, regardless of the status code. */
  def asWebSocketAlways[F[_], T](f: WebSocket[F] => F[T]): ResponseAs[T, Effect[F] with WebSockets] =
    asWebSocketAlwaysWithMetadata((w, _) => f(w))

  /** As [[asWebSocketAlways]], additionally passing the response metadata to `f`. */
  def asWebSocketAlwaysWithMetadata[F[_], T](
      f: (WebSocket[F], ResponseMetadata) => F[T]
  ): ResponseAs[T, Effect[F] with WebSockets] =
    ResponseAsWebSocket(f)

  /** Exposes the open web socket; the caller is responsible for closing it. */
  def asWebSocketUnsafe[F[_]]: ResponseAs[Either[String, WebSocket[F]], Effect[F] with WebSockets] =
    asWebSocketEither(asStringAlways, asWebSocketAlwaysUnsafe)

  /** Exposes the open web socket regardless of status; caller must close it. */
  def asWebSocketAlwaysUnsafe[F[_]]: ResponseAs[WebSocket[F], Effect[F] with WebSockets] = ResponseAsWebSocketUnsafe()

  /** Handles the web socket with a streaming pipe of incoming to outgoing frames. */
  def asWebSocketStream[S](
      s: Streams[S]
  )(p: s.Pipe[WebSocketFrame.Data[_], WebSocketFrame]): ResponseAs[Either[String, Unit], S with WebSockets] =
    asWebSocketEither(asStringAlways, asWebSocketStreamAlways(s)(p))

  /** As [[asWebSocketStream]], regardless of the status code. */
  def asWebSocketStreamAlways[S](s: Streams[S])(
      p: s.Pipe[WebSocketFrame.Data[_], WebSocketFrame]
  ): ResponseAs[Unit, S with WebSockets] = ResponseAsWebSocketStream(s, p)

  /** Picks the first response specification whose condition matches the response
    * metadata, falling back to `default`. */
  def fromMetadata[T, R](default: ResponseAs[T, R], conditions: ConditionalResponseAs[T, R]*): ResponseAs[T, R] =
    ResponseAsFromMetadata(conditions.toList, default)

  /** Uses the `onSuccess` response specification for successful responses (2xx), and the `onError` specification
    * otherwise.
    */
  def asEither[A, B, R](onError: ResponseAs[A, R], onSuccess: ResponseAs[B, R]): ResponseAs[Either[A, B], R] =
    fromMetadata(onError.map(Left(_)), ConditionalResponseAs(_.isSuccess, onSuccess.map(Right(_))))
      .showAs(s"either(${onError.show}, ${onSuccess.show})")

  /** Uses the `onSuccess` response specification for 101 responses (switching protocols), and the `onError`
    * specification otherwise.
    */
  def asWebSocketEither[A, B, R](onError: ResponseAs[A, R], onSuccess: ResponseAs[B, R]): ResponseAs[Either[A, B], R] =
    fromMetadata(
      onError.map(Left(_)),
      ConditionalResponseAs(_.code == StatusCode.SwitchingProtocols, onSuccess.map(Right(_)))
    ).showAs(s"either(${onError.show}, ${onSuccess.show})")

  /** Use both `l` and `r` to read the response body. Neither response specifications may use streaming or web sockets.
    */
  def asBoth[A, B](l: ResponseAs[A, Any], r: ResponseAs[B, Any]): ResponseAs[(A, B), Any] =
    asBothOption(l, r)
      .map { case (a, bo) =>
        // since l has no requirements, we know that the body will be replayable
        (a, bo.get)
      }
      .showAs(s"(${l.show}, ${r.show})")

  /** Use `l` to read the response body. If the raw body value which is used by `l` is replayable (a file or byte
    * array), also use `r` to read the response body. Otherwise ignore `r` (if the raw body is a stream or a web
    * socket).
    */
  def asBothOption[A, B, R](l: ResponseAs[A, R], r: ResponseAs[B, Any]): ResponseAs[(A, Option[B]), R] =
    ResponseAsBoth(l, r)

  // multipart factory methods

  /** Content type will be set to `text/plain` with `utf-8` encoding, can be overridden later using the `contentType`
    * method.
    */
  def multipart(name: String, data: String): Part[BasicRequestBody] =
    Part(name, StringBody(data, Utf8), contentType = Some(MediaType.TextPlainUtf8))

  /** Content type will be set to `text/plain` with given encoding, can be overridden later using the `contentType`
    * method.
    */
  def multipart(name: String, data: String, encoding: String): Part[BasicRequestBody] = {
    Part(name, StringBody(data, encoding), contentType = Some(MediaType.TextPlain.charset(encoding)))
  }

  /** Content type will be set to `application/octet-stream`, can be overridden later using the `contentType` method.
    */
  def multipart(name: String, data: Array[Byte]): Part[BasicRequestBody] =
    Part(name, ByteArrayBody(data), contentType = Some(MediaType.ApplicationOctetStream))

  /** Content type will be set to `application/octet-stream`, can be overridden later using the `contentType` method.
    */
  def multipart(name: String, data: ByteBuffer): Part[BasicRequestBody] =
    Part(name, ByteBufferBody(data), contentType = Some(MediaType.ApplicationOctetStream))

  /** Content type will be set to `application/octet-stream`, can be overridden later using the `contentType` method.
    */
  def multipart(name: String, data: InputStream): Part[BasicRequestBody] =
    Part(name, InputStreamBody(data), contentType = Some(MediaType.ApplicationOctetStream))

  /** Content type will be set to `application/octet-stream`, can be overridden later using the `contentType` method.
    *
    * File name will be set to the name of the file.
    */
  private[client3] def multipartSttpFile(name: String, file: SttpFile): Part[BasicRequestBody] =
    Part(name, FileBody(file), fileName = Some(file.name), contentType = Some(MediaType.ApplicationOctetStream))

  /** Encodes the given parameters as form data using `utf-8`.
    *
    * Content type will be set to `application/x-www-form-urlencoded`, can be overridden later using the `contentType`
    * method.
    */
  def multipart(name: String, fs: Map[String, String]): Part[BasicRequestBody] =
    Part(
      name,
      RequestBody.paramsToStringBody(fs.toList, Utf8),
      contentType = Some(MediaType.ApplicationXWwwFormUrlencoded)
    )

  /** Encodes the given parameters as form data.
    *
    * Content type will be set to `application/x-www-form-urlencoded`, can be overridden later using the `contentType`
    * method.
    */
  def multipart(name: String, fs: Map[String, String], encoding: String): Part[BasicRequestBody] =
    Part(
      name,
      RequestBody.paramsToStringBody(fs.toList, encoding),
      contentType = Some(MediaType.ApplicationXWwwFormUrlencoded)
    )

  /** Encodes the given parameters as form data using `utf-8`.
    *
    * Content type will be set to `application/x-www-form-urlencoded`, can be overridden later using the `contentType`
    * method.
    */
  def multipart(name: String, fs: Seq[(String, String)]): Part[BasicRequestBody] =
    Part(name, RequestBody.paramsToStringBody(fs, Utf8), contentType = Some(MediaType.ApplicationXWwwFormUrlencoded))

  /** Encodes the given parameters as form data.
    *
    * Content type will be set to `application/x-www-form-urlencoded`, can be overridden later using the `contentType`
    * method.
    */
  def multipart(name: String, fs: Seq[(String, String)], encoding: String): Part[BasicRequestBody] =
    Part(
      name,
      RequestBody.paramsToStringBody(fs, encoding),
      contentType = Some(MediaType.ApplicationXWwwFormUrlencoded)
    )

  /** Serializes `b` with the implicit [[BodySerializer]].
    *
    * Content type will be set to `application/x-www-form-urlencoded` (note: the code sets
    * this, not `application/octet-stream` as previously documented), can be overridden
    * later using the `contentType` method.
    */
  def multipart[B: BodySerializer](name: String, b: B): Part[BasicRequestBody] =
    Part(name, implicitly[BodySerializer[B]].apply(b), contentType = Some(MediaType.ApplicationXWwwFormUrlencoded))

  /** Content type will be set to `application/octet-stream`, can be overridden later using the `contentType` method.
    */
  def multipartStream[S](s: Streams[S])(name: String, b: s.BinaryStream): Part[RequestBody[S]] =
    Part(
      name,
      StreamBody(s)(b),
      contentType = Some(MediaType.ApplicationOctetStream)
    )
}
| softwaremill/sttp | core/src/main/scala/sttp/client3/SttpApi.scala | Scala | apache-2.0 | 12,648 |
package connectors
import exceptions.ServerNotRespondingException
import helpers.TestSpec
import org.mockito.Mockito._
import org.slf4j.Logger
import org.spongepowered.api.{Game, Server}
import scala.util.{Failure, Success}
/** Unit tests for `GameConnector.fetchServer` against a mocked Sponge `Game`. */
class GameConnectorSpec extends TestSpec {

  val mockServer = mock[Server]

  /** Builds a connector whose game either yields the mock server or throws on access. */
  def setupMockConnector(returnServer: Boolean): GameConnector = {
    val mockLogger = mock[Logger]
    val mockGame = mock[Game]
    val serverStub = when(mockGame.getServer)
    if (returnServer) {
      serverStub.thenReturn(mockServer)
    } else {
      serverStub.thenThrow(new IllegalStateException)
    }
    new GameConnector(mockGame, mockLogger)
  }

  "Calling .fetchServer" should {

    "return a Success containing the server" in {
      val result = setupMockConnector(returnServer = true).fetchServer()
      result shouldBe Success(mockServer)
    }

    "handle an exception with the correct response" in {
      val result = setupMockConnector(returnServer = false).fetchServer()
      result shouldBe Failure(ServerNotRespondingException())
    }
  }
}
| jameshforster/ToL-Scout-Plugin | src/test/scala/connectors/GameConnectorSpec.scala | Scala | apache-2.0 | 1,132 |
package tu.knowledge.semanticnetwork
/**
* @author talanov max
* date 2012-05-03
* time: 11:23 PM
*/
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
import tu.model.knowledge.{Probability, KnowledgeURI}
import tu.model.knowledge.primitive.KnowledgeString
import tu.model.knowledge.semanticnetwork.{SemanticNetworkNode, SemanticNetworkLink}
@RunWith(classOf[JUnitRunner])
class SemanticNetworkNodeTest extends FunSuite {

  // shared fixture values for the tests below
  val namespace = "testNamespace"
  val name = "name"
  val revision = "rev"
  val uri = new KnowledgeURI(namespace, name, revision)
  val probability = new Probability
  val sourceContent = "Source"
  val destinationContent = "Dest"

  test("test Ok") {
    assert(condition = true)
  }

  test("SemanticNetworkNode should store KnowledgeString") {
    // a node wrapping a KnowledgeString payload, with no outgoing links
    val node: SemanticNetworkNode[KnowledgeString] =
      new SemanticNetworkNode[KnowledgeString](
        new KnowledgeString(sourceContent, uri), List[SemanticNetworkLink](), uri)
    expect(node.content.toString)(sourceContent)
  }
}
| keskival/2 | model.knowledge/src/test/scala/tu/knowledge/semanticnetwork/SemanticNetworkNodeTest.scala | Scala | gpl-3.0 | 1,066 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.cli.test
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import spray.json._
import common.rest.WskRestOperations
import common.rest.RestResult
import common.TestUtils.RunResult
import common.WskActorSystem
@RunWith(classOf[JUnitRunner])
class WskRestActionSequenceTests extends WskActionSequenceTests with WskActorSystem {

  override lazy val wsk = new WskRestOperations

  /** Checks the sequence action's `exec` stanza against the expected components and kind. */
  override def verifyActionSequence(action: RunResult, name: String, compValue: JsArray, kindValue: JsString): Unit = {
    val exec = action.asInstanceOf[RestResult].respBody.fields("exec").asJsObject
    exec.fields("components") shouldBe compValue
    exec.fields("kind") shouldBe kindValue
  }
}
}
| starpit/openwhisk | tests/src/test/scala/org/apache/openwhisk/core/cli/test/WskRestActionSequenceTests.scala | Scala | apache-2.0 | 1,606 |
package org.jetbrains.plugins.scala
package lang.psi.api
import com.intellij.openapi.progress.ProgressManager
import com.intellij.openapi.util.Key
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.ImplicitArgumentsOwner.IMPLICIT_ARGS_KEY
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScArgumentExprList, ScExpression}
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.Parameter
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
/**
 * An element which may have implicit arguments resolved for it. The found
 * arguments are cached in the element's user data.
 *
 * Nikolay.Tropin
 * 2014-10-17
 */
// TODO Implement selectively, not by ScExpression
trait ImplicitArgumentsOwner extends ScalaPsiElement {

  /** Caches the resolved implicit arguments (or clears the cache when `None`). */
  private[psi] final def setImplicitArguments(results: Option[Seq[ScalaResolveResult]]): Unit = {
    putUserData(IMPLICIT_ARGS_KEY, results.orNull)
  }

  //todo: get rid of side-effect-driven logic
  // Triggers the side-effecting computation first, then reads the cached result
  // that computation is expected to have stored via setImplicitArguments.
  def findImplicitArguments: Option[Seq[ScalaResolveResult]] = {
    ProgressManager.checkCanceled()
    updateImplicitArguments()
    getUserData(IMPLICIT_ARGS_KEY).toOption
  }

  //calculation which may set implicit arguments as a side effect, typically computation of a type
  protected def updateImplicitArguments(): Unit

  /** Arguments matched to parameters; empty by default, overridden by call-like elements. */
  def matchedParameters: Seq[(ScExpression, Parameter)] = Seq.empty

  /** The explicitly written argument list that fills an implicit parameter, if any. */
  def explicitImplicitArgList: Option[ScArgumentExprList] = {
    val implicitArg = matchedParameters.collectFirst {
      case (arg, param) if param.isImplicit => arg
    }
    implicitArg.toSeq
      .flatMap(_.parentsInFile.take(2)) //argument or rhs of a named argument
      .filterByType[ScArgumentExprList]
      .headOption
  }
}
object ImplicitArgumentsOwner {
  // User-data key under which the resolved implicit arguments are cached on the element.
  private val IMPLICIT_ARGS_KEY: Key[Seq[ScalaResolveResult]] = Key.create[Seq[ScalaResolveResult]]("scala.implicit.arguments")

  /** Extractor: yields the element's implicit arguments (triggering their computation). */
  def unapply(e: ImplicitArgumentsOwner): Option[Seq[ScalaResolveResult]] = e.findImplicitArguments
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/api/ImplicitArgumentsOwner.scala | Scala | apache-2.0 | 1,858 |
package com.overviewdocs.jobhandler.documentset
import akka.actor.{ActorRef,ActorSystem,Props}
import scala.concurrent.Future
import com.overviewdocs.akkautil.WorkerActor
import com.overviewdocs.messages.DocumentSetCommands.SortField
/** Worker actor that executes [[SortField]] commands handed out by the broker. */
class SortWorker(broker: ActorRef, sortRunner: SortRunner) extends WorkerActor[SortField](broker) {
  /** Runs the sort for the command's document set and field, reporting to `asker`. */
  override def doWorkAsync(command: SortField, asker: ActorRef): Future[Unit] = {
    sortRunner.run(command.documentSetId, command.fieldName, asker)(context.system)
  }
}
object SortWorker {
  /** Props for creating a [[SortWorker]] bound to the given broker and runner. */
  def props(broker: ActorRef, sortRunner: SortRunner): Props = Props(new SortWorker(broker, sortRunner))
}
| overview/overview-server | worker/src/main/scala/com/overviewdocs/jobhandler/documentset/SortWorker.scala | Scala | agpl-3.0 | 637 |
package org.sbuild.plugins.jbake
import java.io.File
import de.tototec.sbuild._
/**
 * A configuration of a JBake instance.
 * There are three different flavours:
 * - `[[JBakeVersion$.Packaged]]` - A JBake distribution ZIP, defined by `version` and `url` (which can be any `[[TargetRef]]`)
 * - `[[JBakeVersion$.Local]]` -- A locally installed JBake, defined by a `homeDir`.
 * - `[[JBakeVersion$.Classpath]]` -- JBake will be completely resolved from the given classpath, which is a `[[TargetRefs]]`.
 *
 * Note, there are also some predefined versions available in `[[JBakeVersion$]]`.
 */
sealed trait JBakeVersion {
  /** The classpath to load JBake from. */
  def classpath(implicit p: Project): TargetRefs
  /** The `base.zip` file, used to create an initial project layout. */
  def baseZip(implicit p: Project): Option[TargetRef]
}
object JBakeVersion {

  object Packaged {
    /**
     * Conveniently download and use a released version from `jbake.org`.
     */
    def apply(version: String): JBakeVersion = Packaged(version, s"http://jbake.org/files/jbake-${version}-bin.zip")
  }

  /**
   * A JBake distribution ZIP.
   *
   * @param version The JBake version this package contains.
   * @param url The URL to the package. This parameter supports also any [[TargetRef.name]].
   */
  case class Packaged(version: String, url: String) extends JBakeVersion {
    // Core jar plus every jar under lib/ inside the distribution archive.
    override def classpath(implicit p: Project): TargetRefs = s"zip:file=jbake-${version}/jbake-core.jar;archive=${url}" ~ s"zip:regex=jbake-${version}/lib/.*[.][Jj][Aa][Rr];archive=${url}"
    override def baseZip(implicit p: Project): Option[TargetRef] = Some(s"zip:file=jbake-${version}/base.zip;archive=${url}")
  }

  /**
   * A locally installed (or at least unpacked) JBake.
   *
   * @param homeDir The installation directory of the locally installed JBake version.
   */
  case class Local(homeDir: File) extends JBakeVersion {
    // Core jar plus every jar found under the installation's lib/ directory.
    override def classpath(implicit p: Project): TargetRefs = s"${homeDir}/jbake-core.jar" ~ s"scan:${homeDir}/lib;regex=.*\\.jar"
    override def baseZip(implicit p: Project): Option[TargetRef] = Some(s"${homeDir}/base.zip")
  }

  /**
   * The JBake runtime is completely resolved from the given `explicitClasspath`.
   *
   * @param explicitClasspath The classpath, from where the JBake runtime will be loaded.
   * @param explicitBaseZip The ZIP file containing the initial project layout.
   */
  case class Classpath(explicitClasspath: TargetRefs, explicitBaseZip: Option[TargetRef] = None) extends JBakeVersion {
    override def classpath(implicit p: Project): TargetRefs = explicitClasspath
    override def baseZip(implicit p: Project): Option[TargetRef] = explicitBaseZip
  }

  /** JBake 2.2.0 */
  val JBake_2_2_0 = Packaged("2.2.0")
  /** JBake 2.2.1 */
  val JBake_2_2_1 = Packaged("2.2.1")
}
| SBuild-org/sbuild-jbake | org.sbuild.plugins.jbake/src/main/scala/org/sbuild/plugins/jbake/JBakeVersion.scala | Scala | apache-2.0 | 2,821 |
// Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
package org.pantsbuild.testproject.missingdepswhitelist2
import org.pantsbuild.testproject.publish.hello.greet.Greeting
class MissingDepsWhitelist2 {
  /** Delegates to the published Greeting helper with a fixed argument. */
  def doStuff() = Greeting.greet("weep")
} | wisechengyi/intellij-pants-plugin | testData/testprojects/intellij-integration/src/scala/org/pantsbuild/testproject/missingdepswhitelist2/MissingDepsWhitelist2.scala | Scala | apache-2.0 | 328 |
package test.pprint
import utest._
import pprint.Config.Defaults._
// Things we want to test against regressions but only run on the JVM
object JvmTests extends TestSuite{
  implicit val system = akka.actor.ActorSystem()
  val tests = TestSuite{
    'akka {
      // Bind a throwaway local HTTP server/materializer and check pprint's
      // rendering matches the plain toString of each value.
      val serverBinding = akka.http.Http(system).bind(interface = "localhost", port = 31337)
      Check(serverBinding, serverBinding.toString)
      val materializer = akka.stream.ActorFlowMaterializer()
      Check(materializer, materializer.toString)
    }
    'finagle{
      // Only checks that a PPrint instance can be materialized for a Twitter Future.
      implicitly[pprint.PPrint[com.twitter.util.Future[Unit]]]
    }
    'spire {
      import spire.implicits._
      import spire.math._
      def mean[A: Fractional](xs: A*): A = xs.reduceLeft(_ + _) / xs.size
      //
      val m = mean(Rational(1, 2), Rational(3, 2), Rational(0))
      implicitly[pprint.PPrint[Rational]]
      import spire.implicits._
      import spire.math._
      Check(
        Interval(0, 10),
        "[0, 10]"
      )
    }
    'doobie{
      import scalaz._, Scalaz._
      import doobie.imports._
      // A pure value lifted into ConnectionIO should render as Return(42).
      Check(
        42.point[ConnectionIO],
        "Return(42)"
      )
    }
    'coproduct{
      import shapeless.{:+:, CNil, Coproduct}
      type X = Int :+: String :+: CNil
      // A coproduct should render as its underlying value, not the wrapper.
      Check(
        Coproduct[X](1),
        "1"
      )
    }
  }
}
| Voltir/upickle | pprint/jvm/src/test/scala/test/pprint/JvmTests.scala | Scala | mit | 1,316 |
package streams
import common._
/**
* This component implements a parser to define terrains from a
* graphical ASCII representation.
*
* When mixing in that component, a level can be defined by
* defining the field `level` in the following form:
*
* val level =
* """------
* |--ST--
* |--oo--
* |--oo--
* |------""".stripMargin
*
* - The `-` character denotes parts which are outside the terrain
* - `o` denotes fields which are part of the terrain
* - `S` denotes the start position of the block (which is also considered
inside the terrain)
* - `T` denotes the final position of the block (which is also considered
inside the terrain)
*
* In this example, the first and last lines could be omitted, and
* also the columns that consist of `-` characters only.
*/
trait StringParserTerrain extends GameDef {

  /**
   * ASCII description of the terrain; concrete levels supply this value.
   *
   * `-` marks cells outside the terrain, `o` cells inside it, `S` the start
   * position of the block and `T` the target position (both also inside).
   */
  val level: String

  /**
   * Builds the terrain predicate for the parsed `levelVector`.
   *
   * A position is valid when it lies within the grid (width is taken from the
   * first row) and its cell is not the out-of-terrain marker `'-'`.
   */
  def terrainFunction(levelVector: Vector[Vector[Char]]): Pos => Boolean = { pos =>
    def inRange(value: Int, size: Int): Boolean = 0 <= value && value < size
    val height = levelVector.size
    val width = levelVector.headOption.fold(0)(_.size)
    inRange(pos.row, height) &&
      inRange(pos.col, width) &&
      levelVector(pos.row)(pos.col) != '-'
  }

  /**
   * Returns the position of the occurrence of `c` in `levelVector`.
   *
   * The character is assumed to appear exactly once in the terrain.
   */
  def findChar(c: Char, levelVector: Vector[Vector[Char]]): Pos = {
    val matches = for {
      (rowData, rowIndex) <- levelVector.zipWithIndex
      colIndex = rowData.indexOf(c)
      if colIndex != -1
    } yield Pos(rowIndex, colIndex)
    matches.head
  }

  // The level string parsed into a grid of characters, one row per line.
  private lazy val vector: Vector[Vector[Char]] =
    level.replace("\\r","").split("\\n").toVector.map(line => Vector(line: _*))

  lazy val terrain: Terrain = terrainFunction(vector)

  lazy val startPos: Pos = findChar('S', vector)

  lazy val goal: Pos = findChar('T', vector)
}
| masipauskas/coursera-scala | progfun2/streams/src/main/scala/streams/StringParserTerrain.scala | Scala | unlicense | 2,837 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.view.testutil
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.test.util.QueryTest
import org.apache.carbondata.mv.plans.modular
import org.apache.carbondata.mv.plans.modular.{ModularPlan, OneRowTable, Select}
import org.apache.carbondata.mv.plans.modular.Flags._
/**
* Provides helper methods for comparing plans.
*/
abstract class ModularPlanTest extends QueryTest with PredicateHelper {

  /**
   * Since attribute references are given globally unique ids during analysis,
   * we must normalize them to check if two different queries are identical.
   * Every [[ExprId]] is rewritten to `ExprId(0)` so that structurally equal
   * plans compare equal.
   */
  protected def normalizeExprIds(plan: ModularPlan): plan.type = {
    plan transformAllExpressions {
      case s: ScalarSubquery =>
        s.copy(exprId = ExprId(0))
      case e: Exists =>
        e.copy(exprId = ExprId(0))
      case l: ListQuery =>
        l.copy(exprId = ExprId(0))
      case a: AttributeReference =>
        AttributeReference(a.name, a.dataType, a.nullable)(exprId = ExprId(0))
      case a: Alias =>
        Alias(a.child, a.name)(exprId = ExprId(0))
      case ae: AggregateExpression =>
        ae.copy(resultId = ExprId(0))
    }
  }

  /**
   * Rewrite [[EqualTo]] and [[EqualNullSafe]] operator to keep order. The following cases will be
   * equivalent:
   * 1. (a = b), (b = a);
   * 2. (a <=> b), (b <=> a).
   * (Currently unused; kept for future normalization needs.)
   */
  private def rewriteEqual(condition: Expression): Expression = {
    condition match {
      case EqualTo(l: Expression, r: Expression) =>
        Seq(l, r).sortBy(_.hashCode()).reduce(EqualTo)
      case EqualNullSafe(l: Expression, r: Expression) =>
        Seq(l, r).sortBy(_.hashCode()).reduce(EqualNullSafe)
      case _ => condition // Don't reorder.
    }
  }
  //
  //  /** Fails the test if the two plans do not match */
  //  protected def comparePlans(plan1: LogicalPlan, plan2: LogicalPlan) {
  //    val normalized1 = normalizePlan(normalizeExprIds(plan1))
  //    val normalized2 = normalizePlan(normalizeExprIds(plan2))
  //    if (normalized1 != normalized2) {
  //      fail(
  //        s"""
  //           |== FAIL: Plans do not match ===
  //           |${sideBySide(normalized1.treeString, normalized2.treeString).mkString("\n")}
  //      """.stripMargin)
  //    }
  //  }
  //
  //  /** Fails the test if the two expressions do not match */
  //  protected def compareExpressions(e1: Expression, e2: Expression): Unit = {
  //    comparePlans(Filter(e1, OneRowRelation), Filter(e2, OneRowRelation))
  //  }
  //
  //  /** Fails the test if the join order in the two plans do not match */
  //  protected def compareJoinOrder(plan1: LogicalPlan, plan2: LogicalPlan) {
  //    val normalized1 = normalizePlan(normalizeExprIds(plan1))
  //    val normalized2 = normalizePlan(normalizeExprIds(plan2))
  //    if (!sameJoinPlan(normalized1, normalized2)) {
  //      fail(
  //        s"""
  //           |== FAIL: Plans do not match ===
  //           |${sideBySide(normalized1.treeString, normalized2.treeString).mkString("\n")}
  //      """.stripMargin)
  //    }
  //  }
  //
  //  /** Consider symmetry for joins when comparing plans. */
  //  private def sameJoinPlan(plan1: LogicalPlan, plan2: LogicalPlan): Boolean = {
  //    (plan1, plan2) match {
  //      case (j1: Join, j2: Join) =>
  //        (sameJoinPlan(j1.left, j2.left) && sameJoinPlan(j1.right, j2.right)) ||
  //          (sameJoinPlan(j1.left, j2.right) && sameJoinPlan(j1.right, j2.left))
  //      case (p1: Project, p2: Project) =>
  //        p1.projectList == p2.projectList && sameJoinPlan(p1.child, p2.child)
  //      case _ =>
  //        plan1 == plan2
  //    }
  //  }

  /** Fails the test if any corresponding pair of plan strings does not match. */
  protected def comparePlanCollections(planSet1: Seq[String], planSet2: Seq[String]): Unit = {
    for ((plan1, plan2) <- planSet1 zip planSet2) {
      compareMessages(plan1, plan2)
    }
  }

  /**
   * Fails the test if the two plans do not match.
   * Only expressionIds are normalized; this is enough for our test cases.
   * For more general normalization, see Spark PlanTest.scala for Logical Plan.
   */
  protected def comparePlans(plan1: ModularPlan, plan2: ModularPlan): Unit = {
    val normalized1 = normalizeExprIds(plan1)
    val normalized2 = normalizeExprIds(plan2)
    if (normalized1 != normalized2) {
      // BUG FIX: previously diffed normalized1 against itself, so the failure
      // message never showed the actual difference between the two plans.
      fail(
        s"""
           |== FAIL: Plans do not match ===
           |${ sideBySide(normalized1.treeString, normalized2.treeString).mkString("\n") }
        """.stripMargin)
    }
  }

  /** Fails the test if the two expression sequences do not match. */
  protected def compareExpressions(e1: Seq[Expression], e2: Seq[Expression]): Unit = {
    // Wrap both expression lists in equivalent one-row Selects and compare.
    comparePlans(
      Select(Nil, Nil, e1, Map.empty, Nil, Seq(OneRowTable), NoFlags, Seq.empty, Seq.empty), modular
      .Select(Nil, Nil, e2, Map.empty, Nil, Seq(OneRowTable), NoFlags, Seq.empty, Seq.empty))
  }

  /** Fails the test if the two message strings do not match, showing a side-by-side diff. */
  protected def compareMessages(msg1: String, msg2: String): Unit = {
    if (msg1 != msg2) {
      fail(
        s"""
           |== FAIL: Messages do not match ==
           |${ sideBySide(msg1, msg2).mkString("\n") }
        """.stripMargin)
    }
  }

  /** Extractor exposing the output attributes and data of a [[LocalRelation]]. */
  object MatchLocalRelation {
    def unapply(localRelation: LocalRelation): Option[(Seq[Attribute], Any)] = localRelation match {
      case l: LocalRelation => Some((l.output, l.data))
      case _ => None
    }
  }
}
| zzcclp/carbondata | integration/spark/src/test/scala/org/apache/carbondata/view/testutil/ModularPlanTest.scala | Scala | apache-2.0 | 6,349 |
package chandu0101.scalajs.react.components
package semanticui
import chandu0101.macros.tojs.JSMacro
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.VdomNode
import scala.scalajs.js
import scala.scalajs.js.`|`
import scala.scalajs.js.annotation.JSName
/**
* This file is generated - submit issues instead of PR against it
*/
case class SuiListList(
  as: js.UndefOr[String | js.Function] = js.undefined,
  className: js.UndefOr[String] = js.undefined,
  key: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined
) {
  // Converts this case class's fields to a raw JS props object via JSMacro
  // and renders semantic-ui-react's List.List component with the children.
  def apply(children: VdomNode*) = {
    val props = JSMacro[SuiListList](this)
    val component = JsComponent[js.Object, Children.Varargs, Null](Sui.ListList)
    component(props)(children: _*)
  }
}
| rleibman/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/semanticui/SuiListList.scala | Scala | apache-2.0 | 778 |
import io.rampant.vulgar.security.JsonFilter
import models.db.{UserEmails, Users}
import models.{User, UserEmail}
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.mvc.WithFilters
import play.api.{Application, GlobalSettings, Logger}
import play.filters.gzip.GzipFilter
object Global extends WithFilters(new GzipFilter(), JsonFilter) with GlobalSettings {
  override def onStart(app: Application): Unit = {
    super.onStart(app)
    // Seed a default user + confirmed email on startup if one does not exist.
    // Fire-and-forget: the resulting Future is discarded, so any DB failure
    // is only visible via the execution context's error reporting.
    UserEmails.findByEmail("jonathan@borzilleri.net").map({
      case None => Users.insert(User(None, "Jonathan")).flatMap({ u =>
        Logger.debug(u.toString)
        // u.id.get: assumes Users.insert returns the row with its generated
        // id populated -- TODO confirm; .get throws otherwise.
        UserEmails.insert(UserEmail(None, u.id.get, "jonathan@borzilleri.net", confirmed = true)).map({ e =>
          Logger.debug(e.toString)
        })
      })
      case Some(_) => // Do nothing
    })
  }
}
| duaiwe/vulgar | app/Global.scala | Scala | mit | 808 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package osgi.embedded.scala
import javax.script.{ ScriptEngineFactory, ScriptEngine, ScriptContext }
import java.io.StringWriter
import java.util.concurrent.{ TimeUnit, Executors, Callable, Future }
import junit.framework.Assert._
import junit.framework.TestCase
/**
* JSR 223 compliance test
*
* <br>
*
* there is no direct reference to the ScalaScriptingEngine
*
*/
class ScriptEngineTest extends TestCase {
  // Adapts a plain thunk to java.util.concurrent.Callable for executor use.
  implicit def fun2Call[R](f: () => R) = new Callable[R] { def call: R = f() }

  // Looks up the engine through the JSR 223 provider mechanism; there is no
  // compile-time reference to the ScalaScriptingEngine implementation.
  def getScriptEngine(): ScriptEngine = {
    import scala.collection.JavaConversions._
    val factories = javax.imageio.spi.ServiceRegistry.lookupProviders(classOf[ScriptEngineFactory])
    val scalaEngineFactory = factories.find(_.getEngineName == "Scala Scripting Engine")
    scalaEngineFactory.map(_.getScriptEngine).getOrElse(
      throw new AssertionError("Scala Scripting Engine not found")
    )
  }
  /**
   * tests a simple piece of code
   *
   * this can be used as a reference for how to build a valid string that contains scala code for the ScalaScriptingEngine
   */
  def testSimple() {
    val scriptEngine: ScriptEngine = getScriptEngine();
    var code = new StringBuilder();
    code.append("package osgi.embedded.scala {");
    code.append("\\n");
    code.append("class Script(args: ScriptArgs) {");
    code.append("\\n");
    code.append("import args._");
    code.append("\\n");
    code.append("println(\\"output:\\" + obj.saySomething()) ");
    code.append("\\n");
    code.append("}}");
    val say = "hello";
    // Bind the injected object into the engine scope under the name "obj",
    // then capture the script's stdout in a StringWriter for assertion.
    val b = scriptEngine.getBindings(ScriptContext.ENGINE_SCOPE);
    b.put("obj", new TestInject(say));
    val writer = new StringWriter();
    scriptEngine.getContext().setWriter(writer);
    scriptEngine.eval(code.toString(), b)
    assertEquals("output:" + say, writer.toString.trim())
  }
  /**
   * multi-threaded test
   *
   * the purpose of this test is to demonstrate the capabilities/faults that the
   * current ScalaScriptingEngine implementation has.
   */
  //TODO get this test to work again
  /*def testMultipleThreads() {
  var code = new StringBuilder();
  code.append("package osgi.embedded.scala{");
  code.append("\\n");
  code.append("class Script(args: ScriptArgs) {");
  code.append("\\n");
  code.append("import args._");
  code.append("\\n");
  code.append("println(\\"output:\\" + obj.saySomething()) ");
  code.append("\\n");
  code.append("}}");
  val threads = 2;
  val operations = 100;
  val e = Executors.newFixedThreadPool(threads);
  var futures = List[Future[Boolean]]();
  for (i <- 0 to operations) {
  val say = i % 3 match {
  case 0 => "hello"
  case 1 => "you again"
  case 2 => "bye"
  }
  val c: Callable[Boolean] = () => buildSayCallable(code.toString, say);
  val f: Future[Boolean] = e.submit(c);
  futures = f :: futures;
  }
  e.shutdown();
  e.awaitTermination(120, TimeUnit.SECONDS);
  futures.foreach(f => {
  try {
  assertTrue(f.get(10, TimeUnit.SECONDS))
  } catch {
  case e: Exception => { e.printStackTrace; fail(e.getMessage); }
  }
  });
  }*/
  // Evaluates the given script with a fresh engine and reports whether the
  // injected object's message was printed to the engine's writer.
  def buildSayCallable(code: String, say: String): Boolean = {
    val scriptEngine: ScriptEngine = getScriptEngine
    println("thread executing with engine: " + scriptEngine + ", say: " + say);
    val b = scriptEngine.getBindings(ScriptContext.ENGINE_SCOPE);
    b.put("obj", new TestInject(say));
    val writer = new StringWriter();
    scriptEngine.getContext().setWriter(writer);
    scriptEngine.eval(code.toString(), b)
    return "output:" + say == writer.toString.trim();
  };
  // Simple fixture whose method echoes the constructor argument.
  class TestInject(sayWhat: String) {
    def saySomething() = sayWhat;
  }
}
| scrawford/osgi-embedded-scala | script-engine-tests/src/test/scala/osgi/embedded/scala/ScriptEngineTest.scala | Scala | apache-2.0 | 4,532 |
package cwl
import io.circe._
import io.circe.Decoder
import io.circe.shapes._
import io.circe.generic.auto._
import io.circe.refined._
import io.circe.yaml
import io.circe.literal._
import common.Checked
import common.validation.Checked._
import wom.callable.ExecutableCallable
import wom.executable.Executable
import wom.executable.Executable.{InputParsingFunction, ParsedInputMap}
// WARNING! Because of 2.11 vs 2.12 incompatibilities, there are two versions of this file.
// If you're making changes here, you'll also need to update ../../scala-2.11/cwl/CwlExecutableValidation.scala
// (ExecutableValidation.scala has more info on why this was necessary)
object CwlExecutableValidation {
  // Materializes the circe Decoder[File] eagerly; presumably needed to guide
  // derivation of the Map decoder below -- TODO confirm.
  implicit val f = implicitly[Decoder[File]]

  // Decodes the input file, and build the ParsedInputMap
  // Parses the YAML/JSON inputs document into a map of coerced input values;
  // any parse or decode failure becomes an invalid Checked.
  private val inputCoercionFunction: InputParsingFunction =
    inputFile => {
      yaml.parser.parse(inputFile).flatMap(_.as[Map[String, MyriadInputValue]]) match {
        case Left(error) => error.getMessage.invalidNelCheck[ParsedInputMap]
        case Right(inputValue) => inputValue.map({ case (key, value) => key -> value.fold(CwlInputCoercion) }).validNelCheck
      }
    }

  // Combines a validated callable with an optional inputs file into a WOM Executable.
  def buildWomExecutable(callable: Checked[ExecutableCallable], inputFile: Option[String]): Checked[Executable] = {
    for {
      womDefinition <- callable
      executable <- Executable.withInputs(womDefinition, inputCoercionFunction, inputFile)
    } yield executable
  }
}
| ohsu-comp-bio/cromwell | cwl/src/main/scala-2.12/cwl/CwlExecutableValidation.scala | Scala | bsd-3-clause | 1,462 |
package com.twitter.scalding.examples
import com.twitter.scalding._
import com.twitter.scalding.mathematics.Matrix
/*
* MatrixTutorial5.scala
*
* Loads a directed graph adjacency matrix where a[i,j] = 1 if there is an edge from a[i] to b[j]
* and computes the jaccard similarity between any two pairs of vectors
*
* ../scripts/scald.rb --local MatrixTutorial5.scala --input data/graph.tsv --output data/jaccardSim.tsv
*
*/
class ComputeJaccardJob(args : Args) extends Job(args) {
  import Matrix._

  // Adjacency matrix: a[i,j] != 0 when there is an edge from user1 i to user2 j.
  val adjacencyMatrix = Tsv( args("input"), ('user1, 'user2, 'rel) )
    .read
    .toMatrix[Long,Long,Double]('user1, 'user2, 'rel)

  // Collapse all non-zero weights to 1.0 so sums behave as set cardinalities.
  val aBinary = adjacencyMatrix.binarizeAs[Double]

  // intersectMat holds the size of the intersection of row(a)_i n row (b)_j
  val intersectMat = aBinary * aBinary.transpose
  // Row/column sums give the size of each user's neighbour set.
  val aSumVct = aBinary.sumColVectors
  val bSumVct = aBinary.sumRowVectors

  //Using zip to repeat the row and column vectors values on the right hand
  //for all non-zeroes on the left hand matrix
  val xMat = intersectMat.zip(aSumVct).mapValues( pair => pair._2 )
  val yMat = intersectMat.zip(bSumVct).mapValues( pair => pair._2 )

  // Inclusion-exclusion: |A u B| = |A| + |B| - |A n B|.
  val unionMat = xMat + yMat - intersectMat

  //We are guaranteed to have Double both in the intersection and in the union matrix
  // Jaccard similarity = |A n B| / |A u B| for every pair with a non-empty intersection.
  intersectMat.zip(unionMat)
    .mapValues( pair => pair._1 / pair._2 )
    .write(Tsv( args("output") ))
}
| sriramkrishnan/scalding | tutorial/MatrixTutorial5.scala | Scala | apache-2.0 | 1,420 |
package com.hanhuy.android.irc
import com.hanhuy.android.irc.model.BusEvent
import com.hanhuy.android.irc.model.Server
import com.hanhuy.android.irc.model.ChannelLike
import com.hanhuy.android.irc.model.{Channel => QicrChannel}
import com.hanhuy.android.irc.model.MessageAdapter
import com.hanhuy.android.irc.model.MessageLike._
import android.widget.Toast
import android.util.Log
import com.sorcix.sirc._
import com.sorcix.sirc.event.MessageEventListener.{Message, Action}
import com.sorcix.sirc.event.ServerEventListener.{Invite, Nick, Mode}
import com.sorcix.sirc.event.{ServerEventListener, MessageEventListener}
import scala.util.Try
import com.hanhuy.android.common._
import SpannedGenerator._
import IrcListeners._
import scala.annotation.tailrec
import android.text.SpannableStringBuilder
import com.hanhuy.android.irc.model.MessageLike.Kick
import com.hanhuy.android.irc.model.MessageLike.Privmsg
import com.hanhuy.android.irc.model.MessageLike.Join
import com.hanhuy.android.irc.model.MessageLike.Motd
import com.hanhuy.android.irc.model.MessageLike.Part
import com.hanhuy.android.irc.model.MessageLike.Quit
import com.hanhuy.android.irc.model.MessageLike.CtcpAction
import com.hanhuy.android.irc.model.MessageLike.CtcpReply
import com.hanhuy.android.irc.model.MessageLike.NickChange
import com.hanhuy.android.irc.model.MessageLike.ServerInfo
import com.hanhuy.android.irc.model.MessageLike.Topic
import com.hanhuy.android.irc.model.MessageLike.Notice
object IrcListeners {
  val TAG = "IrcListeners"

  /**
   * Finds the first whole-word occurrence of `nick` in `m` at or after `cur`.
   *
   * An occurrence counts only when it is not immediately preceded or followed
   * by a Java identifier character (so "bob" does not match inside "bobby").
   * Rejected occurrences are skipped by resuming the scan after them, and
   * the index of the first accepted match is returned, or -1 when none.
   */
  @tailrec
  private def matchesNickIndex(nick: String, m: String, cur: Int): Int = {
    val mlen = m.length
    val nlen = nick.length
    val idx = m.indexOf(nick, cur)
    if (idx < 0) idx else {
      if (idx > 0) { // not at start of line
        val before = !Character.isJavaIdentifierPart(m.charAt(idx - 1))
        if (idx + nlen < mlen) {
          if (before && !Character.isJavaIdentifierPart(m.charAt(idx + nlen)))
            idx
          else
            matchesNickIndex(nick, m, idx + nlen)
        } else if (before) idx else -1
      } else {
        // beginning of line
        if (nlen < mlen) {
          if (!Character.isJavaIdentifierPart(m.charAt(nlen)))
            idx
          else
            matchesNickIndex(nick, m, nlen)
        } else idx
      }
    }
  }

  // sometimes a null is passed in...
  // Null-safe, case-insensitive test of whether the server's current nick is
  // mentioned as a whole word anywhere in `msg`.
  def matchesNick(server: Server, msg: CharSequence) = {
    msg.?.fold(false)(m =>
      matchesNickIndex(server.currentNick.toLowerCase, m.toString.toLowerCase, 0) != -1)
  }

  // Convenience accessor for a user's "username@hostname" address string.
  implicit class EnhancedUser(val u: User) extends AnyVal {
    @inline def address = u.getUserName + "@" + u.getHostName
  }

  // Unchecked downcast: assumes every ChannelLike reaching this conversion is
  // in fact a model Channel -- ClassCastException otherwise. TODO confirm callers.
  @inline implicit def toQicrChannel(c: ChannelLike): QicrChannel = c.asInstanceOf[QicrChannel]
}
class IrcListeners(manager: IrcManager) extends AdvancedListener
with ModeListener with ServerEventListener with MessageEventListener {
// ServerListener
// TODO send BusEvents instead
/**
 * Connection established: marks the server CONNECTED, replays any configured
 * autorun commands and autojoin channels (both ';'-separated lists), and
 * posts a "connected" notification.
 */
override def onConnect(c: IrcConnection) {
  manager._connections.get(c) match {
    case Some(server) =>
      UiBus.run {
        // ugh, need to do it here so that the auto commands can run
        server.state() = Server.CONNECTED
        server += ServerInfo(manager.getString(R.string.server_connected))
      }
      if (server.autorun.isDefined || server.autojoin.isDefined) {
        val proc = CommandProcessor(Application.context)
        proc.server = Some(server)
        server.autorun.foreach { autorun =>
          autorun.split(";") foreach { cmd =>
            if (cmd.trim().length() > 0) {
              val command = cmd.trim()
              UiBus.run {
                // Stored commands may be missing their leading slash.
                proc.executeLine(
                  if (command.charAt(0) != '/') "/" + command else command)
              }
            }
          }
        }
        server.autojoin.foreach { autojoin =>
          val join = manager.getString(R.string.command_join_1)
          val channels = autojoin.split(";")
          // note: `c` here shadows the outer IrcConnection parameter
          channels foreach { c =>
            if (c.trim().length() > 0)
              UiBus.run {
                proc.executeCommand(join, Some(c.trim()))
              }
          }
        }
      }
      UiBus.run {
        NotificationCenter += ServerNotification(R.drawable.ic_wifi_black_24dp, server.name, "Connected")
        Notifications.connected(server)
      }
    case None =>
      Log.w(TAG, s"server not found in onConnect?! $c", new StackTrace)
  }
}
// Connection dropped: notify the manager and post an error notification.
override def onDisconnect(c: IrcConnection) {
  // Log.w(TAG, "Connection dropped: " + c, new StackTrace)
  manager._connections.get(c) foreach { s =>
    manager.serverDisconnected(s)
    UiBus.run {
      NotificationCenter += ServerNotification(R.drawable.ic_error_black_24dp, s.name, Application.context.getString(R.string.server_disconnected))
    }
  }
}
// Message-of-the-day: append each line to the server's message log.
override def onMotd(motd: ServerEventListener.Motd) {
  manager._connections.get(motd.connection) foreach { server =>
    // sIRC sends motd as one big blob of lines, split before adding
    UiBus.run { motd.motd.split("\r?\n") foreach { m => server += Motd(m) } }
  }
}
// ModeListener
// TODO
// Channel privilege callbacks (op/voice/admin/founder/halfop and their
// removals) are intentionally no-ops: mode changes are not surfaced in the
// UI yet.
override def onAdmin(c: IrcConnection, channel: Channel,
  src: User, user: User) = ()
override def onDeAdmin(c: IrcConnection, channel: Channel,
  src: User, user: User) = ()
override def onDeFounder(c: IrcConnection, channel: Channel,
  src: User, user: User) = ()
override def onDeHalfop(c: IrcConnection, channel: Channel,
  src: User, user: User) = ()
override def onDeOp(c: IrcConnection, channel: Channel,
  src: User, user: User) = ()
override def onDeVoice(c: IrcConnection, channel: Channel,
  src: User, user: User) = ()
override def onFounder(c: IrcConnection, channel: Channel,
  src: User, user: User) = ()
override def onHalfop(c: IrcConnection, channel: Channel,
  src: User, user: User) = ()
override def onOp(c: IrcConnection, channel: Channel,
  src: User, user: User) = ()
override def onVoice(c: IrcConnection, channel: Channel,
  src: User, user: User) = ()
// Payload of a CTCP PING reply: "<seconds> <microseconds>".
val CtcpPing = """(\d+)\s+(\d+)""".r

/**
 * Buffers whois/whowas numeric packets per (lower-cased) nick. `start = true`
 * begins a new buffer; subsequent packets are appended, and the closing
 * numeric (318 end-of-whois / 369 end-of-whowas) flushes the accumulated
 * reply to the most recently active channel.
 */
def handleWhois(line: IrcPacket, start: Boolean = false) {
  val realNick = line.getArgumentsArray()(1)
  val nick = realNick.toLowerCase
  if (start)
    whoisBuffer += (nick -> List(line))
  else {
    if (whoisBuffer.contains(nick)) {
      whoisBuffer += (nick -> (whoisBuffer(nick) ++ List(line)))
      def whois(): Unit = {
        for {
          c <- manager.lastChannel
          w <- accumulateWhois(nick, realNick)
        } {
          UiBus.run(c += w)
        }
      }
      line.getNumericCommand match {
        case 318 => whois()
        case 369 => whois()
        case _ =>
      }
    }
  }
}

// "-!-" prefix used when rendering whois lines (grey/green/grey spans).
def whoisPrefix = "%1%2%3" formatSpans( textColor(0xff777777, "-"),
  textColor(0xff00ff00, "!"), textColor(0xff777777, "-"))
/**
 * Folds the buffered whois/whowas packets for `nick` into a single styled
 * Whois message, clearing the buffer entry as a side effect. Returns None
 * when nothing is buffered for the nick. Each numeric reply type is rendered
 * with the "-!-" prefix and formatSpans templates.
 */
def accumulateWhois(nick: String, realNick: String) = {
  whoisBuffer.get(nick).map { lines =>
    val b = lines.foldLeft(new SpannableStringBuilder) { (buf, line) =>
      val args = line.getArgumentsArray
      val m = line.getMessage
      line.getNumericCommand match {
        case 318 => // end of whois
        case 369 => // end of whowas
        case 311 => // whois: nick, user@host, real name
          val (user, host, name) = (args(2), args(3), m)
          val t = "%1 %2 [%3@%4]\n%6 name: %5" formatSpans(
            whoisPrefix, MessageAdapter.colorNick(realNick), user, host,
            bold(name), whoisPrefix)
          buf.append(t)
        case 314 => // whowas: nick, user@host, real name
          val (user, host, name) = (args(2), args(3), m)
          val t = "%1 %2 was [%3@%4]\n%1 name: %5" formatSpans(
            whoisPrefix, MessageAdapter.colorNick(realNick), user, host, bold(name))
          buf.append(t)
        case 312 => // server the user is connected to
          val (server, tag) = (args(2), m)
          val t = "\n%1 server: %2 [%3]" formatSpans(whoisPrefix, server, tag)
          buf.append(t)
        case 330 => // services login name
          val (login, tag) = (args(2), m)
          val t = "\n%1 %3: %2" formatSpans(whoisPrefix, login, tag)
          buf.append(t)
        case 319 => // channel membership list
          buf.append("\n%1 channels: %2" formatSpans(whoisPrefix, m))
        case 338 => // actual host/ip
          if (args.length > 2)
            buf.append("\n%1 %2: %3" formatSpans(whoisPrefix, m, args(2)))
          else
            buf.append("\n%1 %2" formatSpans(whoisPrefix, m))
        case 317 => // idle time + signon timestamp (seconds since epoch)
          // note: `idle` is extracted but only args(2)/time are rendered
          val (idle, time) = (args(2).toInt, args(3).toLong * 1000)
          buf.append("\n%1 %2: %3, %4" formatSpans(whoisPrefix, m,
            args(2), new java.util.Date(time).toString))
        case 401 => // no such nick
          buf.append("%1 %2: %3" formatSpans(whoisPrefix,
            MessageAdapter.colorNick(realNick), m))
        case 406 => // no such nick (whowas)
          buf.append("%1 %2: %3" formatSpans(whoisPrefix,
            MessageAdapter.colorNick(realNick), m))
        case 671 => // secure connection notice
          buf.append("\n%1 %2" formatSpans(whoisPrefix, m))
        case _ =>
      }
      buf
    }
    whoisBuffer -= nick
    Whois(b)
  }
}
// AdvancedListener
/**
 * Fallback for packets sIRC does not dispatch elsewhere: whois/whowas
 * numerics are routed into the whois buffer, PONG replies drive the lag
 * meter, and everything else (except PONG) is echoed to the server log.
 */
override def onUnknown(c: IrcConnection, line: IrcPacket) {
  manager._connections.get(c) foreach { server =>
    if (line.isNumeric) {
      // 306 = away
      // 333 = topic change info
      // 366 = end of /names list
      line.getNumericCommand match {
        case 5 => // server capabilities list
        case 251 => () // msg, network users info
        case 252 => () // args[1] ircops online
        case 254 => () // args[1] channel count
        case 255 => () // msg, local clients info
        case 261 => () // msg, highest connection count
        case 265 => () // args[1], local users, args[2], local max
        case 266 => () // args[1], global users, args[2], max
        case 301 => () // args[1], target nick, away msg (reply)
        case 305 => () // unaway (user)
        case 306 => () // nowaway (user)
        case 333 => // topic change timestamp
        // whois response line1
        // 1: nick, 2: username, 3: host, 4: ?; msg = realname
        case 311 => handleWhois(line, true)
        case 314 => handleWhois(line, true) // like 311, whowas
        case 312 => handleWhois(line) // 1: server, 2: server comment (whois l2)
        case 313 => () // 1: nick, whois operator
        case 318 => handleWhois(line) // args[1], nick, end of whois
        case 330 => handleWhois(line) // args[1] nick, args[2] login, msg/2
        case 319 => handleWhois(line) // args[1], nick, msg channels, whois
        case 338 => handleWhois(line) // args[2] host/ip, whois l3
        case 317 => handleWhois(line) // args[1], idle, args[2], signon whois l4
        case 369 => handleWhois(line) // args[1], nick, end of whowas
        case 401 => handleWhois(line, true) // args[1], nick, whois not there
        case 406 => handleWhois(line, true) // args[1], nick, whowas not there
        case 671 => handleWhois(line) // args[1], nick, is using a secure connection
        case 366 => // end of names list
        case 375 => // motd start
        case _ => ()
      }
    } else {
      line.getCommand match {
        case "PONG" =>
          // Lag measurement: the PONG payload echoes the millisecond
          // timestamp we sent with the matching PING.
          for {
            ping <- server.currentPing
            p <- Try((line.getMessage.? getOrElse
              line.getArgumentsArray()(1)).toLong).toOption
          } {
            server.currentPing = None
            // non-numeric should pass through
            // should always match if the lag timer is working
            if (p == ping) {
              val t = System.currentTimeMillis
              server.currentLag() = (t - p).toInt
              UiBus.send(BusEvent.ServerChanged(server))
            }
          }
        case _ => ()
      }
    }
    // Echo everything except PONG into the server's message log.
    if (line.getCommand != "PONG") UiBus.run {
      server += ServerInfo("[%s](%s):[%s]" format (
        line.getCommand, line.getArguments,
        line.getMessage))
    }
  }
}
// Never happens
override def onUnknownReply(c: IrcConnection, line: IrcPacket) = ()
// Per-nick buffer of whois/whowas numeric packets, filled between the
// opening 311/314/401/406 packet and the closing 318/369.
private var whoisBuffer: Map[String,List[IrcPacket]] = Map.empty
override def onInvite(invite: Invite) = () // TODO
/**
 * Nick change: when it is our own nick, update the server's current nick and
 * announce in every channel on that server; otherwise announce only in the
 * channels the renamed user shares with us.
 */
override def onNick(nick: Nick): Unit = {
  val c = nick.connection
  val oldnick = nick.oldUser
  val newnick = nick.newUser
  val server = manager._connections(c)
  if (oldnick.isUs || newnick.isUs) {
    server.currentNick = newnick.getNick
    UiBus.run {
      manager._channels.values foreach { c =>
        if (c.server == server) {
          UiBus.send(BusEvent.NickListChanged(c))
          c += NickChange(oldnick.getNick, newnick.getNick, nick.timestamp)
        }
      }
    }
  } else {
    UiBus.run {
      // distinct guards against the same model channel appearing twice.
      manager.channels.values.collect {
        case c: Channel if c.hasUser(newnick) && manager._channels.get(c).isDefined =>
          manager._channels(c)
      }.toSeq.distinct foreach { c =>
        if (c.server == server) {
          UiBus.send(BusEvent.NickListChanged(c))
          c += NickChange(oldnick.getNick, newnick.getNick, nick.timestamp)
        }
      }
    }
  }
}
/**
 * Quit: announce the departure in every channel we share with the user.
 * Our own quit is ignored here (handled via disconnect).
 */
override def onQuit(quit: ServerEventListener.Quit): Unit = {
  val user = quit.sender
  val msg = quit.message
  val c = quit.connection
  if (!user.isUs) {
    manager._connections.get(c) foreach { server =>
      UiBus.run {
        try {
          // guard can change values if slow...
          manager.channels.values collect {
            case c: Channel if c.hasUser(user) => manager._channels.get(c)
          } foreach { c =>
            if (c.isDefined && c.get.server == server) {
              UiBus.send(BusEvent.NickListChanged(c.get))
              c.get += Quit(user.getNick, user.address, msg, quit.timestamp)
            }
          }
        } catch {
          // the collect's guard may race with concurrent channel updates
          case _: MatchError => ()
        }
      }
    }
  }
}
override def onMode(mode: Mode) = () // TODO
// Part: mark the channel PARTED when we leave, otherwise just refresh its
// nick list; a Part message is appended in both cases.
override def onPart(part: ServerEventListener.Part): Unit = {
  val channel = part.channel
  val user = part.sender
  val msg = part.message
  if (manager._channels.contains(channel)) {
    UiBus.run {
      val ch = manager._channels(channel)
      if (user.isUs) {
        ch.state = QicrChannel.State.PARTED
      } else {
        UiBus.send(BusEvent.NickListChanged(ch))
      }
      ch += Part(user.getNick, user.address, msg, part.timestamp)
    }
  }
}
override def onTopic(topic: ServerEventListener.Topic): Unit = {
val channel = topic.channel
val src = topic.sender
manager._channels.get(channel) foreach { c =>
UiBus.run {
c += Topic(src.?.map(_.getNick), topic.topic, topic.timestamp)
}
}
}
override def onKick(kick: ServerEventListener.Kick): Unit = {
val channel = kick.channel
val msg = kick.message
val user = kick.target
val op = kick.sender
UiBus.run {
manager._channels.get(channel) foreach { ch =>
if (user.isUs) {
// TODO update adapter's channel state
ch.state = QicrChannel.State.KICKED
} else {
UiBus.send(BusEvent.NickListChanged(ch))
}
ch += Kick(op.getNick, user.getNick, msg, kick.timestamp)
}
}
}
/** Handles a channel join; registers the channel with the manager first
  * when we are the one joining, then notifies the UI. */
override def onJoin(join: ServerEventListener.Join): Unit = {
  val who = join.sender
  val channel = join.channel
  if (who.isUs) manager.addChannel(join.connection, channel)
  UiBus.run {
    // sometimes channel hasn't been set yet (on connect)
    for (ch <- manager._channels.get(channel)) {
      UiBus.send(BusEvent.NickListChanged(ch))
      if (!who.isUs) ch += Join(who.getNick, who.address, join.timestamp)
    }
  }
}
/** Routes a private message into (or creates) the query window for the sender. */
override def onPrivateMessage(m: Message): Unit = UiBus.run {
  manager.addQuery(m.connection, m.sender.getNick, m.message, ts = m.timestamp)
}
/** Handles a channel message.
  *
  * Clears our pending notifications when we are the sender, records a
  * channel mention when the message highlights our nick or the channel is
  * a favorite (never for our own or ignored senders), and appends the
  * message to the channel on the UI thread.
  */
override def onMessage(message: Message): Unit = {
  val src = message.sender
  val msg = message.message
  manager._channels.get(message.target) foreach { c =>
    if (src.isUs) {
      cancelNotifications(c)
    }
    val pm = Privmsg(src.getNick, msg, usermode(src), ts = message.timestamp)
    // nick highlights and favorite channels produce the same mention; the
    // original duplicated addChannelMention across two identical branches
    if (!src.isUs && !Config.Ignores(src.getNick) &&
      (matchesNick(c.server, msg) || Config.Favorites(c)))
      manager.addChannelMention(c, pm)
    UiBus.run {
      c += pm
    }
  }
}
/** Handles a CTCP reply.
  *
  * For PING replies, converts the embedded timestamp into a round-trip
  * interval string when it parses; any other command keeps its raw
  * message. The resulting CtcpReply is shown in the currently focused tab
  * when the UI is visible, otherwise appended to the server log.
  */
override def onCtcpReply(ctcp: MessageEventListener.CtcpReply) {
  val c = ctcp.connection
  val src = ctcp.sender
  val cmd = ctcp.command
  val reply = ctcp.message
  val s = manager._connections(c)
  val r = CtcpReply(s, src.getNick, cmd, cmd match {
    case "PING" => reply match {
      case CtcpPing(seconds, micros) =>
        Try {
          // prevent malicious overflow causing a crash
          val ts = seconds.toLong * 1000 + (micros.toLong / 1000)
          Server.intervalString(System.currentTimeMillis - ts)
        }.toOption orElse reply.?
      case _ => reply.?
    }
    case _ => reply.?
  }, ctcp.timestamp)
  // TODO show in current WidgetChatActivity
  if (manager.showing) {
    UiBus.run {
      val msg = MessageAdapter.formatText(Application.context, r)
      MainActivity.instance foreach { activity =>
        val tab = activity.adapter.currentTab
        // no channel/server tab focused: log to the server and toast the reply
        (tab.channel orElse tab.server).fold {
          s += r
          Toast.makeText(Application.context, msg, Toast.LENGTH_LONG).show()
        }{ _ += r }
      }
    }
  } else {
    s += r
  }
}
/** Handles a NOTICE.
  *
  * A notice with no channel target is posted to the notification center;
  * a channel notice is appended to the channel and recorded as a mention
  * when it highlights our nick or the channel is a favorite (never for
  * our own or ignored senders).
  */
override def onNotice(n: MessageEventListener.Notice): Unit = {
  val src = n.sender
  val msg = n.message
  n.target.?.fold(UiBus.run {
    // server notice: fall back to the server address when connection is unknown
    val sn = manager._connections.get(n.connection).map(_.name).getOrElse(n.connection.getServer.getAddress)
    NotificationCenter += NotifyNotification(
      n.timestamp, sn, src.getNick, s"-${MessageAdapter.colorNick(src.getNick)}- $msg")
  }) { channel =>
    val c = manager._channels(channel)
    val notice = Notice(src.getNick, msg, n.timestamp)
    UiBus.run { c += notice }
    // nick highlights and favorite channels take the same action; the
    // original duplicated addChannelMention across two identical branches
    if (!src.isUs && !Config.Ignores(src.getNick) &&
      (matchesNick(c.server, msg) || Config.Favorites(c)))
      manager.addChannelMention(c, notice)
  }
}
/** Clears new-message/mention flags for `c` and for every other channel on
  * the same server that has pending mentions, broadcasting a status-change
  * event for each channel touched. */
private def cancelNotifications(c: ChannelLike): Unit = {
  Notifications.markRead(c)
  c.newMessages = false
  c.newMentions = false
  ServiceBus.send(BusEvent.ChannelStatusChanged(c))
  for {
    other <- manager.channels.keys
    if other.newMentions && other.server == c.server && other != c
  } {
    other.newMentions = false
    ServiceBus.send(BusEvent.ChannelStatusChanged(other))
  }
}
/** Handles a CTCP ACTION ("/me").
  *
  * Targetless actions open/append to a query window with the sender;
  * channel actions are appended to the channel and recorded as a mention
  * when they highlight our nick or the channel is a favorite (never for
  * our own or ignored senders).
  */
override def onAction(a: Action): Unit = {
  val src = a.sender
  val msg = a.action
  a.target.?.fold(UiBus.run {
    manager.addQuery(a.connection, src.getNick, msg,
      action = true, ts = a.timestamp)
  }) { channel =>
    manager._channels.get(channel) foreach { c =>
      if (src.isUs) {
        cancelNotifications(c)
      }
      val action = CtcpAction(src.getNick, msg, a.timestamp)
      UiBus.run {
        c += action
      }
      // nick highlights and favorite channels take the same action; the
      // original duplicated addChannelMention across two identical branches
      if (!src.isUs && !Config.Ignores(src.getNick) &&
        (matchesNick(c.server, msg) || Config.Favorites(c)))
        manager.addChannelMention(c, action)
    }
  }
}
}
}
| pfn/qicr | src/main/scala/com/hanhuy/android/irc/IrcListeners.scala | Scala | gpl-2.0 | 19,980 |
package com.atomist.rug.kind.yaml
import com.atomist.rug.kind.core.ProjectMutableView
import com.atomist.rug.kind.grammar.{AbstractTypeUnderFileTest, TypeUnderFile}
import com.atomist.rug.kind.yaml.YamlUsageTestTargets._
import com.atomist.source.{SimpleFileBasedArtifactSource, StringFileArtifact}
import com.atomist.tree.content.text.{OverwritableTextTreeNode, PositionedTreeNode, TextTreeNodeLifecycle}
import com.atomist.tree.utils.{NodeUtils, TreeNodePrinter}
import com.atomist.tree.{TreeNode, UpdatableTreeNode}
/** Tests for [[YamlFileType]]: parsing YAML into a tree, addressing nodes
  * with path expressions, and updating node values in place. */
class YamlFileTypeTest extends AbstractTypeUnderFileTest {

  override protected def typeBeingTested: TypeUnderFile = new YamlFileType

  /** Parses `fileContent` as "test.yml" and returns a ready-to-update tree. */
  def parseAndPrepare(fileContent: String = YamlNestedSeq): UpdatableTreeNode = {
    val f = new ProjectMutableView(SimpleFileBasedArtifactSource(StringFileArtifact("test.yml", fileContent))).findFile("test.yml")
    val tn = typeBeingTested.fileToRawNode(f.currentBackingObject).get
    // println(TreeNodeUtils.toShorterString(tn, TreeNodeUtils.NameAndContentStringifier))
    TextTreeNodeLifecycle.makeWholeFileNodeReady("Yaml", PositionedTreeNode.fromParsedNode(tn), f)
  }

  "yaml file type" should "parse and run path expression to find sequence" in {
    val tn = parseAndPrepare(YamlNestedSeq)
    // println(TreeNodeUtils.toShorterString(tn, TreeNodeUtils.NameAndContentStringifier))
    val nodes = evaluatePathExpression(tn, "/components/*")
    assert(nodes.last.asInstanceOf[OverwritableTextTreeNode].value === "Nait 3R")
  }

  it should "parse and run path expression to find nested sequence" in {
    val tn = parseAndPrepare(YamlNestedSeq)
    // println(TreeNodeUtils.toShorterString(tn, TreeNodeUtils.NameAndContentStringifier))
    val nodes = evaluatePathExpression(tn, "/components/cables/*")
    assert(nodes.last.asInstanceOf[OverwritableTextTreeNode].value === "A5 speaker cable")
  }

  it should "parse and run path expression to find deeper nested sequence" in {
    val tn = parseAndPrepare()
    val nodes = evaluatePathExpression(tn, "/components/Amplifier/*[@name='future upgrades']/*[@value='NAP250.2']")
    assert(nodes.size === 1)
    assert(NodeUtils.value(nodes.last) === "NAP250.2")
  }

  it should "parse and run path expression to find deepest nested sequence" in {
    val otif = parseAndPrepare()
    val nodes = evaluatePathExpression(otif, "/components/Amplifier/*[@name='future upgrades']/NAC82/*")
    assert(NodeUtils.value(nodes.head) === "NAPSC power supply")
  }

  // round-trip: parsing must not alter the file content
  it should "parse and output unchanged" in {
    val f = StringFileArtifact("test.yml", xYaml)
    val tn = typeBeingTested.fileToRawNode(f).get
    // println(TreeNodeUtils.toShorterString(tn, TreeNodeUtils.NameAndContentStringifier))
    val nodeValue = NodeUtils.positionedValue(tn, xYaml)
    withClue(s"Was [$nodeValue]\\nExpected [${f.content}]") {
      assert(nodeValue === f.content)
    }
  }

  it should "find scala value in quotes" in {
    val tn = parseAndPrepare(xYaml)
    // println(TreeNodeUtils.toShorterString(tn, TreeNodeUtils.NameAndContentStringifier))
    val nodes = evaluatePathExpression(tn, "/artifact")
    assert(nodes.size == 1)
    assert(nodes.last.asInstanceOf[OverwritableTextTreeNode].value === "\\"A Night at the Opera\\"")
  }

  // updating a node must rewrite only that node's span in the backing file
  it should "find scala value in quotes and modify" in {
    val f = StringFileArtifact("test.yml", xYaml)
    val pmv = new ProjectMutableView(SimpleFileBasedArtifactSource(f))
    val found = typeBeingTested.findAllIn(pmv)
    assert(found.get.size === 1)
    val tn = found.get.head
    // println(TreeNodeUtils.toShorterString(tn, TreeNodeUtils.NameAndContentStringifier))

    val oldContent = "\\"A Night at the Opera\\""
    val nodes = evaluatePathExpression(tn, "/artifact")
    assert(nodes.size == 1)
    assert(nodes.head.asInstanceOf[TreeNode].value === oldContent)
    val newContent = "What in God's holy name are you blathering about?"
    nodes.head.asInstanceOf[UpdatableTreeNode].update(newContent)
    assert(pmv.findFile("test.yml").content === xYaml.replace(oldContent, newContent))
  }

  // debugging helper: renders the tree as an ASCII diagram
  def printTree(t: UpdatableTreeNode) =
    println(TreeNodePrinter.draw[UpdatableTreeNode](u => u.childNodes.map(_.asInstanceOf[UpdatableTreeNode]), u => s"${u.nodeName}: ${u.value}")(t))

  it should "find scalar using path expression key" in {
    val tn = parseAndPrepare(xYaml)
    var nodes = evaluatePathExpression(tn, "/group")
    assert(nodes.size == 1)
    assert(nodes.head.asInstanceOf[TreeNode].value === "queen")
    nodes = evaluatePathExpression(tn, "/group/value")
    assert(nodes.size == 1)
    assert(nodes.head.asInstanceOf[TreeNode].value === "queen")
    nodes = evaluatePathExpression(tn, "/group/value[@value='queen']")
    assert(nodes.size == 1)
    assert(nodes.head.asInstanceOf[TreeNode].value === "queen")
  }

  it should "parse and run path expression using name again" in {
    val tn = parseAndPrepare(xYaml)
    // println(TreeNodeUtils.toShorterString(tn, TreeNodeUtils.NameAndContentStringifier))
    val nodes = evaluatePathExpression(tn, "/dependencies")
    assert(nodes.size == 1)
    val nodes2 = evaluatePathExpression(tn, "/dependencies/*")
    assert(nodes2.size === 12)
    assert(NodeUtils.value(nodes2.last) === "\\"God Save the Queen\\"")
  }

  it should "parse and run path expression using type" in {
    val tn = parseAndPrepare(xYaml)
    // println(TreeNodeUtils.toShorterString(tn, TreeNodeUtils.NameAndContentStringifier))
    val nodes2 = evaluatePathExpression(tn, "/Sequence()[@name='dependencies']/*")
    assert(nodes2.size === 12)
    assert(NodeUtils.value(nodes2.last) === "\\"God Save the Queen\\"")
  }

  it should "parse and run path expression against YamlOrgStart invoice" in {
    val tn = parseAndPrepare(YamlOrgStart)
    val nodes = evaluatePathExpression(tn, "//bill-to/given")
    assert(nodes.size == 1)
  }

  it should "parse and run path expression using | and > strings" in {
    val tn = parseAndPrepare(YamlOrgStart)
    assert(tn.value === YamlOrgStart)
    val nodes = evaluatePathExpression(tn, "//*[@name='bill-to']/given/value")
    assert(nodes.size === 1)
    assert(nodes.head.asInstanceOf[TreeNode].value === "Chris")
  }

  // TODO: expected value semantics for literal-block ("|") scalars are
  // still undefined — assertions below are intentionally commented out
  it should "return correct value for | string" in {
    val tn = parseAndPrepare(YamlOrgStart)
    val nodes = evaluatePathExpression(tn, "//*[@name='bill-to']/address/lines")
    assert(nodes.size === 1)
    val target = nodes.head
    // assert(nodes.head.asInstanceOf[TreeNode].value === "Chris")
    // Should have stripped whitespace
    // assert(target.value === "458 Walkman Dr.\\nSuite #292\\n")
  }

  // TODO: expected value semantics for folded (">") scalars are still
  // undefined — assertions below are intentionally commented out
  it should "return correct value for > string" in {
    val tn = parseAndPrepare(YamlOrgStart)
    val nodes = evaluatePathExpression(tn, "/comments")
    assert(nodes.size === 1)
    val target = nodes.head
    // println(TreeNodeUtils.toShorterString(nodes.head, TreeNodeUtils.NameAndContentStringifier))
    // TODO define this behavior
    //    val expected =
    //      """
    //        |Late afternoon is best.
    //        |Backup contact is Nancy
    //        |Billsmer @ 338-4338.
    //      """.stripMargin
    //    // Should strip whitespace
    //    assert(target.value === expected)
  }

  it should "parse multiple documents in one file" is pending
}
| atomist/rug | src/test/scala/com/atomist/rug/kind/yaml/YamlFileTypeTest.scala | Scala | gpl-3.0 | 7,254 |
package com.github.leifker.spark.sentiment
/**
  * Utilities that propagate sentence-ending punctuation ("!", "?", and the
  * combined exclaim-question token) backwards onto every token of the
  * sentence the punctuation terminates.
  */
object TokenPatchUtil {
  private val patchExclaimQuestion = (tokens: Vector[String]) => patchToken(tokens, Constants.exclaimQuestion, _ + Constants.exclaimQuestion)
  private val patchQuestion = (tokens: Vector[String]) => patchToken(tokens, "?", _ + "?")
  private val patchExclaim = (tokens: Vector[String]) => patchToken(tokens, "!", _ + "!")

  // NOTE(review): the exclaim-question pass runs before the single "!" and
  // "?" passes — presumably so the combined token is handled first; confirm
  // the ordering is intentional.
  val patchWithPunctuation: (Vector[String]) => Vector[String] = patchExclaimQuestion andThen patchExclaim andThen patchQuestion

  // Index of the nearest sentence delimiter at or before endIdx, or -1 if none.
  private def prevPunctuation(tokens: Vector[String], endIdx: Int): Int = tokens.lastIndexWhere(Constants.sentenceDelimiter.matcher(_).matches(), endIdx)

  // Entry point: patch the entire token vector for a single punctuation target.
  private def patchToken(tokens: Vector[String], punctuationTarget: String, mutator: String => String): Vector[String] = {
    patchTokens(tokens, tokens.length - 1, Seq(punctuationTarget), mutator)
  }

  // Walks backwards through the vector (tail-recursively): for each
  // occurrence of punctuationTarget, applies `mutator` to the tokens between
  // the previous sentence delimiter and the occurrence, then continues from
  // just before that delimiter.
  private def patchTokens(tokens: Vector[String], endIdx: Int, punctuationTarget: Seq[String], mutator: String => String): Vector[String] = {
    val lastIdx = tokens.lastIndexOfSlice(punctuationTarget, endIdx)
    if (lastIdx > 0) {
      val prevPunctIdx = prevPunctuation(tokens, lastIdx - 1)
      // first token after the previous delimiter (prevPunctIdx is -1 when
      // there is no earlier delimiter, so this starts at the target size)
      val startPatchIdx = prevPunctIdx + punctuationTarget.size
      val nextTokens = lastIdx - startPatchIdx match {
        case diff if diff > 0 => tokens.patch(startPatchIdx, tokens.slice(startPatchIdx, lastIdx).map(mutator), diff)
        case _ => tokens
      }
      patchTokens(nextTokens, prevPunctIdx, punctuationTarget, mutator)
    } else {
      tokens
    }
  }
}
| leifker/geo-sentiment | spark/src/main/scala/com/github/leifker/spark/sentiment/TokenPatchUtil.scala | Scala | gpl-3.0 | 1,602 |
package org.jetbrains.plugins.scala.codeInspection.collections
import org.jetbrains.plugins.scala.codeInspection.InspectionBundle
/**
* @author Nikolay.Tropin
*/
/** Tests for [[ToSetAndBackInspection]]: a `.toSet` immediately converted
  * back to the same collection type should be flagged and replaced with
  * `.distinct`. */
class ToSetAndBackTest extends OperationsOnCollectionInspectionTest {
  override val inspectionClass: Class[_ <: OperationOnCollectionInspection] = classOf[ToSetAndBackInspection]

  override def hint: String = InspectionBundle.message("replace.toSet.and.back.with.distinct")

  def testSeq(): Unit = {
    doTest(
      s"Seq(1).${START}toSet.toSeq$END",
      "Seq(1).toSet.toSeq",
      "Seq(1).distinct"
    )
  }

  def testList(): Unit = {
    doTest(
      s"List(1).${START}toSet.toList$END",
      "List(1).toSet.toList",
      "List(1).distinct"
    )
  }

  def testArray(): Unit = {
    doTest(
      s"Array(1).${START}toSet.toArray[Int]$END",
      "Array(1).toSet.toArray[Int]",
      "Array(1).distinct"
    )
  }

  // postfix-operator spelling must still be recognized
  def testPostfix(): Unit = {
    doTest(
      s"(Seq(1)$START toSet) toSeq$END",
      "(Seq(1) toSet) toSeq",
      "Seq(1).distinct"
    )
  }

  def testTo(): Unit = {
    doTest(
      s"Seq(1).${START}toSet.to[Seq]$END",
      "Seq(1).toSet.to[Seq]",
      "Seq(1).distinct"
    )
  }

  // conversions that change the collection type must NOT be flagged
  def testMap(): Unit = {
    checkTextHasNoErrors("Map(1 -> 2).toSet.toSeq")
  }

  def testSeqToList(): Unit = {
    checkTextHasNoErrors("Seq(1).toSet.toList")
  }

  def testSeqToList2(): Unit = {
    checkTextHasNoErrors("Seq(1).toSet.to[List]")
  }
}
| triggerNZ/intellij-scala | test/org/jetbrains/plugins/scala/codeInspection/collections/ToSetAndBackTest.scala | Scala | apache-2.0 | 1,447 |
package com.github.davidhoyt.fluxmuster
import scala.reflect.runtime.universe._
/** Concrete [[TypeTagTree]] backed by a reflection [[Type]] plus the
  * [[TypeTagTreeSource]] describing where it was materialized. */
sealed case class TypeTagTreeNode[T](source: TypeTagTreeSource, tpe: Type) extends TypeTagTree[T] {
  import TypeTagTreeNode._

  val symbol = tpe.typeSymbol.asType

  val typeArguments = childrenOf(tpe)

  val isRefined = refined(tpe)

  // Wrap each discovered type argument in its own node sharing this source.
  private def childrenOf(t: Type) =
    recursivelyMapTypeArguments(t) { argument => TypeTagTreeNode(source, argument) }
}
object TypeTagTreeNode {
  // cached for the refined-type check against anonymous subclasses of AnyRef
  private val AnyRefType = typeOf[AnyRef]

  /** Constructs a new instance of [[TypeTagTree]] given the source. */
  def apply[T](source: TypeTagTreeSource)(implicit tag: TypeTag[T]): TypeTagTree[T] =
    TypeTagTreeNode(source, tag.tpe)

  /** Determines if the given `t` [[scala.reflect.runtime.universe.Type]] is a refined type. */
  private def refined(t: Type): Boolean = t match {
    case RefinedType(_, _) => true
    case _ => false
  }

  /**
   * Processes the provided type by recursively processing the type tree in a depth-first
   * manner and produces a tree of [[TypeTagTree]] instances from the discovered type
   * arguments.
   *
   * @param t The [[scala.reflect.runtime.universe.Type]] that will be recursively mapped
   * @param fn A function to apply as types are discovered and which maps from
   *           [[scala.reflect.runtime.universe.Type]] to `A`
   * @tparam A The type that will be produced as a result of applying `fn` to discovered
   *           instances of [[scala.reflect.runtime.universe.Type]]
   * @return a vector with one mapped value per immediate type argument of `t`
   */
  def recursivelyMapTypeArguments[A](t: Type)(fn: Type => A): Vector[A] = {
    def process(xs: List[Type]): Vector[A] = {
      xs.toVector.map(fn)
    }

    //The most important logic in this file. This describes how to
    //destructure and extract (if any) type arguments for this type.
    //
    //The most important thing to keep in mind is that the "type" is actually
    //an AST where the type can be represented by any of a number of case classes
    //that changes how their type arguments are accessed.
    t match {
      //Anonymous type such as "new Foo[String] {}" — drop the leading AnyRef parent
      case RefinedType((possiblyAnyRef @ TypeRef(_, _, _)) :: typeRefs, _) if possiblyAnyRef =:= AnyRefType =>
        process(typeRefs)

      //Refined type such as "Foo with Bar with Baz"
      case RefinedType(typeRefs, _) =>
        process(typeRefs)

      //Existential type such as "Seq[_] forSome ..."
      case ExistentialType(/*quantified*/_, underlying) =>
        process(List(underlying))

      //Annotated type
      case AnnotatedType(_, underlying) =>
        process(List(underlying))

      //Standard type
      case TypeRef(_, _, args) =>
        process(args)

      //Unrecognized type
      case _ =>
        throw new IllegalStateException(s"Unable to determine type arguments for $t")
    }
  }
}
/**
 * Provides metadata about where a [[TypeTagTree]] instance was materialized.
 * Populated by the materialization macro from the compiler's enclosing position.
 *
 * @param source The source file from which the [[TypeTagTree]] instance was summoned
 * @param line The line in the source
 * @param column The column in the line in the source
 * @param index The number of characters from the beginning of the source
 */
sealed case class TypeTagTreeSource(source: String, line: Int, column: Int, index: Int)
@scala.annotation.implicitNotFound("No TypeTagTree available for ${T}. If there are type arguments, please consider providing explicit types for all of them.")
sealed trait TypeTagTree[T] {
  /** The [[scala.reflect.runtime.universe.Type]] that this instance represents. */
  val tpe: scala.reflect.runtime.universe.Type

  /** The [[scala.reflect.runtime.universe.TypeSymbol]] that this instance represents. */
  val symbol: scala.reflect.runtime.universe.TypeSymbol

  /** The type arguments, if any, for this [[scala.reflect.runtime.universe.Type]]. */
  val typeArguments: Seq[TypeTagTree[_]]

  /** Provides information about where this [[TypeTagTree]] instance is being used. */
  val source: TypeTagTreeSource

  /** Determine if this [[scala.reflect.runtime.universe.Type]] is considered `refined`. */
  def isRefined: Boolean

  /** Determine if this [[scala.reflect.runtime.universe.Type]] refers to an existential type. */
  lazy val isExistential =
    symbol.isExistential

  /** Provides a string representing the given [[scala.reflect.runtime.universe.Type]]. */
  lazy val toShortString =
    tpe.toString

  /** Provides a string representing this [[TypeTagTree]] instance. */
  override def toString: String =
    s"TypeTagTree($toShortString, $source)"

  /** Determines if another instance is equal to this [[TypeTagTree]] instance.
    * Two trees are equal when their underlying types are equal; otherwise
    * falls back to reference equality.
    *
    * NOTE(review): equals is overridden without a matching hashCode, so
    * instances equal by `tpe` may hash differently — confirm these are
    * never used as hash-map/set keys. */
  override def equals(other: Any): Boolean =
    other match {
      case ref: TypeTagTree[_] if tpe == ref.tpe => true
      case ref: AnyRef => ref eq TypeTagTree.this
      case _ => false
    }
}
object TypeTagTree {
  import scala.reflect.runtime.universe._
  import scala.reflect.macros._
  import scala.language.experimental.macros

  /** Materializes an instance of a [[TypeTagTree]] for the provided type `T`. */
  def typeTagTreeOf[T](implicit ttt: TypeTagTree[T]): TypeTagTree[T] =
    ttt

  /**
   * The use of `implicit def` is to behave similar to [[scala.reflect.api.TypeTags.TypeTag]] in that the compiler
   * will call our macro to conjure an instance when needed.
   *
   * @tparam T The type whose type tag tree will be generated
   * @return An instance of [[TypeTagTree]]
   */
  implicit def apply[T]: TypeTagTree[T] =
    macro Macros.typeTagTree[T]

  /**
   * Provides an extractor for getting the [[scala.reflect.runtime.universe.TypeSymbol]],
   * the [[scala.reflect.runtime.universe.Type]], the type arguments, and the [[TypeTagTreeSource]]
   * for a [[TypeTagTree]].
   *
   * @param ttt The [[TypeTagTree]] instance that will be extracted
   * @tparam T The [[scala.reflect.runtime.universe.Type]] for the [[TypeTagTree]]
   * @return An extracted view of the provided [[TypeTagTree]] instance
   */
  def unapply[T](ttt: TypeTagTree[T]): Option[(TypeSymbol, Type, Seq[TypeTagTree[_]], TypeTagTreeSource)] =
    PartialFunction.condOpt(ttt) {
      case t => (t.symbol, t.tpe, t.typeArguments, t.source)
    }

  /**
   * Takes an existing higher-kinded [[TypeTagTree]] as a template and produces a new instance
   * with its type parameters replaced by the provided `typeParameters`.
   *
   * @param typeConstructorTemplate Higher-kinded [[TypeTagTree]] candidate that will have its
   *                                type parameters replaced
   * @param typeParameters Variable-length list of type parameters that will be used to replace
   *                       the template's type parameters
   * @tparam T Type of the template which will remain unchanged except for its type parameters
   * @return A new [[TypeTagTree]] with new type parameters
   */
  def alterTypeParameters[T](typeConstructorTemplate: TypeTagTree[T], typeParameters: TypeTagTree[_]*): TypeTagTree[T] =
    TypeTagTreeNode(
      typeConstructorTemplate.source,
      alterTypeParameters(typeConstructorTemplate.tpe, typeParameters.map(_.tpe):_*)
    )

  // NOTE(review): uses the `internal` reflection API and an unchecked cast to
  // TypeRefApi — fails at runtime if the template is not a TypeRef.
  def alterTypeParameters(typeConstructorTemplate: Type, typeParameters: Type*): Type = {
    val asTypeRefApi = typeConstructorTemplate.asInstanceOf[TypeRefApi]
    val newType = scala.reflect.runtime.universe.internal.typeRef(asTypeRefApi.pre, asTypeRefApi.sym, typeParameters.toList)
    newType
  }

  private object Macros {
    import scala.reflect.macros._

    /**
     * Generates a [[TypeTagTree]] instance when applied to a given [[scala.reflect.runtime.universe.Type]].
     */
    def typeTagTree[T : c.WeakTypeTag](c: blackbox.Context): c.Expr[TypeTagTree[T]] = {
      import c.universe._

      val typeParam = c.weakTypeOf[T]
      val pos = c.enclosingPosition

      /**
       * Generates an AST representing the following:
       *   `TypeTagTreeNode[T](TypeTagTreeSource(...))(<implicitly discovered tag>)`
       * `T` is the provided type and its implicit type tag is automagically
       * found and created by the compiler.
       */
      c.Expr[TypeTagTreeNode[T]] {
        q"_root_.com.github.davidhoyt.fluxmuster.TypeTagTreeNode[$typeParam](_root_.com.github.davidhoyt.fluxmuster.TypeTagTreeSource(${pos.source.toString()}, ${pos.line}, ${pos.column}, ${pos.start}))"
      }
    }
  }
}
| davidhoyt/fluxmuster | macros/src/main/scala/com/github/davidhoyt/fluxmuster/TypeTagTree.scala | Scala | apache-2.0 | 8,305 |
// Negative compilation test: every `implicitly` below must fail to resolve,
// and the trailing `// error` markers record where the compiler must report
// it (they are significant — do not remove or move them). The test exercises
// which @implicitNotFound message is chosen when a trait inherits from
// multiple annotated supertypes, with and without its own annotation.
import annotation.implicitNotFound

@implicitNotFound("There's no Foo1[${A}, ${B}]")
trait Foo1[A, B]

@implicitNotFound("There's no Foo2[${A}, ${B}]")
trait Foo2[A, B]

trait Bar12[C, D] extends Foo1[D, C] with Foo2[D, C]
trait Bar21[C, D] extends Foo2[D, C] with Foo1[D, C]

@implicitNotFound("There's no Baz12[${C}, ${D}]")
trait Baz12[C, D] extends Foo1[D, C] with Foo2[D, C]
@implicitNotFound("There's no Baz21[${C}, ${D}]")
trait Baz21[C, D] extends Foo2[D, C] with Foo1[D, C]

object Test {
  implicitly[Bar12[Int, String]] // error
  implicitly[Bar21[Int, String]] // error
  implicitly[Baz12[Int, String]] // error
  implicitly[Baz21[Int, String]] // error
}
| dotty-staging/dotty | tests/neg/i10098.scala | Scala | apache-2.0 | 670 |
package com.outr.arango.upgrade

import com.outr.arango.Graph

import scala.concurrent.Future

/** A single, named database migration step applied to a [[Graph]]. */
trait DatabaseUpgrade {
  /** Human-readable identifier; defaults to the class name with the '$'
    * suffix Scala appends to object class names stripped. */
  def label: String = getClass.getSimpleName.replace("$", "")

  // presumably: whether this upgrade also runs against freshly created
  // databases — TODO confirm against the upgrade runner
  def applyToNew: Boolean

  // presumably: whether startup must wait for this upgrade to finish —
  // TODO confirm against the upgrade runner
  def blockStartup: Boolean

  /** When true, the upgrade is re-run on every startup instead of once. */
  def alwaysRun: Boolean = false

  /** Performs the upgrade against the given graph. */
  def upgrade(graph: Graph): Future[Unit]

  /** Hook invoked after startup; defaults to an already-completed no-op. */
  def afterStartup(graph: Graph): Future[Unit] = Future.successful(())
} | outr/arangodb-scala | driver/src/main/scala/com/outr/arango/upgrade/DatabaseUpgrade.scala | Scala | mit | 384 |
package wow.realm.protocol
import akka.actor.{Actor, ActorLogging}
import scodec.bits.BitVector
import scodec.{Codec, DecodeResult, Err}
import wow.auth.utils.{MalformedPacketException, PacketPartialReadException}
import wow.realm.RealmContext
import wow.utils.Reflection
import scala.collection.mutable
/**
  * Tag used to mark a class as a packet handler:
  * an Akka actor with logging and realm context mixed in.
  */
trait PacketHandlerTag extends Actor with ActorLogging with RealmContext
/**
  * A packet handler is an actor which handles one type of packet
  *
  * @tparam A packet handler tagged class
  */
abstract class PacketHandler[A <: PacketHandlerTag] {
  /**
    * List of OpCodes supported by this packet handler
    */
  def opCodes: OpCodes.ValueSet

  /**
    * Handle incoming packet
    *
    * @param header header of packet
    * @param payloadBits payload bits for packet
    * @param self handling context
    */
  def handle(header: ClientHeader, payloadBits: BitVector)(self: A): Unit

  /**
    * Fails with exception (declared Unit but never returns normally)
    *
    * @param e exception
    */
  def fail(e: Throwable): Unit = throw e

  /**
    * Fails by wrapping a scodec error in a MalformedPacketException
    *
    * @param e scodec error
    */
  def fail(e: Err): Unit = fail(MalformedPacketException(e))
}
/**
  * Entry point for finding/calling packet handlers
  */
object PacketHandler {

  import scala.reflect.runtime.universe._

  /**
    * For every opcode, type of context that should be used to handle it
    * (i.e. which actor it's forwarded to).
    * Built once, eagerly, by reflectively scanning handler objects for each
    * entry in HandledBy.TypeTagMap.
    */
  private val processorByOpCode: mutable.Map[OpCodes.Value, HandledBy.Value] = {
    val bld = mutable.HashMap.newBuilder[OpCodes.Value, HandledBy.Value]

    for ((typeTag, processedBy) <- HandledBy.TypeTagMap) {
      val handlers = Reflection.objectsOf[PacketHandler[PacketHandlerTag]](typeTag)

      handlers.flatMap(_.opCodes.map(_ -> processedBy)).foreach(bld.+=)
    }

    bld.result()
  }

  /**
    * Get's who processes packet
    *
    * @param header header of packet to be processed
    * @return processed by (Unhandled when no handler declares the opcode)
    */
  def apply(header: ClientHeader): HandledBy.Value = processorByOpCode.getOrElse(header.opCode, HandledBy.Unhandled)

  /**
    * Processes packet
    *
    * @param header      header
    * @param payloadBits payload
    * @param self        packet handler context
    * @tparam A packet handler context type
    */
  def apply[A <: PacketHandlerTag : TypeTag](header: ClientHeader, payloadBits: BitVector)(self: A): Unit = {
    // NOTE(review): a new PacketHandlerHelper (and thus a reflection scan via
    // Reflection.objectsOf) is created on every call — consider caching per
    // type tag; confirm the performance impact on the packet path.
    val handlers = new PacketHandlerHelper[A]().handlersByOpCode

    // Unsupported packets will be silently ignored
    if (handlers.contains(header.opCode)) {
      self.log.debug(s"Got packet $header/${payloadBits.bytes.length}")
      handlers(header.opCode).handle(header, payloadBits)(self)
    } else {
      self.log.info(s"Got unhandled packet $header/${payloadBits.bytes.length}")
    }
  }

  /**
    * Helper class for reflection on generic type of packet handler
    *
    * @param typeTag type tag for packet handler with correct tag
    * @tparam A packet handler tag
    */
  private class PacketHandlerHelper[A <: PacketHandlerTag]()(implicit typeTag: TypeTag[PacketHandler[A]]) {
    /**
      * List of handlers for tag
      */
    private val handlers = Reflection.objectsOf[PacketHandler[A]]

    /**
      * Map of handlers for tag by opcode
      */
    val handlersByOpCode: Map[OpCodes.Value, PacketHandler[A]] =
      handlers flatMap (x => x.opCodes.toList map (_ -> x)) toMap
  }
}
}
/**
  * Payload-containing packet handler: decodes the payload bits with the
  * implicit codec before dispatching to the typed handle method.
  *
  * @tparam B payload type
  */
abstract class PayloadHandler[A <: PacketHandlerTag, B <: Payload with ClientSide]
(override val opCodes: OpCodes.ValueSet)
(implicit codec: Codec[B])
  extends PacketHandler[A] {

  /**
    * Construct from the OpCodeProvider for the packet type
    */
  def this()(implicit opCodeProvider: OpCodeProvider[B], codec: Codec[B]) =
    this(OpCodes.ValueSet(opCodeProvider.opCode))(codec)

  /**
    * Processes an incoming payload
    *
    * @param payload payload to be processed
    */
  protected def handle(header: ClientHeader, payload: B)(ps: A): Unit

  // Decode failure is routed to fail(); a successful decode must consume all
  // bits — any remainder raises PacketPartialReadException.
  override def handle(header: ClientHeader, payloadBits: BitVector)(ps: A): Unit = {
    codec
      .decode(payloadBits)
      .fold(fail, {
        case DecodeResult(payload, BitVector.empty) => handle(header, payload)(ps)
        case DecodeResult(_, remainder) => throw PacketPartialReadException(remainder)
      })
  }
}
/**
  * Base class for handlers that act on the packet header alone; any payload
  * bits are intentionally discarded.
  */
abstract class IgnorePayloadHandler[A <: PacketHandlerTag] extends PacketHandler[A] {
  /**
    * Processes a packet using only its header.
    */
  def handle(header: ClientHeader)(ps: A): Unit

  /** Delegates to the header-only overload, dropping the payload bits. */
  override def handle(header: ClientHeader, payloadBits: BitVector)(ps: A): Unit =
    handle(header)(ps)
}
| SKNZ/SpinaciCore | wow/core/src/main/scala/wow/realm/protocol/PacketHandler.scala | Scala | mit | 4,758 |
/*
Copyright (c) 2012, The Children's Hospital of Philadelphia All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package edu.chop.cbmi.dataExpress.exceptions

/**
  * Signals that a database table with the given name does not exist.
  *
  * @param table_name name of the missing table (included in the message)
  */
case class TableDoesNotExist (table_name : String) extends Exception(s"Table name, $table_name, does not exist")
/**********************************************************************************************************************
* This file is part of Scrupal, a Scalable Reactive Web Application Framework for Content Management *
* *
* Copyright (c) 2015, Reactific Software LLC. All Rights Reserved. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed *
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for *
* the specific language governing permissions and limitations under the License. *
**********************************************************************************************************************/
package scrupal.admin
import java.time.Instant
import scrupal.api.Html.{Contents, ContentsArgs}
import scrupal.api.html.BootstrapPage
import scrupal.api.{Form, _}
import scrupal.core.Site_t
import scrupal.core.nodes.HtmlNode
import scrupal.utils.OSSLicense
import scalatags.Text.all._
case class AdminApp(implicit scrpl: Scrupal) extends Application('admin) {
// Application identity and metadata
val kind : Symbol = 'Admin
val author = "Reactific Software LLC"
val copyright = "© 2013-2015 Reactific Software LLC. All Rights Reserved."
val license = OSSLicense.ApacheV2
val timestamp = Some(Instant.parse("2014-12-05T12:20:06.00Z"))
// database-configuration form shared by the layout and its accept reactor
val dbForm = new DBForm

def description : String = "The Scrupal Administrative Application"

def name : String = "AdminApp"

def created : Option[Instant] = timestamp

def modified : Option[Instant] = timestamp

// side effect at construction time: register this app with the site selector
StatusBar.siteSelectionForm.enable(this)

/** Adds a provider serving the admin layout node to the inherited delegates. */
override def delegates: Iterable[Provider] = {
  super.delegates ++ Iterable(new SingleNodeProvider(adminLayout(dbForm)))
}
/** Builds the HtmlNode rendering the admin page, wiring the supplied
  * database form plus the fixed set of page sections into AdminPage's
  * template arguments. */
def adminLayout(dbForm: Form.Form) = {
  new HtmlNode(
    name = "AdminLayout",
    description = "Layout for Admin application",
    template = AdminPage
  ) {
    override def args: Map[String, Html.Generator] = Map(
      "StatusBar" → StatusBar,
      "Configuration" → SiteConfig,
      "DatabaseForm" → dbForm,
      "Database" → Database,
      "Modules" → Modules,
      "Applications" → Applications
    )
  }
}
class DBForm extends Form.Simple('database_form, "Database Form", "Description", "/admin/database_form", Seq(
Form.TextField("Host:", "The hostname where your MongoDB server is running",
DomainName_t, "localhost", optional = true, inline = true, attrs = Seq(placeholder := "localhost")),
Form.IntegerField("Port:", "The port number at which your MongoDB server is running",
TcpPort_t, 5253, optional = true, inline = true, attrs = Seq(placeholder := "5253")),
Form.TextField("Name:", "The name of the database you want to connect to",
Identifier_t, "scrupal", optional = true, inline = true, attrs = Seq(placeholder := "scrupal")),
Form.TextField("User:", "The user name for the MongoDB server authentication",
Identifier_t, "admin", optional = true, inline = true, attrs = Seq(placeholder := "admin")),
Form.PasswordField("Password:", "The password for the MongoDB server authentication", Password_t, inline = true),
Form.SubmitField("", "Submit database configuration to Scrupal server.", "Configure Database")
)) {
override def provideAcceptReactor(matchingSegment : String) : Form.AcceptForm = {
DataBaseFormAcceptance(this)
}
}
case class DataBaseFormAcceptance(override val form: Form.Form) extends Form.AcceptForm(form) {
/*
override def handleValidatedFormData(doc : BSONDocument) : Response = {
super.handleValidatedFormData(doc)
}
override def handleValidationFailure(errors : ValidationFailed[BSONValue]) : Result[_] = {
val node = adminLayout(formWithErrors(errors))
val contents = node.results(context)
HtmlResult(contents, Successful)
}
*/
}
object StatusBar extends Html.Template('AdminStatusBar) {
lazy val siteSelectionForm = new SiteSelectionForm
val description = "Lists the Sites"
def apply(context: Context, args: ContentsArgs = Html.EmptyContentsArgs): Contents = {
Seq(siteSelectionForm.render)
}
class SiteSelectionForm extends Form.Simple('SiteSelectionForm, "SiteSelection",
"A form for selecting the site to administrate", "/admin/siteselectionform",
Seq(
Form.SelectionField("Site: ", "Select a site to administrate", Site_t, inline = true)
)
)
}
object SiteConfig extends Html.Template('AdminSite) {
val description = "Configuration"
def apply(context: Context, args: ContentsArgs = Html.EmptyContentsArgs): Contents = {
Seq(div(cls := "well",
for ((enablee, enablement) ← context.site.get.getEnablementMap) {
p(enablee.id.name, " is enabled in ", enablement.map { e ⇒ e.id.name }.mkString(", "))
}
))
}
}
object Database extends Html.Template('AdminDatabase) {
val description = "Database Configuration"
def apply(context: Context, args: ContentsArgs = Html.EmptyContentsArgs): Contents = {
Seq(
div(cls := "well", tag("DatabaseForm", context, args))
)
}
}
object Modules extends Html.Template('AdminModules) {
val description = "Modules Administration"
def apply(context: Context, args: ContentsArgs = Html.EmptyContentsArgs): Contents = {
Seq(
div(cls := "well",
p("Modules Defined:"),
ul(
for (mod ← scrupal.Modules.values) yield {
li(mod.id.name, " - ", mod.description, " - ", mod.moreDetailsURL.toString)
}
)
)
)
}
}
object Applications extends Html.Template('AdminApplications) {
val description = "Applications Administration"
def apply(context: Context, args: ContentsArgs = Html.EmptyContentsArgs): Contents = {
Seq(
div(cls := "well",
p("Applications:"),
ul(
{
for (app ← context.site.get.applications) yield {
li(app.name, " - ", app.delegates.map { p ⇒ p.toString }.mkString(", ")) // FIXME: Provider.toString()?
}
}.toSeq
)
)
)
}
}
object AdminPage extends BootstrapPage('AdminPage, "Scrupal Admin", "Scrupal Administration") {
override def body_content(context: Context, args: ContentsArgs): Contents = {
Seq(
div(cls := "container",
div(cls := "panel panel-primary",
div(cls := "panel-heading", h1(cls := "panel-title", tag("StatusBar", context, args)))),
div(cls := "panel-body",
div(role := "tabpanel",
ul(cls := "nav nav-pills", role := "tablist", scalatags.Text.attrs.id := "AdminTab",
li(cls := "active", role := "presentation",
a(href := "#database", aria.controls := "database", role := "tab", data("toggle") := "pill",
"Database")),
li(role := "presentation",
a(href := "#configuration", aria.controls := "configuration", role := "tab",
data("toggle") := "pill", "Configuration")),
li(role := "presentation",
a(href := "#modules", aria.controls := "modules", role := "tab",
data("toggle") := "pill", "Modules")),
li(role := "presentation",
a(href := "#applications", aria.controls := "applications", role := "tab",
data("toggle") := "pill", "Applications"))
),
div(cls := "tab-content",
div(role := "tabpanel", cls := "tab-pane active", scalatags.Text.all.id := "database",
tag("Database", context, args)),
div(role := "tabpanel", cls := "tab-pane", scalatags.Text.all.id := "configuration",
tag("Configuration", context, args)),
div(role := "tabpanel", cls := "tab-pane", scalatags.Text.all.id := "modules",
tag("Modules", context, args)),
div(role := "tabpanel", cls := "tab-pane", scalatags.Text.all.id := "applications",
tag("Applications", context, args))
)
)
)
)
)
}
}
dbForm.enable(this)
}
/** Entity that handles administration of Scrupal sites.
  *
  * Every CRUD/facet operation currently resolves to the corresponding NoOp
  * reactor; the large commented-out block at the bottom sketches the intended
  * implementations. All Long-id overloads simply delegate to the String-id
  * counterparts via `toString`.
  */
class SiteAdminEntity(implicit scrupal: Scrupal) extends Entity('SiteAdmin) {
  val author = "Reactific Software LLC"
  val copyright = "© 2013-2015 Reactific Software LLC. All Rights Reserved."
  val license = OSSLicense.ApacheV2
  def kind: Symbol = 'SiteAdmin
  def description: String = "An entity that handles administration of Scrupal sites."

  // No instance payload is modeled yet.
  def instanceType : BundleType = BundleType.empty

  // --- Instance CRUD (all no-ops for now) ---

  override def create(details: String): EntityCreate = {
    NoOpEntityCreate(details)
  }
  override def retrieve(instance_id: Long, details: String): EntityRetrieve = {
    retrieve(instance_id.toString, details)
  }
  override def retrieve(instance_id: String, details: String): EntityRetrieve = {
    NoOpEntityRetrieve(instance_id, details)
  }
  override def update(instance_id: Long, details: String): EntityUpdate = {
    update(instance_id.toString, details)
  }
  override def update(instance_id: String, details: String): EntityUpdate = {
    NoOpEntityUpdate(instance_id, details)
  }
  override def delete(instance_id: Long, details: String): EntityDelete = {
    delete(instance_id.toString, details)
  }
  override def delete(instance_id: String, details: String): EntityDelete = {
    NoOpEntityDelete(instance_id, details)
  }
  override def query(details: String): EntityQuery = {
    NoOpEntityQuery(details)
  }

  // --- Facet operations (all no-ops for now) ---

  override def add(instance_id: Long, facet: String, details: String): EntityAdd = {
    add(instance_id.toString, facet, details)
  }
  override def add(instance_id: String, facet: String, details: String): EntityAdd = {
    NoOpEntityAdd(instance_id, facet, details)
  }
  override def get(instance_id: Long, facet: String, facet_id: String, details: String): EntityGet = {
    get(instance_id.toString, facet, facet_id, details)
  }
  override def get(instance_id: String, facet: String, facet_id: String, details: String): EntityGet = {
    NoOpEntityGet(instance_id, facet, facet_id, details)
  }
  override def set(instance_id: Long, facet: String, facet_id: String, details: String): EntitySet = {
    set(instance_id.toString, facet, facet_id, details)
  }
  override def set(instance_id: String, facet: String, facet_id: String, details: String): EntitySet = {
    NoOpEntitySet(instance_id, facet, facet_id, details)
  }
  override def remove(instance_id: Long, facet: String, facet_id: String, details: String): EntityRemove = {
    remove(instance_id.toString, facet, facet_id, details)
  }
  override def remove(instance_id: String, facet: String, facet_id: String, details: String): EntityRemove = {
    NoOpEntityRemove(instance_id, facet, facet_id, details)
  }
  override def find(instance_id: Long, facet: String, details: String): EntityFind = {
    find(instance_id.toString, facet, details)
  }
  override def find(instance_id: String, facet: String, details: String): EntityFind = {
    NoOpEntityFind(instance_id, facet, details)
  }

  /* FIXME: Implement SiteAdminEntity methods (currently defaulting to noop)
  override def create(context: Context, id: String, instance: BSONDocument) : Create = {
    new Create(context, id, instance) {
      override def apply() : Future[Result[_]] = {
        Future.successful( HtmlResult(scrupal.core.views.html.echo.create(id, instance)(context)) )
      }
    }
  }
  override def retrieve(context: Context, id: String) : Retrieve = {
    new Retrieve(context, id) {
      override def apply : Future[Result[_]] = {
        Future.successful( HtmlResult(scrupal.core.views.html.echo.retrieve(id)(context)) )
      }
    }
  }
  override def update(context: Context, id: String, fields: BSONDocument) : Update = {
    new Update(context, id, fields) {
      override def apply : Future[Result[_]] = {
        Future.successful( HtmlResult(scrupal.core.views.html.echo.update(id, fields)(context)) )
      }
    }
  }
  override def delete(context: Context, id: String) : Delete = {
    new Delete(context, id) {
      override def apply : Future[Result[_]] = {
        Future.successful( HtmlResult(scrupal.core.views.html.echo.delete(id)(context)) )
      }
    }
  }
  override def query(context: Context, id: String, fields: BSONDocument) : Query = {
    new Query(context, id, fields) {
      override def apply : Future[Result[_]] = {
        Future.successful( HtmlResult(scrupal.core.views.html.echo.query(id, fields)(context)) )
      }
    }
  }
  override def createFacet(context: Context, what: Seq[String], instance: BSONDocument) : CreateFacet = {
    new CreateFacet(context, what, instance) {
      override def apply : Future[Result[_]] = {
        Future.successful( HtmlResult(scrupal.core.views.html.echo.createFacet(what, instance)(context)) )
      }
    }
  }
  override def retrieveFacet(context: Context, what: Seq[String]) : RetrieveFacet = {
    new RetrieveFacet(context, what) {
      override def apply : Future[Result[_]] = {
        Future.successful( HtmlResult(scrupal.core.views.html.echo.retrieveFacet(what)(context)) )
      }
    }
  }
  override def updateFacet(context: Context, id: String,
                           what: Seq[String], fields: BSONDocument) : UpdateFacet = {
    new UpdateFacet(context, id, what, fields) {
      override def apply : Future[Result[_]] = {
        Future.successful( HtmlResult(scrupal.core.views.html.echo.updateFacet(id, what, fields)(context)) )
      }
    }
  }
  override def deleteFacet(context: Context, id: String, what: Seq[String]) : DeleteFacet = {
    new DeleteFacet(context, id, what) {
      override def apply : Future[Result[_]] = {
        Future.successful( HtmlResult(scrupal.core.views.html.echo.deleteFacet(id, what)(context)) )
      }
    }
  }
  override def queryFacet(context: Context, id: String,
                          what: Seq[String], args: BSONDocument) : QueryFacet = {
    new QueryFacet(context, id, what, args) {
      override def apply : Future[Result[_]] = {
        Future.successful( HtmlResult(scrupal.core.views.html.echo.queryFacet(id, what, args)(context)) )
      }
    }
  }
  */
}
| scrupal/scrupal | scrupal-admin/src/main/scala/scrupal/admin/AdminApp.scala | Scala | apache-2.0 | 15,342 |
package rpgboss.editor
import rpgboss.editor.uibase._
import rpgboss.editor.cache._
import rpgboss.model._
import rpgboss.model.resource._
import scala.swing._
import rpgboss.editor.Internationalized._
/** Persistence state of an open map or project: unchanged, modified, or
  * marked for deletion (deletion takes effect on save).
  */
object Dirtiness extends Enumeration {
  val Clean, Dirty, Deleted = Value
}
/** State of one open map: its metadata, lazily-loaded tile data, and whether
  * it has unsaved changes or is pending deletion.
  *
  * @param map        the map metadata
  * @param dirty      persistence state (Clean / Dirty / Deleted)
  * @param mapDataOpt tile data, if it has been loaded
  */
case class MapState(map: RpgMap,
                    dirty: Dirtiness.Value,
                    mapDataOpt: Option[RpgMapData]) {
  import Dirtiness._

  /** Persists this map's pending changes to disk: writes a Dirty map out,
    * deletes the files of a Deleted map, does nothing for a Clean one.
    *
    * Fix: the previous code used `Option.map` purely for its side effect,
    * which also left the method's inferred result type as `Any`; `foreach`
    * plus an explicit `Unit` result type states the intent.
    */
  def save(p: Project): Unit = {
    if (dirty == Dirty) {
      // Save if it's dirty: write the metadata and, when loaded, the tile data.
      map.writeMetadata()
      mapDataOpt.foreach(data => map.saveMapData(data))
    } else if (dirty == Deleted) {
      // Effect deletion: remove the metadata and the primary map file.
      RpgMap.metadataPath(p, map.name).delete()
      val (mapFile, botFile, midFile, topFile, evtFile) =
        RpgMapData.datafiles(p, map.name)
      mapFile.delete()
      // The other files stick around. No real reason to delete them.
    }
  }
}
/**
 * This class manages the dirtiness and saving of all the open maps.
 */
class StateMaster(mainPanel: MainPanel, private var proj: Project) {
  import Dirtiness._

  val assetCache = new AssetCache(proj)

  // Dirtiness of the project metadata itself; per-map dirtiness lives in mapStates.
  private var projDirty = Dirtiness.Clean
  // All open maps, keyed by map name.
  private val mapStates = collection.mutable.Map[String, MapState]()

  /** Reads every map of the project from disk and registers it as Clean. */
  def loadProjectData() = {
    RpgMap.list(proj).map(RpgMap.readFromDisk(proj, _)).foreach(map => {
      addMap(map, None, Dirtiness.Clean)
    })
  }

  // Populate the map table on construction.
  loadProjectData()

  // maps map is cleared of excess data upon saving
  /** Persists project metadata and every open map, then reloads from disk. */
  def save() = {
    // save project (database, etc.)
    if (projDirty == Dirty) {
      if (proj.writeMetadata()) {
        projDirty = Clean
      }
    }
    mapStates.values.map(_.save(proj)) // save all the maps
    loadProjectData() // reload everything from disk so stale state is dropped
    mainPanel.updateDirty(this)
  }

  /** True if the project or any open map has unsaved (or pending-delete) changes. */
  def stateDirty =
    projDirty != Clean || mapStates.values.exists(_.dirty != Clean)

  /** If dirty, asks the user whether to save before proceeding.
    * Returns true when it is safe to proceed (saved, or changes discarded),
    * false when the user cancelled.
    */
  def askSaveUnchanged(diagParent: Component) = {
    if (stateDirty) {
      Dialog.showConfirmation(diagParent,
        getMessage("Save_Changes_To_Project"),
        "rpgboss",
        Dialog.Options.YesNoCancel) match {
        case Dialog.Result.Yes =>
          save()
          true
        case Dialog.Result.No => true
        case Dialog.Result.Cancel => false
        case _ => false
      }
    } else true
  }

  def getProj = proj
  def getProjData = proj.data

  /** Replaces the project data and marks the project dirty. */
  def setProjData(newData: ProjectData) = {
    proj = proj.copy(data = newData)
    projDirty = Dirty
    mainPanel.updateDirty(this)
  }

  /** Registers (or replaces) a map entry with the given dirtiness. */
  def addMap(
    map: RpgMap,
    mapDataOpt: Option[RpgMapData],
    dirty: Dirtiness.Value) = {
    mapStates.put(map.name, MapState(map, dirty, mapDataOpt))
  }

  /** Marks a map as Deleted; the actual file removal happens on save(). */
  def removeMap(mapName: String) = {
    for (mapState <- mapStates.get(mapName)) {
      mapStates.update(mapName, mapState.copy(dirty = Dirtiness.Deleted))
    }
  }

  def getMapStates = mapStates
  def getMapMetas = mapStates.values.map(_.map).toSeq.sortBy(_.name)

  // Must be sure that mapId exists and map data loaded to call
  def getMap(mapName: String) =
    mapStates.get(mapName).map(_.map)

  /** Replaces a map's metadata, marking it Dirty unless markDirty is false.
    * NOTE(review): fails with an exception if mapName is not registered.
    */
  def setMap(mapName: String, map: RpgMap, markDirty: Boolean = true) = {
    val curState = mapStates.get(mapName).get
    val newDirty = if (markDirty) Dirtiness.Dirty else curState.dirty
    mapStates.update(mapName, curState.copy(map = map, dirty = newDirty))
    mainPanel.updateDirty(this)
  }

  /** Returns the map's tile data, lazily loading it from disk on first access
    * (recreating empty data, after warning the user, if the file is missing)
    * and caching it in the map state.
    */
  def getMapData(mapName: String) = {
    assert(mapStates.contains(mapName), "map %s doesn't exist".format(mapName))
    val mapState = mapStates.get(mapName).get
    mapState.mapDataOpt getOrElse {
      val mapData = mapState.map.readMapData() getOrElse {
        Dialog.showMessage(null, getMessage("Map_Data_File_Missing_Recreating"),
          "Error", Dialog.Message.Error)
        RpgMap.emptyMapData(mapState.map.metadata.xSize,
          mapState.map.metadata.ySize)
      }
      mapStates.update(mapName,
        mapState.copy(mapDataOpt = Some(mapData)))
      mapData
    }
  }

  /** Stores new tile data for a map and marks it Dirty. */
  def setMapData(mapName: String, mapData: RpgMapData) = {
    mapStates.update(mapName,
      mapStates.get(mapName).get.copy(
        mapDataOpt = Some(mapData), dirty = Dirtiness.Dirty))
    mainPanel.updateDirty(this)
  }
}
| toastythought/rpgboss | editor/src/main/scala/rpgboss/editor/StateMaster.scala | Scala | agpl-3.0 | 4,175 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600e.v3
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600e.v3.retriever.CT600EBoxRetriever
/** CT600E (v3) box E150: disposals in the period of shares in, and loans to,
  * controlled companies. The value is optional; when supplied it must be a
  * whole number that is zero or positive.
  */
case class E150(value: Option[Int]) extends CtBoxIdentifier("Disposals in period: Shares in, and loans to, controlled companies") with CtOptionalInteger with Input with ValidatableBox[CT600EBoxRetriever]{
  // Only constraint: a supplied value must be non-negative.
  override def validate(boxRetriever: CT600EBoxRetriever): Set[CtValidation] = validateZeroOrPositiveInteger(this)
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600e/v3/E150.scala | Scala | apache-2.0 | 1,053 |
package poxelcoll.geometry.convexccwpolygon
/* Copyright (C) 2012 Jens W.-Møller
* All rights reserved.
*
* This file is part of Poxelcoll.
*
* Poxelcoll is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Poxelcoll is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Poxelcoll. If not, see <http://www.gnu.org/licenses/>.
*/
import poxelcoll.P
import poxelcoll.BoundingBox
/** Supports operations for finding the intersection between two polygons.
*
* The efficiency for finding the intersection is intended to be linear in the
* size of the polygons points.
*
* ==Status==
*
* The current implementation as of 0.1 is meant to be geometrically robust,
* but gives no guarantees in regards to being numerically robust.
* The consequences of the lack of numerical robustness is unknown,
* but may range from imprecision to undefined behaviour.
* The numerical robustness may be improved in the future.
*/
object PolygonIntersection {

  import GeneralFunctions._

  /** Extracts the bounds from the given boundingBox option if present, else derives them from the given, non-empty points.
    *
    * NOTE(review): this helper appears to be unused within this object
    * (`intersection` uses `bBoxNonemptyPolygon` instead) — candidate for removal.
    *
    * @param polyPoints non-empty points
    * @param boundingBox bounding box option
    * @return the bounding box of the given polygon and bounding box option, or possibly undefined behaviour if points is empty
    */
  private[this] def getBounds(polyPoints:Seq[P], boundingBox:Option[(P, P)]) = boundingBox match {
    case Some(a) => a
    case None => {
      val polyPointsX = polyPoints.map(_.x)
      val polyPointsY = polyPoints.map(_.y)
      (P(polyPointsX.min, polyPointsY.min), P(polyPointsX.max, polyPointsY.max))
    }
  }

  /** A function that returns the leftmost point-index of the two, and if equally leftmost, the uppermost.
    *
    * Behaviour is undefined if the point-index pair has the same point. This should never happen.
    *
    * @return a function that finds the leftmost, upper point-index. The index corresponds to the given point
    */
  private[this] def chooseLeftmostUpperPoint = ((oldPointIndex:(P, Int), newPointIndex:(P, Int)) =>
    // Keep the old candidate when the new one is strictly to the right of it,
    // or at the same x but lower; otherwise the new candidate wins.
    if (newPointIndex._1.x > oldPointIndex._1.x || (newPointIndex._1.x == oldPointIndex._1.x && newPointIndex._1.y < oldPointIndex._1.y))
      oldPointIndex
    else newPointIndex
  )

  /** Given a non-empty point sequence, find the bounding box.
    *
    * Behaviour is undefined if the point sequence is empty.
    *
    * @param polyPoints a non-empty point sequence
    * @return bounding box of the non-empty point sequence, or undefined if empty
    */
  private[this] def bBoxNonemptyPolygon(polyPoints:IndexedSeq[P]) = {
    val polyPointsX = polyPoints.map(_.x)
    val polyPointsY = polyPoints.map(_.y)
    BoundingBox(P(polyPointsX.min, polyPointsY.min), P(polyPointsX.max, polyPointsY.max))
  }

  /** Finds the intersection between a point and a polygon.
    *
    * @param point the point
    * @param poly the polygon
    * @return the intersection
    */
  private[this] def handlePointPolygon(point:Point, poly:Polygon):EmptyPoint = {
    // Close the loop by appending the first vertex again.
    val polyPlusHead = (poly.p1 :: poly.p2 :: poly.p3 :: poly.rest.toList) :+ poly.p1
    // For a convex CCW polygon, the point is inside (or on the boundary) iff
    // it lies on the left side (cross product >= 0) of every directed edge.
    def goThroughPoly(polys:List[P]):Boolean = polys match {
      case x1::x2::xs => {
        val v1 = x2 - x1
        val v2 = point.p - x1
        (v1 X v2) >= 0.0 && goThroughPoly(x2::xs)
      }
      case x::xs => true
      case Nil => true
    }
    if (goThroughPoly(polyPlusHead)) point else Empty
  }

  /** Finds the intersection between a line and a polygon.
    *
    * @param line the line
    * @param poly the polygon
    * @return the intersection
    */
  private[this] def handleLinePoly(line:Line, poly:Polygon) = {
    //Collide all line-segment pairs, and return the results.
    val p11 = line.p1
    val p12 = line.p2
    val polygonListPlusHead = (poly.p1 :: poly.p2 :: poly.p3 :: poly.rest.toList) :+ poly.p1 //NOTE: Add head.
    // Intersects the line with each polygon edge in turn. A segment (Line)
    // overlap short-circuits the recursion: it already is the full intersection.
    def collideAll(polyList:List[P], res:List[EmptyPointLine]):List[EmptyPointLine] = polyList match {
      case x1::x2::xs => {
        val p21 = x1
        val p22 = x2
        val collisionResult = handleLineLine(p11, p12, p21, p22)
        collisionResult match {
          case Empty => collideAll(x2::xs, res)
          case pRes @ Point(_) => collideAll(x2::xs, res :+ pRes)
          case lineRes @ Line(_, _) => List(lineRes)
        }
      }
      case x::xs => res
      case Nil => res
    }
    // Line endpoints lying inside the polygon belong to the intersection too.
    val insideEnds = {
      List(handlePointPolygon(Point(p11), poly)) ++
      List(handlePointPolygon(Point(p12), poly))
    }
    val collisions = collideAll(polygonListPlusHead, List.empty)
    // Two distinct contact points form a segment; one forms a point; none: empty.
    val finalResult = collisions match {
      case List(a@Line(_, _)) => a
      case _ => {
        //Make a set out of the points, thereby removing duplicates.
        val matchOnlyPoint = {case b:Point => b}: PartialFunction[EmptyPointLine, Point]
        val finalFinalRes = collisions ++ insideEnds
        finalFinalRes.toSet.collect(matchOnlyPoint).take(2).toList match {
          case List(a, b) => Line.create(a.p, b.p)
          case List(a) => Point(a.p)
          case _ => Empty
        }
      }
    }
    Right(finalResult)
  }

  /** Finds the intersection between two polygons, that may be full or not-full.
    *
    * @param poly1 the first polygon
    * @param poly2 the second polygon
    * @param poly1Full whether the first polygon is full
    * @param poly2Full whether the second polygon is full
    * @param poly1ApproxBoundingBox the optional bounding box of the first polygon, to avoid possible recalculation
    * @param poly2ApproxBoundingBox the optional bounding box of the second polygon, to avoid possible recalculation
    * @return the intersection of the polygons
    */
  def intersection(poly1:ConvexCCWPolygon, poly2:ConvexCCWPolygon, poly1Full:Boolean, poly2Full:Boolean,
      poly1ApproxBoundingBox:Option[BoundingBox] = None, poly2ApproxBoundingBox:Option[BoundingBox] = None):Either[Boolean, ConvexCCWPolygon] = {

    val poly1Points = poly1.points
    val poly2Points = poly2.points

    // Cheap rejection test: check approximate bounding boxes first when given,
    // then the exact boxes.
    val boundingBoxesIntersect = {
      (poly1, poly2) match { //Ensure that neither of the polygons are empty.
        case (Empty, _) | (_, Empty) => false
        case _ => {
          //Match with the approximate bounding box if existing, and if not or no approximate, check actual bounding box.
          (poly1ApproxBoundingBox, poly2ApproxBoundingBox) match {
            case (None, None) => {
              bBoxNonemptyPolygon(poly1Points) intersects bBoxNonemptyPolygon(poly2Points)
            }
            case (Some(approx1), None) => {
              val boundingBox2 = bBoxNonemptyPolygon(poly2Points)
              (approx1 intersects boundingBox2) && (bBoxNonemptyPolygon(poly1Points) intersects boundingBox2)
            }
            case (None, Some(approx2)) => {
              val boundingBox1 = bBoxNonemptyPolygon(poly1Points)
              (boundingBox1 intersects approx2) && (boundingBox1 intersects bBoxNonemptyPolygon(poly2Points))
            }
            case (Some(approx1), Some(approx2)) => {
              (approx1 intersects approx2) && (bBoxNonemptyPolygon(poly1Points) intersects bBoxNonemptyPolygon(poly2Points))
            }
          }
        }
      }
    }

    if (!boundingBoxesIntersect) {
      Left(false)
    }
    else {
      // Dispatch on the degeneracy of each operand:
      // polygon/polygon runs the linear-time algorithm, the other combinations
      // reduce to the simpler line/point handlers above.
      (poly1, poly2) match {
        case (po1:Polygon, po2:Polygon) => {
          val bothFull = poly1Full && poly2Full // NOTE(review): appears unused in this block — confirm.
          // Start both traversals from the leftmost (then uppermost) vertex.
          val (originIndex1, originIndex2) = {
            val originIndex1 = poly1Points.zip(0 until poly1Points.length).foldLeft(po1.p1, 0)(chooseLeftmostUpperPoint)._2
            val originIndex2 = poly2Points.zip(0 until poly2Points.length).foldLeft(po2.p1, 0)(chooseLeftmostUpperPoint)._2
            (originIndex1, originIndex2)
          }
          val collisionSegmentsFinder = new CollisionSegmentsFinder(poly1Points, poly2Points, originIndex1, originIndex2)
          val collisionSegmentsO = collisionSegmentsFinder.getCollisionSegments
          val intersectionConstructionResult = collisionSegmentsO match {
            case None => {
              //No need to check for one polygon inside the other,
              //since none means that there is no polygon intersection at all.
              Empty
            }
            case Some(collisionSegments) => {
              val intersectionFinder = new IntersectionFromCollisionSegments(collisionSegments, poly1Points, poly2Points)
              val result = intersectionFinder.getIntersectionFromCollisionSegments
              result
            }
          }
          Right(intersectionConstructionResult)
        }
        case (line@Line(_, _), poly@Polygon(_, _, _, _)) => {
          handleLinePoly(line, poly)
        }
        case (poly@Polygon(_, _, _, _), line@Line(_, _)) => {
          handleLinePoly(line, poly)
        }
        case (point:Point, poly:Polygon) => Right(handlePointPolygon(point, poly))
        case (poly:Polygon, point:Point) => Right(handlePointPolygon(point, poly))
        case (Line(p11, p12), Line(p21, p22)) => {
          Right(handleLineLine(p11, p12, p21, p22))
        }
        case (line:Line, point:Point) => Right(handlePointLine(point, line))
        case (point:Point, line:Line) => Right(handlePointLine(point, line))
        case (Point(a), Point(b)) => {
          Right(if (a == b) Point(a) else Empty)
        }
        case (Empty, _) | (_, Empty) => {
          Right(Empty)
        }
      }
    }
  }
}
| Poxelcoll/Poxelcoll | Scala/src/main/scala/src/poxelcoll/geometry/convexccwpolygon/PolygonIntersection.scala | Scala | gpl-3.0 | 9,938 |
package org.openjdk.jmh.samples
import java.util.concurrent.TimeUnit
import org.openjdk.jmh.annotations.{Benchmark, BenchmarkMode, CompilerControl, Fork, Measurement, Mode, OutputTimeUnit, Param, Scope, Setup, State, Warmup}
import org.openjdk.jmh.infra.Blackhole
object JMHSample_34_SafeLooping {
  /*
   * JMHSample_11_Loops warns about the dangers of using loops in @Benchmark methods.
   * Sometimes, however, one needs to traverse through several elements in a dataset.
   * This is hard to do without loops, and therefore we need to devise a scheme for
   * safe looping.
   */
  /*
   * Suppose we want to measure how much it takes to execute work() with different
   * arguments. This mimics a frequent use case when multiple instances with the same
   * implementation, but different data, are measured. The companion class below
   * contrasts two broken measurement loops with two safe ones.
   */
}
@State(Scope.Thread)
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(3)
@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.NANOSECONDS)
class JMHSample_34_SafeLooping {

  // Number of elements traversed per benchmark invocation.
  @Param(Array("1", "10", "100", "1000"))
  var size: Int = _

  // Input data, filled in setup().
  var xs: Array[Int] = _

  val BASE = 42

  // The unit of work whose per-element cost we want to measure.
  def work(x: Int): Int = BASE + x

  // Non-inlinable sink: because the JIT cannot see through this call, the
  // argument must actually be computed — an alternative to Blackhole.consume.
  @CompilerControl(CompilerControl.Mode.DONT_INLINE)
  def sink(v: Int): Unit = {
  }

  @Setup
  def setup(): Unit = {
    xs = Array.ofDim[Int](size)
    for (c <- 0 until size) {
      xs(c) = c
    }
  }

  // WRONG (intentionally, for demonstration): acc is overwritten on every
  // iteration, so only the last work() call's result is observable and the
  // others may be treated as dead code.
  @Benchmark
  def measureWrong_1(): Int = {
    var acc = 0
    for (x <- xs) {
      acc = work(x)
    }
    acc
  }

  // WRONG (intentionally): the accumulating loop is still open to loop
  // optimizations that distort the apparent per-element cost.
  @Benchmark
  def measureWrong_2(): Int = {
    var acc = 0
    for (x <- xs) {
      acc += work(x)
    }
    acc
  }

  // RIGHT: consume every result through the JMH Blackhole.
  @Benchmark
  def measureRight_1(bh: Blackhole): Unit = {
    for (x <- xs) {
      bh.consume(work(x))
    }
  }

  // RIGHT: consume every result through the non-inlinable sink above.
  @Benchmark
  def measureRight_2(): Unit = {
    for (x <- xs) {
      sink(work(x))
    }
  }
}
| ktoso/sbt-jmh | plugin/src/sbt-test/sbt-jmh/run/src/main/scala/org/openjdk/jmh/samples/JMHSample_34_SafeLooping.scala | Scala | apache-2.0 | 1,906 |
package io.scarman.spotify.request
import io.scarman.spotify.http.{Authorization, HttpRequest}
import io.scarman.spotify.response.{Category => CategoryResp}
import sttp.client._
import sttp.model.Uri
/** Request for Spotify's "Get a Category" endpoint
  * (GET /browse/categories/{category_id}).
  *
  * @param categoryId the Spotify category id to look up
  * @param country    optional country filter — presumably an ISO 3166-1
  *                   alpha-2 code per the Spotify API; confirm against docs
  * @param locale     optional locale for the returned category name
  */
case class Category(categoryId: String, country: Option[String] = None, locale: Option[String] = None)(
    implicit auth: Authorization,
    backend: Backend
) extends HttpRequest[CategoryResp] {
  // Built lazily; sttp's uri interpolator drops query parameters whose
  // Option value is None.
  lazy protected val reqUri: Uri =
    uri"$base/browse/categories/$categoryId?country=$country&locale=$locale"
}
| hntd187/spotify | core/shared/src/main/scala/io/scarman/spotify/request/Category.scala | Scala | apache-2.0 | 514 |
import sbt._
/** Build definition (sbt 0.7.x style) for the sbt-eclipse-utils plugin. */
class SbtEclipseUtilsPlugin(info: ProjectInfo) extends PluginProject(info) {

  // Publish Maven-style artifacts.
  override def managedStyle = ManagedStyle.Maven

  // Publish into the sibling GitHub Pages checkout: SNAPSHOT versions go to
  // maven/snapshots/, everything else to maven/releases/.
  lazy val publishTo = {
    val suffix = if (version.toString.trim.endsWith("SNAPSHOT")) {
      "snapshots/"
    } else {
      "releases/"
    }
    val path = "../ashewring.github.com/maven/" + suffix
    Resolver.file("GitHub Pages", new java.io.File(path))
  }

  // Repository hosting the sbt-idea processor dependency below.
  val sbtIdeaRepo = "sbt-idea-repo" at "http://mpeltonen.github.com/maven/"
  val sbtIdea = "com.github.mpeltonen" % "sbt-idea-processor_2.7.7" % "0.4.0"
}
| ashewring/sbt-eclipse-utils | project/build/SbtEclipseUtilsPlugin.scala | Scala | bsd-3-clause | 544 |
package dummy
/**
 * This is another basic comment
 */
// NOTE: the /*s*/ and /*e*/ tokens are position markers consumed by the
// presentation-compiler tests; they must be preserved exactly as written.
class /*s*/pC/*e*/
| Kwestor/scala-ide | org.scala-ide.sdt.core.tests/test-workspace/pc_doc/src/packaged.scala | Scala | bsd-3-clause | 75 |
/*
* DocumentHandler.scala
* (Mellite)
*
* Copyright (c) 2012-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.mellite
import java.net.URI
import de.sciss.lucre.Txn
import de.sciss.mellite.impl.DocumentHandlerImpl
import de.sciss.model.Model
import de.sciss.proc.Universe
object DocumentHandler {
  // An open "document" is a universe with an unknown transaction type.
  type Document = Universe[_] // Universe[_ <: Txn[_]]

  /** The global singleton handler. */
  lazy val instance: DocumentHandler =
    DocumentHandlerImpl()

  /** Events dispatched when a document is opened or closed. */
  sealed trait Update
  final case class Opened[T <: Txn[T]](u: Universe[T]) extends Update
  final case class Closed[T <: Txn[T]](u: Universe[T]) extends Update
}
/** Registry of currently open documents.
  *
  * Note: the model dispatches not on the EDT. Listeners
  * requiring to execute code on the EDT should use a
  * wrapper like `defer` (LucreSwing).
  */
trait DocumentHandler extends Model[DocumentHandler.Update] {
  import DocumentHandler.Document

  /** Registers a newly opened universe within the given transaction. */
  def addDocument[T <: Txn[T]](universe: Universe[T])(implicit tx: T): Unit

  /** Iterates over all currently open documents. */
  def allDocuments: Iterator[Document]

  /** Looks up an open document by its folder URI. */
  def getDocument(folder: URI): Option[Document]

  /** True when no document is open. */
  def isEmpty: Boolean
}
} | Sciss/Mellite | core/src/main/scala/de/sciss/mellite/DocumentHandler.scala | Scala | agpl-3.0 | 1,229 |
/*
* Ported from https://github.com/junit-team/junit
*/
package org.junit
object ComparisonFailure {

  /** Maximum number of shared context characters kept on each side of a diff. */
  private final val MAX_CONTEXT_LENGTH = 20

  /** Compacts an expected/actual string pair so that only the differing region,
    * plus up to [[MAX_CONTEXT_LENGTH]] characters of shared context on each
    * side, appears in the failure message.
    */
  private class ComparisonCompactor(private val expected: String,
      private val actual: String) {

    private val ELLIPSIS: String = "..."
    private val DIFF_END: String = "]"
    private val DIFF_START: String = "["

    /** Formats the failure message with compacted values; falls back to the
      * plain format when either string is null or they are equal.
      */
    def compact(message: String): String = {
      if (expected == null || actual == null || expected.equals(actual)) {
        Assert.format(message, expected, actual)
      } else {
        val extractor = new DiffExtractor()
        val compactedPrefix = extractor.compactPrefix()
        val compactedSuffix = extractor.compactSuffix()
        Assert.format(message,
            compactedPrefix + extractor.expectedDiff() + compactedSuffix,
            compactedPrefix + extractor.actualDiff() + compactedSuffix)
      }
    }

    /** Longest common prefix of `expected` and `actual`. */
    private[junit] def sharedPrefix(): String = {
      val end: Int = Math.min(expected.length, actual.length)
      (0 until end).find(i => expected.charAt(i) != actual.charAt(i))
        .fold(expected.substring(0, end))(expected.substring(0, _))
    }

    /** Longest common suffix of the parts of both strings that follow `prefix`.
      *
      * Bug fix: the previous version emulated Java's `break` by shrinking the
      * loop bound, but still executed `suffixLength += 1` on the mismatching
      * iteration. The computed suffix therefore included one non-matching
      * character (e.g. "abcd" vs "xbcd" yielded suffix "abcd" instead of
      * "bcd"), which made `extractDiff` produce empty, identical diffs such as
      * expected:<[]abcd> but was:<[]abcd>. The loop now stops exactly at the
      * first mismatching character, matching JUnit's reference implementation.
      */
    private def sharedSuffix(prefix: String): String = {
      val maxSuffixLength = Math.min(expected.length() - prefix.length(),
          actual.length() - prefix.length()) - 1
      var suffixLength = 0
      while (suffixLength <= maxSuffixLength &&
          expected.charAt(expected.length() - 1 - suffixLength) ==
          actual.charAt(actual.length() - 1 - suffixLength)) {
        suffixLength += 1
      }
      expected.substring(expected.length() - suffixLength)
    }

    private class DiffExtractor {
      private val _sharedPrefix: String = sharedPrefix()
      private val _sharedSuffix: String = sharedSuffix(_sharedPrefix)

      def expectedDiff(): String = extractDiff(expected)

      def actualDiff(): String = extractDiff(actual)

      /** Shared prefix, truncated on the left with an ellipsis when too long. */
      def compactPrefix(): String = {
        if (_sharedPrefix.length() <= MAX_CONTEXT_LENGTH)
          _sharedPrefix
        else
          ELLIPSIS + _sharedPrefix.substring(_sharedPrefix.length() - MAX_CONTEXT_LENGTH)
      }

      /** Shared suffix, truncated on the right with an ellipsis when too long. */
      def compactSuffix(): String = {
        if (_sharedSuffix.length() <= MAX_CONTEXT_LENGTH)
          _sharedSuffix
        else
          _sharedSuffix.substring(0, MAX_CONTEXT_LENGTH) + ELLIPSIS
      }

      /** The part of `source` between the shared prefix and suffix, bracketed. */
      private def extractDiff(source: String): String = {
        val sub = source.substring(_sharedPrefix.length(),
            source.length() - _sharedSuffix.length())
        DIFF_START + sub + DIFF_END
      }
    }
  }
}
/** Thrown when a string equality assertion fails; the message shows a
  * compacted diff of the expected and actual values.
  */
class ComparisonFailure(message: String, fExpected: String, fActual: String)
    extends AssertionError(message) {
  import ComparisonFailure._

  /** Returns the base message augmented with the compacted expected/actual diff. */
  override def getMessage(): String = {
    val cc = new ComparisonCompactor(fExpected, fActual)
    cc.compact(super.getMessage)
  }

  /** The actual string value. */
  def getActual(): String = fActual

  /** The expected string value. */
  def getExpected(): String = fExpected
}
| lrytz/scala-js | junit-runtime/src/main/scala/org/junit/ComparisonFailure.scala | Scala | bsd-3-clause | 3,011 |
// Negative compilation test: the call below must NOT type-check; the
// `// error` marker records the expected compiler error. Note the method's
// type parameter T deliberately shadows the trait's.
trait Comparinator[T] {
  def sort[T](x: Comparinator[_ >: T]) = ()
  sort((a: Int) => true) // error
}
// Negative compilation test: a function literal is not a Comparinator2, so
// this call must NOT type-check (expected error recorded by the marker).
trait Comparinator2[T >: U, U] {
  def sort[TT](x: Comparinator2[_ >: TT, U]) = ()
  sort((a: Int) => true) // error
}
| som-snytt/dotty | tests/neg-custom-args/i3627.scala | Scala | apache-2.0 | 224 |
package io.hydrosphere.mist.master.data
import java.nio.file.Paths
import com.typesafe.config.{Config, ConfigValueFactory}
import io.hydrosphere.mist.master.models.NamedConfig
import org.apache.commons.io.FileUtils
import org.scalatest._
/** Verifies that FsStorage persists, lists and deletes named entries on disk. */
class FStorageSpec extends FunSpec with Matchers with BeforeAndAfter {

  // Minimal named entry used as the stored payload in these tests.
  case class TestEntry(
    name: String,
    value: Int
  ) extends NamedConfig

  // Converts TestEntry to/from Typesafe Config. Only "value" is serialised
  // here; "name" is read back from the config on load.
  val testEntryConfigRepr: ConfigRepr[TestEntry] = new ConfigRepr[TestEntry] {
    import scala.collection.JavaConverters._

    override def fromConfig(config: Config): TestEntry =
      TestEntry(config.getString("name"), config.getInt("value"))

    override def toConfig(a: TestEntry): Config = {
      val fields = Map("value" -> ConfigValueFactory.fromAnyRef(a.value))
      ConfigValueFactory.fromMap(fields.asJava).toConfig
    }
  }

  val path = "./target/file_store_test"

  // Start every test from an empty storage directory.
  before {
    val dir = Paths.get(path).toFile
    if (dir.exists()) FileUtils.deleteDirectory(dir)
  }

  it("should store files") {
    val storage = FsStorage.create(path, testEntryConfigRepr)

    storage.write("one", TestEntry("one", 1))
    storage.write("two", TestEntry("two", 2))
    storage.entries should contain allOf(
      TestEntry("one", 1),
      TestEntry("two", 2)
    )

    storage.delete("one")
    storage.entries should contain allElementsOf(Seq(TestEntry("two", 2)))
  }
}
| Hydrospheredata/mist | mist/master/src/test/scala/io/hydrosphere/mist/master/data/FStorageSpec.scala | Scala | apache-2.0 | 1,353 |
package Gvote
/** Constants and factory methods for the supported ballot (scrutin) modes. */
object ScrutinCST {

  // Ballot-mode identifiers.
  val uninominal = "UNINOMINAL"
  val plurinominal = "PLURINOMINAL"
  val semiProportionnel = "SEMI_PROPORTIONNEL"
  val proportionnel = "PROPORTIONNEL"
  val condorcet = "CONDORCET"

  // Visibility identifiers.
  val public = "public"
  val prive = "prive"

  /** Builds the settings for a first-past-the-post (uninominal) ballot. */
  def paramUninominal(nbTour: Int, nbGagnantParTour: List[Int], visibilite: String): ModeScrutin =
    new ModeScrutin(uninominal, nbTour, nbGagnantParTour, visibilite)

  /**
   * Builds the settings for a semi-proportional ballot.
   * Single round; the per-round winner list carries the number of seats to
   * fill, which is why this dedicated factory exists.
   */
  def paramSemiProportionnel(nbGagnant: Int, visibilite: String): ModeScrutin =
    new ModeScrutin(semiProportionnel, 1, List(nbGagnant), visibilite)

  /** Builds the settings for a Condorcet ballot (single round). */
  def paramCondorcet(nbGagnant: Int, visibilite: String): ModeScrutin =
    new ModeScrutin(condorcet, 1, List(nbGagnant), visibilite)

  /** Builds the settings for a proportional ballot (single round). */
  def paramProportionnel(nbGagnant: Int, visibilite: String): ModeScrutin =
    new ModeScrutin(proportionnel, 1, List(nbGagnant), visibilite)

  /**
   * Builds the settings for a plurinominal ballot.
   *
   * Bug fix: the previous implementation ignored `nbTour` and hard-coded 2
   * rounds. `visibilite` now defaults to `prive`, matching the former
   * behaviour for existing callers.
   */
  def paramPlurinominale(nbTour: Int, listgagnantTour: List[Int], visibilite: String = prive): ModeScrutin =
    new ModeScrutin(plurinominal, nbTour, listgagnantTour, visibilite)
}
| DoumbiaAmadou/FrameworkVote | src/Gvote/ScrutinCST.scala | Scala | mit | 1,280 |
package org.jetbrains.plugins.scala
package lang
package resolve
import com.intellij.lang.java.JavaLanguage
import com.intellij.psi._
import com.intellij.psi.impl.source.resolve.JavaResolveUtil
import com.intellij.psi.scope.{NameHint, PsiScopeProcessor}
import com.intellij.psi.search.GlobalSearchScope
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScBindingPattern
import org.jetbrains.plugins.scala.lang.psi.api.base.types.{ScSelfTypeElement, ScTypeElement, ScTypeVariableTypeElement}
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScAccessModifier, ScFieldId, ScReferenceElement}
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScSuperReference, ScThisReference}
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScClassParameter, ScParameter, ScTypeParam}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.packaging.ScPackaging
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.fake.FakePsiMethod
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.{ScSyntheticClass, ScSyntheticValue}
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.typedef.TypeDefinitionMembers
import org.jetbrains.plugins.scala.lang.psi.impl.{ScPackageImpl, ScalaPsiManager}
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.{Any, FunctionType}
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue._
import org.jetbrains.plugins.scala.lang.psi.types.result.{Success, TypingContext}
import org.jetbrains.plugins.scala.lang.psi.{ScalaPsiElement, ScalaPsiUtil}
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
import org.jetbrains.plugins.scala.lang.resolve.ResolveTargets._
import org.jetbrains.plugins.scala.lang.resolve.processor.{BaseProcessor, ResolveProcessor, ResolverEnv}
import _root_.scala.collection.Set
/**
* @author ven
*/
object ResolveUtils {
  /**
   * Checks whether `element` belongs to one of the requested resolve-target
   * `kinds`. A `null` kind set matches every element.
   */
  def kindMatches(element: PsiElement, kinds: Set[ResolveTargets.Value]): Boolean = kinds == null ||
          (element match {
        case _: PsiPackage | _: ScPackaging => kinds contains PACKAGE
        case obj: ScObject if obj.isPackageObject => kinds contains PACKAGE
        case obj: ScObject => (kinds contains OBJECT) || (kinds contains METHOD)
        case _: ScTypeVariableTypeElement => kinds contains CLASS
        case _: ScTypeParam => kinds contains CLASS
        case _: ScTypeAlias => kinds contains CLASS
        case _: ScTypeDefinition => kinds contains CLASS
        case _: ScSyntheticClass => kinds contains CLASS
        case c: PsiClass =>
          if (kinds contains CLASS) true
          else {
            // A Java class counts as an OBJECT only when it and all of its
            // enclosing classes are static.
            def isStaticCorrect(clazz: PsiClass): Boolean = {
              val cclazz = clazz.getContainingClass
              cclazz == null || (clazz.hasModifierProperty(PsiModifier.STATIC) && isStaticCorrect(cclazz))
            }
            (kinds contains OBJECT) && isStaticCorrect(c)
          }
        case patt: ScBindingPattern =>
          // A binding pattern is VAR or VAL depending on its enclosing statement.
          val parent = ScalaPsiUtil.getParentOfType(patt, classOf[ScVariable], classOf[ScValue])
          parent match {
            case x: ScVariable => kinds contains VAR
            case _ => kinds contains VAL
          }
        case patt: ScFieldId =>
          if (patt.getParent /*list of ids*/ .getParent.isInstanceOf[ScVariable])
            kinds contains VAR else kinds contains VAL
        case classParam: ScClassParameter =>
          if (classParam.isVar) kinds.contains(VAR) else kinds.contains(VAL)
        case param: ScParameter => kinds contains VAL
        case _: ScSelfTypeElement => kinds contains VAL
        case _: PsiMethod => kinds contains METHOD
        case _: ScFun => kinds contains METHOD
        case _: ScSyntheticValue => kinds contains VAL
        case f: PsiField => (kinds contains VAR) || (f.hasModifierPropertyScala(PsiModifier.FINAL) && kinds.contains(VAL))
        case _: PsiParameter => kinds contains VAL //to enable named Parameters resolve in Play 2.0 routing file for java methods
        case _ => false
      })
  /**
   * Builds a Scala `FunctionType` view of a Java method: the return type and
   * all parameter types are converted to `ScType` and run through the
   * substitutor `s`.
   */
  def methodType(m : PsiMethod, s : ScSubstitutor, scope: GlobalSearchScope) =
    FunctionType(s.subst(m.getReturnType.toScType(m.getProject, scope)),
      m.getParameterList.getParameters.map({
        p => val pt = p.getType
        //scala hack: Objects in java are modelled as Any in scala
        if (pt.equalsToText("java.lang.Object")) Any
        else s.subst(pt.toScType(m.getProject, scope))
      }).toSeq)(m.getProject, scope)
  /**
   * Builds a `ScMethodType` for a (possibly fake) Java method.
   *
   * @param returnType when defined, used as-is instead of converting the
   *                   method's declared return type
   */
  def javaMethodType(m: PsiMethod, s: ScSubstitutor, scope: GlobalSearchScope, returnType: Option[ScType] = None): ScMethodType = {
    val retType: ScType = (m, returnType) match {
      case (f: FakePsiMethod, None) => s.subst(f.retType)
      case (_, None) => s.subst(m.getReturnType.toScType(m.getProject, scope))
      case (_, Some(x)) => x
    }
    new ScMethodType(retType,
      m match {
        case f: FakePsiMethod => f.params.toSeq
        case _ =>
          m.getParameterList.getParameters.map { param =>
            val scType = s.subst(param.exactParamType())
            new Parameter("", None, scType, scType, false, param.isVarArgs, false, param.index, Some(param))
          }
      }, false)(m.getProject, scope)
  }
def javaPolymorphicType(m: PsiMethod, s: ScSubstitutor, scope: GlobalSearchScope = null, returnType: Option[ScType] = None): NonValueType = {
if (m.getTypeParameters.isEmpty) javaMethodType(m, s, scope, returnType)
else {
ScTypePolymorphicType(javaMethodType(m, s, scope, returnType), m.getTypeParameters.map(new TypeParameter(_)))(m.typeSystem)
}
}
  /**
   * Checks whether `memb` is accessible from `place`, honouring Scala access
   * modifiers (`private`, `private[x]`, `private[this]`, `protected`,
   * `protected[x]`, `protected[this]`) and falling back to Java modifier
   * semantics for non-Scala members.
   *
   * @param memb          the member whose accessibility is checked
   * @param _place        the element from which the member is referenced
   * @param forCompletion when true, `place` is remapped to the element at the
   *                      same offset in the original (non-synthetic) file
   */
  def isAccessible(memb: PsiMember, _place: PsiElement, forCompletion: Boolean = false): Boolean = {
    var place = _place
    // Binding patterns and fake methods delegate to their underlying member.
    memb match {
      case b: ScBindingPattern =>
        b.nameContext match {
          case memb: ScMember => return isAccessible(memb, place)
          case _ => return true
        }
      //todo: ugly workaround, probably FakePsiMethod is better to remove?
      case f: FakePsiMethod => f.navElement match {
        case memb: PsiMember => return isAccessible(memb, place)
        case _ =>
      }
      case _ =>
    }
    // References made from Java code follow the plain Java access rules.
    if (place.getLanguage == JavaLanguage.INSTANCE) {
      return JavaResolveUtil.isAccessible(memb, memb.containingClass, memb.getModifierList, place, null, null)
    }
    import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil.getPlaceTd
    //this is to make place and member on same level (resolve from library source)
    var member: PsiMember = memb
    memb.getContainingFile match {
      case file: ScalaFile if file.isCompiled =>
        place.getContainingFile match {
          case file: ScalaFile if file.isCompiled =>
          case _ if !member.isInstanceOf[ScMember] =>
            member = member.getOriginalElement.asInstanceOf[PsiMember]
          case _ => //todo: is it neccessary? added to avoid performance and other problems
        }
      case _ =>
    }
    // For completion, map place back into the original file at the same offset.
    if (forCompletion && place != null) {
      val originalFile: PsiFile = place.getContainingFile.getOriginalFile
      if (originalFile == member.getContainingFile) {
        val newPlace = originalFile.findElementAt(place.getTextRange.getStartOffset)
        place = newPlace
      }
    }
    member match {
      case f: ScFunction if f.isBridge => return false
      case _ =>
    }
    // True when some template enclosing place (or its companion) may access
    // protected members of td; constructors get special treatment.
    def checkProtected(td: PsiClass, withCompanion: Boolean): Boolean = {
      val isConstr = member match {
        case m: PsiMethod => m.isConstructor
        case _ => false
      }
      var placeTd: ScTemplateDefinition = getPlaceTd(place, isConstr)
      if (isConstr) {
        if (placeTd != null && !placeTd.isInstanceOf[ScTypeDefinition] && placeTd.extendsBlock.templateBody == None) {
          placeTd = getPlaceTd(placeTd)
        } else if (placeTd != null) {
          if (td != null && isInheritorOrSelfOrSame(placeTd, td)) return true
        }
        while (placeTd != null) {
          if (td == placeTd) return true
          val companion: ScTemplateDefinition = ScalaPsiUtil.getCompanionModule(placeTd).getOrElse(null: ScTemplateDefinition)
          if (companion != null && companion == td) return true
          placeTd = getPlaceTd(placeTd)
        }
        return false
      }
      while (placeTd != null) {
        if (td != null && isInheritorOrSelfOrSame(placeTd, td)) return true
        val companion: ScTemplateDefinition = ScalaPsiUtil.
          getCompanionModule(placeTd).getOrElse(null: ScTemplateDefinition)
        if (withCompanion && companion != null && td != null &&
          ScalaPsiUtil.cachedDeepIsInheritor(companion, td)) return true
        placeTd = getPlaceTd(placeTd)
      }
      false
    }
    // Scala members: decide from the explicit access modifier; none means public.
    member match {
      case scMember: ScMember => scMember.getModifierList.accessModifier match {
        case None => true
        case Some(am: ScAccessModifier) =>
          if (am.isPrivate) {
            if (am.access == ScAccessModifier.Type.THIS_PRIVATE) {
              /*
              ScalaRefernce.pdf:
              A member M marked with this modifier can be accessed only from
              within the object in which it is defined.
              */
              place match {
                case ref: ScReferenceElement =>
                  ref.qualifier match {
                    case None =>
                      val enclosing = PsiTreeUtil.getContextOfType(scMember, true, classOf[ScTemplateDefinition])
                      if (enclosing == null) return true
                      return PsiTreeUtil.isContextAncestor(enclosing, place, false)
                    case Some(t: ScThisReference) =>
                      val enclosing = PsiTreeUtil.getContextOfType(scMember, true, classOf[ScTemplateDefinition])
                      if (enclosing == null) return true
                      t.refTemplate match {
                        case Some(t) => return t == enclosing
                        case _ => return PsiTreeUtil.isContextAncestor(enclosing, place, false)
                      }
                    case Some(ref: ScReferenceElement) =>
                      val enclosing = PsiTreeUtil.getContextOfType(scMember, true, classOf[ScTemplateDefinition])
                      if (enclosing == null) return false
                      val resolve = ref.resolve()
                      if (enclosing.extendsBlock.selfTypeElement == Some(resolve)) return true
                      else return false
                    case _ => return false
                  }
                case _ =>
                  val enclosing = PsiTreeUtil.getContextOfType(scMember, true, classOf[ScTemplateDefinition])
                  if (enclosing == null) return true
                  return PsiTreeUtil.isContextAncestor(enclosing, place, false)
              }
            }
            // private[x]: accessible inside the referenced template/package.
            val ref = am.getReference
            if (ref != null) {
              val bind = ref.resolve
              if (bind == null) return true
              def processPackage(packageName: String): Boolean = {
                def context(place: PsiElement): PsiElement =
                  ScalaPsiUtil.getContextOfType(place, true, classOf[ScPackaging],
                    classOf[ScObject], classOf[ScalaFile])
                var placeEnclosing: PsiElement = context(place)
                while (placeEnclosing != null && placeEnclosing.isInstanceOf[ScObject] &&
                  !placeEnclosing.asInstanceOf[ScObject].isPackageObject)
                  placeEnclosing = context(placeEnclosing)
                if (placeEnclosing == null) return false //not Scala
                val placePackageName = placeEnclosing match {
                  case file: ScalaFile => ""
                  case obj: ScObject => obj.qualifiedName
                  case pack: ScPackaging => pack.fqn
                }
                packageContains(packageName, placePackageName)
              }
              bind match {
                case td: ScTemplateDefinition =>
                  PsiTreeUtil.isContextAncestor(td, place, false) ||
                    PsiTreeUtil.isContextAncestor(ScalaPsiUtil.getCompanionModule(td).getOrElse(null: PsiElement),
                      place, false) || (td.isInstanceOf[ScObject] &&
                    td.asInstanceOf[ScObject].isPackageObject && processPackage(td.qualifiedName))
                case pack: PsiPackage =>
                  val packageName = pack.getQualifiedName
                  processPackage(packageName)
                case _ => true
              }
            }
            else {
              /*
              ScalaRefernce.pdf:
              Such members can be accessed only from within the directly enclosing
              template and its companion module or companion class
              */
              val enclosing = ScalaPsiUtil.getContextOfType(scMember, true,
                classOf[ScalaFile], classOf[ScPackaging], classOf[ScTemplateDefinition])
              enclosing match {
                case td: ScTemplateDefinition =>
                  PsiTreeUtil.isContextAncestor(td, place, false) || PsiTreeUtil.isContextAncestor(ScalaPsiUtil.
                    getCompanionModule(td).getOrElse(null: PsiElement), place, false)
                case file: ScalaFile if file.isScriptFile() =>
                  PsiTreeUtil.isContextAncestor(file, place, false)
                case _ =>
                  val packageName = enclosing match {
                    case file: ScalaFile => ""
                    case packaging: ScPackaging => packaging.getPackageName
                    case _ => ""
                  }
                  val placeEnclosing: PsiElement = ScalaPsiUtil.
                    getContextOfType(place, true, classOf[ScPackaging], classOf[ScalaFile])
                  if (placeEnclosing == null) return false //not Scala
                  val placePackageName = placeEnclosing match {
                    case file: ScalaFile => ""
                    case pack: ScPackaging => pack.getPackageName
                  }
                  packageContains(packageName, placePackageName)
              }
            }
          } else if (am.isProtected) { //todo: it's wrong if reference after not appropriate class type
            val withCompanion = am.access != ScAccessModifier.Type.THIS_PROTECTED
            val ref = am.getReference
            if (ref != null) {
              val bind = ref.resolve
              if (bind == null) return true
              def processPackage(packageName: String): Option[Boolean] = {
                def context(place: PsiElement): PsiElement =
                  ScalaPsiUtil.getContextOfType(place, true, classOf[ScPackaging],
                    classOf[ScObject], classOf[ScalaFile])
                var placeEnclosing: PsiElement = context(place)
                while (placeEnclosing != null && placeEnclosing.isInstanceOf[ScObject] &&
                  !placeEnclosing.asInstanceOf[ScObject].isPackageObject)
                  placeEnclosing = context(placeEnclosing)
                if (placeEnclosing == null) return Some(false) //not Scala
                val placePackageName = placeEnclosing match {
                  case file: ScalaFile => ""
                  case obj: ScObject => obj.qualifiedName
                  case pack: ScPackaging => pack.fqn
                }
                if (packageContains(packageName, placePackageName)) return Some(true)
                None
              }
              bind match {
                case td: ScTemplateDefinition =>
                  if (PsiTreeUtil.isContextAncestor(td, place, false) || PsiTreeUtil.isContextAncestor(ScalaPsiUtil.
                    getCompanionModule(td).getOrElse(null: PsiElement), place, false)) return true
                  td match {
                    case o: ScObject if o.isPackageObject =>
                      processPackage(o.qualifiedName) match {
                        case Some(x) => return x
                        case None =>
                      }
                    case _ =>
                  }
                case pack: PsiPackage => //like private (nothing related to real life)
                  val packageName = pack.getQualifiedName
                  processPackage(packageName) match {
                    case Some(x) => return x
                    case None =>
                  }
                case _ => return true
              }
            }
            val enclosing = ScalaPsiUtil.getContextOfType(scMember, true,
              classOf[ScalaFile], classOf[ScTemplateDefinition], classOf[ScPackaging])
            assert(enclosing != null, s"Enclosing is null in file ${scMember.getContainingFile.getName}:\n${scMember.getContainingFile.getText}")
            if (am.isThis) {
              place match {
                case ref: ScReferenceElement =>
                  ref.qualifier match {
                    case None =>
                    case Some(t: ScThisReference) =>
                    case Some(s: ScSuperReference) =>
                    case Some(ref: ScReferenceElement) =>
                      val enclosing = PsiTreeUtil.getContextOfType(scMember, true, classOf[ScTemplateDefinition])
                      if (enclosing == null) return false
                      val resolve = ref.resolve()
                      if (enclosing.extendsBlock.selfTypeElement != Some(resolve)) return false
                    case _ => return false
                  }
                case _ =>
              }
            }
            enclosing match {
              case td: ScTypeDefinition =>
                if (PsiTreeUtil.isContextAncestor(td, place, false) ||
                  (withCompanion && PsiTreeUtil.isContextAncestor(ScalaPsiUtil.getCompanionModule(td).
                    getOrElse(null: PsiElement), place, false))) return true
                checkProtected(td, withCompanion)
              case td: ScTemplateDefinition =>
                //it'd anonymous class, has access only inside
                PsiTreeUtil.isContextAncestor(td, place, false)
              case _ =>
                //same as for private
                val packageName = enclosing match {
                  case file: ScalaFile => ""
                  case packaging: ScPackaging => packaging.fullPackageName
                }
                val placeEnclosing: PsiElement = ScalaPsiUtil.
                  getContextOfType(place, true, classOf[ScPackaging], classOf[ScalaFile])
                if (placeEnclosing == null) return false //not Scala
                val placePackageName = placeEnclosing match {
                  case file: ScalaFile => ""
                  case pack: ScPackaging => pack.fullPackageName
                }
                packageContains(packageName, placePackageName)
            }
          } else true
        }
      // Non-Scala members: plain Java modifier semantics, with Scala-aware
      // protected and package checks.
      case _ =>
        if (member.hasModifierProperty("public")) true
        else if (member.hasModifierProperty("private")) false
        else if (member.hasModifierProperty("protected") &&
          checkProtected(member.containingClass, withCompanion = true)) true
        else {
          val packageName = member.getContainingFile match {
            case s: ScalaFile => ""
            case f: PsiClassOwner => f.getPackageName
            case _ => return false
          }
          val placeEnclosing: PsiElement = ScalaPsiUtil.
            getContextOfType(place, true, classOf[ScPackaging], classOf[ScalaFile])
          if (placeEnclosing == null) return false
          val placePackageName = placeEnclosing match {
            case file: ScalaFile => ""
            case pack: ScPackaging => pack.fullPackageName
          }
          packageContains(packageName, placePackageName)
        }
    }
  }
def processSuperReference(superRef: ScSuperReference, processor : BaseProcessor, place : ScalaPsiElement) {
if (superRef.isHardCoded) {
superRef.drvTemplate match {
case Some(c) => processor.processType(ScThisType(c), place)
case None =>
}
} else {
superRef.staticSuper match {
case Some(t) => processor.processType(t, place)
case None => superRef.drvTemplate match {
case Some(c) =>
TypeDefinitionMembers.processSuperDeclarations(c, processor, ResolveState.initial.put(ScSubstitutor.key, ScSubstitutor.empty), null, place)
case None =>
}
}
}
}
def getPlacePackage(place: PsiElement): String = {
val pack: ScPackaging = ScalaPsiUtil.getContextOfType(place, true, classOf[ScPackaging]) match {
case pack: ScPackaging => pack
case _ => null
}
if (pack == null) return ""
pack.fullPackageName
}
  /**
   * Returns true when `placeTd` inherits `td`, or when `placeTd`'s self type
   * (or any component of a compound self type) is `td` itself or inherits it.
   */
  private def isInheritorOrSelfOrSame(placeTd: ScTemplateDefinition, td: PsiClass): Boolean = {
    if (ScalaPsiUtil.cachedDeepIsInheritor(placeTd, td)) return true
    placeTd.selfTypeElement match {
      case Some(te: ScSelfTypeElement) => te.typeElement match {
        case Some(te: ScTypeElement) =>
          // Checks a single self-type component against td.
          def isInheritorOrSame(tp: ScType): Boolean = {
            tp.extractClass()(te.typeSystem) match {
              case Some(clazz) =>
                if (clazz == td) return true
                if (ScalaPsiUtil.cachedDeepIsInheritor(clazz, td)) return true
              case _ =>
            }
            false
          }
          te.getType(TypingContext.empty) match {
            case Success(ctp: ScCompoundType, _) =>
              for (tp <- ctp.components) {
                if (isInheritorOrSame(tp)) return true
              }
            case Success(tp: ScType, _) =>
              if (isInheritorOrSame(tp)) return true
            case _ =>
          }
        case _ =>
      }
      case _ =>
    }
    false
  }
def packageContains(packageName: String, potentialChild: String): Boolean = {
potentialChild == packageName || potentialChild.startsWith(packageName + ".")
}
  /**
   * Feeds the declarations of `pack` (implicit objects, classes, subpackages)
   * into `processor`, using Scala-aware caches where possible and falling back
   * to `PsiPackage.processDeclarations` for non-`BaseProcessor` processors.
   *
   * Returns false as soon as the processor stops the traversal.
   */
  def packageProcessDeclarations(pack: PsiPackage, processor: PsiScopeProcessor,
                                  state: ResolveState, lastParent: PsiElement, place: PsiElement): Boolean = {
    processor match {
      case b: BaseProcessor if b.isImplicitProcessor =>
        val objectsIterator = ScalaPsiManager.instance(pack.getProject).
          getPackageImplicitObjects(pack.getQualifiedName, place.getResolveScope).iterator
        while (objectsIterator.hasNext) {
          val obj = objectsIterator.next()
          if (!processor.execute(obj, state)) return false
        }
        true
      case base: BaseProcessor =>
        val nameHint = base.getHint(NameHint.KEY)
        val name = if (nameHint == null) "" else nameHint.getName(state)
        // Fast path: the processor asks for one specific name.
        if (name != null && name != "" && base.getClassKind) {
          try {
            base.setClassKind(classKind = false)
            if (base.getClassKindInner) {
              val manager = ScalaPsiManager.instance(pack.getProject)
              val qName = pack.getQualifiedName
              def calcForName(name: String): Boolean = {
                val fqn = if (qName.length() > 0) qName + "." + name else name
                val scope = base match {
                  case r: ResolveProcessor => r.getResolveScope
                  case _ => place.getResolveScope
                }
                var classes: Array[PsiClass] = manager.getCachedClasses(scope, fqn)
                if (classes.isEmpty) {
                  //todo: fast fix for the problem with classes, should be fixed in indexes
                  val improvedFqn = fqn.split('.').map { s =>
                    if (ScalaNamesUtil.isKeyword(s)) s"`$s`" else s
                  }.mkString(".")
                  if (improvedFqn != fqn) {
                    classes = manager.getCachedClasses(scope, improvedFqn)
                  }
                }
                for (clazz <- classes if clazz.containingClass == null) {
                  if (!processor.execute(clazz, state)) return false
                }
                true
              }
              if (!calcForName(name)) return false
              val scalaName = { //todo: fast fix for the problem with classes, should be fixed in indexes
                base match {
                  case r: ResolveProcessor =>
                    val stateName = state.get(ResolverEnv.nameKey)
                    if (stateName == null) r.name else stateName
                  case _ => name
                }
              }
              if (scalaName != name && !calcForName(scalaName)) return false
            }
            //process subpackages
            if (base.kinds.contains(ResolveTargets.PACKAGE)) {
              val psiPack = pack match {
                case s: ScPackageImpl => s.pack
                case _ => pack
              }
              val qName: String = psiPack.getQualifiedName
              val subpackageQName: String = if (qName.isEmpty) name else qName + "." + name
              val subPackage = ScalaPsiManager.instance(psiPack.getProject).getCachedPackage(subpackageQName).orNull
              if (subPackage != null) {
                if (!processor.execute(subPackage, state)) return false
              }
              true
            } else true
          } finally {
            base.setClassKind(classKind = true)
          }
        } else {
          // Slow path: enumerate every class and subpackage of the package.
          try {
            if (base.getClassKindInner) {
              base.setClassKind(classKind = false)
              val manager = ScalaPsiManager.instance(pack.getProject)
              val scope = base match {
                case r: ResolveProcessor => r.getResolveScope
                case _ => place.getResolveScope
              }
              val iterator = manager.getClasses(pack, scope).iterator
              while (iterator.hasNext) {
                val clazz = iterator.next()
                if (clazz.containingClass == null && !processor.execute(clazz, state)) return false
              }
            }
            if (base.kinds.contains(ResolveTargets.PACKAGE)) {
              //process subpackages
              pack match {
                case s: ScPackageImpl =>
                  s.pack.processDeclarations(processor, state, lastParent, place)
                case _ =>
                  pack.processDeclarations(processor, state, lastParent, place)
              }
            } else true
          } finally {
            base.setClassKind(classKind = true)
          }
        }
      case _ => pack.processDeclarations(processor, state, lastParent, place)
    }
  }
} | katejim/intellij-scala | src/org/jetbrains/plugins/scala/lang/resolve/ResolveUtils.scala | Scala | apache-2.0 | 26,992 |
package WorkDayLength.Cli.Arguments
import java.io.ByteArrayOutputStream
import WorkDayLength.Settings
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
/**
* Created by dyahofarov on 01/11/2016.
*/
/**
 * Parses command-line options into application [[Settings]].
 *
 * On success returns `Left(settings)`; on `--help` or a parse failure returns
 * `Right(ExitResult)` carrying the parser's captured console output and an
 * exit code (0 for help, 1 for errors).
 */
class OptsToSettings(opts: Array[String]) {

  private val parser = new scopt.OptionParser[Config]("work_day_length") {
    head ("Work day length", "0.1")
    help("help")

    private val dateFormatHint = "should be in YYYY-MM-DD format."

    opt[String]('s', "start-date").action( (x, c) => setVal(c, "app.startDate", x) ).
      validate(dateValidator).
      text(s"start date of report. It $dateFormatHint")

    opt[String]('e', "end-date").action( (x, c) => setVal(c, "app.endDate", x) ).
      validate(dateValidator).
      text(s"end date of report. It $dateFormatHint")

    opt[String]('m', "minimal-time").action( (x, c) => setVal(c, "app.grouping.max_timeout", x) ).
      text("time may be skipped between activities")

    // Accepts only YYYY-MM-DD shaped values. Bug fix: the pattern previously
    // used "\\d" inside a triple-quoted (raw) string, which matches a literal
    // backslash followed by 'd', so every real date was rejected.
    private def dateValidator(date: String): Either[String, Unit] = {
      val datePattern = """(\d\d\d\d)-(\d\d)-(\d\d)""".r
      datePattern.findFirstIn(date) match {
        case Some(_) => success
        case None => failure(s"Date $dateFormatHint")
      }
    }

    // Keep scopt from calling sys.exit; the caller decides what to do.
    override def terminate(exitState: Either[String, Unit]): Unit = ()
  }

  /** Runs the parser over `opts`, capturing everything it prints to stdout/stderr. */
  def settings: Either[Settings, ExitResult] = {
    val bos = new ByteArrayOutputStream()
    Console.withErr(bos) {
      Console.withOut(bos) {
        parser.parse(opts, ConfigFactory.empty()) match {
          case Some(optsConfig) =>
            if (opts.toList.contains("--help")) {
              Right(ExitResult(bos.toString("UTF-8"), 0))
            } else {
              // Command-line options win over application.conf defaults.
              val defaultConfig = ConfigFactory.load()
              val s = new Settings(optsConfig.withFallback(defaultConfig).resolve())
              Left(s)
            }
          case None =>
            Right(ExitResult(bos.toString("UTF-8"), 1))
        }
      }
    }
  }

  /** Returns a copy of `c` with `path` set to `value`. */
  private def setVal(c: Config, path: String, value: AnyRef): Config =
    c.withValue(path, ConfigValueFactory.fromAnyRef(value))
}
| denyago/work_day_length | src/main/scala/WorkDayLength/Cli/Arguments/OptsToSettings.scala | Scala | mit | 2,106 |
package pamflet
import unfiltered.request._
import unfiltered.response._
import unfiltered.jetty.Server
import unfiltered.filter.Plan
import java.io.OutputStream
import java.net.URI
import javax.servlet.http.HttpServletResponse
import collection.mutable
/** Serves a live, in-memory preview of the pamflet via a local Jetty server. */
object Preview {
  // Memoises generated "pamfletheight" CSS per (language, file name).
  // Fix: Jetty serves requests from multiple threads, so a plain mutable.Map
  // mutated via getOrElseUpdate was unsafe; TrieMap keeps the map consistent
  // (getOrElseUpdate is not atomic, so a value may rarely be computed twice,
  // which is harmless here).
  val heightCache: mutable.Map[(String, String), String] =
    scala.collection.concurrent.TrieMap.empty[(String, String), String]

  /**
   * Builds a Jetty [[Server]] for the given contents. `globalized` is by-name,
   * so it is re-evaluated on each request and edits show up live.
   */
  def apply(globalized: => Globalized): Server = {
    def css(lang: String) = Map.empty ++ globalized(lang).css
    def files(lang: String) = Map.empty ++ globalized(lang).files
    def defaultLanguage = globalized.defaultLanguage
    def languages = globalized.languages
    def pamfletHeight(lang: String, name: String): String =
      heightCache.getOrElseUpdate((lang, name), {
        Heights.heightCssFileContent(globalized(lang), name)
      })
    def faviconResponse(lang: String) =
      globalized(lang).favicon map { responseStreamer } getOrElse NotFound
    def cssResponse(lang: String, name: String) =
      CssContent ~> ResponseString(css(lang)(name))
    def pamfletHeightResponse(lang: String, name: String) =
      CssContent ~> ResponseString(pamfletHeight(lang, name))
    def fileResponse(lang: String, name: String) =
      responseStreamer(files(lang)(name))
    def pageResponse(lang: String, name: String): ResponseFunction[HttpServletResponse] =
      Printer(globalized(lang), globalized, None).printNamed(name).map { html =>
        Html5(html)
      }.getOrElse { NotFound }.asInstanceOf // cast unifies Html5/NotFound to the declared response type
    // Routes: language-prefixed URLs first, then default-language fallbacks.
    val plan: Plan = unfiltered.filter.Planify {
      case GET(Path(Seg(lang :: Nil))) if languages.contains(lang) =>
        globalized(lang).pages.headOption.map { page =>
          Redirect("/" + lang + "/" + Printer.webify(page))
        }.getOrElse { NotFound }
      case GET(Path(Seg(Nil))) =>
        globalized(defaultLanguage).pages.headOption.map { page =>
          Redirect("/" + Printer.webify(page))
        }.getOrElse { NotFound }
      case GET(Path(Seg(lang :: "favicon.ico" :: Nil))) if languages.contains(lang) && globalized(lang).favicon.isDefined =>
        faviconResponse(lang)
      case GET(Path(Seg("favicon.ico" :: Nil))) if globalized(defaultLanguage).favicon.isDefined =>
        faviconResponse(defaultLanguage)
      case GET(Path(Seg("css" :: name :: Nil))) if name startsWith "pamfletheight" =>
        pamfletHeightResponse(defaultLanguage, name)
      case GET(Path(Seg(lang :: "css" :: name :: Nil))) if languages.contains(lang) && css(lang).contains(name) =>
        cssResponse(lang, name)
      case GET(Path(Seg("css" :: name :: Nil))) if css(defaultLanguage).contains(name) =>
        cssResponse(defaultLanguage, name)
      case GET(Path(Seg(lang :: "files" :: name :: Nil))) if languages.contains(lang) && files(lang).contains(name) =>
        fileResponse(lang, name)
      case GET(Path(Seg("files" :: name :: Nil))) if files(defaultLanguage).contains(name) =>
        fileResponse(defaultLanguage, name)
      case GET(Path(Seg(lang :: name :: Nil))) if languages.contains(lang) =>
        pageResponse(lang, name)
      case GET(Path(Seg(name :: Nil))) =>
        pageResponse(defaultLanguage, name)
    }
    val http = unfiltered.jetty.Server.anylocal
    http.plan(plan).resources(Shared.resources)
  }

  /** Streams the bytes behind `uri` to the response in 1 KiB chunks, closing the input afterwards. */
  def responseStreamer(uri: URI) =
    new ResponseStreamer { def stream(os:OutputStream): Unit = {
      val is = uri.toURL.openStream
      try {
        val buf = new Array[Byte](1024)
        Iterator.continually(is.read(buf)).takeWhile(_ != -1)
          .foreach(os.write(buf, 0, _))
      } finally {
        is.close
      }
    } }
}
| foundweekends/pamflet | library/src/main/scala/preview.scala | Scala | lgpl-3.0 | 3,568 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.keras.layers
import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, IdentityOutputShape}
import com.intel.analytics.bigdl.dllib.nn.internal.KerasLayer
import com.intel.analytics.bigdl.dllib.optim.Regularizer
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.Shape
import com.intel.analytics.bigdl.dllib.keras.Net
import com.intel.analytics.bigdl.dllib.keras.layers.utils.KerasUtils
import scala.reflect.ClassTag
/**
* Applies an element-wise square root operation to the input.
*
* When you use this layer as the first layer of a model, you need to provide
* the argument inputShape (a Single Shape, does not include the batch dimension).
*
* Remark: This layer is from Torch and wrapped in Keras style.
*
* @param inputShape A Single Shape, does not include the batch dimension.
* @tparam T The numeric type of parameter(e.g. weight, bias). Only support float/double now.
*/
class Sqrt[T: ClassTag](
    val inputShape: Shape = null)(implicit ev: TensorNumeric[T])
  extends KerasLayer[Tensor[T], Tensor[T], T](KerasUtils.addBatch(inputShape))
    with IdentityOutputShape with Net {

  // Delegates to the Torch-style Sqrt module; the cast only adapts the
  // static type expected by KerasLayer.
  override def doBuild(inputShape: Shape): AbstractModule[Tensor[T], Tensor[T], T] =
    com.intel.analytics.bigdl.dllib.nn.Sqrt()
      .asInstanceOf[AbstractModule[Tensor[T], Tensor[T], T]]
}
object Sqrt {
  /** Factory mirroring the class constructor. */
  def apply[@specialized(Float, Double) T: ClassTag](
      inputShape: Shape = null)(implicit ev: TensorNumeric[T]): Sqrt[T] =
    new Sqrt[T](inputShape)
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/keras/layers/Sqrt.scala | Scala | apache-2.0 | 2,275 |
package controllers
import play.api.mvc._
import akka.actor.ActorSystem
import commands.{HelloWorld, HelloWorldAsync}
import util.Futures
import javax.inject.Inject
/** Play controller exposing the demo pages and two Hystrix-command endpoints. */
class Application @Inject()(implicit system: ActorSystem) extends Controller {

  import play.api.libs.concurrent.Execution.Implicits.defaultContext
  import util.Futures._

  def index = Action {
    Ok(views.html.index("Your new application is ready."))
  }

  // Runs the synchronous HelloWorld command off the request thread.
  def test = Action.async {
    val command = new HelloWorld("Bernd")
    command.future.map(result => Ok(result))
  }

  // Runs the asynchronous HelloWorld command.
  def async = Action.async {
    val command = new HelloWorldAsync("Bernd")
    command.future.map(result => Ok(result))
  }
}
| knutwalker/hystrix-play | app/controllers/Application.scala | Scala | apache-2.0 | 591 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600e.v2
import org.mockito.Mockito._
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.box.CtValidation
import uk.gov.hmrc.ct.ct600e.v2.retriever.CT600EBoxRetriever
class E3Spec extends WordSpec with MockitoSugar with Matchers {

  val boxRetriever = mock[CT600EBoxRetriever]

  /** Stubs the retriever so the E1 and E2 boxes resolve to the supplied values. */
  private def givenBoxes(e1: E1, e2: E2): Unit = {
    when(boxRetriever.e1()).thenReturn(e1)
    when(boxRetriever.e2()).thenReturn(e2)
  }

  "E3 validation" should {

    "make E3 required when E2 > E1" in {
      givenBoxes(E1(Some(1)), E2(Some(2)))
      E3(None).validate(boxRetriever) shouldBe Set(CtValidation(Some("E3"), "error.E3.conditionalRequired", None))
    }

    "make E3 required when E2 set but E1 is empty" in {
      givenBoxes(E1(None), E2(Some(2)))
      E3(None).validate(boxRetriever) shouldBe Set(CtValidation(Some("E3"), "error.E3.conditionalRequired", None))
    }

    "enforce E3 empty when E2 < E1" in {
      givenBoxes(E1(Some(2)), E2(Some(1)))
      E3(Some(1)).validate(boxRetriever) shouldBe Set(CtValidation(Some("E3"), "error.E3.conditionalMustBeEmpty", None))
    }

    "make E3 not required when E2 empty but E1 set" in {
      givenBoxes(E1(Some(1)), E2(None))
      E3(None).validate(boxRetriever) shouldBe Set()
    }

    "make E3 not required when E2 and E1 are both empty" in {
      givenBoxes(E1(None), E2(None))
      E3(None).validate(boxRetriever) shouldBe Set()
    }

    "E3 invalid if number less that 1" in {
      givenBoxes(E1(None), E2(None))
      E3(Some(0)).validate(boxRetriever) shouldBe Set(CtValidation(Some("E3"), "error.E3.outOfRange", None))
    }

    "E3 valid if number 1 or greater" in {
      givenBoxes(E1(None), E2(None))
      E3(Some(1)).validate(boxRetriever) shouldBe Set()
    }
  }
}
| hmrc/ct-calculations | src/test/scala/uk/gov/hmrc/ct/ct600e/v2/E3Spec.scala | Scala | apache-2.0 | 2,748 |
package com.ornithoptergames.psav
import java.io.File
import scala.util.matching.Regex
import scalafx.scene.paint.Color
object Messages {
  // Central registry of the app's message channels. RxMessage appears to be the
  // project's reactive pub/sub primitive; impulse() channels carry no payload
  // — TODO confirm against RxMessage's definition.
  // Frames-per-second value plus impulse channels nudging it up/down.
  val fps = RxMessage[Double](Config.defaultFps)
  val fpsUp = RxMessage.impulse()
  val fpsDown = RxMessage.impulse()
  // Regex filters (presumably applied to frame names), seeded from config defaults.
  val frameFilter = RxMessage[List[Regex]](Config.defaultFilters)
  // File selection / refresh, and the frame data derived from the file.
  val newFile = RxMessage[File]()
  val updateFile = RxMessage[File]()
  val newFrames = RxMessage[FrameInfo]()
  val updateFrames = RxMessage[FrameInfo]()
  // Background colour of the preview area.
  val bgColor = RxMessage[Color](Config.defaultBgColor)
  // Playback commands and animation lifecycle notifications.
  val play = RxMessage.impulse()
  val pause = RxMessage.impulse()
  val animationLoaded = RxMessage.impulse()
  val animationPlaying = RxMessage.impulse()
  val animationPaused = RxMessage.impulse()
}
| JavadocMD/anim-view | src/main/scala/com/ornithoptergames/psav/Messages.scala | Scala | apache-2.0 | 789 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io.File
import java.util.IllegalFormatException
import com.fasterxml.jackson.annotation.JsonInclude.Include
import com.fasterxml.jackson.core.JsonParser.Feature.STRICT_DUPLICATE_DETECTION
import com.fasterxml.jackson.core.`type`.TypeReference
import com.fasterxml.jackson.databind.SerializationFeature
import com.fasterxml.jackson.databind.json.JsonMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkThrowableHelper._
import org.apache.spark.util.Utils
/**
* Test suite for Spark Throwables.
*/
class SparkThrowableSuite extends SparkFunSuite {
  override def beforeAll(): Unit = {
    super.beforeAll()
  }
  // Fails if any element of `ss` appears more than once.
  def checkIfUnique(ss: Seq[Any]): Unit = {
    val dups = ss.groupBy(identity).mapValues(_.size).filter(_._2 > 1).keys.toSeq
    assert(dups.isEmpty)
  }
  // Asserts that every string in `ss` satisfies the predicate `fx`.
  def checkCondition(ss: Seq[String], fx: String => Boolean): Unit = {
    ss.foreach { s =>
      assert(fx(s))
    }
  }
  // STRICT_DUPLICATE_DETECTION makes Jackson throw on repeated JSON keys, so a
  // successful readValue proves the error-class file has no duplicate classes.
  test("No duplicate error classes") {
    // Enabling this feature incurs performance overhead (20-30%)
    val mapper = JsonMapper.builder()
      .addModule(DefaultScalaModule)
      .enable(STRICT_DUPLICATE_DETECTION)
      .build()
    mapper.readValue(errorClassesUrl, new TypeReference[Map[String, ErrorInfo]]() {})
  }
  // Re-serializes the error-class map in canonical form (sorted keys, indented,
  // absent fields omitted) and requires the on-disk file to already match it.
  test("Error classes are correctly formatted") {
    val errorClassFileContents = IOUtils.toString(errorClassesUrl.openStream())
    val mapper = JsonMapper.builder()
      .addModule(DefaultScalaModule)
      .enable(SerializationFeature.INDENT_OUTPUT)
      .build()
    val rewrittenString = mapper.configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true)
      .setSerializationInclusion(Include.NON_ABSENT)
      .writeValueAsString(errorClassToInfoMap)
    assert(rewrittenString.trim == errorClassFileContents.trim)
  }
  // Every SQLSTATE referenced by an error class must be listed in the table
  // embedded in error/README.md (between the start/stop marker comments).
  test("SQLSTATE invariants") {
    val sqlStates = errorClassToInfoMap.values.toSeq.flatMap(_.sqlState)
    val errorClassReadMe = Utils.getSparkClassLoader.getResource("error/README.md")
    val errorClassReadMeContents = IOUtils.toString(errorClassReadMe.openStream())
    val sqlStateTableRegex =
      "(?s)<!-- SQLSTATE table start -->(.+)<!-- SQLSTATE table stop -->".r
    val sqlTable = sqlStateTableRegex.findFirstIn(errorClassReadMeContents).get
    // Markdown table rows start with '|'; drop(2) skips the header and separator rows.
    val sqlTableRows = sqlTable.split("\\n").filter(_.startsWith("|")).drop(2)
    val validSqlStates = sqlTableRows.map(_.slice(1, 6)).toSet
    // Sanity check
    assert(Set("07000", "42000", "HZ000").subsetOf(validSqlStates))
    assert(validSqlStates.forall(_.length == 5), validSqlStates)
    checkCondition(sqlStates, s => validSqlStates.contains(s))
  }
  // Message formats must be non-null and unique across error classes.
  test("Message format invariants") {
    val messageFormats = errorClassToInfoMap.values.toSeq.map(_.messageFormat)
    checkCondition(messageFormats, s => s != null)
    checkIfUnique(messageFormats)
  }
  // Serializing the map to a file and reading it back must reproduce it exactly.
  test("Round trip") {
    val tmpFile = File.createTempFile("rewritten", ".json")
    val mapper = JsonMapper.builder()
      .addModule(DefaultScalaModule)
      .enable(SerializationFeature.INDENT_OUTPUT)
      .build()
    mapper.writeValue(tmpFile, errorClassToInfoMap)
    val rereadErrorClassToInfoMap = mapper.readValue(
      tmpFile, new TypeReference[Map[String, ErrorInfo]]() {})
    assert(rereadErrorClassToInfoMap == errorClassToInfoMap)
  }
  // Unknown (or empty) error classes must be rejected with a clear message.
  test("Check if error class is missing") {
    val ex1 = intercept[IllegalArgumentException] {
      getMessage("", Array.empty)
    }
    assert(ex1.getMessage == "Cannot find error class ''")
    val ex2 = intercept[IllegalArgumentException] {
      getMessage("LOREM_IPSUM", Array.empty)
    }
    assert(ex2.getMessage == "Cannot find error class 'LOREM_IPSUM'")
  }
  test("Check if message parameters match message format") {
    // Requires 2 args
    intercept[IllegalFormatException] {
      getMessage("MISSING_COLUMN", Array.empty)
    }
    // Does not fail with too many args (expects 0 args)
    assert(getMessage("DIVIDE_BY_ZERO", Array("foo", "bar")) ==
      "divide by zero. To return NULL instead, use 'try_divide'. If necessary set foo to false " +
        "(except for ANSI interval type) to bypass this error.")
  }
  test("Error message is formatted") {
    assert(getMessage("MISSING_COLUMN", Array("foo", "bar, baz")) ==
      "Column 'foo' does not exist. Did you mean one of the following? [bar, baz]")
  }
  // Legacy (message-only) SparkExceptions carry no error class or SQLSTATE.
  test("Try catching legacy SparkError") {
    try {
      throw new SparkException("Arbitrary legacy message")
    } catch {
      case e: SparkThrowable =>
        assert(e.getErrorClass == null)
        assert(e.getSqlState == null)
      case _: Throwable =>
        // Should not end up here
        assert(false)
    }
  }
  test("Try catching SparkError with error class") {
    try {
      throw new SparkException(
        errorClass = "WRITING_JOB_ABORTED",
        messageParameters = Array.empty,
        cause = null)
    } catch {
      case e: SparkThrowable =>
        assert(e.getErrorClass == "WRITING_JOB_ABORTED")
        assert(e.getSqlState == "40000")
      case _: Throwable =>
        // Should not end up here
        assert(false)
    }
  }
  // INTERNAL_ERROR is flagged as internal and carries no SQLSTATE.
  test("Try catching internal SparkError") {
    try {
      throw new SparkException(
        errorClass = "INTERNAL_ERROR",
        messageParameters = Array("this is an internal error"),
        cause = null
      )
    } catch {
      case e: SparkThrowable =>
        assert(e.isInternalError)
        assert(e.getSqlState == null)
      case _: Throwable =>
        // Should not end up here
        assert(false)
    }
  }
}
| ueshin/apache-spark | core/src/test/scala/org/apache/spark/SparkThrowableSuite.scala | Scala | apache-2.0 | 6,408 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.stream
import java.util.concurrent.{CompletableFuture, CompletionStage}
import java.util.function.Consumer
import akka.actor.AbstractActor
import akka.japi.function
import akka.stream.Supervision.{Directive, Resume}
import akka.stream._
import akka.stream.javadsl.{RunnableGraph, Sink}
import akka.util.Timeout
import akka.{Done, NotUsed}
import org.squbs.unicomplex.AbstractFlowDefinition
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.runtime.BoxedUnit
import scala.compat.java8.FunctionConverters._
/**
* Java API for perpetual stream that starts and stops with the server.
* @tparam T The type of the materialized value of the stream.
*/
abstract class AbstractPerpetualStream[T] extends AbstractActor with PerpetualStreamBase[T] {
  /**
   * Describe your graph by implementing streamGraph
   *
   * @return The graph.
   */
  def streamGraph: RunnableGraph[T]
  /**
   * The decider to use. Override if not resumingDecider.
   * The default logs the error, prints the stack trace, and resumes the stream.
   */
  def decider: akka.japi.function.Function[Throwable, Directive] =
    new function.Function[Throwable, Directive] {
      override def apply(t: Throwable): Directive = {
        log.error("Uncaught error {} from stream", t)
        t.printStackTrace()
        Resume
      }
    }
  implicit val materializer: ActorMaterializer =
    ActorMaterializer(ActorMaterializerSettings(context.system).withSupervisionStrategy(decider))
  override private[stream] final def runGraph(): T = streamGraph.run(materializer)
  // When the base asks us to stop, run shutdown() and message ourselves Done
  // once the stream has fully terminated.
  override private[stream] final def shutdownAndNotify(): Unit = shutdown()
    .thenAccept(asJavaConsumer((_: Done) => self ! Done))
  override def createReceive(): AbstractActor.Receive = {
    new AbstractActor.Receive(PartialFunction.empty[Any, BoxedUnit])
  }
  // Adapts any CompletionStage into a CompletionStage[Done] completion marker.
  private def stageToDone(stage: CompletionStage[_]): CompletionStage[Done] =
    stage.thenApply(asJavaFunction((_: Any) => Done))
  /**
   * Override shutdown to define your own shutdown process or wait for the sink to finish.
   * The default shutdown makes the following assumptions:<ol>
   * <li>The stream materializes to a CompletionStage, or a Pair or List
   * for which the last element is a CompletionStage</li>
   * <li>This CompletionStage represents the state whether the stream is done</li>
   * <li>The stream has the killSwitch as the first processing stage</li>
   * </ol>In which case you do not need to override this default shutdown if there are no further shutdown
   * requirements. In case you override shutdown, it is recommended that super.shutdown() be called
   * on overrides even if the stream only partially meets the requirements above.
   *
   * @return A CompletionStage[Done] that gets completed when the whole stream is done.
   */
  def shutdown(): CompletionStage[Done] = {
    matValue match {
      case f: CompletionStage[_] =>
        killSwitch.shutdown()
        stageToDone(f)
      case akka.japi.Pair(first, last) =>
        // If the first element is itself a kill switch, shut it down too.
        first match {
          case k: KillSwitch => k.shutdown()
          case _ =>
        }
        killSwitch.shutdown()
        last match {
          case f: CompletionStage[_] => stageToDone(f)
          // Fix: previously this inner match was non-exhaustive and threw a
          // MatchError when the pair's second element was not a CompletionStage.
          // Fall back to an already-completed Done, mirroring the List case below.
          case _ => CompletableFuture.completedFuture(Done)
        }
      case l: java.util.List[_] if l.size > 0 =>
        l.get(0) match {
          case k: KillSwitch => k.shutdown()
          case _ =>
        }
        killSwitch.shutdown()
        l.get(l.size() - 1) match {
          case f: CompletionStage[_] => stageToDone(f)
          case _ => CompletableFuture.completedFuture(Done)
        }
      case _ =>
        killSwitch.shutdown()
        CompletableFuture.completedFuture(Done)
    }
  }
  /**
   * Override getStopTimeout to set a custom stop timeout.
   * @return The timeout, in milliseconds to allow for stopping the server.
   */
  def getStopTimeout: Long = super.stopTimeout.toMillis
  override final def stopTimeout: FiniteDuration = getStopTimeout.millis
}
/**
* Java API for creating an HTTP FlowDefinition connecting to a PerpetualStream.
*/
abstract class FlowToPerpetualStream extends AbstractFlowDefinition {
  /**
   * Fetches the materialized Sink of the named PerpetualStream by asking the
   * stream actor for its materialized value and blocking on the reply.
   *
   * @param perpetualStreamName actor path/name of the perpetual stream
   * @return the Sink materialized by that stream, cast to the requested element type
   */
  def matValue[T](perpetualStreamName: String): Sink[T, NotUsed] = {
    // NOTE(review): `implicit val _` appears intended to bring the ActorSystem
    // into implicit scope for SafeSelect/ask — confirm it actually resolves.
    implicit val _ = context.system
    implicit val timeout: Timeout = Timeout(10.seconds)
    import akka.pattern.ask
    val responseF = SafeSelect(perpetualStreamName) ? MatValueRequest
    // Exception! This code is executed only at startup. We really need a better API, though.
    Await.result(responseF, timeout.duration).asInstanceOf[Sink[T, NotUsed]]
  }
} | anilgursel/squbs | squbs-unicomplex/src/main/scala/org/squbs/stream/AbstractPerpetualStream.scala | Scala | apache-2.0 | 5,118 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.util.concurrent.ExecutionException
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.aggregate.NoOp
import org.apache.spark.sql.catalyst.expressions.codegen.{CodeAndComment, CodeGenerator}
import org.apache.spark.sql.catalyst.plans.PlanTestBase
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{IntegerType, StructType}
class CodeGeneratorWithInterpretedFallbackSuite extends SparkFunSuite with PlanTestBase {
  val codegenOnly = CodegenObjectFactoryMode.CODEGEN_ONLY.toString
  val noCodegen = CodegenObjectFactoryMode.NO_CODEGEN.toString
  // Factory whose code-generated path always fails to compile ("invalid code"),
  // used to exercise the interpreter fallback and the CODEGEN_ONLY failure mode.
  object FailedCodegenProjection
      extends CodeGeneratorWithInterpretedFallback[Seq[Expression], UnsafeProjection] {
    override protected def createCodeGeneratedObject(in: Seq[Expression]): UnsafeProjection = {
      val invalidCode = new CodeAndComment("invalid code", Map.empty)
      // We assume this compilation throws an exception
      CodeGenerator.compile(invalidCode)
      null
    }
    override protected def createInterpretedObject(in: Seq[Expression]): UnsafeProjection = {
      InterpretedUnsafeProjection.createProjection(in)
    }
  }
  // The factory mode conf decides between compiled and interpreted projections.
  test("UnsafeProjection with codegen factory mode") {
    val input = Seq(BoundReference(0, IntegerType, nullable = true))
    withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> codegenOnly) {
      val obj = UnsafeProjection.createObject(input)
      assert(obj.getClass.getName.contains("GeneratedClass$SpecificUnsafeProjection"))
    }
    withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> noCodegen) {
      val obj = UnsafeProjection.createObject(input)
      assert(obj.isInstanceOf[InterpretedUnsafeProjection])
    }
  }
  test("MutableProjection with codegen factory mode") {
    val input = Seq(BoundReference(0, IntegerType, nullable = true))
    withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> codegenOnly) {
      val obj = MutableProjection.createObject(input)
      assert(obj.getClass.getName.contains("GeneratedClass$SpecificMutableProjection"))
    }
    withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> noCodegen) {
      val obj = MutableProjection.createObject(input)
      assert(obj.isInstanceOf[InterpretedMutableProjection])
    }
  }
  // In FALLBACK mode a codegen failure must transparently yield the interpreter.
  test("fallback to the interpreter mode") {
    val input = Seq(BoundReference(0, IntegerType, nullable = true))
    val fallback = CodegenObjectFactoryMode.FALLBACK.toString
    withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> fallback) {
      val obj = FailedCodegenProjection.createObject(input)
      assert(obj.isInstanceOf[InterpretedUnsafeProjection])
    }
  }
  // In CODEGEN_ONLY mode the compile failure must surface to the caller.
  test("codegen failures in the CODEGEN_ONLY mode") {
    val errMsg = intercept[ExecutionException] {
      val input = Seq(BoundReference(0, IntegerType, nullable = true))
      withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> codegenOnly) {
        FailedCodegenProjection.createObject(input)
      }
    }.getMessage
    assert(errMsg.contains("failed to compile: org.codehaus.commons.compiler.CompileException:"))
  }
  // NoOp expressions leave the corresponding output slot null in both modes.
  test("SPARK-25358 Correctly handles NoOp in MutableProjection") {
    val exprs = Seq(Add(BoundReference(0, IntegerType, nullable = true), Literal.create(1)), NoOp)
    val input = InternalRow.fromSeq(1 :: 1 :: Nil)
    val expected = 2 :: null :: Nil
    withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> codegenOnly) {
      val proj = MutableProjection.createObject(exprs)
      assert(proj(input).toSeq(StructType.fromDDL("c0 int, c1 int")) === expected)
    }
    withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> noCodegen) {
      val proj = MutableProjection.createObject(exprs)
      assert(proj(input).toSeq(StructType.fromDDL("c0 int, c1 int")) === expected)
    }
  }
  test("SPARK-25374 Correctly handles NoOp in SafeProjection") {
    val exprs = Seq(Add(BoundReference(0, IntegerType, nullable = true), Literal.create(1)), NoOp)
    val input = InternalRow.fromSeq(1 :: 1 :: Nil)
    val expected = 2 :: null :: Nil
    withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> codegenOnly) {
      val proj = SafeProjection.createObject(exprs)
      assert(proj(input).toSeq(StructType.fromDDL("c0 int, c1 int")) === expected)
    }
    withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> noCodegen) {
      val proj = SafeProjection.createObject(exprs)
      assert(proj(input).toSeq(StructType.fromDDL("c0 int, c1 int")) === expected)
    }
  }
}
| maropu/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CodeGeneratorWithInterpretedFallbackSuite.scala | Scala | apache-2.0 | 5,293 |
package io.github.nivox.dandelion.datatxt.nex
import java.util.Date
import akka.http.scaladsl.model.Uri
import io.github.nivox.dandelion.datatxt.CommonCodecs
/** A Wikipedia entity referenced by an annotation. */
case class WikipediaResource(id: Int, title: String, uri: Uri)
/** The matched text span: surface form plus start/end offsets in the input. */
case class Spot(word: String, start: Int, end: Int)
/** Linked-open-data URIs for the entity. */
case class Lod(wikipedia: Uri, dbpedia: Uri)
/** Full-size and thumbnail image URLs for the entity. */
case class Image(full: Uri, thumbnail: Uri)
/**
 * One entity annotation from the dataTXT-NEX response.
 * `abstractS` holds the JSON field "abstract" (renamed because `abstract` is a
 * Scala keyword); list fields default to empty when the field is absent.
 */
case class Annotation(resource: WikipediaResource,
                      label: String,
                      confidence: Float,
                      spot: Spot,
                      types: List[Uri],
                      categories: List[String],
                      abstractS: Option[String],
                      lod: Option[Lod],
                      alternateLabels: List[String],
                      image: Option[Image]
                     )
/** Top-level dataTXT-NEX response: request metadata plus the annotations found. */
case class NexResponse(timestamp: Date,
                       time: Int,
                       lang: String,
                       langConfidence: Option[Float],
                       text: Option[String],
                       url: Option[Uri],
                       annotations: List[Annotation]
                      )
object ResponseModelsCodec {
  import argonaut._
  import Argonaut._
  import scalaz._
  import Scalaz._
  // Decodes an optional field: an absent field yields `default`; a present field
  // must decode successfully (its value is wrapped in Some).
  def getOrElse[T](cursor: HCursor, field: String, default: => Option[T])(implicit d: DecodeJson[T]): DecodeResult[Option[T]] =
    (cursor --\\ field).focus map (_.as[T] map (_.some)) getOrElse DecodeResult.ok(default)
  // Each ||| supplies a clearer failure message when the primary decode fails.
  implicit val wikipediaResourceDecode: DecodeJson[WikipediaResource] = DecodeJson { c =>
    for {
      id <- c.get[Int]("id") ||| DecodeResult.fail("Missing or invalid wikipedia resource id", c.history)
      title <- c.get[String]("title") ||| DecodeResult.fail("Missing or invalid wikipedia resource title", c.history)
      uri <- c.get[String]("uri") ||| DecodeResult.fail("Missing or invalid wikipedia resource uri", c.history)
    } yield WikipediaResource(id, title, uri)
  }
  implicit val spotDecode: DecodeJson[Spot] = DecodeJson { c =>
    for {
      word <- c.get[String]("spot") ||| DecodeResult.fail("Missing or invalid spot", c.history)
      start <- c.get[Int]("start") ||| DecodeResult.fail("Missing or invalid start", c.history)
      end <- c.get[Int]("end") ||| DecodeResult.fail("Missing or invalid end", c.history)
    } yield Spot(word, start, end)
  }
  implicit val lodDecode: DecodeJson[Lod] = DecodeJson { c =>
    for {
      wikipedia <- c.get[String]("wikipedia") ||| DecodeResult.fail("Missing or invalid wikipedia lod", c.history)
      dbpedia <- c.get[String]("dbpedia") ||| DecodeResult.fail("Missing or invalid dbpedia lod", c.history)
    } yield Lod(wikipedia, dbpedia)
  }
  implicit val imageDecode: DecodeJson[Image] = DecodeJson { c =>
    for {
      full <- c.get[String]("full") ||| DecodeResult.fail("Missing or invalid full image url", c.history)
      thumbnail <- c.get[String]("thumbnail") ||| DecodeResult.fail("Missing or invalid thumbnail image url", c.history)
    } yield Image(full, thumbnail)
  }
  // Decodes an Annotation from the same JSON object that carries the resource
  // and spot fields (hence the repeated c.focus.as[...] decodes). List-valued
  // fields default to empty lists; optional fields default to None.
  implicit val annotationDecode: DecodeJson[Annotation] = DecodeJson { c =>
    for {
      resource <- c.focus.as[WikipediaResource]
      label <- c.get[String]("label") ||| DecodeResult.fail("Missing or invalid label", c.history)
      confidence <- c.get[Float]("confidence") ||| DecodeResult.fail("Missing or invalid confidence", c.history)
      spot <- c.focus.as[Spot]
      typeStrLst <- getOrElse[List[String]](c, "types", None).map(_.getOrElse(List())) ||| DecodeResult.fail("Missing or invalid types", c.history)
      typeLst = typeStrLst.map(Uri(_))
      categories <- getOrElse[List[String]](c, "categories", None).map(_.getOrElse(List())) ||| DecodeResult.fail("Missing or invalid categories", c.history)
      lod <- getOrElse[Lod](c, "lod", None)
      abstractS <- getOrElse[String](c, "abstract", None) ||| DecodeResult.fail("Invalid abstract", c.history)
      alternateLabels <- getOrElse[List[String]](c, "alternateLabels", None).map(_.getOrElse(List())) ||| DecodeResult.fail("Invalid alternate labels", c.history)
      image <- getOrElse[Image](c, "image", None)
    } yield Annotation(resource, label, confidence, spot, typeLst, categories, abstractS, lod, alternateLabels, image)
  }
  implicit val nexResponseDecode: DecodeJson[NexResponse] = DecodeJson { c =>
    for {
      timestamp <- CommonCodecs.getTimestamp(c)
      time <- c.get[Int]("time") ||| DecodeResult.fail("Missing or invalid time", c.history)
      lang <- c.get[String]("lang") ||| DecodeResult.fail("Missing or invalid lang", c.history)
      langConfidence <- getOrElse[Float](c, "langConfidence", None) ||| DecodeResult.fail("Invalid language confidence", c.history)
      text <- getOrElse[String](c, "text", None) ||| DecodeResult.fail("Invalid text", c.history)
      urlStr <- getOrElse[String](c, "url", None) ||| DecodeResult.fail("Invalid url", c.history)
      url = urlStr map (Uri(_))
      annotations <- c.get[List[Annotation]]("annotations")
    } yield NexResponse(timestamp, time, lang, langConfidence, text, url, annotations)
  }
} | nivox/dandelion-scala | datatxt-nex/src/main/scala/io/github/nivox/dandelion/datatxt/nex/ResponseModels.scala | Scala | mit | 5,099 |
package com.andbutso.poker.handhistory.pokerstars.parser
import com.andbutso.poker.ParentSpec
class TableLineParserSpec extends ParentSpec {
"TableLineParser" should {
val matching = "Table 'Medusa' 9-max Seat #2 is the button"
val nonMatching = Seq(
matching.replace("'", ""), // Quotes around table name missing
matching.replace("-", ""), // Dash between capacity and 'max' missing
matching.replace("#", ""), // Number sign before button is missing
matching.replace("Medusa", ""), // Table name missing
matching.replace("Seat", "seat"), // Case is wrong
matching.replace("is the button", "") // End of line missing
)
"matching line extracts table info" in {
matching match {
case TableLineParser(name, capacity, button) =>
name mustEqual "Medusa"
capacity mustEqual 9
button mustEqual 2
case _ =>
failure(s"$matching should have matched")
}// must beSome
ok
}
//
// "non-matching lines don't execute partial function" in {
// nonMatching foreach { nonMatch =>
// TableLineParser(nonMatch)(identityPF) must beNone
// }
//
// ok
// }
}
}
| marcel/texas | src/test/scala/com/andbutso/poker/handhistory/pokerstars/parser/TableLineParserSpec.scala | Scala | mit | 1,204 |
package org.openurp.edu.eams.teach.election.service
import org.beangle.data.jpa.dao.OqlBuilder
import org.beangle.ems.dictionary.service.BaseCodeService
import org.openurp.base.Semester
import org.openurp.edu.base.Project
import org.openurp.edu.base.Student
import org.openurp.edu.eams.teach.election.RetakeFeeConfig
import org.openurp.edu.teach.lesson.CourseTake
trait RetakeFeeConfigService extends BaseCodeService {
  // Service contract for retake-fee configuration lookup and persistence.
  // Method semantics below are inferred from names — confirm against implementations.
  /** Script text of the fee calculation rule. */
  def getFeeRuleScript(): String
  /** Open configs for the current semester (presumably). */
  def getCurrOpenConfigs(): List[RetakeFeeConfig]
  /** Open configs for the given semesters. */
  def getOpenConfigs(semesters: Semester*): List[RetakeFeeConfig]
  /** Open configs for the given project and semesters. */
  def getOpenConfigs(project: Project, semesters: Semester*): List[RetakeFeeConfig]
  /** Query builder matching the open configs for the given project/semesters. */
  def getOpenConfigBuilder(project: Project, semesters: Semester*): OqlBuilder[RetakeFeeConfig]
  /** All configs (open or not) for the given project and semesters. */
  def getConfigs(project: Project, semesters: Semester*): List[RetakeFeeConfig]
  def doCheck(project: Project, semesters: Semester*): Boolean
  /** Course takes that count as retakes for the student in the given semesters. */
  def getRetakeCourseTakes(student: Student, semesters: Semester*): List[CourseTake]
  def doCheck(config: RetakeFeeConfig): Boolean
  def saveOrUpdate(config: RetakeFeeConfig): Unit
  def getConfig(config: RetakeFeeConfig): RetakeFeeConfig
}
| openurp/edu-eams-webapp | election/src/main/scala/org/openurp/edu/eams/teach/election/service/RetakeFeeConfigService.scala | Scala | gpl-3.0 | 1,150 |
/*
* ============= Ryft-Customized BSD License ============
* Copyright (c) 2015, Ryft Systems, Inc.
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software must display the following acknowledgement:
* This product includes software developed by Ryft Systems, Inc.
* 4. Neither the name of Ryft Systems, Inc. nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY RYFT SYSTEMS, INC. ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL RYFT SYSTEMS, INC. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ============
*/
package com.ryft.spark.connector.partitioner.impl
import java.net.URL
import com.ryft.spark.connector.partitioner.RyftPartitioner
import com.ryft.spark.connector.query.RyftQuery
/**
* No partitioning rules applied.
*/
class NoPartitioner extends RyftPartitioner {
  // Always empty: no partitioning rules are applied to the query.
  override def partitions(query: RyftQuery): Set[URL] = Set.empty[URL]
}
| getryft/spark-ryft-connector | spark-ryft-connector/src/main/scala/com/ryft/spark/connector/partitioner/impl/NoPartitioner.scala | Scala | bsd-3-clause | 2,137 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package vta.dpi
import chisel3._
import chisel3.util._
import vta.util.config._
import vta.interface.axi._
import vta.shell._
/** Host DPI parameters */
trait VTAHostDPIParams {
  // Bit widths of the DPI register interface: 8-bit address, 32-bit data.
  val dpiAddrBits = 8
  val dpiDataBits = 32
}
/** Host master interface.
*
 * This interface is typically used by the Host
*/
class VTAHostDPIMaster extends Bundle with VTAHostDPIParams {
  // Request channel, driven by the host: valid is asserted with opcode/addr/value,
  // and the client acknowledges consumption via deq.
  val req = new Bundle {
    val valid = Output(Bool())
    // opcode: true selects a write, false a read (see VTAHostDPIToAXI's FSM).
    val opcode = Output(Bool())
    val addr = Output(UInt(dpiAddrBits.W))
    val value = Output(UInt(dpiDataBits.W))
    val deq = Input(Bool())
  }
  // Read response data flowing back to the host.
  val resp = Flipped(ValidIO(UInt(dpiDataBits.W)))
}
/** Host client interface.
*
 * This interface is typically used by the Accelerator
*/
class VTAHostDPIClient extends Bundle with VTAHostDPIParams {
  // Mirror of VTAHostDPIMaster with directions flipped: the accelerator side
  // receives requests and asserts deq when it consumes one.
  val req = new Bundle {
    val valid = Input(Bool())
    // opcode: true selects a write, false a read (see VTAHostDPIToAXI's FSM).
    val opcode = Input(Bool())
    val addr = Input(UInt(dpiAddrBits.W))
    val value = Input(UInt(dpiDataBits.W))
    val deq = Output(Bool())
  }
  // Read response data driven back toward the host.
  val resp = ValidIO(UInt(dpiDataBits.W))
}
/** Host DPI module.
*
* Wrapper for Host Verilog DPI module.
*/
class VTAHostDPI extends BlackBox with HasBlackBoxResource {
  val io = IO(new Bundle {
    val clock = Input(Clock())
    val reset = Input(Bool())
    val dpi = new VTAHostDPIMaster
  })
  // The implementation is the Verilog DPI model bundled as a resource.
  setResource("/verilog/VTAHostDPI.v")
}
/** Host DPI to AXI Converter.
*
* Convert Host DPI to AXI for VTAShell
*/
class VTAHostDPIToAXI(debug: Boolean = false)(implicit p: Parameters)
    extends Module {
  val io = IO(new Bundle {
    val dpi = new VTAHostDPIClient
    val axi = new AXILiteMaster(p(ShellKey).hostParams)
  })
  // Latched copies of the DPI request's address and value, captured when a
  // request is accepted in the idle state.
  val addr = RegInit(0.U.asTypeOf(chiselTypeOf(io.dpi.req.addr)))
  val data = RegInit(0.U.asTypeOf(chiselTypeOf(io.dpi.req.value)))
  // FSM states: a read walks Idle -> ReadAddress -> ReadData -> Idle;
  // a write walks Idle -> WriteAddress -> WriteData -> WriteResponse -> Idle.
  val sIdle :: sReadAddress :: sReadData :: sWriteAddress :: sWriteData :: sWriteResponse :: Nil =
    Enum(6)
  val state = RegInit(sIdle)
  switch(state) {
    is(sIdle) {
      // opcode selects the transaction type: true = write, false = read.
      when(io.dpi.req.valid) {
        when(io.dpi.req.opcode) {
          state := sWriteAddress
        }.otherwise {
          state := sReadAddress
        }
      }
    }
    is(sReadAddress) {
      when(io.axi.ar.ready) {
        state := sReadData
      }
    }
    is(sReadData) {
      when(io.axi.r.valid) {
        state := sIdle
      }
    }
    is(sWriteAddress) {
      when(io.axi.aw.ready) {
        state := sWriteData
      }
    }
    is(sWriteData) {
      when(io.axi.w.ready) {
        state := sWriteResponse
      }
    }
    is(sWriteResponse) {
      when(io.axi.b.valid) {
        state := sIdle
      }
    }
  }
  // Capture the request operands at the moment the FSM leaves idle.
  when(state === sIdle && io.dpi.req.valid) {
    addr := io.dpi.req.addr
    data := io.dpi.req.value
  }
  io.axi.aw.valid := state === sWriteAddress
  io.axi.aw.bits.addr := addr
  io.axi.w.valid := state === sWriteData
  io.axi.w.bits.data := data
  // Write strobe 0xF: all four byte lanes of the 32-bit word are written.
  io.axi.w.bits.strb := "h_f".U
  io.axi.b.ready := state === sWriteResponse
  io.axi.ar.valid := state === sReadAddress
  io.axi.ar.bits.addr := addr
  io.axi.r.ready := state === sReadData
  // Dequeue the DPI request once the AXI address phase (read or write) is accepted.
  io.dpi.req.deq := (state === sReadAddress & io.axi.ar.ready) | (state === sWriteAddress & io.axi.aw.ready)
  io.dpi.resp.valid := io.axi.r.valid
  io.dpi.resp.bits := io.axi.r.bits.data
  if (debug) {
    when(state === sWriteAddress && io.axi.aw.ready) {
      printf("[VTAHostDPIToAXI] [AW] addr:%x\n", addr)
    }
    when(state === sReadAddress && io.axi.ar.ready) {
      printf("[VTAHostDPIToAXI] [AR] addr:%x\n", addr)
    }
    when(io.axi.r.fire()) {
      printf("[VTAHostDPIToAXI] [R] value:%x\n", io.axi.r.bits.data)
    }
    when(io.axi.w.fire()) {
      printf("[VTAHostDPIToAXI] [W] value:%x\n", io.axi.w.bits.data)
    }
  }
}
| Huyuwei/tvm | vta/hardware/chisel/src/main/scala/dpi/VTAHostDPI.scala | Scala | apache-2.0 | 4,490 |
package ostinato.chess.core
import org.scalatest._
import ostinato.core.{Piece, XY}
/**
 * Unit tests for `Piece.deltas`: given a literal board position, the set of
 * (dx, dy) movement offsets computed for a piece must contain exactly the
 * reachable squares -- blocked squares excluded, captures and special moves
 * (castling, pawn double-advance) included.
 *
 * Boards are parsed from 8x8 grid strings by `ChessGame.fromGridString`;
 * based on the pawn tests, rows are listed top-to-bottom and negative dy
 * moves a piece up the printed grid -- TODO confirm orientation.
 */
class PieceDeltasTest extends FunSpec with Matchers {
  describe("Piece deltas on the board") {
    it("should find no deltas for trapped king") {
      // King boxed in by friendly rooks on all eight adjacent squares.
      val game = ChessGame.fromGridString("""........
                                            |........
                                            |...♜♜♜..
                                            |...♜♚♜..
                                            |...♜♜♜..
                                            |........
                                            |........
                                            |........""".stripMargin).get
      game.board.kings.head.deltas(game.board) shouldBe Set()
    }
    it("should find only one delta for trapped king") {
      // The square directly below the king (dy = +1) is the only free one.
      val game = ChessGame.fromGridString("""........
                                            |........
                                            |...♜♜♜..
                                            |...♜♚♜..
                                            |...♜.♜..
                                            |........
                                            |........
                                            |........""".stripMargin).get
      game.board.kings.head.deltas(game.board) shouldBe Set(XY(0, 1))
    }
    it("should find all deltas for king (except castling)") {
      // A king in the open moves one square in all eight directions.
      val game = ChessGame.fromGridString("""........
                                            |........
                                            |........
                                            |....♚...
                                            |........
                                            |........
                                            |........
                                            |........""".stripMargin).get
      game.board.kings.head.deltas(game.board) shouldBe
        Piece.toXYs(
          Set((-1, 0),
              (1, 0),
              (0, -1),
              (0, 1),
              (-1, -1),
              (1, 1),
              (-1, 1),
              (1, -1)))
    }
    it(
      "should find all deltas for king (including castling but except going up)") {
      // King on the top edge: no upward deltas, plus the (±2, 0) castling moves.
      val game = ChessGame.fromGridString("""....♚...
                                            |........
                                            |........
                                            |........
                                            |........
                                            |........
                                            |........
                                            |........""".stripMargin).get
      game.board.kings.head.deltas(game.board) shouldBe
        Piece.toXYs(
          Set((-1, 0), (1, 0), (0, 1), (1, 1), (-1, 1), (2, 0), (-2, 0)))
    }
    it("should find delta to capture enemy queen") {
      // The enemy queen's square is the only reachable one (a capture).
      val game = ChessGame.fromGridString("""........
                                            |........
                                            |...♜♜♜..
                                            |...♜♚♕..
                                            |...♜♜♜..
                                            |........
                                            |........
                                            |........""".stripMargin).get
      game.board.kings.head.deltas(game.board) shouldBe
        Piece.toXYs(Set((1, 0)))
    }
    it("should find all pawn deltas including en passant movement") {
      // Pawn on its starting rank: single and double advance plus diagonals.
      // NOTE(review): the (0, -2) delta is the initial double advance, not
      // en passant; the test name looks misleading -- confirm intent.
      val game = ChessGame.fromGridString("""........
                                            |........
                                            |........
                                            |........
                                            |........
                                            |........
                                            |...♙....
                                            |........""".stripMargin).get
      game.board.pawns.head.deltas(game.board) shouldBe
        Piece.toXYs(Set((0, -1), (0, -2), (-1, -1), (1, -1)))
    }
    it("should find all pawn deltas except en passant movement") {
      // Pawn off its starting rank: no double advance.
      val game = ChessGame.fromGridString("""........
                                            |........
                                            |........
                                            |........
                                            |........
                                            |...♙....
                                            |........
                                            |........""".stripMargin).get
      game.board.pawns.head.deltas(game.board) shouldBe
        Piece.toXYs(Set((0, -1), (-1, -1), (1, -1)))
    }
    it(
      "should find all pawn deltas except en passant movement, and promotion quadruplication shouldn't affect it") {
      // Pawn one step from promotion: same three deltas as a regular advance.
      val game = ChessGame.fromGridString("""........
                                            |...♙....
                                            |........
                                            |........
                                            |........
                                            |........
                                            |........
                                            |........""".stripMargin).get
      game.board.pawns.head.deltas(game.board) shouldBe
        Piece.toXYs(Set((0, -1), (-1, -1), (1, -1)))
    }
    it("should find all deltas for queen") {
      // Queen near the top edge: sliding deltas clipped by the board bounds.
      val game = ChessGame.fromGridString("""........
                                            |...♛....
                                            |........
                                            |........
                                            |........
                                            |........
                                            |........
                                            |........""".stripMargin).get
      game.board.queens.head.deltas(game.board) shouldBe
        Piece.toXYs(
          Set((0, -1),
              (0, 1),
              (0, 2),
              (0, 3),
              (0, 4),
              (0, 5),
              (0, 6),
              (-1, 0),
              (-2, 0),
              (-3, 0),
              (1, 0),
              (2, 0),
              (3, 0),
              (4, 0),
              (-1, -1),
              (1, -1),
              (-1, 1),
              (-2, 2),
              (-3, 3),
              (1, 1),
              (2, 2),
              (3, 3),
              (4, 4)))
    }
  }
}
| MarianoGappa/ostinato | shared/src/test/scala/ostinato/chess/core/PieceDeltasTest.scala | Scala | mit | 4,426 |
package bytecode.model
// Model of a single JVM bytecode instruction; the case names mirror the JVM
// mnemonics (aload_0, invokespecial, aconst_null, return).
abstract class Op
// aload_0: push local variable 0 (`this` in instance methods) onto the stack.
case object ALoad0 extends Op
// invokespecial: invoke a constructor, private, or superclass method;
// `methodIndex` is presumably a constant-pool index -- TODO confirm.
case class InvokeSpecial(methodIndex: Int) extends Op
// aconst_null: push the `null` reference onto the stack.
case object AConstNull extends Op
// return: return void from the current method.
case object Return extends Op
| AndrewHancock/scala-bytecode-disassembler | src/main/scala/bytecode/model/Op.scala | Scala | apache-2.0 | 190 |
import sbt._
import Keys._
// sbt 0.13-era Build.scala definition for the image-util library.
// NOTE(review): the Build trait, Project.defaultSettings and the <<= / <+=
// operators are deprecated in later sbt versions; kept as-is here.
object ApplicationBuild extends Build {
	val appName = "image-util"
	val appVersion = "1.1.1"
	val appOrganization = "com.github.mt_sito"
	val buildScalaVersion = "2.11.7"
	lazy val root = Project(id = appName,
		base = file("."),
		settings = Project.defaultSettings ++ Seq(
			name := appName,
			organization := appOrganization,
			version := appVersion,
			scalaVersion := buildScalaVersion,
			publishMavenStyle := true,
			// Publish locally into the Maven repository (~/.m2) with checksums.
			otherResolvers := Seq(Resolver.file("dotM2", file(Path.userHome + "/.m2/repository"))),
			publishLocalConfiguration <<= (packagedArtifacts, deliverLocal, ivyLoggingLevel) map {
				(arts, _, level) => new PublishConfiguration(None, "dotM2", arts, List[String]("sha1", "md5"), level)
			},
			// Cross-build for Scala 2.10 and 2.11.
			crossScalaVersions := Seq(
				"2.10.4",
				"2.11.7"
			),
			// The ScalaTest artifact/version depends on the Scala version built.
			libraryDependencies <+= scalaVersion(v => v match {
				case "2.10.4" => "org.scalatest" % "scalatest_2.10" % "2.0" % "test"
				case _ => "org.scalatest" %% "scalatest" % "2.2.2" % "test"
			})
		)
	)
}
| mt-sito/image-util | project/Build.scala | Scala | bsd-3-clause | 1,021 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.codegen
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.NoOp
import org.apache.spark.sql.types.DecimalType
// MutableProjection is not accessible in Java, so the generated (Janino)
// class extends this Scala-side abstract base instead.
abstract class BaseMutableProjection extends MutableProjection
/**
* Generates byte code that produces a [[MutableRow]] object that can update itself based on a new
* input [[InternalRow]] for a fixed set of [[Expression Expressions]].
* It exposes a `target` method, which is used to set the row that will be updated.
* The internal [[MutableRow]] object created internally is used only when `target` is not used.
*/
object GenerateMutableProjection extends CodeGenerator[Seq[Expression], () => MutableProjection] {
  // Canonicalize each expression so equivalent trees produce identical code.
  protected def canonicalize(in: Seq[Expression]): Seq[Expression] =
    in.map(ExpressionCanonicalizer.execute)
  // Resolve attribute references against the given input schema.
  protected def bind(in: Seq[Expression], inputSchema: Seq[Attribute]): Seq[Expression] =
    in.map(BindReferences.bindReference(_, inputSchema))
  /**
   * Generates and compiles a `MutableProjection` factory for the given
   * (already bound) expressions. Each expression's result is written into the
   * same-ordinal slot of the target mutable row; `NoOp` ordinals produce no
   * code (their slots are left untouched).
   */
  protected def create(expressions: Seq[Expression]): (() => MutableProjection) = {
    val ctx = newCodeGenContext()
    // One snippet per output column: evaluate, then either set null or store.
    val projectionCodes = expressions.zipWithIndex.map {
      case (NoOp, _) => ""
      case (e, i) =>
        val evaluationCode = e.gen(ctx)
        if (e.dataType.isInstanceOf[DecimalType]) {
          // Can't call setNullAt on DecimalType, because we need to keep the offset
          s"""
            ${evaluationCode.code}
            if (${evaluationCode.isNull}) {
              ${ctx.setColumn("mutableRow", e.dataType, i, null)};
            } else {
              ${ctx.setColumn("mutableRow", e.dataType, i, evaluationCode.value)};
            }
          """
        } else {
          s"""
            ${evaluationCode.code}
            if (${evaluationCode.isNull}) {
              mutableRow.setNullAt($i);
            } else {
              ${ctx.setColumn("mutableRow", e.dataType, i, evaluationCode.value)};
            }
          """
        }
    }
    // Split the snippets into several methods if they would exceed JVM limits.
    val allProjections = ctx.splitExpressions(ctx.INPUT_ROW, projectionCodes)
    // Java source template for the generated projection class.
    val code = s"""
      public Object generate($exprType[] expr) {
        return new SpecificMutableProjection(expr);
      }
      class SpecificMutableProjection extends ${classOf[BaseMutableProjection].getName} {
        private $exprType[] expressions;
        private $mutableRowType mutableRow;
        ${declareMutableStates(ctx)}
        ${declareAddedFunctions(ctx)}
        public SpecificMutableProjection($exprType[] expr) {
          expressions = expr;
          mutableRow = new $genericMutableRowType(${expressions.size});
          ${initMutableStates(ctx)}
        }
        public ${classOf[BaseMutableProjection].getName} target($mutableRowType row) {
          mutableRow = row;
          return this;
        }
        /* Provide immutable access to the last projected row. */
        public InternalRow currentValue() {
          return (InternalRow) mutableRow;
        }
        public Object apply(Object _i) {
          InternalRow ${ctx.INPUT_ROW} = (InternalRow) _i;
          $allProjections
          return mutableRow;
        }
      }
    """
    logDebug(s"code for ${expressions.mkString(",")}:\\n${CodeFormatter.format(code)}")
    // Compile once; the returned thunk instantiates a fresh projection per call.
    val c = compile(code)
    () => {
      c.generate(ctx.references.toArray).asInstanceOf[MutableProjection]
    }
  }
}
| pronix/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateMutableProjection.scala | Scala | apache-2.0 | 4,225 |
package software.betamax.specs2
import software.betamax.{Recorder, _}
/**
* Created by sean on 2/11/16.
*/
object RecordedInteraction {

  /**
   * Runs `block` with a Betamax recorder active on the named tape.
   * The recorder is started before the block executes and is always stopped
   * afterwards, even when the block throws.
   *
   * @param tape          name of the tape to play/record
   * @param configuration optional customization of the recorder configuration
   * @param block         the code to run while the recorder is active
   */
  def apply[T](tape: String, configuration: ConfigurationBuilder => ConfigurationBuilder = identity)(block: => T): T = {
    val rec = new Recorder(configuration(Configuration.builder()).build())
    rec.start(tape)
    try block
    finally rec.stop()
  }
}
import scala.tools.nsc.settings.MutableSettings
object Test {
  // Token pool: an empty token, a compiler flag, and a source file.
  val tokens = "" :: "-deprecation" :: "foo.scala" :: Nil
  // Every ordering of every subset of the token pool.
  val permutations0 = tokens.toSet.subsets().flatMap(_.toList.permutations).toList.distinct

  /** Exercises argument parsing with a "-cp <path>" pair interleaved everywhere. */
  def runWithCp(cp: String) = {
    val withClasspath = permutations0.flatMap(s => ("-cp CPTOKEN" :: s).permutations)
    for ((perm, idx) <- withClasspath.distinct.sortBy(_ mkString "").zipWithIndex) {
      val args = perm.flatMap(_.split("\\\\s+")).map(tok => if (tok == "CPTOKEN") cp else tok)
      // Only the source file should survive argument processing.
      val expected = args.filter(_ == "foo.scala")
      val settings = new MutableSettings(println)
      val (ok, residual) = settings.processArguments(args, processAll = true)
      assert(residual == expected, residual)
      assert(ok, args)
      println(s"$idx) $args ==> $settings")
    }
  }

  def main(args0: Array[String]): Unit = {
    runWithCp("")
    runWithCp("/tmp:/bippy")
  }
}
| scala/scala | test/files/run/settings-parse.scala | Scala | apache-2.0 | 897 |
class CaseClauseInput {
def foo {
1 match {
case x => {
/*start*/
x + 1
x + 2
/*end*/
}
case _ =>
}
}
}
/*
class CaseClauseInput {
def foo {
1 match {
case x => {
testMethodName(x)
}
case _ =>
}
}
def testMethodName(x: Int): Unit = {
x + 1
x + 2
}
}
*/ | ilinum/intellij-scala | testdata/extractMethod/input/CaseClauseInput.scala | Scala | apache-2.0 | 351 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.nscplugin.test
import org.scalajs.nscplugin.test.util._
import org.junit.Test
// scalastyle:off line.size.limit
/**
 * Negative compilation tests for `js.Dynamic.literal`: each snippet is
 * compiled through the plugin test harness and the produced errors/warnings
 * are matched verbatim (message text and caret column) against the expected
 * output blocks below.
 */
class JSDynamicLiteralTest extends DirectTest with TestHelpers {
  override def preamble: String =
    """import scala.scalajs.js.Dynamic.{ literal => lit }
    """
  // Only apply-style calls are legal on `lit`; plain member selection and
  // unknown method names must be rejected.
  @Test
  def callApplyOnly: Unit = {
    // selectDynamic (with any name)
    expr"""
    lit.helloWorld
    """ hasErrors
    """
      |newSource1.scala:3: error: value selectDynamic is not a member of object scalajs.js.Dynamic.literal
      |error after rewriting to scala.scalajs.js.Dynamic.literal.<selectDynamic: error>("helloWorld")
      |possible cause: maybe a wrong Dynamic method signature?
      |    lit.helloWorld
      |        ^
    """
    // applyDynamicNamed with wrong method name
    expr"""
    lit.helloWorld(a = "a")
    """ hasErrors
    """
      |newSource1.scala:3: error: js.Dynamic.literal does not have a method named helloWorld
      |    lit.helloWorld(a = "a")
      |        ^
    """
    // applyDynamic with wrong method name
    expr"""
    lit.helloWorld("a" -> "a")
    """ hasErrors
    """
      |newSource1.scala:3: error: js.Dynamic.literal does not have a method named helloWorld
      |    lit.helloWorld("a" -> "a")
      |        ^
    """
  }
  // Keys must be Strings and values must conform to js.Any.
  @Test
  def goodTypesOnly: Unit = {
    // Bad value type (applyDynamic)
    """
    class A {
      val x = new Object()
      def foo = lit("a" -> x)
    }
    """ hasErrors
    """
      |newSource1.scala:5: error: type mismatch;
      | found   : Object
      | required: scala.scalajs.js.Any
      |      def foo = lit("a" -> x)
      |                           ^
    """
    // Bad key type (applyDynamic)
    """
    class A {
      val x = Seq()
      def foo = lit(x -> "a")
    }
    """ hasErrors
    """
      |newSource1.scala:5: error: type mismatch;
      | found   : (Seq[Nothing], String)
      | required: (String, scala.scalajs.js.Any)
      |      def foo = lit(x -> "a")
      |                      ^
    """
    // Bad value type (applyDynamicNamed)
    """
    class A {
      val x = new Object()
      def foo = lit(a = x)
    }
    """ hasErrors
    """
      |newSource1.scala:5: error: type mismatch;
      | found   : Object
      | required: scala.scalajs.js.Any
      |error after rewriting to scala.scalajs.js.Dynamic.literal.applyDynamicNamed("apply")(scala.Tuple2("a", x))
      |possible cause: maybe a wrong Dynamic method signature?
      |      def foo = lit(a = x)
      |                        ^
    """
  }
  // applyDynamic/applyDynamicNamed exist only as compiler-rewrite targets and
  // may not be invoked explicitly.
  @Test
  def noNonLiteralMethodName: Unit = {
    // applyDynamicNamed
    """
    class A {
      val x = "string"
      def foo = lit.applyDynamicNamed(x)()
    }
    """ hasErrors
    """
      |newSource1.scala:5: error: js.Dynamic.literal.applyDynamicNamed may not be called directly
      |      def foo = lit.applyDynamicNamed(x)()
      |                                        ^
    """
    // applyDynamic
    """
    class A {
      val x = "string"
      def foo = lit.applyDynamic(x)()
    }
    """ hasErrors
    """
      |newSource1.scala:5: error: js.Dynamic.literal.applyDynamic may not be called directly
      |      def foo = lit.applyDynamic(x)()
      |                                   ^
    """
  }
  // A literal key that repeats an earlier literal key warns (one warning per
  // shadowing occurrence); non-literal keys are not checked.
  @Test
  def keyDuplicationWarning: Unit = {
    // detects duplicate named keys
    expr"""
    lit(a = "1", b = "2", a = "3")
    """ hasWarns
    """
      |newSource1.scala:3: warning: Duplicate property "a" shadows a previously defined one
      |    lit(a = "1", b = "2", a = "3")
      |                            ^
    """
    // detects duplicate named keys
    expr"""
    lit(aaa = "1", b = "2", aaa = "3")
    """ hasWarns
    """
      |newSource1.scala:3: warning: Duplicate property "aaa" shadows a previously defined one
      |    lit(aaa = "1", b = "2", aaa = "3")
      |                                ^
    """
    // detects duplicate named keys
    expr"""
    lit(aaa = "1",
        bb = "2",
        bb = "3")
    """ hasWarns
    """
      |newSource1.scala:5: warning: Duplicate property "bb" shadows a previously defined one
      |        bb = "3")
      |           ^
    """
    // detects duplicate named keys
    expr"""
    lit(aaa = "1",
        b = "2",
        aaa = "3")
    """ hasWarns
    """
      |newSource1.scala:5: warning: Duplicate property "aaa" shadows a previously defined one
      |        aaa = "3")
      |            ^
    """
    // detects triplicated named keys
    expr"""
    lit(a = "1", a = "2", a = "3")
    """ hasWarns
    """
      |newSource1.scala:3: warning: Duplicate property "a" shadows a previously defined one
      |    lit(a = "1", a = "2", a = "3")
      |                   ^
      |newSource1.scala:3: warning: Duplicate property "a" shadows a previously defined one
      |    lit(a = "1", a = "2", a = "3")
      |                            ^
    """
    // detects two different duplicates named keys
    expr"""
    lit(a = "1", b = "2", a = "3", b = "4", c = "5", c = "6", c = "7")
    """ hasWarns
    """
      |newSource1.scala:3: warning: Duplicate property "a" shadows a previously defined one
      |    lit(a = "1", b = "2", a = "3", b = "4", c = "5", c = "6", c = "7")
      |                            ^
      |newSource1.scala:3: warning: Duplicate property "b" shadows a previously defined one
      |    lit(a = "1", b = "2", a = "3", b = "4", c = "5", c = "6", c = "7")
      |                                     ^
      |newSource1.scala:3: warning: Duplicate property "c" shadows a previously defined one
      |    lit(a = "1", b = "2", a = "3", b = "4", c = "5", c = "6", c = "7")
      |                                                       ^
      |newSource1.scala:3: warning: Duplicate property "c" shadows a previously defined one
      |    lit(a = "1", b = "2", a = "3", b = "4", c = "5", c = "6", c = "7")
      |                                                                ^
    """
    // detects duplicate keys when represented with arrows
    expr"""
    lit("a" -> "1", "b" -> "2", "a" -> "3")
    """ hasWarns
    """
      |newSource1.scala:3: warning: Duplicate property "a" shadows a previously defined one
      |    lit("a" -> "1", "b" -> "2", "a" -> "3")
      |                                    ^
    """
    // detects duplicate keys when represented with tuples
    expr"""
    lit(("a", "1"), ("b", "2"), ("a", "3"))
    """ hasWarns
    """
      |newSource1.scala:3: warning: Duplicate property "a" shadows a previously defined one
      |    lit(("a", "1"), ("b", "2"), ("a", "3"))
      |                                 ^
    """
    // detects duplicate keys when represented with mixed tuples and arrows
    expr"""
    lit("a" -> "1", ("b", "2"), ("a", "3"))
    """ hasWarns
    """
      |newSource1.scala:3: warning: Duplicate property "a" shadows a previously defined one
      |    lit("a" -> "1", ("b", "2"), ("a", "3"))
      |                                 ^
    """
    // should not warn if the key is not literal
    expr"""
    val a = "x"
    lit("a" -> "1", a -> "2", a -> "3")
    """.hasNoWarns
    // should not warn if the key/value pairs are not literal
    """
    class A {
      val tup = "x" -> lit()
      def foo = lit(tup, tup)
    }
    """.hasNoWarns
    // should warn only for the literal keys when in
    // the presence of non literal keys
    """
    class A {
      val b = "b"
      val tup = b -> lit()
      lit("a" -> "2", tup, ("a", "3"), b -> "5", tup, b -> "6")
    }
    """ hasWarns
    """
      |newSource1.scala:6: warning: Duplicate property "a" shadows a previously defined one
      |      lit("a" -> "2", tup, ("a", "3"), b -> "5", tup, b -> "6")
      |                                ^
    """
  }
}
| nicolasstucki/scala-js | compiler/src/test/scala/org/scalajs/nscplugin/test/JSDynamicLiteralTest.scala | Scala | apache-2.0 | 8,057 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.filter.expression
import java.util.Collections
import org.opengis.filter.expression.PropertyName
import org.opengis.filter.{Filter, FilterVisitor, Or}
import scala.collection.immutable.HashSet
/**
 * OR filter backed by a hash set: many OR'd equality checks against a single
 * property are answered with one set-membership lookup instead of evaluating
 * each child filter in turn.
 *
 * @param property property name being compared
 * @param values the set of values the property may equal
 */
class OrHashEquality(property: PropertyName, values: HashSet[AnyRef]) extends Or {

  import org.locationtech.geomesa.filter.factory.FastFilterFactory.factory

  import scala.collection.JavaConverters._

  // Materialized lazily: one equality child filter per value.
  private lazy val children: Set[Filter] = values.map(v => factory.equals(property, factory.literal(v)))

  override def getChildren: java.util.List[Filter] = children.toList.asJava

  override def evaluate(obj: AnyRef): Boolean = values.contains(property.evaluate(obj))

  override def accept(visitor: FilterVisitor, extraData: AnyRef): AnyRef = visitor.visit(this, extraData)

  override def toString: String = children.mkString("[", " OR ", "]")

  override def equals(obj: Any): Boolean = obj match {
    case o: Or => children == o.getChildren.asScala.toSet
    case _     => false
  }

  // note: this may break the equals/hashCode contract with other OR implementations
  override def hashCode(): Int = children.hashCode()
}
object OrHashEquality {

  /**
   * Variant of [[OrHashEquality]] for list-valued properties: matches when any
   * element of the evaluated list is contained in the value set. A null list
   * never matches.
   */
  class OrHashListEquality(property: PropertyName, values: HashSet[AnyRef])
      extends OrHashEquality(property, values) {

    override def evaluate(obj: AnyRef): Boolean = {
      val list = property.evaluate(obj).asInstanceOf[java.util.List[AnyRef]]
      if (list == null) { false } else {
        var found = false
        val iter = list.iterator()
        while (!found && iter.hasNext) {
          found = values.contains(iter.next)
        }
        found
      }
    }
  }
}
import test._
// Specs2
import org.specs2.mutable.Specification
/**
 * Round-trip serialization tests (write, then read back) for generated Avro
 * record classes whose fields are user-defined types: nested records, arrays
 * and options of records, empty records, and records/enums imported from
 * other schema files and namespaces.
 *
 * Note: several example descriptions were copy-paste leftovers that did not
 * describe the record under test (e.g. "an `Float` field" for a nested-record
 * fixture); they have been renamed to match the actual fixture shapes.
 */
class SpecificUserDefinedTypesSpec extends Specification {
  "A case class with another record as a field" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest58(AvroTypeProviderTest00(1))
      val record2 = AvroTypeProviderTest58(AvroTypeProviderTest00(2))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
  "A case class with a doubly-nested record field" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest59(AvroTypeProviderTest58(AvroTypeProviderTest00(1)))
      val record2 = AvroTypeProviderTest59(AvroTypeProviderTest58(AvroTypeProviderTest00(2)))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
  "A case class with two record fields, one of them nested" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest60(AvroTypeProviderTest00(1), AvroTypeProviderTest58(AvroTypeProviderTest00(2)))
      val record2 = AvroTypeProviderTest60(AvroTypeProviderTest00(3), AvroTypeProviderTest58(AvroTypeProviderTest00(4)))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
  "A case class with a field that is list of a user-defined type" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest61(Array(AvroTypeProviderTest00(1), AvroTypeProviderTest00(2)))
      val record2 = AvroTypeProviderTest61(Array(AvroTypeProviderTest00(3), AvroTypeProviderTest00(4)))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
  "A case class with a field that is list of a nested user-defined type" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest62(Array(AvroTypeProviderTest58(AvroTypeProviderTest00(1)), AvroTypeProviderTest58(AvroTypeProviderTest00(2))))
      val record2 = AvroTypeProviderTest62(Array(AvroTypeProviderTest58(AvroTypeProviderTest00(3)), AvroTypeProviderTest58(AvroTypeProviderTest00(4))))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
  /* //TODO make readable file for this class - not very urgent since this field type is tested in other contexts also
  "A case class with a field that is list of a nested user-defined type in the second position" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest63(Array(AvroTypeProviderTest00(1), AvroTypeProviderTest00(2)), Array(AvroTypeProviderTest60(AvroTypeProviderTest00(3), AvroTypeProviderTest58(AvroTypeProviderTest00(2)))))
      val record2 = AvroTypeProviderTest63(Array(AvroTypeProviderTest00(3), AvroTypeProviderTest00(2)), Array(AvroTypeProviderTest60(AvroTypeProviderTest00(3), AvroTypeProviderTest58(AvroTypeProviderTest00(2)))))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
  */
  "A case class with an optional record field" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest64(Some(AvroTypeProviderTest00(1)))
      val record2 = AvroTypeProviderTest64(Some(AvroTypeProviderTest00(2)))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
  "A case class with an empty optional field" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest65(None)
      val record2 = AvroTypeProviderTest65(None)
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
  "A case class with an optional nested record field" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest66(Some(AvroTypeProviderTest58(AvroTypeProviderTest00(1))))
      val record2 = AvroTypeProviderTest66(Some(AvroTypeProviderTest58(AvroTypeProviderTest00(2))))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
  "A case class with two optional record fields" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest67(Some(AvroTypeProviderTest00(1)), Some(AvroTypeProviderTest60(AvroTypeProviderTest00(4), AvroTypeProviderTest58(AvroTypeProviderTest00(1)))))
      val record2 = AvroTypeProviderTest67(Some(AvroTypeProviderTest00(7)), Some(AvroTypeProviderTest60(AvroTypeProviderTest00(8), AvroTypeProviderTest58(AvroTypeProviderTest00(7)))))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
  "A case class with optional arrays of optional records" should {
    "serialize and deserialize correctly" in {
      val record1 = AvroTypeProviderTest68(Some(Array(Some(AvroTypeProviderTest00(1)), None)), Array(None, Some(Array(AvroTypeProviderTest01(1F), AvroTypeProviderTest01(2F)))))
      val record2 = AvroTypeProviderTest68(Some(Array(Some(AvroTypeProviderTest00(3)), None)), Array(None, Some(Array(AvroTypeProviderTest01(3F), AvroTypeProviderTest01(4F)))))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
  "A case object that represents an empty record" should {
    "serialize and deserialize correctly" in {
      val record1 = Reset()
      val records = List(record1)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
  "A case class with fields that are records imported from avdl of a different namespace" should {
    "serialize and deserialize correctly" in {
      val record1 = DependentRecord(other.ns.ExternalDependency(1), 2)
      val record2 = DependentRecord(other.ns.ExternalDependency(3), 4)
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
  "A case class with fields that are imported enums from avsc" should {
    "serialize and deserialize correctly" in {
      val record1 = DependentRecord2("SPADES", "John")
      val record2 = DependentRecord2("HEARTS", "Sandy")
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
  "A case class with fields that are imported records from avdl in the same namespace" should {
    "serialize and deserialize correctly" in {
      val record1 = DependentRecord3(Embedded(1), true)
      val record2 = DependentRecord3(Embedded(2), false)
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
  "A case class with a field wrapping records from multiple namespaces" should {
    "serialize and deserialize correctly" in {
      val record1 = DependentRecord4(ComplexExternalDependency(model.v2.NestedRecord(Option(model.UnionRecord("hurrah")))))
      val record2 = DependentRecord4(ComplexExternalDependency(model.v2.NestedRecord(None)))
      val records = List(record1, record2)
      SpecificTestUtil.verifyWriteAndRead(records)
    }
  }
}
| julianpeeters/sbt-avrohugger | src/sbt-test/avrohugger/SpecificStringEnumSerializationTests/src/test/scala/specific/SpecificUserDefinedTypesSpec.scala | Scala | apache-2.0 | 7,336 |
package definiti.scalamodel.builder.typeVerification
import definiti.common.ast._
import definiti.scalamodel.ScalaAST
import definiti.scalamodel.builder.ScalaModelBuilder
import definiti.scalamodel.utils.{ListUtils, Memoizer}
/**
 * Mixed into [[ScalaModelBuilder]] to answer, for any class definition, which
 * dependent type verifications apply to it and which extra parameters they
 * need. A defined type inherits dependent verifications transitively from the
 * types of its attributes; an alias inherits from its aliased type; both add
 * their own directly declared `DependentTypeVerification`s.
 *
 * NOTE(review): the recursive walks are memoized per type full name but have
 * no cycle guard; presumably recursive type definitions are rejected upstream
 * -- confirm.
 */
trait TypeVerificationInformation {
  self: ScalaModelBuilder =>
  // Cache: type full name -> dependent verification names reachable from it.
  private val extractDependentTypeVerificationNamesMemoizer = new Memoizer[Seq[String]]
  /** Names of all dependent type verifications applying to `classDefinition` (de-duplicated). */
  def extractDependentTypeVerificationNames(classDefinition: ClassDefinition): Seq[String] = {
    extractDependentTypeVerificationNamesMemoizer.value(classDefinition.fullName) {
      classDefinition match {
        case definedType: DefinedType =>
          // Verifications contributed transitively by the attributes' types...
          val attributeVerificationNames = definedType.attributes
            .map(_.typeDeclaration)
            .flatMap(extractTypeNames)
            .distinct
            .flatMap(library.typesMap.get)
            .flatMap(extractDependentTypeVerificationNames)
          // ...plus those declared directly on the type itself.
          val directVerificationNames = definedType.verifications.collect {
            case typeVerification: DependentTypeVerification => typeVerification.name
          }
          (attributeVerificationNames ++ directVerificationNames).distinct
        case aliasType: AliasType =>
          // Verifications inherited from the aliased type...
          val aliasVerificationNames = library.typesMap
            .get(aliasType.alias.typeName)
            .map(extractDependentTypeVerificationNames)
            .getOrElse(Seq.empty)
          // ...plus those declared directly on the alias.
          val directVerificationNames = aliasType.verifications.collect {
            case typeVerification: DependentTypeVerification => typeVerification.name
          }
          (aliasVerificationNames ++ directVerificationNames).distinct
        case _ => Seq.empty
      }
    }
  }
  // A type declaration's own name plus the names of all its generic arguments.
  private def extractTypeNames(typeDeclaration: TypeDeclaration): Seq[String] = {
    typeDeclaration.typeName +: typeDeclaration.genericTypes.flatMap(extractTypeNames)
  }
  // Cache: "<type fullName>.<verification name>" -> applicability.
  private val hasDependentTypeVerificationMemoizer = new Memoizer[Boolean]
  /** True when the named dependent verification applies to `classDefinition`. */
  def hasDependentTypeVerification(classDefinition: ClassDefinition, name: String): Boolean = {
    hasDependentTypeVerificationMemoizer.value(s"${classDefinition.fullName}.${name}") {
      extractDependentTypeVerificationNames(classDefinition).contains(name)
    }
  }
  // Cache: "<type fullName>.<verification name>" -> extra parameters.
  private val extractDependentTypeParametersMemoizer = new Memoizer[Seq[ScalaAST.Parameter]]
  /**
   * Extra parameters (beyond the verified value itself, hence `.tail`) of the
   * named dependent verification, collected across the type graph and
   * de-duplicated by parameter name.
   */
  def extractDependentTypeParameters(classDefinition: ClassDefinition, name: String): Seq[ScalaAST.Parameter] = {
    extractDependentTypeParametersMemoizer.value(s"${classDefinition.fullName}.${name}") {
      classDefinition match {
        case definedType: DefinedType =>
          // Parameters contributed transitively by the attributes' types...
          val attributeVerificationNames = definedType.attributes
            .map(_.typeDeclaration)
            .flatMap(extractTypeNames)
            .distinct
            .flatMap(library.typesMap.get)
            .flatMap(extractDependentTypeParameters(_, name))
          // ...plus those of matching verifications declared on the type.
          val directVerificationNames = definedType.verifications
            .collect { case typeVerification: DependentTypeVerification if typeVerification.name == name =>
              typeVerification.function.parameters.tail.map(generateParameter)
            }
            .flatten
          ListUtils.distinctBy(attributeVerificationNames ++ directVerificationNames, (x: ScalaAST.Parameter) => x.name)
        case aliasType: AliasType =>
          // Parameters inherited from the aliased type...
          val aliasVerificationNames = library.typesMap
            .get(aliasType.alias.typeName)
            .map(extractDependentTypeParameters(_, name))
            .getOrElse(Seq.empty)
          // ...plus those of matching verifications declared on the alias.
          val directVerificationNames = aliasType.verifications
            .collect { case typeVerification: DependentTypeVerification if typeVerification.name == name =>
              typeVerification.function.parameters.tail.map(generateParameter)
            }
            .flatten
          ListUtils.distinctBy(aliasVerificationNames ++ directVerificationNames, (x: ScalaAST.Parameter) => x.name)
        case _ => Seq.empty
      }
    }
  }
}
| definiti/definiti-scala-model | src/main/scala/definiti/scalamodel/builder/typeVerification/TypeVerificationInformation.scala | Scala | mit | 3,916 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and returns a sample of Scala code snippets matching specific criteria, giving a quick overview of the dataset's contents without deeper analysis.