code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package db
import javax.inject.{Inject, Singleton}
import io.flow.common.v0.models.UserReference
import io.flow.util.IdGenerator
import io.flow.postgresql.{Authorization, OrderBy, Query}
import anorm._
import io.flow.postgresql.play.db.DbHelpers
import play.api.db._
/** Input payload for creating a dependency link between an application and
 *  one of its dependencies (both referenced by id).
 */
case class DependencyForm(
applicationId: String,
dependencyId: String
)
/** A persisted row of the `dependencies` table: its primary key plus the
 *  application/dependency pair it links.
 */
case class InternalDependency(
id: String,
applicationId: String,
dependencyId: String
)
/**
 * Data-access layer for rows of the `dependencies` table, each linking an
 * application to one of its dependencies.
 *
 * Reads are filtered by the caller's [[Authorization]] through `queryAuth`,
 * inherited from `lib.PublicAuthorizedQuery` (not visible in this file --
 * NOTE(review): confirm the exact auth semantics against that trait).
 */
@Singleton
class DependenciesDao @Inject() (
db: Database
) extends lib.PublicAuthorizedQuery {
// Shared helper for deletes against the "dependencies" table.
private val dbHelpers = DbHelpers(db, "dependencies")
// Base select used by all finders; columns must stay in sync with `parser` below.
private[this] val BaseQuery = Query(s"""
select dependencies.id,
dependencies.application_id,
dependencies.dependency_id
from dependencies
""")
// Named-parameter insert; placeholders are bound in the private `create` below.
private[this] val InsertQuery = """
insert into dependencies
(id, application_id, dependency_id, updated_by_user_id)
values
({id}, {application_id}, {dependency_id}, {updated_by_user_id})
"""
// Generated primary keys carry the "dep" prefix.
private[this] val idGenerator = IdGenerator("dep")
/**
 * Inserts a new dependency row and reads it back.
 *
 * @param createdBy user recorded in `updated_by_user_id`
 * @param form the application/dependency pair to persist
 * @return the persisted row
 * @throws RuntimeException (via sys.error) if the row cannot be read back
 */
def create(createdBy: UserReference, form: DependencyForm): InternalDependency = {
val id = db.withConnection { implicit c =>
create(c, createdBy, form)
}
// Authorization.All: the row was just written by us, so no auth filtering.
findById(Authorization.All, id).getOrElse {
sys.error("Failed to create dependency")
}
}
/** Inserts a row on an existing connection and returns the generated id. */
private[db] def create(
implicit c: java.sql.Connection,
createdBy: UserReference,
form: DependencyForm
): String = {
val id = idGenerator.randomId()
SQL(InsertQuery).on(
Symbol("id") -> id,
Symbol("application_id") -> form.applicationId,
Symbol("dependency_id") -> form.dependencyId,
Symbol("updated_by_user_id") -> createdBy.id
).execute()
id
}
/**
 * Deletes the link between an application and a dependency, if present.
 * NOTE(review): `limit = 1` removes at most one matching row per call --
 * confirm the table guarantees uniqueness of the (application, dependency) pair.
 */
private[db] def deleteApplicationDependency(
implicit c: java.sql.Connection,
user: UserReference,
applicationId: String,
dependencyId: String
): Unit = {
findAll(
Authorization.All,
applications = Some(Seq(applicationId)),
dependencies = Some(Seq(dependencyId)),
limit = 1
).foreach { dep =>
delete(c, user, dep)
}
}
/** Deletes one dependency row, attributing the action to `deletedBy`. */
def delete(implicit c: java.sql.Connection, deletedBy: UserReference, dependency: InternalDependency): Unit = {
dbHelpers.delete(c, deletedBy, dependency.id)
}
/** Looks up a single row by primary key, subject to `auth`. */
def findById(auth: Authorization, id: String): Option[InternalDependency] = {
findAll(auth, ids = Some(Seq(id)), limit = 1).headOption
}
/**
 * Finds rows matching all supplied filters (each filter is optional).
 * Opens its own connection; use `findAllWithConnection` when inside a transaction.
 */
private[db] def findAll(
auth: Authorization,
ids: Option[Seq[String]] = None,
applications: Option[Seq[String]] = None,
dependencies: Option[Seq[String]] = None,
limit: Long = 25,
offset: Long = 0,
orderBy: OrderBy = OrderBy("dependencies.application_id, dependencies.dependency_id")
): Seq[InternalDependency] = {
db.withConnection { implicit c =>
findAllWithConnection(c, auth, ids, applications, dependencies, limit, offset, orderBy)
}
}
/** Same as `findAll` but runs on a caller-supplied connection. */
private[db] def findAllWithConnection(
implicit c: java.sql.Connection,
auth: Authorization,
ids: Option[Seq[String]] = None,
applications: Option[Seq[String]] = None,
dependencies: Option[Seq[String]] = None,
limit: Long = 25,
offset: Long = 0,
orderBy: OrderBy = OrderBy("dependencies.application_id, dependencies.dependency_id")
): Seq[InternalDependency] = {
dbHelpers.authorizedQuery(BaseQuery, queryAuth(auth)).
optionalIn("dependencies.id", ids).
optionalIn("dependencies.application_id", applications).
optionalIn("dependencies.dependency_id", dependencies).
limit(limit).
offset(offset).
orderBy(orderBy.sql).
as(
parser.*
)
}
// Maps one result-set row to an InternalDependency; column names must match BaseQuery.
private[this] val parser: RowParser[InternalDependency] = {
SqlParser.str("id") ~
SqlParser.str("application_id") ~
SqlParser.str("dependency_id") map {
case id ~ applicationId ~ dependencyId => {
InternalDependency(
id = id,
applicationId = applicationId,
dependencyId = dependencyId
)
}
}
}
}
| flowcommerce/registry | api/app/db/DependenciesDao.scala | Scala | mit | 4,035 |
package com.seanshubin.builder.domain
import java.nio.file.Path
import com.seanshubin.uptodate.console.ConfigurationDependencyInjection
import com.seanshubin.uptodate.logic.{Configuration, GroupArtifactVersion, SummaryReport}
/**
 * Runs the "up-to-date" dependency upgrader over a single project checkout
 * under `baseDirectory`, writing its report under `baseLogDirectory`.
 */
class DependencyUpgraderImpl(baseDirectory: Path,
baseLogDirectory: Path) extends DependencyUpgrader {
/**
 * Upgrades the Maven dependencies of `projectName` in place and returns the
 * upgrader's summary report.
 */
override def upgradeDependencies(projectName: String): SummaryReport = {
val path = baseDirectory.resolve(projectName)
// Per-project report directory plus a version cache shared by all projects.
val logDir = baseLogDirectory.resolve("command").resolve(projectName).resolve("up-to-date")
val upToDateCacheDir = baseLogDirectory.resolve("up-to-date-cache")
// Pinned exclusion: never upgrade to this jackson release.
val buggyJackson = GroupArtifactVersion(
group = "com.fasterxml.jackson.module",
artifact = "jackson-module-scala_2.12",
version = "2.9.0"
)
val upToDateConfiguration = Configuration(
pomFileName = "pom.xml",
directoryNamesToSkip = Set("target"),
directoriesToSearch = Seq(path),
// NOTE(review): plain-http repository URL; metadata fetched over http can be
// tampered with in transit -- prefer https if the host supports it.
mavenRepositories = Seq("http://thoughtfulcraftsmanship.com/nexus/content/groups/public"),
automaticallyUpgrade = true,
doNotUpgradeTo = Set(buggyJackson),
doNotUpgradeFrom = Set(),
reportDirectory = logDir,
cacheDirectory = upToDateCacheDir,
cacheExpire = "5 days"
)
val summary = ConfigurationDependencyInjection(upToDateConfiguration).flow.run()
summary
}
}
| SeanShubin/builder | domain/src/main/scala/com/seanshubin/builder/domain/DependencyUpgraderImpl.scala | Scala | unlicense | 1,399 |
package org.fayalite.util.dsl
import scala.collection.TraversableLike
// EXPERIMENTAL USE WITH CAUTION
// YOU'VE BEEN WARNED // AUTOGENERATED
trait MethodShorteners {
/*
implicit class SeqFix[T](s: Seq[T]) {
def m[Q](f: T => Q) = s map f
}
*/
/**
 * Terse combinators over any Traversable.
 * NOTE(review): experimental/auto-generated shorthand per the file header;
 * comments below describe only what the visible code does.
 */
implicit class TravExt[K](kv: Traversable[K]) {
// Groups by f but returns only the groups, discarding the keys.
def gb[B](f: K => B) = kv.groupBy{f}.map{
case (a, bs) => bs
}
// Lazy (matching, non-matching) split; each side re-traverses kv when used.
def spk(f: K => Boolean) = {
kv.withFilter{q => f(q)} ->
kv.withFilter{q => !f(q)}
}
// Group by f, preserving each group's original element order via the index.
def zig [B](f: K => B) = zi.groupBy{case (k, i) => f(k)}.map{
case (q,w) => q -> w.sortBy{_._2}.map{_._1}
}
// Elements paired with their position.
def zi = kv.toSeq.zipWithIndex
}
/** Terse combinators over a Traversable of key/value pairs. */
implicit class ShortTravOp[K,V](kv: Traversable[(K,V)]) {
// Filter by key.
def fk[B](f: (K => Boolean)) = kv.filter{q => f(q._1)}
// Map keys, keep values.
def mk[B](f: (K => B)) = kv.map{
case (x,y) => f(x) -> y
}
// Map values, keep keys.
def mv[B](f: (V => B)) = kv.map{
case (x,y) => x -> f(y)
}
// Group values by key (key -> values, pairs unwrapped).
def gbk[B] = {
kv.groupBy(_._1).map{case (x,y) => x -> y.map{_._2}}
}
// Lazy (matching, non-matching) split on the key.
def spk(f: K => Boolean) = {
kv.withFilter{q => f(q._1)} ->
kv.withFilter{q => !f(q._1)}
}
// All keys / all values (relies on TLAbbrv's `m` alias for map).
def m1 = kv m{_._1}
def m2 = kv m {_._2}
}
/** Terse accessors/mappers for a single pair. */
implicit class MapShortTuple[K,V](kv: (K,V)) {
def m1 = kv._1
def m2 = kv._2
// Map the first component only.
def mk[B](f: (K => B)) = kv match {
case (x,y) => f(x) -> y
}
// Map the second component only.
def mv[B](f: (V => B)) = kv match {
case (x,y) => x -> f(y)
}
}
/**
 * One- and two-letter aliases for the standard TraversableLike API; each alias
 * forwards directly to the like-named library method.
 * NOTE(review): `def t(n: Int)` reuses the name of the wrapped value `t`, and
 * `sl`/`v` are overloaded across unrelated operations -- confirm these
 * overloads actually resolve and compile as intended.
 */
implicit class TLAbbrv[+A, +Repr](t: TraversableLike[A, Repr]) {
// Traversal / predicates.
def fe[U](f: scala.Function1[A, U]): scala.Unit = t.foreach(f)
def ie: scala.Boolean = t isEmpty
// Core transformations.
def m[ B, That](f: scala.Function1[A, B])(implicit bf: scala.collection.generic.CanBuildFrom[Repr, B, That]): That = t map f
def fm[B, That](f: scala.Function1[A, scala.collection.GenTraversableOnce[B]])(implicit bf: scala.collection.generic.CanBuildFrom[Repr, B, That]): That = t flatMap f
def ft(p: scala.Function1[A, scala.Boolean]): Repr = t filter p
def fn(p: scala.Function1[A, scala.Boolean]): Repr = t filterNot p
def c[B, That](pf: scala.PartialFunction[A, B])(implicit bf: scala.collection.generic.CanBuildFrom[Repr, B, That]): That = t collect pf
def p(p: scala.Function1[A, scala.Boolean]): scala.Tuple2[Repr, Repr] = t partition p
def g[K](f: scala.Function1[A, K]): scala.collection.immutable.Map[K, Repr] = t groupBy f
def fl(p: scala.Function1[A, scala.Boolean]): scala.Boolean = t.forall(p)
def e(p: scala.Function1[A, scala.Boolean]): scala.Boolean = t exists p
def fi(p: scala.Function1[A, scala.Boolean]): scala.Option[A] = t find p
// Scans.
def s[B >: A, That](z: B)(op: scala.Function2[B, B, B])(implicit cbf: scala.collection.generic.CanBuildFrom[Repr, B, That]): That = t.scan(z)(op)
def sl[B, That](z: B)(op: scala.Function2[B, A, B])(implicit bf: scala.collection.generic.CanBuildFrom[Repr, B, That]): That = t.scanLeft(z)(op)
def sr[B, That](z: B)(op: scala.Function2[A, B, B])(implicit bf: scala.collection.generic.CanBuildFrom[Repr, B, That]): That = t.scanRight(z)(op)
// Element / slice access (head, last, init are partial on empty collections).
def h: A = t head
def ho: scala.Option[A] = t headOption
def ta: Repr = t tail
def l: A = t last
def lo: scala.Option[A] = t lastOption
def i: Repr = t init
def t(n: scala.Int): Repr = t take n
def d(n: scala.Int): Repr = t drop n
def sl(from: scala.Int, until: scala.Int): Repr = t.slice(from, until)
def tw(p: scala.Function1[A, scala.Boolean]): Repr = t takeWhile p
def dw(p: scala.Function1[A, scala.Boolean]): Repr = t dropWhile p
// Unused type alias (dead code kept as-is).
type f1[a,b] = scala.Function1[a,b]
def sp(p: scala.Function1[A, scala.Boolean]): scala.Tuple2[Repr, Repr] = t span p
def spa(n: scala.Int): scala.Tuple2[Repr, Repr] = t splitAt n
def tas: scala.collection.Iterator[Repr] = t tails
def is: scala.collection.Iterator[Repr] = t inits
// Unused type alias (dead code kept as-is).
type ite[q] = scala.collection.Iterator[q]
def ca[B >: A](xs: scala.Array[B], start: scala.Int, len: scala.Int): scala.Unit = t copyToArray(xs, start, len)
// Conversions and views.
def tt: scala.collection.Traversable[A] = t toTraversable
def ti: scala.collection.Iterator[A] = t toIterator
def ts: scala.Stream[A] = t toStream
def str: scala.Predef.String = t toString()
def strp: scala.Predef.String = t stringPrefix
def v: scala.AnyRef with scala.collection.TraversableView[A, Repr] = t view
def v(from: scala.Int, until: scala.Int): scala.collection.TraversableView[A, Repr] = t.view(from, until)
def w(p: scala.Function1[A, scala.Boolean]): scala.collection.generic.FilterMonadic[A, Repr] = t withFilter (p)
}
} | ryleg/fayalite | common/src/main/scala/org/fayalite/util/dsl/MethodShorteners.scala | Scala | mit | 4,583 |
package katsconf
object ZipCodeService {
  sealed trait Failure
  case object IllegalZipCode extends Failure

  /** Returns the set of city names served by a five-digit zip code.
   *  One synthetic city name is produced per distinct digit of the code;
   *  any input that is not exactly five ASCII digits yields IllegalZipCode.
   */
  def citiesInZipCode(zipCode: String): Either[Failure, Set[String]] =
    Either.cond(
      zipCode matches "[0-9]{5}",
      zipCode.map(digit => s"City $digit").toSet,
      IllegalZipCode
    )
}
| larsrh/katsconf2017 | src/main/scala/ZipCodeService.scala | Scala | mit | 306 |
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
package com.cloudant.clouseau
import com.yammer.metrics.scala._
import org.apache.log4j.Logger
import org.apache.lucene.analysis.tokenattributes._
import scala.collection.immutable.List
import scalang._
import org.apache.lucene.analysis.Analyzer
/**
 * Scalang service that tokenizes text with a configured Lucene analyzer on
 * behalf of Erlang callers.
 */
class AnalyzerService(ctx: ServiceContext[ConfigurationArgs]) extends Service(ctx) with Instrumented {
val logger = Logger.getLogger("clouseau.analyzer")
/**
 * Handles the ('analyze, analyzerConfig, text) call: replies ('ok, tokens)
 * when the analyzer can be built, ('error, 'no_such_analyzer) otherwise.
 * Any other message shape is unmatched and raises a MatchError.
 */
override def handleCall(tag: (Pid, Reference), msg: Any): Any = msg match {
case ('analyze, analyzerConfig: Any, text: String) =>
SupportedAnalyzers.createAnalyzer(analyzerConfig) match {
case Some(analyzer) =>
('ok, tokenize(text, analyzer))
case None =>
('error, 'no_such_analyzer)
}
}
/**
 * Runs `text` through `analyzer` and returns the produced terms in input order.
 * NOTE(review): if incrementToken throws, the stream is never end()ed/closed;
 * consider a try/finally if analyzers hold resources.
 */
def tokenize(text: String, analyzer: Analyzer): List[String] = {
// Tokens are prepended (O(1)) and reversed at the end to restore order.
var result: List[String] = List()
val tokenStream = analyzer.tokenStream("default", text)
tokenStream.reset()
while (tokenStream.incrementToken) {
val term = tokenStream.getAttribute(classOf[CharTermAttribute])
result = term.toString +: result
}
tokenStream.end()
tokenStream.close()
result.reverse
}
}
| supriyantomaftuh/network | src/main/scala/com/cloudant/clouseau/AnalyzerService.scala | Scala | apache-2.0 | 1,720 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package testHelpers
import org.scalatest.Inspectors
import org.scalatest.compatible.Assertion
import org.scalatest.matchers.should.Matchers
import play.api.data.{Form, FormError}
/**
 * ScalaTest helpers for asserting on Play `Form` binding results.
 * Import `FormInspectors._` to get the implicit ops on any Form.
 */
object FormInspectors extends Matchers with Inspectors {
// Projects a FormError to its (field key, message key) pair for comparison.
val toErrorSeq = (fe: FormError) => (fe.key, fe.message)
/** Assertion syntax added to any bound Form. */
implicit class FormErrorOps[T](form: Form[T]) {
/** Asserts the form failed with exactly these (key, message) errors, in order. */
def shouldHaveErrors(es: Seq[(String, String)]): Assertion = {
form.errors shouldBe 'nonEmpty
form.errors.size shouldBe es.size
form.errors.map(toErrorSeq) shouldBe es
}
/** Asserts the form's global errors carry exactly these messages (any order). */
def shouldHaveGlobalErrors(es: String*): Assertion = {
form.globalErrors shouldBe 'nonEmpty
form.globalErrors.size shouldBe es.size
form.globalErrors.map(toErrorSeq).map(_._2) should contain only (es: _*)
}
/** Asserts the form bound successfully to exactly `value`. */
def shouldContainValue(value: T): Assertion = {
form.errors shouldBe 'empty
form.value shouldBe Some(value)
}
}
}
| hmrc/vat-registration-frontend | test/testHelpers/FormInspectors.scala | Scala | apache-2.0 | 1,524 |
package json.bench
import java.util.concurrent.TimeUnit
import json.bench.model.Data
import org.openjdk.jmh.annotations._
/**
 * JMH benchmark measuring JSON serialization throughput of several writer
 * implementations over generated payloads of increasing size.
 */
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
@Warmup(iterations = 4, time = 5, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 4, time = 5, timeUnit = TimeUnit.SECONDS)
@Fork(value = 1, jvmArgsAppend = Array("-Xms1G", "-Xmx1G"))
@State(Scope.Benchmark)
class JmhWriterBench {
// Approximate serialized payload size; mapped to a sample count in setup().
@Param(Array(
"128b",
"1kb",
"128kb",
"1mb",
"32mb"
))
var jsonSize: String = _
// Fixed seed keeps the generated data identical across runs and writers.
val seed = 10000
var data: Seq[Data] = _
// Generates the payload once per trial so measurement covers writing only.
@Setup(Level.Trial)
def setup(): Unit = {
data = jsonSize match {
case "128b" => Data.dataSamples(1, seed)
case "1kb" => Data.dataSamples(8, seed)
case "128kb" => Data.dataSamples(128 * 8, seed)
case "1mb" => Data.dataSamples(8 * 128 * 8, seed)
case "32mb" => Data.dataSamples(32 * 8 * 128 * 8, seed)
}
}
// Writer implementation under test, resolved by name from DataWriter.instances.
@Param(Array(
"tethys-jackson",
"pure-jackson",
"circe",
"java.lang.StringBuilder",
"scala.StringBuilder",
"json4s-jackson",
"json4s-native",
"play-json",
"spray-json"
))
var processorName: String = _
/** The measured operation: serialize the prepared data with the selected writer. */
@Benchmark
def bench: String = {
DataWriter.instances(processorName).write(data)
}
}
| tethys-json/tethys | modules/benchmarks/src/main/scala/json/bench/JmhWriterBench.scala | Scala | apache-2.0 | 1,278 |
import sbt._
import sbt.Keys._
/**
 * sbt helper tasks that assemble a distribution "stage" directory from a
 * declarative list of [[StageAction]]s, and optionally package it as tar.gz.
 */
object StageDist {
import java.nio.file._
lazy val stageDirectory = taskKey[File]("Target directory")
lazy val stageActions = taskKey[Seq[StageAction]]("Actions to build stage")
lazy val basicStage = taskKey[File]("Build stage for basic distributive")
lazy val runStage = taskKey[File]("Build stage for docker distributive")
lazy val dockerStage = taskKey[File]("Build stage for docker distributive")
lazy val packageTar = taskKey[File]("Package stage to tar")
// Wires the tasks together; packageTar shells out to the system `tar` binary.
lazy val settings = Seq(
stageDirectory := target.value / name.value,
basicStage := stageBuildTask(basicStage).value,
dockerStage := stageBuildTask(dockerStage).value,
runStage := stageBuildTask(runStage).value,
packageTar := {
import scala.sys.process._
val dir = basicStage.value
// NOTE(review): this local `name` shadows the sbt `name` key used above.
val name = dir.getName
val out = s"$name.tar.gz"
val ps = Process(Seq("tar", "cvfz", out, name), Some(dir.getParentFile))
// NOTE(review): the exit code of `tar` is discarded; a failure is silent.
ps.!
file(out)
}
)
/** Rebuilds the stage directory for `key` from scratch by replaying its actions. */
private def stageBuildTask(key: TaskKey[File]): Def.Initialize[Task[File]] = Def.task {
val log = (streams in key).value.log
val dir = (stageDirectory in key).value
// Always start from an empty directory.
if (dir.exists())
IO.delete(dir)
if (!dir.exists)
IO.createDirectory(dir)
val actions = (stageActions in key).value
actions.foreach({
case MkDir(name) => mkDir(name, dir)
case copy: CpFile => copyToDir(copy, dir)
// NOTE(review): data.getBytes uses the platform default charset.
case Write(name, data) => IO.write(dir.asPath.resolve(name).toFile, data.getBytes)
})
log.info(s"Stage is built at $dir")
dir
}
/** Creates `name` (possibly a relative path) under `dir`. */
private def mkDir(name: String, dir: File): Unit = {
val f = dir.asPath.resolve(name).toFile
IO.createDirectory(f)
}
/** Copies a file or directory tree described by `a` into `dir`. */
private def copyToDir(a: CpFile, dir: File): Unit = {
val path = Paths.get(a.path)
// NOTE(review): `p` is relative, so this creates the parent directory
// relative to the process CWD rather than under `dir` -- it looks like it
// should be dir.asPath.resolve(p); confirm against actual usage.
Option(path.getParent).foreach(p => {
IO.createDirectory(p.toFile)
})
val filePath = dir.asPath.resolve(path)
val file = filePath.toFile
if (a.file.isDirectory) {
// Recurse into directories, copying each child in turn.
if (!file.exists())
IO.createDirectory(file)
a.file.listFiles().foreach(f => copyToDir(CpFile(f), file))
} else {
Files.copy(a.file.asPath, filePath, StandardCopyOption.REPLACE_EXISTING)
}
}
/** One declarative step of stage assembly. */
sealed trait StageAction
/** Copy `file` into the stage, optionally renamed and/or into a sub-directory. */
case class CpFile(
file: File,
renameTo: Option[String],
toDir: Option[File]
) extends StageAction {
def as(name: String): CpFile = copy(renameTo = Some(name))
def to(dir: String): CpFile = copy(toDir = Some(sbt.file(dir)))
// Destination path (relative to the stage root) for this copy.
def path: String = {
val name = renameTo.getOrElse(file.getName)
toDir.map(_.getPath + "/" + name).getOrElse(name)
}
}
object CpFile {
def apply(f: File): CpFile = CpFile(f, None, None)
def apply(s: String): CpFile = CpFile(sbt.file(s), None, None)
}
/** Write literal `data` to `name` inside the stage. */
case class Write(name: String, data: String) extends StageAction
/** Create directory `name` inside the stage. */
case class MkDir(name: String) extends StageAction
}
| Hydrospheredata/mist | project/StageDist.scala | Scala | apache-2.0 | 2,912 |
/*
* Distributed as part of Scalala, a linear algebra library.
*
* Copyright (C) 2008- Daniel Ramage
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110 USA
*/
package scalala;
package generic;
package collection;
/**
* Marker for being able to get the value of a map at a key as a double.
*
* @author dramage
*/
// Type class: evidence that a value of Coll can be read at key K as a Double.
// Both parameters are contravariant, so an instance for a general collection
// type also serves its subtypes; K is specialized to avoid boxing Int/Long keys.
trait CanGetDouble[-Coll, @specialized(Int,Long) -K] {
def apply(coll : Coll, key : K) : Double;
}
/** Stock CanGetDouble instances for primitive arrays, indexed seqs and maps. */
object CanGetDouble {
// Local shorthand for the type-class name.
type Op[Coll,K] = CanGetDouble[Coll,K];
// Primitive-array instances: plain index lookup, widened to Double implicitly.
implicit object OpArrayI extends Op[Array[Int],Int]
{ override def apply(coll : Array[Int], key : Int) = coll(key); }
implicit object OpArrayS extends Op[Array[Short],Int]
{ override def apply(coll : Array[Short], key : Int) = coll(key); }
implicit object OpArrayL extends Op[Array[Long],Int]
{ override def apply(coll : Array[Long], key : Int) = coll(key); }
implicit object OpArrayF extends Op[Array[Float],Int]
{ override def apply(coll : Array[Float], key : Int) = coll(key); }
implicit object OpArrayD extends Op[Array[Double],Int]
{ override def apply(coll : Array[Double], key : Int) = coll(key); }
// Generic IndexedSeq instance for any element type convertible to Double.
implicit def opIndexedSeq[@specialized V](implicit cv : V => Double) =
new OpIndexedSeq[V];
class OpIndexedSeq[@specialized V](implicit cv : V => Double) extends CanGetDouble[IndexedSeq[V], Int] {
def apply(coll : IndexedSeq[V], key : Int) = coll(key);
}
// Pre-built specialized instances so common cases skip the implicit derivation.
implicit object OpIndexedSeqI extends OpIndexedSeq[Int];
implicit object OpIndexedSeqC extends OpIndexedSeq[Char];
implicit object OpIndexedSeqS extends OpIndexedSeq[Short];
implicit object OpIndexedSeqL extends OpIndexedSeq[Long];
implicit object OpIndexedSeqF extends OpIndexedSeq[Float];
implicit object OpIndexedSeqD extends OpIndexedSeq[Double];
// Map instance: looks up by key (partial -- throws NoSuchElementException on miss).
implicit def opMap[K,V](implicit cv : V => Double) =
new OpMap[K,V];
class OpMap[K,V](implicit cv : V => Double) extends CanGetDouble[scala.collection.Map[K,V], K] {
def apply(coll : scala.collection.Map[K,V], key : K) = coll(key);
}
}
| scalala/Scalala | src/main/scala/scalala/generic/collection/CanGetDouble.scala | Scala | lgpl-2.1 | 2,687 |
package com.forged.data
/**
* Created by visitor15 on 12/6/15.
*/
/** Immutable portfolio entry: a title, a short description, and image
 *  references (NOTE(review): whether `images` holds URLs or file paths is not
 *  determinable from this file -- confirm against callers). */
case class PortfolioProject(title: String, description: String, images: List[String]) | Visitor15/webPresence-scala | src/main/scala/com/forged/data/PortfolioData.scala | Scala | mit | 154 |
package com.ing.baker.runtime.akka.actor
import scala.collection.immutable.List
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
import org.scalatest.matchers.should.Matchers._
/** Exercises Util.collectFuturesWithin: futures finishing inside the timeout
 *  are collected, the overrunning one is dropped. */
class UtilSpec extends AkkaTestBase("UtilSpec") {
implicit def ec: ExecutionContext = system.dispatcher
"The Util class" should {
"collect future results within specified timeout" in {
// Five futures finish in ~100ms; one deliberately overruns the 1s window.
val fastFutures = (1 to 5).map(_ => Future { Thread.sleep(100); true } )
val slowFuture = Future { Thread.sleep(5000); false }
val futures = fastFutures :+ slowFuture
val collected = Util.collectFuturesWithin(futures, 1.second, system.scheduler)
// Only the fast futures' results are expected.
// NOTE(review): sleep-based timing makes this test sensitive to slow CI hosts.
val expectedResult = List.fill(5)(true)
collected shouldBe expectedResult
}
}
}
| ing-bank/baker | core/akka-runtime/src/test/scala/com/ing/baker/runtime/akka/actor/UtilSpec.scala | Scala | mit | 785 |
package spire.optional
import spire.algebra.Trig
import spire.math.Rational
/**
 * Provides a Trig[Rational] instance. Every transcendental function converts
 * to Double and back, so results are Double-precision approximations wrapped
 * in Rational -- not exact rational values.
 */
object rationalTrig {
// NOTE(review): no explicit type ascription; an explicit `: Trig[Rational]`
// would make the implicit's type stable for callers.
implicit val trigRational = new Trig[Rational] {
// 180 degrees, used by the degree/radian conversions below.
val r180 = Rational(180)
import spire.std.double._
def acos(a: Rational): Rational = Rational(spire.math.acos(a.toDouble))
def asin(a: Rational): Rational = Rational(spire.math.asin(a.toDouble))
def atan(a: Rational): Rational = Rational(spire.math.atan(a.toDouble))
def atan2(y: Rational,x: Rational): Rational = Rational(spire.math.atan2(y.toDouble, x.toDouble))
def cos(a: Rational): Rational = Rational(spire.math.cos(a.toDouble))
def cosh(x: Rational): Rational = Rational(spire.math.cosh(x.toDouble))
// Constants are Double approximations of e and pi.
val e: Rational = Rational(spire.math.e)
def exp(a: Rational): Rational = Rational(spire.math.exp(a.toDouble))
def expm1(a: Rational): Rational = Rational(spire.math.expm1(a.toDouble))
def log(a: Rational): Rational = Rational(spire.math.log(a.toDouble))
def log1p(a: Rational): Rational = Rational(spire.math.log1p(a.toDouble))
val pi: Rational = Rational(spire.math.pi)
def sin(a: Rational): Rational = Rational(spire.math.sin(a.toDouble))
def sinh(x: Rational): Rational = Rational(spire.math.sinh(x.toDouble))
def tan(a: Rational): Rational = Rational(spire.math.tan(a.toDouble))
def tanh(x: Rational): Rational = Rational(spire.math.tanh(x.toDouble))
// Degree/radian conversions are exact rational arithmetic (modulo pi itself).
def toDegrees(a: Rational): Rational = (a * r180) / pi
def toRadians(a: Rational): Rational = (a / r180) * pi
}
}
| AlecZorab/spire | core/src/main/scala/spire/optional/rationalTrig.scala | Scala | mit | 1,510 |
package fpinscala.datastructures
sealed trait Tree[+A]
case class Leaf[A](value: A) extends Tree[A]
case class Branch[A](left: Tree[A], right: Tree[A]) extends Tree[A]

/** Recursive operations over the binary [[Tree]] ADT, each implemented twice:
 *  directly, and via the generic [[fold]] catamorphism.
 */
object Tree {

  /** Total node count: every leaf and every branch contributes one. */
  def size[A](t: Tree[A]): Int = t match {
    case Branch(l, r) => 1 + size(l) + size(r)
    case Leaf(_)      => 1
  }

  /** Largest value stored in a tree of Ints. */
  def maximum(t: Tree[Int]): Int = t match {
    case Branch(l, r) => math.max(maximum(l), maximum(r))
    case Leaf(v)      => v
  }

  /** Edges on the longest root-to-leaf path; a bare leaf has depth 0. */
  def depth[A](t: Tree[A]): Int = t match {
    case Branch(l, r) => 1 + math.max(depth(l), depth(r))
    case Leaf(_)      => 0
  }

  /** Applies `f` to every stored value, preserving the tree's shape. */
  def map[A, B](t: Tree[A])(f: A => B): Tree[B] = t match {
    case Branch(l, r) => Branch(map(l)(f), map(r)(f))
    case Leaf(v)      => Leaf(f(v))
  }

  /** Catamorphism: collapses the tree bottom-up, turning each leaf value with
   *  `leafFunc` and combining child results with `branchFunc`.
   */
  def fold[A, B](t: Tree[A])(leafFunc: A => B)(branchFunc: (B, B) => B): B = t match {
    case Branch(l, r) => branchFunc(fold(l)(leafFunc)(branchFunc), fold(r)(leafFunc)(branchFunc))
    case Leaf(v)      => leafFunc(v)
  }

  /** [[size]] expressed through [[fold]]. */
  def sizeViaFold[A](t: Tree[A]) =
    fold(t)(_ => 1)((leftCount, rightCount) => leftCount + rightCount + 1)

  /** [[depth]] expressed through [[fold]]. */
  def depthViaFold[A](t: Tree[A]): Int =
    fold(t)(_ => 0)((leftDepth, rightDepth) => math.max(leftDepth, rightDepth) + 1)

  /** [[maximum]] expressed through [[fold]]. */
  def maximumViaFold(t: Tree[Int]) =
    fold(t)(v => v)((a, b) => math.max(a, b))

  /** [[map]] expressed through [[fold]]: rebuilds the tree as it collapses. */
  def mapViaFold[A, B](t: Tree[A])(f: A => B): Tree[B] =
    fold(t)(v => Leaf(f(v)): Tree[B])(Branch(_, _))
}
| ramakocherlakota/fpinscala | exercises/src/main/scala/fpinscala/datastructures/Tree.scala | Scala | mit | 1,504 |
/**********************************************************************************************************************
* This file is part of Scrupal, a Scalable Reactive Web Application Framework for Content Management *
* *
* Copyright (c) 2015, Reactific Software LLC. All Rights Reserved. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed *
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for *
* the specific language governing permissions and limitations under the License. *
**********************************************************************************************************************/
package scrupal.utils
import java.util.regex.{ Matcher, Pattern }
/** Competently find the plural of most common English words.
*
* In dealing with logged messages, error messages, and other output that people see,
* it is always nice to have the correct plural form of nouns be used. This class attempts to get pretty good
* coverage for the english language based on a variety of pluralization rules. This is also used in URL
* path matching to distinguish between operations that act on an instance (singular) or a collection of instances
* (plural)
*
* Inspired by: [[https://github.com/atteo/evo-inflector/blob/master/src/main/java/org/atteo/evo/inflector/TwoFormInflector.java]]
* Rules from: [[http://www.barstow.edu/lrc/tutorserv/handouts/015%20Irregular%20Plural%20Nouns.pd]]
* Oxford Rules: [[http://oxforddictionaries.com/words/plurals-of-nouns]]
*/
abstract class Pluralizer {

  /** One pluralization rule.
   *  @param singular regular expression (with a substitution group) matched against the word
   *  @param plural   replacement string applied via Matcher.replaceFirst
   */
  case class Rule(singular: Pattern, plural: String) {}

  /** Ordered rule list: the first matching rule wins, so more specific rules
   *  must be registered before more general ones.
   */
  val rules: scala.collection.mutable.ListBuffer[Rule] = new scala.collection.mutable.ListBuffer[Rule]

  /** The main interface to this class.
   *  Returns the plural form of `word`, or `word` unchanged when no
   *  registered rule matches.
   *
   *  @param word The word to be pluralized
   *  @return The plural form of word
   */
  def pluralize(word: String): String = {
    // First matching rule wins. This replaces the previous
    // `rules.map { ... return ... }` implementation, which used a non-local
    // return from inside a lambda (implemented by throwing
    // NonLocalReturnControl) and allocated a mapped collection that was
    // always discarded.
    rules.iterator
      .map(rule => (rule.singular.matcher(word), rule.plural))
      .find { case (matcher, _) => matcher.find() }
      .map { case (matcher, plural) => matcher.replaceFirst(plural) } // replaceFirst resets the matcher first
      .getOrElse(word)
  }

  /** Declaration of a non-plural word: its plural form equals its singular form.
   *  @param word The word whose plural form is the same as its singular form
   */
  protected def noplural(word: String): Unit = {
    rules += new Rule(Pattern.compile("(?i)(" + word + ")$"), "$1")
  }

  /** Declaration of a list of non-plural words.
   *  @param word_list words whose plural form is the same as the singular form
   */
  protected def noplural(word_list: List[String]): Unit = {
    // Build a single alternation: (?i)(w1|w2|...)$
    val builder: StringBuilder = new StringBuilder()
    builder.append("(?i)(").append(word_list.head)
    word_list.tail.foreach { word =>
      builder.append("|").append(word)
    }
    builder.append(")$")
    rules += new Rule(Pattern.compile(builder.toString()), "$1")
  }

  /** Declaration of an irregular singular/plural pair no general rule covers.
   *  When both forms share a first letter, one case-insensitive rule with a
   *  captured first letter suffices; otherwise two rules preserve an upper-
   *  or lower-case initial explicitly.
   *  @param singular The singular form of the word
   *  @param plural The plural form of the word
   */
  protected def irregular(singular: String, plural: String): Unit = {
    if (singular.charAt(0) == plural.charAt(0)) {
      rules += new Rule(Pattern.compile("(?i)(" + singular.charAt(0) + ")" + singular.substring(1)
        + "$"), "$1" + plural.substring(1))
    } else {
      rules += new Rule(Pattern.compile(Character.toUpperCase(singular.charAt(0)) + "(?i)"
        + singular.substring(1) + "$"), Character.toUpperCase(plural.charAt(0))
        + plural.substring(1))
      rules += new Rule(Pattern.compile(Character.toLowerCase(singular.charAt(0)) + "(?i)"
        + singular.substring(1) + "$"), Character.toLowerCase(plural.charAt(0))
        + plural.substring(1))
    }
  }

  /** Registers one irregular pair given as a tuple. */
  protected def irregular(pair: (String, String)): Unit = irregular(pair._1, pair._2)

  /** Registers many irregular pairs. */
  protected def irregular(pairs: List[(String, String)]): Unit = pairs.foreach { pair => irregular(pair) }

  /** Registers a plain regex rule: case-insensitive `singular` pattern -> `plural` replacement. */
  protected def standard(singular: String, plural: String): Unit =
    rules += new Rule(Pattern.compile(singular, Pattern.CASE_INSENSITIVE), plural)

  /** Registers one regex rule given as a tuple. */
  protected def standard(pair: (String, String)): Unit = standard(pair._1, pair._2)

  /** Registers many regex rules. */
  protected def standard(pairs: List[(String, String)]): Unit = pairs.foreach { pair => standard(pair) }

  /** Registers a rule restricted, via a zero-width lookahead, to the listed
   *  words: builds `^(?=w1|w2|...)pattern` so only words in `word_list` are
   *  rewritten with `plural`.
   */
  protected def category(word_list: List[String], pattern: String, plural: String): Unit = {
    val builder: StringBuilder = new StringBuilder()
    builder.append("^(?=").append(word_list.head)
    word_list.tail.foreach { word => builder.append("|").append(word) }
    builder.append(")")
    builder.append(pattern)
    rules += new Rule(Pattern.compile(builder.toString(), Pattern.CASE_INSENSITIVE), plural)
  }
}
/** Interface to Pluralizer
* This object allows us to write {{{Pluralizer("word")}}} to obtain the plural form or
* {{{Pluralizer("word",count}}} if we want it to be based on the value of count.
* This class also calls all the base class methods to install the rules.
*/
object Pluralizer extends Pluralizer {
def apply(word : String, count : Int = 2) = pluralize(word, count)
def apply(word : Symbol) = pluralize(word.name)
def pluralize(word : Symbol) : String = pluralize(word.name)
def pluralize(word : String, count : Int = 2) : String =
{
if (count == 1 || count == -1) {
word
} else {
pluralize(word)
}
}
  /** Construct the Pluralizer by using the super class's methods to install the rules.
    * NOTE: installation order matters — the specific word lists below must be
    * registered before the generic suffix rules at the end of this block so
    * that exceptions win over general patterns.
    */
  // Some words are the same with plural and singular. We list these first so they don't
  // attempt to match other patterns below.
  // NOTE(review): "itis" and "ois" below look like suffix classes rather than
  // whole words — confirm how noplural matches before relying on them.
  noplural(
    List(
      "bison", "bream", "breeches", "britches",
      "carp", "chassis", "clippers", "cod", "contretemps", "corps",
      "debris", "deer", "diabetes", "djinn",
      "eland", "elk",
      "fish", "flounder",
      "gallows", "graffiti",
      "headquarters", "herpes", "high-jinks", "homework",
      "innings", "itis",
      "jackanapes",
      "mackerel", "measles", "mews", "moose", "mumps",
      "news",
      "ois",
      "pincers", "pliers", "pox", "proceedings",
      "rabies",
      "salmon", "scissors", "sea-bass", "series", "shears", "sheep", "species", "sugar", "swine",
      "trout", "tuna",
      "wildebeest", "whiting"
    )
  )
  // Some words defy categorization so we do a direct mapping for these ones.
  irregular(
    List(
      ("beef", "beefs"),
      ("brother", "brothers"),
      ("child", "children"),
      ("cow", "cows"),
      ("die", "dice"),
      ("ephemeris", "ephemerides"),
      ("genie", "genies"),
      ("genus", "genera"),
      ("money", "monies"),
      ("mongoose", "mongoose"),
      ("octopus", "octopi"),
      ("opus", "opuses"),
      ("ox", "oxen"),
      ("person", "people"),
      ("quiz", "quizzes"),
      ("runner-up", "runners-up"),
      ("soliloquy", "soliloquies"),
      ("son-in-law", "sons-in-law"),
      ("trilby", "trilbys")
    )
  )
  // Certain nouns ending in "man" get pluralized as "mans" not "men", list those before we do the
  // "man->men" endings.
  category(List[String] (
    "human", "Alabaman", "Bahaman", "Burman", "German", "Hiroshiman", "Liman", "Nakayaman", "Oklahoman",
    "Panaman", "Selman", "Sonaman", "Tacoman", "Yakiman", "Yokohaman", "Yuman"
  ),
    "(.*)$", "$1s")
  // Certain irregular plurals have standard suffix inflections that we can count on.
  standard(
    List(
      ("man$", "men"),
      ("([lm])ouse$", "$1ice"),
      ("tooth$", "teeth"),
      ("goose$", "geese"),
      ("foot$", "feet"),
      ("zoon$", "zoa"),
      ("([csx])is$", "$1es")
    )
  )
  // Some words ending in -ex become -ices
  category(
    List (
      "apex",
      "codex", "cortex",
      "index",
      "latex",
      "murex",
      "pontifex",
      "silex", "simplex",
      "vertex", "vortex"
    ),
    "(.*)ex$", "$1ices"
  )
  // Some words ending in -ix become -ices
  category(
    List (
      "appendix",
      "crucifix",
      "helix",
      "matrix",
      "radix"
    ),
    "(.*)ix$", "$1ices"
  )
  // Some words ending in -um become -a
  category(
    List(
      "addendum",
      "agendum",
      "aquarium",
      "bacterium",
      "candelabrum",
      "curriculum",
      "datum",
      "desideratum",
      "erratum",
      "extremum",
      "memorandum",
      "ovum",
      "stratum"
    ), "(.*)um$", "$1a")
  // Some words ending in -on become -a
  category(
    List(
      "aphelion",
      "asyndeton",
      "criterion",
      "hyperbaton",
      "noumenon",
      "organon",
      "perihelion",
      "phenomenon",
      "prolegomenon"
    ), "(.*)on$", "$1a")
  // Some words ending in -a become -ae
  category(
    List(
      "alga",
      "alumna",
      "persona",
      "vertebra"
    ),
    "(.*)a$", "$1ae"
  )
// Some words that end in -f become -ves
category(
List(
"hoof",
"loaf",
"meatloaf",
"oaf",
"roof",
"sugarloaf",
"thief"
), "(.*)f", "$1ves"
)
  // Some words ending in -en become -ina
  category(
    List(
      "foramen",
      "lumen",
      "stamen"
    ),
    "(.*)en$", "$1ina"
  )
  // Some words ending in -ma become -mata
  category(
    List (
      "anathema",
      "bema",
      "carcinoma", "charisma",
      "diploma", "dogma", "drama",
      "edema", "enema", "enigma",
      "gumma",
      "lemma", "lymphoma",
      "magma", "melisma", "miasma",
      "oedema",
      "sarcoma", "schema", "soma", "stigma", "stoma",
      "trauma"
    ),
    "(.*)a$", "$1ata"
  )
  // Some words ending in -la become -lae
  category(
    List(
      "formula"
    ),
    "(.*)a$", "$1ae"
  )
  // Classically, a few words ending in -is become -ides
  category(
    List (
      "iris", "clitoris"
    ),
    "(.*)is$", "$1ides"
  )
  // Some words ending in -us become -uses instead of the more usual -i ending
  category(
    List (
      "apparatus",
      "cantus",
      "coitus",
      "hiatus",
      "impetus",
      "plexus",
      "prospectus",
      "nexus",
      "sinus",
      "status"
    ),
    "(.*)us$", "$1uses"
  )
  // A few words ending in -us become -a instead of -uses or -i
  category(
    List (
      "corpus"
    ),
    "(.*)us$", "$1a"
  )
  // Now that the -us exceptions are handled we can specify the words for which -us becomes -i
  category(
    List(
      "alumnus", "alveolus",
      "bacillus", "bronchus",
      "cactus",
      "focus", "fungus",
      "hippopotamus",
      "locus",
      "meniscus",
      "nucleus",
      "radius",
      "stimulus",
      "syllabus",
      "thesaurus"
    ), "(.*)us$", "$1i"
  )
  // Classically words ending in -o can become -i but many words aren't used that way any more
  category(
    List (
      "tempo",
      "virtuoso"
    ), "(.*)o$", "$1i"
  )
  // Some words ending in -o become -os (including ones preceded by a vowel)
  category(
    List (
      "albino", "alto", "archipelago", "armadillo", "auto",
      "basso",
      "canto", "casino", "commando", "contralto", "crescendo",
      "fiasco",
      "ditto", "dynamo",
      "embryo",
      "generalissimo", "ghetto", "guano",
      "inferno",
      "jumbo",
      "lingo", "lumbago",
      "macro",
      "magneto", "manifesto", "medico",
      "octavo",
      "photo", "piano", "pro",
      "quarto",
      "rhino",
      "solo", "soprano", "stylo",
      "zero"
    ),
    "(.*)o$", "$1os"
  )
  // A few words just get -i appended, generally they end in t preceded by a vowel
  category(
    List(
      "afreet", "afrit", "efreet"
    ), "(.*)$", "$1i"
  )
  // A few words just get -im appended
  category(
    List(
      "cherub", "goy", "seraph"
    ),
    "(.*)$", "$1im"
  )
  // Several words that might otherwise match patterns get the -es suffix
  category(
    List(
      "acropolis", "aegis", "asbestos", "alias", "atlas",
      "bathos", "bias", "bus",
      "caddis", "cannabis", "canvas", "chaos", "circus", "cosmos",
      "dais", "digitalis",
      "epidermis", "ethos",
      "gas", "glottis",
      "ibis",
      "lens",
      "mantis", "marquis",
      "metropolis",
      "pathos", "pelvis", "polis",
      "rhinoceros",
      "sassafras",
      "trellis"
    ),
    "(.*)$", "$1es"
  )
  // Words ending in ch where it sounds like "k" get a simple -s suffix, not -es
  category(
    List(
      "stomach",
      "epoch"
    ),
    "(.*)ch$", "$1chs"
  )
  // The preceding irregular, category and noplural rules are processed first so now we can handle some standard
  // rules without regard to specific words, just word ending patterns
  standard(
    List(
      ("trix$", "trices"), // trix at end of word becomes trices (matrix, index, etc.)
      ("eau$", "eaux"), // eau at end of word becomes eaux (tableau)
      ("ieu$", "ieux"), // ieu at end of word become ieux (millieu)
      ("(..[iay])nx$", "$1nges") // words ending in ynx, anx, or inx with at least two letters prior become nges
    )
  )
  // Now, a standard rule: words ending in -ch -sh -z or -x become -es at the end
  // NOTE(review): the ([cs])h rule two statements below appears to be shadowed
  // by this one — confirm whether both are needed.
  standard("([cs]h|[zx])$", "$1es")
  // The suffixes -ch, -sh, and -ss all take -es in the plural (churches, classes, etc)...
  standard(
    List(
      ("([cs])h$", "$1hes"),
      ("ss$", "sses")
    )
  )
  // Certain words ending in -f or -fe take -ves in the plural (lives, wolves, etc)...
  standard(
    List (
      ("([aeo]l)f$", "$1ves"), // elf -> elves
      ("([^d]ea)f$", "$1ves"), // leaf -> leaves
      ("(ar)f$", "$1ves"), // scarf -> scarves
      ("([nlw]i)fe$", "$1ves") // wife -> wives
    )
  )
  // Words ending in -y become -ys
  standard(
    List(
      ("([aeiou])y$", "$1ys"), // boy -> boys
      ("y$", "ies") // ply -> plies
    )
  )
  // Vowels followed by -o end in -os
  standard("([aeiou])o$", "$1os")
  // Anything else followed by -o end in -oes
  standard ("o$", "oes")
  // Otherwise, assume that the plural just adds -s
  standard("(.*)$", "$1s")
}
| scrupal/scrupal | scrupal-utils/src/main/scala/scrupal/utils/Pluralizer.scala | Scala | apache-2.0 | 15,871 |
package scalacookbook.chapter02
/**
* Created by liguodong on 2016/6/12.
*/
/** Demonstrates why floating point values must be compared with a tolerance
  * rather than `==`, and three ways to express an approximate-equality check:
  * a local method, an implicit extension (DoubleUtils) and a utilities object
  * (MathUtils).
  */
object CompareFloatPointNum {

  def main(args: Array[String]): Unit = {
    val a = 0.3 //a: Double = 0.3
    println(a)
    val b = 0.1 + 0.2 //b: Double = 0.30000000000000004
    println(b)
    // Compare the two floats for approximate equality (symmetric).
    println(~=(a, b, 0.0001)) //res0: Boolean = true
    println(~=(b, a, 0.0001)) //res1: Boolean = true
    val c = 0.1+0.1
    println(c)
    // Exact comparison fails because of representation error.
    println(a == b)
    // Implicit conversion: define an implicit class that adds ~= to Double.
    import DoubleUtils._
    println(a.~=(b))
    println(a ~= b)
    // Or add the same method to a utilities object.
    println(MathUtils.~=(a, b, 0.000001)) // not a great approach, see DoubleUtils
  }

  /** True when x and y differ by less than `precision` in absolute value. */
  def ~=(x: Double, y: Double, precision: Double): Boolean =
    (x - y).abs < precision
}
/** Utilities object variant of approximate floating-point equality. */
object MathUtils {
  /** True when x and y differ by less than `precision` in absolute value. */
  def ~=(x: Double, y: Double, precision: Double): Boolean =
    (x - y).abs < precision
}
/** Implicit extension of Double with an approximate-equality operator.
  *
  * With an implicit conversion the name ~= reads naturally at the call site
  * (`a ~= b`). In a utilities object (see MathUtils) the symbolic name does
  * not read as well; there it would be better named approximatelyEqual,
  * equalWithinTolerance, or similar.
  */
object DoubleUtils {
  implicit class EqualFloatNum(a: Double) {
    /** True when `a` and `b` differ by less than the fixed tolerance 1e-6. */
    def ~=(b: Double): Boolean = (a - b).abs < 0.000001
  }
}
| liguodongIOT/java-scala-mix-sbt | src/main/scala/scalacookbook/chapter02/CompareFloatPointNum.scala | Scala | apache-2.0 | 1,466 |
package com.wavesplatform.lang.v1.repl.node.http.response
import com.wavesplatform.lang.v1.repl.node.http.response.model._
import com.wavesplatform.lang.v1.traits.domain.Recipient.Address
import com.wavesplatform.lang.v1.traits.domain.{BlockInfo, ScriptAssetInfo, Tx}
/** Implicit converters from node HTTP response models into RIDE domain
  * values. Conversions that depend on the address scheme are delegated to a
  * ChainDependentMapper built for the supplied chain id.
  */
case class ImplicitMappings(chainId: Byte) {
  private val chainDependentMapper = new ChainDependentMapper(chainId)

  // Optional height: a failed node response maps to None.
  implicit val heightO: HeightResponse => Option[Long] =
    r => if (r.succeed) Some(r.height) else None

  // Unconditional height extraction.
  implicit val heightM: HeightResponse => Long =
    r => r.height

  // Transfer transactions, asset info and block info are chain dependent.
  implicit val transferTxO: TransferTransaction => Option[Tx.Transfer] =
    chainDependentMapper.toRideModelO

  implicit val transferTxM: TransferTransaction => Tx.Transfer =
    chainDependentMapper.toRideModel

  implicit val assetInfoM: AssetInfoResponse => ScriptAssetInfo =
    chainDependentMapper.toRideModel

  implicit val blockInfoM: BlockInfoResponse => BlockInfo =
    chainDependentMapper.toRideModel

  // A data entry is represented by its raw value.
  implicit val dataEntryM: DataEntry => Any =
    entry => entry.value

  implicit val addressM: AddressResponse => Address =
    resp => Address(resp.address.byteStr)

  implicit val balanceM: BalanceResponse => Long =
    resp => resp.balance

  // An empty balance list is treated as a zero balance.
  implicit val balancesM: List[BalanceResponse] => Long =
    responses => responses.headOption.fold(0L)(head => head.balance)

  implicit val addressFromString: String => Either[String, Address] =
    chainDependentMapper.addressFromString
}
| wavesplatform/Waves | repl/shared/src/main/scala/com/wavesplatform/lang/v1/repl/node/http/response/ImplicitMappings.scala | Scala | mit | 1,446 |
package org.lords.classification
import java.io.PrintWriter
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import scala.util.parsing.json._
import scala.util.Random
object UserClassifier {
  /** Extract the items a user interacted with ('played'/'rated'/'reviewed')
    * from one JSON session line.
    *
    * @param line a JSON string describing a single session
    * @return Some(user -> (start, end, kidFlag, items)) on parse success,
    *         None otherwise; kidFlag is 'n' (unknown), 't' (kid) or 'f' (adult)
    */
  def extractItems(line: String): Option[Tuple2[String, (Long, Long, Char, Iterable[String])]] = {
    // Each of the three sections is a map from item id to some payload; we only need the keys.
    def keys = (x: Any) => x.asInstanceOf[Map[String, List[String]]].keys
    // if scala version is smaller than 2.11, need this
    type StringMap = Map[String, Any]
    JSON.parseFull(line) match {
      case Some(m: StringMap) =>
        val user = m("user").asInstanceOf[String]
        val start = m("start").asInstanceOf[String].toLong
        val end = m("end").asInstanceOf[String].toLong
        val items = keys(m("played")) ++ keys(m("rated")) ++ keys(m("reviewed"))
        // n -> null, t -> true, f -> false
        val kid = if(m("kid") == null) 'n' else m("kid").toString.head
        Some((user, (start, end, kid, items)))
      case _ => None
    }
  }
  /** Merge all items in this group. If already known the user is 'kid', add suffix 'k',
    * 'adult' user add suffix 'a'. If seen a conflicting label, treat this user as two
    * different users.
    *
    * @param group a user and all of that user's sessions
    * @return list of (userName, items) pairs; userName may carry a 'k'/'a' suffix
    */
  def combine(group: Tuple2[String, Iterable[(Long, Long, Char, Iterable[String])]]) = {
    val user = group._1
    // sort sessions by time, x._1 is start time, x._2 is end time
    val sortedValue = group._2.toSeq.sortBy(x => (x._1, x._2))
    def emptyIter = Iterable.empty[String]
    // all data are belong to a same user, but this user can treat as two different user
    // items._1 current user type ('a' or 'k')
    // items._2 items belong to unlabeled user
    // items._3 items belong to kid user
    // items._4 items belong to adult user
    val items = sortedValue.foldLeft(('n', emptyIter, emptyIter, emptyIter)) { (acc, x) =>
      val unknown = acc._2
      val kid = acc._3
      val adult = acc._4
      val pre = acc._1
      val cur = x._3
      val newItems = x._4
      // State machine over the session labels: an unknown run is assigned to
      // whichever labeled state ('t' or 'f') is seen next.
      (pre, cur) match {
        case ('n', 'n') => ('n', newItems ++ unknown, kid, adult)
        case ('n', 't') | ('t', 'n') | ('t', 't') => ('t', emptyIter, newItems ++ kid ++ unknown, adult)
        case ('n', 'f') | ('f', 'n') | ('f', 'f') => ('f', emptyIter, kid, newItems ++ adult ++ unknown)
        case ('t', 'f') => ('f', emptyIter, kid, newItems ++ adult ++ unknown)
        case ('f', 't') => ('t', emptyIter, newItems ++ kid ++ unknown, adult)
      }
    }
    // merge all items, add 'a' or 'k' suffix to user name if need
    val data = (if(items._2.isEmpty) None else Some((user, items._2))) ::
    (if(items._3.isEmpty) None else Some((s"${user}k", items._3))) ::
    (if(items._4.isEmpty) None else Some((s"${user}a", items._4))) :: Nil
    // user -> list of items
    data.flatMap(x => x)
  }
  /** Topic-sensitive random-walk propagation over the user/item adjacency
    * lists, iterated until the L1 change between distributions drops below
    * `threshold`.
    *
    * @param matrix adjacency lists: node -> neighbours
    * @param source seed nodes that receive the teleport mass
    * @param beta damping factor; (1 - beta) of the mass teleports to the seeds
    * @param threshold L1 convergence threshold
    * @return converged probability distribution over nodes
    */
  def simrank(matrix: RDD[(String, Iterable[String])], source: Array[String],
    beta: Double = 0.8, threshold: Double = 0.01): Map[String, Double] = {
    // initial probability which assignment to each source
    val N = source.size
    val prob = 1.0 / N
    val telport = (1.0 - beta) / N
    // v_0
    val v0 = source.foldLeft(Map.empty[String, Double])((acc, x) => acc.updated(x, prob))
    // recursively compute v_{k+1}
    def _simrank(v: Map[String,Double]): Map[String, Double] = {
      // compute the major distribution vector of next state
      val majorVn = matrix.flatMap { case (col, rows) =>
        rows.flatMap { row => if(v.contains(col)) Option((row, v(col) / rows.size)) else None }
      } .groupByKey().map(
        x => (x._1, if(source.contains(x._1)) x._2.sum * beta + telport else x._2.sum * beta)
      ) .collect
      // find all 'user' which appear in source, but not appear in 'majorVn'
      // NOTE(review): majorVn is an Array[(String, Double)], so `majorVn.contains(x)` with a
      // String operand never matches and noAppearSource includes every source node. The extra
      // telport entries appear to be overwritten by the fold building `nv` below, so the
      // result looks unaffected — confirm and simplify the filter.
      val noAppearSource = source.filter(x => !majorVn.contains(x))
      val onlyTelportV = noAppearSource.foldLeft(Map.empty[String, Double])((acc, x) => acc.updated(x, telport))
      // merge 'majorVn' and 'onlyTelportV', we got the final distribution vector of next state
      val nv = majorVn.foldLeft(onlyTelportV)((acc, x) => acc.updated(x._1, x._2))
      // L1 distance between the previous and new distributions
      val diff = nv.foldLeft(0.0)((acc, x) => if(v.contains(x._1)) acc + Math.abs(v(x._1) - x._2) else acc + x._2)
      // if 'diff' bigger than threshold loop again, else return distribution vector
      if(diff > threshold) { _simrank(nv) } else { nv }
    }
    _simrank(v0)
  }
  // Damping factor and convergence threshold used by solve.
  def beta = 0.8
  def threshold = 1E-6
  /** Classify users by propagating labels from the known adult and kid seeds.
    *
    * @param data user -> items the user interacted with
    * @param adult labeled adult user names
    * @param kid labeled kid user names
    * @return user -> integer label (suffixes 'a'/'k' stripped)
    */
  def solve(data: RDD[(String, Iterable[String])], adult: Array[String], kid: Array[String]) = {
    // Step 3. Build an adjacency matrix (users -> items plus the reversed item -> users edges)
    val matrix = data.union(data.flatMap(x => x._2.map(item => (item, x._1))).groupByKey())
    // a regex pattern which match user name
    // NOTE(review): inside triple quotes "\\d" is a literal backslash followed by 'd', not the
    // digit class — confirm whether the intended pattern is \d{7,8}[ak]? .
    val pattern = """\\d{7,8}[ak]?""".r.pattern
    val adultV = simrank(matrix, adult, beta, threshold).filter(x => pattern.matcher(x._1).matches)
    val kidV = simrank(matrix, kid, beta, threshold).filter(x => pattern.matcher(x._1).matches)
    // normalize: weight adult scores by the seed-set size ratio
    val adultNum = adult.size
    val kidNum = kid.size
    val normalAdultV = adultV.map(x => (x._1, -1.0 * adultNum / kidNum))
    // merge
    val v = normalAdultV.foldLeft(kidV) { case (m, (key, value)) => m + (key -> (value + m.getOrElse(key, 0.0) + 1.0))}
    // drop duplicate user, remove 'a' or 'k' suffix
    val solution = v.keySet.toList.sorted.foldLeft(Map.empty[String, Int]) {(acc, k) =>
      val user = if(k.endsWith("k") || k.endsWith("a")) k.init else k
      acc.updated(user, v(k).toInt)
    }
    solution
  }
  /** Run classification using every labeled user as a seed and write the
    * solution file (one user/label pair per line). */
  def classify(data: RDD[(String, Iterable[String])], solutionFileName: String) = {
    val user = data.map(_._1)
    val adult = user.filter(_.endsWith("a")).collect
    val kid = user.filter(_.endsWith("k")).collect
    val solution = solve(data, adult, kid)
    val out = new PrintWriter(solutionFileName)
    // NOTE(review): in an f-interpolated string "\\t" is a literal backslash plus 't',
    // not a tab character — confirm the intended separator.
    solution.foreach(x => out.println(f"${x._1}\\t${x._2}"))
    out.close()
  }
  /** Hold out 20% of the labeled users, classify with the rest, and print accuracy. */
  def test(data: RDD[(String, Iterable[String])]) = {
    val user = data.map(_._1)
    // split adult users as training set and test set
    val adult = user.filter(_.endsWith("a")).collect
    val (adultTrain, adultTest) = Random.shuffle(adult.toSeq).splitAt((adult.size / 5.0 * 4).toInt)
    // split kid users as training set and test set
    val kid = user.filter(_.endsWith("k")).collect
    val (kidTrain, kidTest) = Random.shuffle(kid.toSeq).splitAt((kid.size / 5.0 * 4).toInt)
    // convert test set to Map format, eg. 1234567 -> 0 or 7654321 -> 1
    val testSet = kidTest.map(_.init).foldLeft(adultTest.map(_.init -> 0).toMap) {
      (acc, x) => acc.updated(x, 1)
    }
    val solution = solve(data, adultTrain.toArray, kidTrain.toArray)
    // NOTE(review): a test user missing from the solution counts as correct — confirm intended.
    val accuracy = testSet.filter{
      case(user, label) => if (solution.contains(user)) solution(user) == label else true
    }.size / testSet.size.toDouble * 100
    println("="*80)
    println(f"Accuracy: ${accuracy}%.2f %%")
    println("="*80)
  }
  /** Print command-line usage. */
  def usage = {
    println("="*80)
    println("usage.")
    println(" UserClassifier classify data solution")
    println(" UserClassifier test data")
    println("="*80)
  }
  /** Entry point: dispatch on the "classify" or "test" sub-command. */
  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("User Classifier")
    val sc = new SparkContext(conf)
    args.toList match {
      case "classify" :: tail if tail.size == 2 => {
        val dataFileName = args(1)
        val solutionFileName = args(2)
        val data = sc.textFile(dataFileName).flatMap(extractItems).groupByKey().flatMap(combine)
        classify(data, solutionFileName)
      }
      case "test" :: tail if tail.size == 1 => {
        val dataFileName = args(1)
        val data = sc.textFile(dataFileName).flatMap(extractItems).groupByKey().flatMap(combine)
        test(data)
      }
      case _ => usage
    }
    // "hdfs://127.0.0.1:8020/user/cloudera/clean/"
  }
}
| Data-Scientist/LP01_DSWAC_0706 | ccp_spark/src/main/scala/org/lords/classification/UserClassifier.scala | Scala | mit | 8,207 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.suiteprop
import org.scalatest._
// SKIP-SCALATESTJS,NATIVE-START
import refspec.RefSpec
// SKIP-SCALATESTJS,NATIVE-END
import org.scalatest.{ freespec, funspec }
import org.scalatest.featurespec.AnyFeatureSpec
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.propspec.AnyPropSpec
import org.scalatest.wordspec.AnyWordSpec
/** Suite-property examples in which every ScalaTest style registers the same
  * two tests ("first test" / "second test") as ignored, so the framework's
  * ignore handling and test-name generation can be verified uniformly across
  * all suite styles (FunSuite, FunSpec, WordSpec, FlatSpec, FreeSpec,
  * FeatureSpec, PropSpec, and their fixture/path variants).
  */
class TwoTestsIgnoredExamples extends org.scalatest.suiteprop.SuiteExamples {

  // Expected test names for flat (unnested) suites.
  trait Services {
    val theTestNames = Vector("first test", "second test")
  }

  // Expected names when tests are nested under a subject with should/must/can.
  trait NestedTestNames extends Services {
    override val theTestNames = Vector("A subject should first test", "A subject should second test")
  }

  trait DeeplyNestedTestNames extends Services {
    override val theTestNames = Vector("A subject when created should first test", "A subject when created should second test")
  }

  trait NestedTestNamesWithMust extends Services {
    override val theTestNames = Vector("A subject must first test", "A subject must second test")
  }

  trait DeeplyNestedTestNamesWithMust extends Services {
    override val theTestNames = Vector("A subject when created must first test", "A subject when created must second test")
  }

  trait NestedTestNamesWithCan extends Services {
    override val theTestNames = Vector("A subject can first test", "A subject can second test")
  }

  trait DeeplyNestedTestNamesWithCan extends Services {
    override val theTestNames = Vector("A subject when created can first test", "A subject when created can second test")
  }

  type FixtureServices = Services

  // SKIP-SCALATESTJS,NATIVE-START
  class SpecExample extends RefSpec with Services {
    @Ignore def `test first`: Unit = {}
    @Ignore def `test second`: Unit = {}
    override val theTestNames = Vector("test first", "test second")
  }
  // SKIP-SCALATESTJS,NATIVE-END

  class FunSuiteExample extends AnyFunSuite with Services {
    ignore("first test") {}
    ignore("second test") {}
  }

  class FixtureFunSuiteExample extends StringFixtureFunSuite with Services {
    ignore("first test") { s => }
    ignore("second test") { s => }
  }

  class FunSpecExample extends AnyFunSpec with Services {
    ignore("first test") {}
    ignore("second test") {}
  }

  class NestedFunSpecExample extends AnyFunSpec with NestedTestNames {
    describe("A subject") {
      ignore("should first test") {}
      ignore("should second test") {}
    }
  }

  class DeeplyNestedFunSpecExample extends AnyFunSpec with DeeplyNestedTestNames {
    describe("A subject") {
      describe("when created") {
        ignore("should first test") {}
        ignore("should second test") {}
      }
    }
  }

  class FixtureFunSpecExample extends StringFixtureFunSpec with Services {
    ignore("first test") { s => }
    ignore("second test") { s => }
  }

  class NestedFixtureFunSpecExample extends StringFixtureFunSpec with NestedTestNames {
    describe("A subject") {
      ignore("should first test") { s => }
      ignore("should second test") { s => }
    }
  }

  class DeeplyNestedFixtureFunSpecExample extends StringFixtureFunSpec with DeeplyNestedTestNames {
    describe("A subject") {
      describe("when created") {
        ignore("should first test") { s => }
        ignore("should second test") { s => }
      }
    }
  }

  // Path-based suites must provide newInstance so each test runs in a fresh instance.
  class PathFunSpecExample extends funspec.PathAnyFunSpec with Services {
    ignore("first test") {}
    ignore("second test") {}
    override def newInstance = new PathFunSpecExample
  }

  class NestedPathFunSpecExample extends funspec.PathAnyFunSpec with NestedTestNames {
    describe("A subject") {
      ignore("should first test") {}
      ignore("should second test") {}
    }
    override def newInstance = new NestedPathFunSpecExample
  }

  class DeeplyNestedPathFunSpecExample extends funspec.PathAnyFunSpec with DeeplyNestedTestNames {
    describe("A subject") {
      describe("when created") {
        ignore("should first test") {}
        ignore("should second test") {}
      }
    }
    override def newInstance = new DeeplyNestedPathFunSpecExample
  }

  class WordSpecExample extends AnyWordSpec with Services {
    "first test" ignore {}
    "second test" ignore {}
  }

  class NestedWordSpecExample extends AnyWordSpec with NestedTestNames {
    "A subject" should {
      "first test" ignore {}
      "second test" ignore {}
    }
  }

  class DeeplyNestedWordSpecExample extends AnyWordSpec with DeeplyNestedTestNames {
    "A subject" when {
      "created" should {
        "first test" ignore {}
        "second test" ignore {}
      }
    }
  }

  class FixtureWordSpecExample extends StringFixtureWordSpec with Services {
    "first test" ignore { s => }
    "second test" ignore { s => }
  }

  class NestedFixtureWordSpecExample extends StringFixtureWordSpec with NestedTestNames {
    "A subject" should {
      "first test" ignore { s => }
      "second test" ignore { s => }
    }
  }

  class DeeplyNestedFixtureWordSpecExample extends StringFixtureWordSpec with DeeplyNestedTestNames {
    "A subject" when {
      "created" should {
        "first test" ignore { s => }
        "second test" ignore { s => }
      }
    }
  }

  class NestedWordSpecWithMustExample extends AnyWordSpec with NestedTestNamesWithMust {
    "A subject" must {
      "first test" ignore {}
      "second test" ignore {}
    }
  }

  class DeeplyNestedWordSpecWithMustExample extends AnyWordSpec with DeeplyNestedTestNamesWithMust {
    "A subject" when {
      "created" must {
        "first test" ignore {}
        "second test" ignore {}
      }
    }
  }

  class NestedFixtureWordSpecWithMustExample extends StringFixtureWordSpec with NestedTestNamesWithMust {
    "A subject" must {
      "first test" ignore { s => }
      "second test" ignore { s => }
    }
  }

  class DeeplyNestedFixtureWordSpecWithMustExample extends StringFixtureWordSpec with DeeplyNestedTestNamesWithMust {
    "A subject" when {
      "created" must {
        "first test" ignore { s => }
        "second test" ignore { s => }
      }
    }
  }

  class NestedWordSpecWithCanExample extends AnyWordSpec with NestedTestNamesWithCan {
    "A subject" can {
      "first test" ignore {}
      "second test" ignore {}
    }
  }

  class DeeplyNestedWordSpecWithCanExample extends AnyWordSpec with DeeplyNestedTestNamesWithCan {
    "A subject" when {
      "created" can {
        "first test" ignore {}
        "second test" ignore {}
      }
    }
  }

  class NestedFixtureWordSpecWithCanExample extends StringFixtureWordSpec with NestedTestNamesWithCan {
    "A subject" can {
      "first test" ignore { s => }
      "second test" ignore { s => }
    }
  }

  class DeeplyNestedFixtureWordSpecWithCanExample extends StringFixtureWordSpec with DeeplyNestedTestNamesWithCan {
    "A subject" when {
      "created" can {
        "first test" ignore { s => }
        "second test" ignore { s => }
      }
    }
  }

  class FlatSpecExample extends AnyFlatSpec with Services {
    it should "first test" ignore {}
    it should "second test" ignore {}
    override val theTestNames = Vector("should first test", "should second test")
  }

  class SubjectFlatSpecExample extends AnyFlatSpec with NestedTestNames {
    behavior of "A subject"
    it should "first test" ignore {}
    it should "second test" ignore {}
  }

  class ShorthandSubjectFlatSpecExample extends AnyFlatSpec with NestedTestNames {
    "A subject" should "first test" ignore {}
    it should "second test" ignore {}
  }

  class FixtureFlatSpecExample extends StringFixtureFlatSpec with Services {
    it should "first test" ignore { s => }
    it should "second test" ignore { s => }
    override val theTestNames = Vector("should first test", "should second test")
  }

  class SubjectFixtureFlatSpecExample extends StringFixtureFlatSpec with NestedTestNames {
    behavior of "A subject"
    it should "first test" ignore { s => }
    it should "second test" ignore { s => }
  }

  class ShorthandSubjectFixtureFlatSpecExample extends StringFixtureFlatSpec with NestedTestNames {
    "A subject" should "first test" ignore { s => }
    it should "second test" ignore { s => }
  }

  class FlatSpecWithMustExample extends AnyFlatSpec with Services {
    it must "first test" ignore {}
    it must "second test" ignore {}
    override val theTestNames = Vector("must first test", "must second test")
  }

  class SubjectFlatSpecWithMustExample extends AnyFlatSpec with NestedTestNamesWithMust {
    behavior of "A subject"
    it must "first test" ignore {}
    it must "second test" ignore {}
  }

  class ShorthandSubjectFlatSpecWithMustExample extends AnyFlatSpec with NestedTestNamesWithMust {
    "A subject" must "first test" ignore {}
    it must "second test" ignore {}
  }

  class FixtureFlatSpecWithMustExample extends StringFixtureFlatSpec with Services {
    it must "first test" ignore { s => }
    it must "second test" ignore { s => }
    override val theTestNames = Vector("must first test", "must second test")
  }

  class SubjectFixtureFlatSpecWithMustExample extends StringFixtureFlatSpec with NestedTestNamesWithMust {
    behavior of "A subject"
    it must "first test" ignore { s => }
    it must "second test" ignore { s => }
  }

  class ShorthandSubjectFixtureFlatSpecWithMustExample extends StringFixtureFlatSpec with NestedTestNamesWithMust {
    "A subject" must "first test" ignore { s => }
    it must "second test" ignore { s => }
  }

  class FlatSpecWithCanExample extends AnyFlatSpec with Services {
    it can "first test" ignore {}
    it can "second test" ignore {}
    override val theTestNames = Vector("can first test", "can second test")
  }

  class SubjectFlatSpecWithCanExample extends AnyFlatSpec with NestedTestNamesWithCan {
    behavior of "A subject"
    it can "first test" ignore {}
    it can "second test" ignore {}
  }

  class ShorthandSubjectFlatSpecWithCanExample extends AnyFlatSpec with NestedTestNamesWithCan {
    "A subject" can "first test" ignore {}
    it can "second test" ignore {}
  }

  class FixtureFlatSpecWithCanExample extends StringFixtureFlatSpec with Services {
    it can "first test" ignore { s => }
    it can "second test" ignore { s => }
    override val theTestNames = Vector("can first test", "can second test")
  }

  class SubjectFixtureFlatSpecWithCanExample extends StringFixtureFlatSpec with NestedTestNamesWithCan {
    behavior of "A subject"
    it can "first test" ignore { s => }
    it can "second test" ignore { s => }
  }

  class ShorthandSubjectFixtureFlatSpecWithCanExample extends StringFixtureFlatSpec with NestedTestNamesWithCan {
    "A subject" can "first test" ignore { s => }
    it can "second test" ignore { s => }
  }

  class FreeSpecExample extends AnyFreeSpec with Services {
    "first test" ignore {}
    "second test" ignore {}
  }

  class NestedFreeSpecExample extends AnyFreeSpec with NestedTestNames {
    "A subject" - {
      "should first test" ignore {}
      "should second test" ignore {}
    }
  }

  class DeeplyNestedFreeSpecExample extends AnyFreeSpec with DeeplyNestedTestNames {
    "A subject" - {
      "when created" - {
        "should first test" ignore {}
        "should second test" ignore {}
      }
    }
  }

  class FixtureFreeSpecExample extends StringFixtureFreeSpec with Services {
    "first test" ignore { s => }
    "second test" ignore { s => }
  }

  class NestedFixtureFreeSpecExample extends StringFixtureFreeSpec with NestedTestNames {
    "A subject" - {
      "should first test" ignore { s => }
      "should second test" ignore { s => }
    }
  }

  class DeeplyNestedFixtureFreeSpecExample extends StringFixtureFreeSpec with DeeplyNestedTestNames {
    "A subject" - {
      "when created" - {
        "should first test" ignore { s => }
        "should second test" ignore { s => }
      }
    }
  }

  class PathFreeSpecExample extends freespec.PathAnyFreeSpec with Services {
    "first test" ignore {}
    "second test" ignore {}
    override def newInstance = new PathFreeSpecExample
  }

  class NestedPathFreeSpecExample extends freespec.PathAnyFreeSpec with NestedTestNames {
    "A subject" - {
      "should first test" ignore {}
      "should second test" ignore {}
    }
    override def newInstance = new NestedPathFreeSpecExample
  }

  class DeeplyNestedPathFreeSpecExample extends freespec.PathAnyFreeSpec with DeeplyNestedTestNames {
    "A subject" - {
      "when created" - {
        "should first test" ignore {}
        "should second test" ignore {}
      }
    }
    override def newInstance = new DeeplyNestedPathFreeSpecExample
  }

  class FeatureSpecExample extends AnyFeatureSpec with Services {
    ignore("first test") {}
    ignore("second test") {}
    override val theTestNames = Vector("Scenario: first test", "Scenario: second test")
  }

  class NestedFeatureSpecExample extends AnyFeatureSpec with Services {
    Feature("A feature") {
      ignore("first test") {}
      ignore("second test") {}
    }
    override val theTestNames = Vector("Feature: A feature Scenario: first test", "Feature: A feature Scenario: second test")
  }

  class FixtureFeatureSpecExample extends StringFixtureFeatureSpec with Services {
    ignore("first test") { s => }
    ignore("second test") { s => }
    override val theTestNames = Vector("Scenario: first test", "Scenario: second test")
  }

  class NestedFixtureFeatureSpecExample extends StringFixtureFeatureSpec with Services {
    Feature("A feature") {
      ignore("first test") { s => }
      ignore("second test") { s => }
    }
    override val theTestNames = Vector("Feature: A feature Scenario: first test", "Feature: A feature Scenario: second test")
  }

  class PropSpecExample extends AnyPropSpec with Services {
    ignore("first test") {}
    ignore("second test") {}
  }

  class FixturePropSpecExample extends StringFixturePropSpec with Services {
    ignore("first test") { s => }
    ignore("second test") { s => }
  }

  // Lazily instantiated examples referenced by the superclass's property table.
  // SKIP-SCALATESTJS,NATIVE-START
  lazy val spec = new SpecExample
  // SKIP-SCALATESTJS,NATIVE-END
  lazy val funSuite = new FunSuiteExample
  lazy val fixtureFunSuite = new FixtureFunSuiteExample
  lazy val funSpec = new FunSpecExample
  lazy val nestedFunSpec = new NestedFunSpecExample
  lazy val deeplyNestedFunSpec = new DeeplyNestedFunSpecExample
  lazy val fixtureFunSpec = new FixtureFunSpecExample
  lazy val nestedFixtureFunSpec = new NestedFixtureFunSpecExample
  lazy val deeplyNestedFixtureFunSpec = new DeeplyNestedFixtureFunSpecExample
  lazy val pathFunSpec = new PathFunSpecExample
  lazy val nestedPathFunSpec = new NestedPathFunSpecExample
  lazy val deeplyNestedPathFunSpec = new DeeplyNestedPathFunSpecExample
  lazy val wordSpec = new WordSpecExample
  lazy val nestedWordSpec = new NestedWordSpecExample
  lazy val deeplyNestedWordSpec = new DeeplyNestedWordSpecExample
  lazy val fixtureWordSpec = new FixtureWordSpecExample
  lazy val nestedFixtureWordSpec = new NestedFixtureWordSpecExample
  lazy val deeplyNestedFixtureWordSpec = new DeeplyNestedFixtureWordSpecExample
  lazy val nestedWordSpecWithMust = new NestedWordSpecWithMustExample
  lazy val deeplyNestedWordSpecWithMust = new DeeplyNestedWordSpecWithMustExample
  lazy val nestedFixtureWordSpecWithMust = new NestedFixtureWordSpecWithMustExample
  lazy val deeplyNestedFixtureWordSpecWithMust = new DeeplyNestedFixtureWordSpecWithMustExample
  lazy val nestedWordSpecWithCan = new NestedWordSpecWithCanExample
  lazy val deeplyNestedWordSpecWithCan = new DeeplyNestedWordSpecWithCanExample
  lazy val nestedFixtureWordSpecWithCan = new NestedFixtureWordSpecWithCanExample
  lazy val deeplyNestedFixtureWordSpecWithCan = new DeeplyNestedFixtureWordSpecWithCanExample
  lazy val flatSpec = new FlatSpecExample
  lazy val subjectFlatSpec = new SubjectFlatSpecExample
  lazy val shorthandSubjectFlatSpec = new ShorthandSubjectFlatSpecExample
  lazy val fixtureFlatSpec = new FixtureFlatSpecExample
  lazy val subjectFixtureFlatSpec = new SubjectFixtureFlatSpecExample
  lazy val shorthandSubjectFixtureFlatSpec = new ShorthandSubjectFixtureFlatSpecExample
  lazy val flatSpecWithMust = new FlatSpecWithMustExample
  lazy val subjectFlatSpecWithMust = new SubjectFlatSpecWithMustExample
  lazy val shorthandSubjectFlatSpecWithMust = new ShorthandSubjectFlatSpecWithMustExample
  lazy val fixtureFlatSpecWithMust = new FixtureFlatSpecWithMustExample
  lazy val subjectFixtureFlatSpecWithMust = new SubjectFixtureFlatSpecWithMustExample
  lazy val shorthandSubjectFixtureFlatSpecWithMust = new ShorthandSubjectFixtureFlatSpecWithMustExample
  lazy val flatSpecWithCan = new FlatSpecWithCanExample
  lazy val subjectFlatSpecWithCan = new SubjectFlatSpecWithCanExample
  lazy val shorthandSubjectFlatSpecWithCan = new ShorthandSubjectFlatSpecWithCanExample
  lazy val fixtureFlatSpecWithCan = new FixtureFlatSpecWithCanExample
  lazy val subjectFixtureFlatSpecWithCan = new SubjectFixtureFlatSpecWithCanExample
  lazy val shorthandSubjectFixtureFlatSpecWithCan = new ShorthandSubjectFixtureFlatSpecWithCanExample
  lazy val freeSpec = new FreeSpecExample
  lazy val nestedFreeSpec = new NestedFreeSpecExample
  lazy val deeplyNestedFreeSpec = new DeeplyNestedFreeSpecExample
  lazy val fixtureFreeSpec = new FixtureFreeSpecExample
  lazy val nestedFixtureFreeSpec = new NestedFixtureFreeSpecExample
  lazy val deeplyNestedFixtureFreeSpec = new DeeplyNestedFixtureFreeSpecExample
  lazy val pathFreeSpec = new PathFreeSpecExample
  lazy val nestedPathFreeSpec = new NestedPathFreeSpecExample
  lazy val deeplyNestedPathFreeSpec = new DeeplyNestedPathFreeSpecExample
  lazy val featureSpec = new FeatureSpecExample
  lazy val nestedFeatureSpec = new NestedFeatureSpecExample
  lazy val fixtureFeatureSpec = new FixtureFeatureSpecExample
  lazy val nestedFixtureFeatureSpec = new NestedFixtureFeatureSpecExample
  lazy val propSpec = new PropSpecExample
  lazy val fixturePropSpec = new FixturePropSpecExample

  // Two ways to ignore in a flat spec, so add two more examples
  override def examples: org.scalatest.prop.TableFor1[Suite with Services] = super.examples ++
    List(
      new FlatSpecExample2,
      new FixtureFlatSpecExample2,
      new FlatSpecWithMustExample2,
      new FixtureFlatSpecWithMustExample2,
      new FlatSpecWithCanExample2,
      new FixtureFlatSpecWithCanExample2
    )

  // These variants use the `ignore ... in` form rather than `it ... ignore`.
  class FlatSpecExample2 extends AnyFlatSpec with Services {
    ignore should "first test" in {}
    ignore should "second test" in {}
    override val theTestNames = Vector("should first test", "should second test")
  }

  class FixtureFlatSpecExample2 extends StringFixtureFlatSpec with Services {
    ignore should "first test" in { s => }
    ignore should "second test" in { s => }
    override val theTestNames = Vector("should first test", "should second test")
  }

  class FlatSpecWithMustExample2 extends AnyFlatSpec with Services {
    ignore must "first test" in {}
    ignore must "second test" in {}
    override val theTestNames = Vector("must first test", "must second test")
  }

  class FixtureFlatSpecWithMustExample2 extends StringFixtureFlatSpec with Services {
    ignore must "first test" in { s => }
    ignore must "second test" in { s => }
    override val theTestNames = Vector("must first test", "must second test")
  }

  class FlatSpecWithCanExample2 extends AnyFlatSpec with Services {
    ignore can "first test" in {}
    ignore can "second test" in {}
    override val theTestNames = Vector("can first test", "can second test")
  }

  class FixtureFlatSpecWithCanExample2 extends StringFixtureFlatSpec with Services {
    ignore can "first test" in { s => }
    ignore can "second test" in { s => }
    override val theTestNames = Vector("can first test", "can second test")
  }
}
| scalatest/scalatest | jvm/scalatest-test/src/test/scala/org/scalatest/suiteprop/TwoTestsIgnoredExamples.scala | Scala | apache-2.0 | 20,507 |
package aecor.runtime.akkapersistence.serialization
/**
 * Marker trait for all protobuf-serializable messages in `aecor.runtime.akkapersistence`.
 *
 * NOTE(review): presumably bound to a protobuf-based Akka serializer via configuration
 * (e.g. serialization-bindings in reference.conf) — confirm against the module's config.
 */
private[aecor] trait Message
| notxcain/aecor | modules/akka-persistence-runtime/src/main/scala/aecor/runtime/akkapersistence/serialization/Message.scala | Scala | mit | 183 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.util.{Date, UUID}
import scala.collection.mutable
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.io.{FileCommitProtocol, SparkHadoopWriterUtils}
import org.apache.spark.internal.io.FileCommitProtocol.TaskCommitMessage
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, ExternalCatalogUtils}
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.physical.HashPartitioning
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeUtils}
import org.apache.spark.sql.execution.{QueryExecution, SortExec, SQLExecution}
import org.apache.spark.sql.types.{StringType, StructType}
import org.apache.spark.util.{SerializableConfiguration, Utils}
/** A helper object for writing FileFormat data out to a location. */
object FileFormatWriter extends Logging {
  /**
   * Max number of files a single task writes out due to file size. In most cases the number of
   * files written should be very small. This is just a safe guard to protect some really bad
   * settings, e.g. maxRecordsPerFile = 1.
   */
  private val MAX_FILE_COUNTER = 1000 * 1000

  /** Describes how output files should be placed in the filesystem. */
  case class OutputSpec(
    outputPath: String, customPartitionLocations: Map[TablePartitionSpec, String])

  /**
   * A shared job description for all the write tasks.
   * Serialized once on the driver and shipped to every executor-side write task.
   */
  private class WriteJobDescription(
      val uuid: String,  // prevent collision between different (appending) write jobs
      val serializableHadoopConf: SerializableConfiguration,
      val outputWriterFactory: OutputWriterFactory,
      val allColumns: Seq[Attribute],
      val dataColumns: Seq[Attribute],
      val partitionColumns: Seq[Attribute],
      val bucketIdExpression: Option[Expression],
      val path: String,
      val customPartitionLocations: Map[TablePartitionSpec, String],
      val maxRecordsPerFile: Long,
      val timeZoneId: String)
    extends Serializable {

    // Sanity check: partition columns plus data columns must exactly cover the
    // full output schema — the write tasks rely on this split.
    assert(AttributeSet(allColumns) == AttributeSet(partitionColumns ++ dataColumns),
      s"""
         |All columns: ${allColumns.mkString(", ")}
         |Partition columns: ${partitionColumns.mkString(", ")}
         |Data columns: ${dataColumns.mkString(", ")}
       """.stripMargin)
  }
  /**
   * Basic work flow of this command is:
   * 1. Driver side setup, including output committer initialization and data source specific
   *    preparation work for the write job to be issued.
   * 2. Issues a write job consists of one or more executor side tasks, each of which writes all
   *    rows within an RDD partition.
   * 3. If no exception is thrown in a task, commits that task, otherwise aborts that task;  If any
   *    exception is thrown during task commitment, also aborts that task.
   * 4. If all tasks are committed, commit the job, otherwise aborts the job;  If any exception is
   *    thrown during job commitment, also aborts the job.
   */
  def write(
      sparkSession: SparkSession,
      queryExecution: QueryExecution,
      fileFormat: FileFormat,
      committer: FileCommitProtocol,
      outputSpec: OutputSpec,
      hadoopConf: Configuration,
      partitionColumns: Seq[Attribute],
      bucketSpec: Option[BucketSpec],
      refreshFunction: (Seq[TablePartitionSpec]) => Unit,
      options: Map[String, String]): Unit = {

    val job = Job.getInstance(hadoopConf)
    // Keys are unused for file-based output; only the InternalRow values are written.
    job.setOutputKeyClass(classOf[Void])
    job.setOutputValueClass(classOf[InternalRow])
    FileOutputFormat.setOutputPath(job, new Path(outputSpec.outputPath))

    val allColumns = queryExecution.logical.output
    val partitionSet = AttributeSet(partitionColumns)
    // Data columns = everything in the output that is not a partition column.
    val dataColumns = queryExecution.logical.output.filterNot(partitionSet.contains)

    val bucketIdExpression = bucketSpec.map { spec =>
      val bucketColumns = spec.bucketColumnNames.map(c => dataColumns.find(_.name == c).get)
      // Use `HashPartitioning.partitionIdExpression` as our bucket id expression, so that we can
      // guarantee the data distribution is same between shuffle and bucketed data source, which
      // enables us to only shuffle one side when join a bucketed table and a normal one.
      HashPartitioning(bucketColumns, spec.numBuckets).partitionIdExpression
    }
    val sortColumns = bucketSpec.toSeq.flatMap {
      spec => spec.sortColumnNames.map(c => dataColumns.find(_.name == c).get)
    }

    val caseInsensitiveOptions = CaseInsensitiveMap(options)

    // Note: prepareWrite has side effect. It sets "job".
    val outputWriterFactory =
      fileFormat.prepareWrite(sparkSession, job, caseInsensitiveOptions, dataColumns.toStructType)

    val description = new WriteJobDescription(
      uuid = UUID.randomUUID().toString,
      serializableHadoopConf = new SerializableConfiguration(job.getConfiguration),
      outputWriterFactory = outputWriterFactory,
      allColumns = allColumns,
      dataColumns = dataColumns,
      partitionColumns = partitionColumns,
      bucketIdExpression = bucketIdExpression,
      path = outputSpec.outputPath,
      customPartitionLocations = outputSpec.customPartitionLocations,
      maxRecordsPerFile = caseInsensitiveOptions.get("maxRecordsPerFile").map(_.toLong)
        .getOrElse(sparkSession.sessionState.conf.maxRecordsPerFile),
      timeZoneId = caseInsensitiveOptions.get(DateTimeUtils.TIMEZONE_OPTION)
        .getOrElse(sparkSession.sessionState.conf.sessionLocalTimeZone)
    )

    // We should first sort by partition columns, then bucket id, and finally sorting columns.
    val requiredOrdering = partitionColumns ++ bucketIdExpression ++ sortColumns
    // the sort order doesn't matter
    val actualOrdering = queryExecution.executedPlan.outputOrdering.map(_.child)
    // Skip the extra sort below when the child plan already emits rows in the
    // required (prefix) order.
    val orderingMatched = if (requiredOrdering.length > actualOrdering.length) {
      false
    } else {
      requiredOrdering.zip(actualOrdering).forall {
        case (requiredOrder, childOutputOrder) =>
          requiredOrder.semanticEquals(childOutputOrder)
      }
    }

    SQLExecution.withNewExecutionId(sparkSession, queryExecution) {
      // This call shouldn't be put into the `try` block below because it only initializes and
      // prepares the job, any exception thrown from here shouldn't cause abortJob() to be called.
      committer.setupJob(job)

      try {
        val rdd = if (orderingMatched) {
          queryExecution.toRdd
        } else {
          // Locally (global = false) sort each RDD partition so rows arrive
          // clustered by partition/bucket/sort columns.
          SortExec(
            requiredOrdering.map(SortOrder(_, Ascending)),
            global = false,
            child = queryExecution.executedPlan).execute()
        }

        val ret = sparkSession.sparkContext.runJob(rdd,
          (taskContext: TaskContext, iter: Iterator[InternalRow]) => {
            executeTask(
              description = description,
              sparkStageId = taskContext.stageId(),
              sparkPartitionId = taskContext.partitionId(),
              sparkAttemptNumber = taskContext.attemptNumber(),
              committer,
              iterator = iter)
          })

        // Each task returns (commit message, partition paths it touched).
        val commitMsgs = ret.map(_._1)
        val updatedPartitions = ret.flatMap(_._2).distinct.map(PartitioningUtils.parsePathFragment)

        committer.commitJob(job, commitMsgs)
        logInfo(s"Job ${job.getJobID} committed.")
        refreshFunction(updatedPartitions)
      } catch { case cause: Throwable =>
        logError(s"Aborting job ${job.getJobID}.", cause)
        committer.abortJob(job)
        throw new SparkException("Job aborted.", cause)
      }
    }
  }
  /** Writes data out in a single Spark task. */
  private def executeTask(
      description: WriteJobDescription,
      sparkStageId: Int,
      sparkPartitionId: Int,
      sparkAttemptNumber: Int,
      committer: FileCommitProtocol,
      iterator: Iterator[InternalRow]): (TaskCommitMessage, Set[String]) = {

    // Synthesize Hadoop MapReduce-style job/task/attempt ids from Spark's ids so
    // Hadoop output committers can be reused unchanged.
    val jobId = SparkHadoopWriterUtils.createJobID(new Date, sparkStageId)
    val taskId = new TaskID(jobId, TaskType.MAP, sparkPartitionId)
    val taskAttemptId = new TaskAttemptID(taskId, sparkAttemptNumber)

    // Set up the attempt context required to use in the output committer.
    val taskAttemptContext: TaskAttemptContext = {
      // Set up the configuration object
      val hadoopConf = description.serializableHadoopConf.value
      hadoopConf.set("mapreduce.job.id", jobId.toString)
      hadoopConf.set("mapreduce.task.id", taskAttemptId.getTaskID.toString)
      hadoopConf.set("mapreduce.task.attempt.id", taskAttemptId.toString)
      hadoopConf.setBoolean("mapreduce.task.ismap", true)
      // NOTE(review): partition index is hard-coded to 0 here rather than
      // sparkPartitionId — presumably committers don't rely on it; confirm.
      hadoopConf.setInt("mapreduce.task.partition", 0)

      new TaskAttemptContextImpl(hadoopConf, taskAttemptId)
    }

    committer.setupTask(taskAttemptContext)

    // Choose the simple single-directory path when there is no partitioning and
    // no bucketing; otherwise use the dynamic-partition writer.
    val writeTask =
      if (description.partitionColumns.isEmpty && description.bucketIdExpression.isEmpty) {
        new SingleDirectoryWriteTask(description, taskAttemptContext, committer)
      } else {
        new DynamicPartitionWriteTask(description, taskAttemptContext, committer)
      }

    try {
      Utils.tryWithSafeFinallyAndFailureCallbacks(block = {
        // Execute the task to write rows out and commit the task.
        val outputPartitions = writeTask.execute(iterator)
        writeTask.releaseResources()
        (committer.commitTask(taskAttemptContext), outputPartitions)
      })(catchBlock = {
        // If there is an error, release resource and then abort the task
        try {
          writeTask.releaseResources()
        } finally {
          committer.abortTask(taskAttemptContext)
          logError(s"Job $jobId aborted.")
        }
      })
    } catch {
      case t: Throwable =>
        throw new SparkException("Task failed while writing rows", t)
    }
  }
  /**
   * A simple trait for writing out data in a single Spark task, without any concerns about how
   * to commit or abort tasks. Exceptions thrown by the implementation of this trait will
   * automatically trigger task aborts.
   */
  private trait ExecuteWriteTask {
    /**
     * Writes data out to files, and then returns the list of partition strings written out.
     * The list of partitions is sent back to the driver and used to update the catalog.
     */
    def execute(iterator: Iterator[InternalRow]): Set[String]

    // Closes any currently-open writer; called on both the success and failure
    // paths, so implementations guard against double-close.
    def releaseResources(): Unit
  }
  /** Writes data to a single directory (used for non-dynamic-partition writes). */
  private class SingleDirectoryWriteTask(
      description: WriteJobDescription,
      taskAttemptContext: TaskAttemptContext,
      committer: FileCommitProtocol) extends ExecuteWriteTask {

    private[this] var currentWriter: OutputWriter = _

    // Opens a new writer for file number `fileCounter`; the counter is embedded
    // in the file name so maxRecordsPerFile rollovers produce distinct files.
    private def newOutputWriter(fileCounter: Int): Unit = {
      val ext = description.outputWriterFactory.getFileExtension(taskAttemptContext)
      val tmpFilePath = committer.newTaskTempFile(
        taskAttemptContext,
        None,
        f"-c$fileCounter%03d" + ext)

      currentWriter = description.outputWriterFactory.newInstance(
        path = tmpFilePath,
        dataSchema = description.dataColumns.toStructType,
        context = taskAttemptContext)
    }

    override def execute(iter: Iterator[InternalRow]): Set[String] = {
      var fileCounter = 0
      var recordsInFile: Long = 0L
      // Note: the first writer is opened unconditionally, so an empty input
      // iterator still produces one (empty) output file.
      newOutputWriter(fileCounter)
      while (iter.hasNext) {
        // Roll over to a new file once the per-file record limit is reached.
        if (description.maxRecordsPerFile > 0 && recordsInFile >= description.maxRecordsPerFile) {
          fileCounter += 1
          assert(fileCounter < MAX_FILE_COUNTER,
            s"File counter $fileCounter is beyond max value $MAX_FILE_COUNTER")

          recordsInFile = 0
          releaseResources()
          newOutputWriter(fileCounter)
        }

        val internalRow = iter.next()
        currentWriter.write(internalRow)
        recordsInFile += 1
      }
      releaseResources()
      // No partition directories are involved in the single-directory case.
      Set.empty
    }

    override def releaseResources(): Unit = {
      if (currentWriter != null) {
        currentWriter.close()
        currentWriter = null
      }
    }
  }
  /**
   * Writes data to using dynamic partition writes, meaning this single function can write to
   * multiple directories (partitions) or files (bucketing).
   *
   * Relies on input rows arriving clustered by (partition columns, bucket id):
   * a new writer is opened on every key change, so unclustered input would open
   * one file per key transition.
   */
  private class DynamicPartitionWriteTask(
      desc: WriteJobDescription,
      taskAttemptContext: TaskAttemptContext,
      committer: FileCommitProtocol) extends ExecuteWriteTask {

    // currentWriter is initialized whenever we see a new key
    private var currentWriter: OutputWriter = _

    /** Expressions that given partition columns build a path string like: col1=val/col2=val/... */
    private def partitionPathExpression: Seq[Expression] = {
      desc.partitionColumns.zipWithIndex.flatMap { case (c, i) =>
        // Escaping/formatting of each `name=value` segment is delegated to
        // ExternalCatalogUtils.getPartitionPathString via a ScalaUDF.
        val partitionName = ScalaUDF(
          ExternalCatalogUtils.getPartitionPathString _,
          StringType,
          Seq(Literal(c.name), Cast(c, StringType, Option(desc.timeZoneId))))
        if (i == 0) Seq(partitionName) else Seq(Literal(Path.SEPARATOR), partitionName)
      }
    }

    /**
     * Opens a new OutputWriter given a partition key and optional bucket id.
     * If bucket id is specified, we will append it to the end of the file name, but before the
     * file extension, e.g. part-r-00009-ea518ad4-455a-4431-b471-d24e03814677-00002.gz.parquet
     *
     * @param partColsAndBucketId a row consisting of partition columns and a bucket id for the
     *                            current row.
     * @param getPartitionPath a function that projects the partition values into a path string.
     * @param fileCounter the number of files that have been written in the past for this specific
     *                    partition. This is used to limit the max number of records written for a
     *                    single file. The value should start from 0.
     * @param updatedPartitions the set of updated partition paths, we should add the new partition
     *                          path of this writer to it.
     */
    private def newOutputWriter(
        partColsAndBucketId: InternalRow,
        getPartitionPath: UnsafeProjection,
        fileCounter: Int,
        updatedPartitions: mutable.Set[String]): Unit = {
      val partDir = if (desc.partitionColumns.isEmpty) {
        None
      } else {
        Option(getPartitionPath(partColsAndBucketId).getString(0))
      }
      partDir.foreach(updatedPartitions.add)

      // If the bucketId expression is defined, the bucketId column is right after the partition
      // columns.
      val bucketId = if (desc.bucketIdExpression.isDefined) {
        BucketingUtils.bucketIdToString(partColsAndBucketId.getInt(desc.partitionColumns.length))
      } else {
        ""
      }

      // This must be in a form that matches our bucketing format. See BucketingUtils.
      val ext = f"$bucketId.c$fileCounter%03d" +
        desc.outputWriterFactory.getFileExtension(taskAttemptContext)

      // Partitions may have user-specified custom locations (outside the table
      // root); those need the absolute-path variant of the committer API.
      val customPath = partDir match {
        case Some(dir) =>
          desc.customPartitionLocations.get(PartitioningUtils.parsePathFragment(dir))
        case _ =>
          None
      }
      val path = if (customPath.isDefined) {
        committer.newTaskTempFileAbsPath(taskAttemptContext, customPath.get, ext)
      } else {
        committer.newTaskTempFile(taskAttemptContext, partDir, ext)
      }

      currentWriter = desc.outputWriterFactory.newInstance(
        path = path,
        dataSchema = desc.dataColumns.toStructType,
        context = taskAttemptContext)
    }

    override def execute(iter: Iterator[InternalRow]): Set[String] = {
      // Extracts (partition columns ++ bucket id) from a full input row.
      val getPartitionColsAndBucketId = UnsafeProjection.create(
        desc.partitionColumns ++ desc.bucketIdExpression, desc.allColumns)

      // Generates the partition path given the row generated by `getPartitionColsAndBucketId`.
      val getPartPath = UnsafeProjection.create(
        Seq(Concat(partitionPathExpression)), desc.partitionColumns)

      // Returns the data columns to be written given an input row
      val getOutputRow = UnsafeProjection.create(desc.dataColumns, desc.allColumns)

      // If anything below fails, we should abort the task.
      var recordsInFile: Long = 0L
      var fileCounter = 0
      var currentPartColsAndBucketId: UnsafeRow = null
      val updatedPartitions = mutable.Set[String]()
      for (row <- iter) {
        val nextPartColsAndBucketId = getPartitionColsAndBucketId(row)
        if (currentPartColsAndBucketId != nextPartColsAndBucketId) {
          // See a new partition or bucket - write to a new partition dir (or a new bucket file).
          // copy() is required: the projection reuses its output row buffer.
          currentPartColsAndBucketId = nextPartColsAndBucketId.copy()
          logDebug(s"Writing partition: $currentPartColsAndBucketId")

          recordsInFile = 0
          fileCounter = 0

          releaseResources()
          newOutputWriter(currentPartColsAndBucketId, getPartPath, fileCounter, updatedPartitions)
        } else if (desc.maxRecordsPerFile > 0 &&
            recordsInFile >= desc.maxRecordsPerFile) {
          // Exceeded the threshold in terms of the number of records per file.
          // Create a new file by increasing the file counter.
          recordsInFile = 0
          fileCounter += 1
          assert(fileCounter < MAX_FILE_COUNTER,
            s"File counter $fileCounter is beyond max value $MAX_FILE_COUNTER")

          releaseResources()
          newOutputWriter(currentPartColsAndBucketId, getPartPath, fileCounter, updatedPartitions)
        }

        currentWriter.write(getOutputRow(row))
        recordsInFile += 1
      }
      releaseResources()
      updatedPartitions.toSet
    }

    override def releaseResources(): Unit = {
      if (currentWriter != null) {
        currentWriter.close()
        currentWriter = null
      }
    }
  }
}
| jianran/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatWriter.scala | Scala | apache-2.0 | 18,986 |
package protocbridge
/** Catalog of protoc code generators (builtin and plugin-based). */
object gens {
  // Prevent the organization name from getting shaded...
  // See https://github.com/scalapb/ScalaPB/issues/150
  private val JavaProtobufArtifact: String =
    "com+google+protobuf".replace('+', '.')

  val cpp = BuiltinGenerator("cpp")
  val csharp = BuiltinGenerator("csharp")

  // Default Java generator pinned to protobuf-java 3.11.4.
  val java: BuiltinGenerator = java("3.11.4")

  // Java generator with a caller-chosen protobuf-java runtime version.
  def java(runtimeVersion: String): BuiltinGenerator =
    BuiltinGenerator(
      "java",
      suggestedDependencies =
        Seq(Artifact(JavaProtobufArtifact, "protobuf-java", runtimeVersion))
    )

  // External protoc plugins, optionally with an explicit executable path.
  def plugin(name: String): PluginGenerator = PluginGenerator(name, Nil, None)

  def plugin(name: String, path: String): PluginGenerator =
    PluginGenerator(name, Nil, Some(path))

  val javanano = BuiltinGenerator("javanano")

  // Default Kotlin generator pinned to protobuf-kotlin 3.17.2.
  val kotlin: BuiltinGenerator = kotlin("3.17.2")

  def kotlin(runtimeVersion: String): BuiltinGenerator =
    BuiltinGenerator(
      "kotlin",
      suggestedDependencies =
        Seq(Artifact(JavaProtobufArtifact, "protobuf-kotlin", runtimeVersion))
    )

  val js = BuiltinGenerator("js")
  val objc = BuiltinGenerator("objc")
  val python = BuiltinGenerator("python")
  val ruby = BuiltinGenerator("ruby")
  val go = BuiltinGenerator("go")
  val swagger = BuiltinGenerator("swagger")
  val gateway = BuiltinGenerator("grpc-gateway")
  val descriptorSet = DescriptorSetGenerator()
}
| trueaccord/protoc-bridge | bridge/src/main/scala/protocbridge/gens.scala | Scala | apache-2.0 | 1,388 |
package fr.montuelle.arduino.sensors
import com.google.inject.AbstractModule
import fr.montuelle.arduino.sensors.services.{SensorsServerImpl, SensorsServer}
/** Guice module wiring the sensors subsystem. */
class SensorsModule extends AbstractModule {

  def configure() = {
    // asEagerSingleton: the implementation is instantiated when the injector is
    // created rather than lazily on first injection.
    bind(classOf[SensorsServer])
      .to(classOf[SensorsServerImpl]).asEagerSingleton
  }
} | bmontuelle/sensors-history | app/fr/montuelle/arduino/sensors/SensorsModule.scala | Scala | mit | 320 |
package pipelines.speech
import breeze.stats.distributions.{CauchyDistribution, RandBasis, ThreadLocalRandomGenerator}
import breeze.linalg.DenseVector
import org.apache.commons.math3.random.MersenneTwister
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import evaluation.MulticlassClassifierEvaluator
import loaders.TimitFeaturesDataLoader
import nodes.learning.{BlockLinearMapper, BlockLeastSquaresEstimator}
import nodes.stats.{CosineRandomFeatures, StandardScaler}
import nodes.util.{VectorCombiner, ClassLabelIndicatorsFromIntLabels, MaxClassifier}
import pipelines._
import workflow.{Optimizer, Pipeline}
object TimitPipeline extends Logging {
  val appName = "Timit"

  /**
   * Command-line configuration for the TIMIT pipeline.
   * The four location fields are required by the CLI parser; the rest carry the
   * defaults shown here (numCosines batches of random cosine features, RBF
   * bandwidth `gamma`, ridge `lambda`, and the number of solver epochs).
   */
  case class TimitConfig(
    trainDataLocation: String = "",
    trainLabelsLocation: String = "",
    testDataLocation: String = "",
    testLabelsLocation: String = "",
    numParts: Int = 512,
    numCosines: Int = 50,
    gamma: Double = 0.05555,
    rfType: Distributions.Value = Distributions.Gaussian,
    lambda: Double = 0.0,
    numEpochs: Int = 5,
    checkpointDir: Option[String] = None)
def run(sc: SparkContext, conf: TimitConfig) {
conf.checkpointDir.foreach(_ => sc.setCheckpointDir(_))
Thread.sleep(5000)
// Set the constants
val seed = 123L
val random = new java.util.Random(seed)
val randomSource = new RandBasis(new ThreadLocalRandomGenerator(new MersenneTwister(random.nextLong())))
val numCosineFeatures = 4096
val numCosineBatches = conf.numCosines
val colsPerBatch = numCosineFeatures + 1
// Load the data
val timitFeaturesData = TimitFeaturesDataLoader(
sc,
conf.trainDataLocation,
conf.trainLabelsLocation,
conf.testDataLocation,
conf.testLabelsLocation,
conf.numParts)
// Build the pipeline
val trainData = timitFeaturesData.train.data.cache().setName("trainRaw")
trainData.count()
val labels = ClassLabelIndicatorsFromIntLabels(TimitFeaturesDataLoader.numClasses).apply(
timitFeaturesData.train.labels
).cache().setName("trainLabels")
// Train the model
val featurizer = Pipeline.gather {
Seq.fill(numCosineBatches) {
if (conf.rfType == Distributions.Cauchy) {
// TODO: Once https://github.com/scalanlp/breeze/issues/398 is released,
// use a RandBasis for cauchy
CosineRandomFeatures(
TimitFeaturesDataLoader.timitDimension,
numCosineFeatures,
conf.gamma,
new CauchyDistribution(0, 1),
randomSource.uniform)
} else {
CosineRandomFeatures(
TimitFeaturesDataLoader.timitDimension,
numCosineFeatures,
conf.gamma,
randomSource.gaussian,
randomSource.uniform)
}
}
} andThen VectorCombiner()
val predictorPipeline = featurizer andThen
(new BlockLeastSquaresEstimator(numCosineFeatures, conf.numEpochs, conf.lambda),
trainData, labels) andThen MaxClassifier
val predictor = Optimizer.execute(predictorPipeline)
logInfo("\\n" + predictor.toDOTString)
val testData = timitFeaturesData.test.data.cache().setName("testRaw")
val numTest = testData.count()
val actual = timitFeaturesData.test.labels.cache().setName("actual")
// Calculate test error
val testEval = MulticlassClassifierEvaluator(
predictor(testData),
actual,
TimitFeaturesDataLoader.numClasses)
logInfo("TEST Error is " + (100 * testEval.totalError) + "%")
}
  // Random-feature sampling distributions. Kept as a scala.Enumeration because
  // the CLI parser resolves values via `Distributions withName _`.
  object Distributions extends Enumeration {
    type Distributions = Value
    val Gaussian, Cauchy = Value
  }
  /**
   * Parses the CLI arguments into a [[TimitConfig]].
   * NOTE(review): `.get` on the scopt result throws NoSuchElementException when
   * required options are missing (after scopt prints usage) — consider an
   * explicit exit instead; confirm this is the intended failure mode.
   */
  def parse(args: Array[String]): TimitConfig = new OptionParser[TimitConfig](appName) {
    head(appName, "0.1")
    help("help") text("prints this usage text")
    opt[String]("trainDataLocation") required() action { (x,c) => c.copy(trainDataLocation=x) }
    opt[String]("trainLabelsLocation") required() action { (x,c) => c.copy(trainLabelsLocation=x) }
    opt[String]("testDataLocation") required() action { (x,c) => c.copy(testDataLocation=x) }
    opt[String]("testLabelsLocation") required() action { (x,c) => c.copy(testLabelsLocation=x) }
    opt[String]("checkpointDir") action { (x,c) => c.copy(checkpointDir=Some(x)) }
    opt[Int]("numParts") action { (x,c) => c.copy(numParts=x) }
    opt[Int]("numCosines") action { (x,c) => c.copy(numCosines=x) }
    opt[Int]("numEpochs") action { (x,c) => c.copy(numEpochs=x) }
    opt[Double]("gamma") action { (x,c) => c.copy(gamma=x) }
    opt[Double]("lambda") action { (x,c) => c.copy(lambda=x) }
    opt("rfType")(scopt.Read.reads(Distributions withName _)) action { (x,c) => c.copy(rfType = x)}
  }.parse(args, TimitConfig()).get
  /**
   * The actual driver receives its configuration parameters from spark-submit usually.
   * @param args command-line arguments, parsed by [[parse]]
   */
  def main(args: Array[String]) = {
    val appConfig = parse(args)
    val conf = new SparkConf().setAppName(appName)
    // Fall back to a 2-core local master when none was supplied (e.g. local runs).
    conf.setIfMissing("spark.master", "local[2]")
    val sc = new SparkContext(conf)
    run(sc, appConfig)
    sc.stop()
  }
| kcompher/keystone | src/main/scala/pipelines/speech/TimitPipeline.scala | Scala | apache-2.0 | 5,167 |
package smartbot
import scala.io.Source
import java.io.FileWriter
/** Helpers for reading and appending to a simple line-oriented log file. */
object LogParser {

  /**
   * If the string is a number, returns the option of it,
   * otherwise it returns None.
   */
  def getDepth(s: String): Option[Int] = {
    val IntRegEx = "(\\\\d+)".r
    s match {
      // NOTE(review): toInt throws NumberFormatException for digit strings
      // beyond Int range — confirm inputs are small.
      case IntRegEx(num) => Some(num.toInt)
      case _ => None
    }
  }

  /**
   * Opens `file` and returns (depth parsed from the first line, iterator over
   * the remaining lines).
   * NOTE(review): `next()` throws NoSuchElementException on an empty file, and
   * the underlying Source is never closed (file-handle leak) — confirm callers
   * tolerate both.
   */
  def getMessages(file: String): (Option[Int], Iterator[String]) = {
    val messages = Source.fromFile(file).getLines
    val firstLine = messages.next()
    (getDepth(firstLine), messages)
  }

  /**
   * Appends `message` as a new line to `file` (FileWriter append mode).
   * NOTE(review): if write throws, the FileWriter is never closed — consider
   * try/finally.
   */
  def addToLog(file: String, message: String) = {
    val log = new FileWriter(file, true)
    log.write(message + "\\n")
    log.close()
    println("Added " + message + " to " + file)
  }
}
| jneen/smartbot | src/main/scala/logParser.scala | Scala | mit | 737 |
/**
* Copyright (C) 2007-2008 Scala OTP Team
*/
package scala.actors.behavior
// Sealed message ADT used by the behavior test suites.
// NOTE(review): names suggest ping/pong round-trips, a fire-and-forget message,
// and supervisor crash/exit scenarios — confirm in the suites that consume them.
sealed abstract class TestMessage
case object Ping extends TestMessage
case object Pong extends TestMessage
case object OneWay extends TestMessage
case object Die extends TestMessage
case object NotifySupervisorExit extends TestMessage
| bingoyang/scala-otp | behavior/src/test/scala/scala/actors/behavior/Messages.scala | Scala | bsd-3-clause | 318 |
/**
* Copyright 2012-2013 StackMob
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stackmob.newman
package serialization.common
import scalaz._
import Scalaz._
import net.liftweb.json._
import net.liftweb.json.scalaz.JsonScalaz._
/**
 * lift-json (de)serialization of [[Headers]] as a JSON array of
 * `{"name": ..., "value": ...}` objects.
 */
object HeadersSerialization extends SerializationBase[Headers] {
  implicit override val writer = new JSONW[Headers] {
    override def write(h: Headers): JValue = {
      // `|` is scalaz's getOrElse on the Option produced by `h.map`:
      // when no header list is present, serialize an empty JSON array.
      val headersList: List[JObject] = h.map {
        headerList: HeaderList =>
          (headerList.list.map { header: Header =>
            JObject(JField("name", JString(header._1)) :: JField("value", JString(header._2)) :: Nil)
          }).toList
      } | List[JObject]()
      JArray(headersList)
    }
  }

  implicit override val reader = new JSONR[Headers] {
    override def read(json: JValue): Result[Headers] = {
      //example incoming AST:
      //JArray(
      //  List(
      //    JObject(
      //      List(
      //        JField(name,JString(header1)),
      //        JField(value,JString(header1))
      //      )
      //    )
      //  )
      //)
      json match {
        case JArray(jObjectList) => {
          // Malformed elements are silently dropped (empty list) rather than
          // reported — see the TODO markers below.
          val list = jObjectList.flatMap {
            jValue: JValue =>
              jValue match {
                case JObject(jFieldList) => jFieldList match {
                  case JField(_, JString(headerName)) :: JField(_, JString(headerVal)) :: Nil => List(headerName -> headerVal)
                  //TODO: error here
                  case _ => List[(String, String)]()
                }
                //TODO: error here
                case _ => List[(String, String)]()
              }
          }
          val headers: Headers = Headers(list)
          headers.successNel[Error]
        }
        // Anything other than a top-level JSON array is a hard error.
        case j => UnexpectedJSONError(j, classOf[JArray]).failureNel[Headers]
      }
    }
  }
}
| indykish/newman | src/main/scala/com/stackmob/newman/serialization/common/HeadersSerialization.scala | Scala | apache-2.0 | 2,365 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import org.apache.spark.sql.{InternalOutputModes, SparkSession}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.{QueryExecution, SparkPlan, SparkPlanner, UnaryExecNode}
import org.apache.spark.sql.streaming.OutputMode
/**
 * A variant of [[QueryExecution]] that allows the execution of the given [[LogicalPlan]]
 * plan incrementally. Possibly preserving state in between each execution.
 */
class IncrementalExecution(
    sparkSession: SparkSession,
    logicalPlan: LogicalPlan,
    val outputMode: OutputMode,
    val checkpointLocation: String,
    val currentBatchId: Long)
  extends QueryExecution(sparkSession, logicalPlan) {

  // TODO: make this always part of planning.
  val stateStrategy = sparkSession.sessionState.planner.StatefulAggregationStrategy +:
    sparkSession.sessionState.planner.StreamingRelationStrategy +:
    sparkSession.sessionState.experimentalMethods.extraStrategies

  // Modified planner with stateful operations.
  override def planner: SparkPlanner =
    new SparkPlanner(
      sparkSession.sparkContext,
      sparkSession.sessionState.conf,
      stateStrategy)

  /**
   * Records the current id for a given stateful operator in the query plan as the `state`
   * preparation walks the query plan.
   */
  private var operatorId = 0

  /** Locates save/restore pairs surrounding aggregation. */
  val state = new Rule[SparkPlan] {

    override def apply(plan: SparkPlan): SparkPlan = plan transform {
      // NOTE(review): `keys2` is bound but unused — the rebuilt restore node
      // reuses `keys`; confirm save/restore keys are always identical here.
      case StateStoreSaveExec(keys, None, None,
             UnaryExecNode(agg,
               StateStoreRestoreExec(keys2, None, child))) =>
        val stateId = OperatorStateId(checkpointLocation, operatorId, currentBatchId)
        // In Complete mode all state rows are returned on every trigger;
        // simplified from the redundant `if (...) true else false`.
        val returnAllStates = outputMode == InternalOutputModes.Complete
        operatorId += 1
        StateStoreSaveExec(
          keys,
          Some(stateId),
          Some(returnAllStates),
          agg.withNewChildren(
            StateStoreRestoreExec(
              keys,
              Some(stateId),
              child) :: Nil))
    }
  }

  override def preparations: Seq[Rule[SparkPlan]] = state +: super.preparations

  /** No need assert supported, as this check has already been done */
  override def assertSupported(): Unit = { }
}
| gioenn/xSpark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/IncrementalExecution.scala | Scala | apache-2.0 | 3,177 |
package com.cloudray.scalapress.plugin.ecommerce.controller.renderers
import com.cloudray.scalapress.util.Scalate
/** @author Stephen Samuel */
object OrderStatusRenderer {
  // Renders the Scalate template for the customer-facing order status form.
  def form = Scalate.layout("/com/cloudray/scalapress/plugin/ecommerce/order_status.ssp")
}
| vidyacraghav/scalapress | src/main/scala/com/cloudray/scalapress/plugin/ecommerce/controller/renderers/OrderStatusRenderer.scala | Scala | apache-2.0 | 267 |
// scalac: -deprecation
//
//############################################################################
// Literals
//############################################################################
//############################################################################
object Test {
  /** Evaluates `closure` and prints a diagnostic line only when the observed
   *  value differs from `expected` or an exception escapes; passing checks
   *  produce no output.
   *  @param name     label used in the failure report
   *  @param closure  by-name expression under test (evaluated exactly once)
   *  @param expected value the expression should produce
   */
  def check_success[A](name: String, closure: => A, expected: A): Unit = {
    val res: Option[String] =
      try {
        val actual: A = closure
        if (actual == expected) None //print(" was successful")
        else Some(s" failed: expected $expected, found $actual")
      } catch {
        case exception: Throwable => Some(s" raised exception $exception")
      }
    for (e <- res) println(s"test $name $e")
  }
  /** Exercises literal syntax: char/unicode escapes, hex ints, longs, floats, doubles. */
  def main(args: Array[String]): Unit = {
    // char
    //unicode escapes escape in char literals
    check_success("'\\\\u0024' == '$'", '\\u0024', '$')
    check_success("'\\\\u005f' == '_'", '\\u005f', '_')
    //unicode escapes escape in interpolations
    check_success("""s"\\\\u0024" == "$"""", s"\\u0024", "$")
    check_success("s\\"\\"\\"\\\\u0024\\"\\"\\" == \\"$\\"", s"""\\u0024""", "$")
    //Int#asInstanceOf[Char] gets the char at the codepoint
    check_success("65.asInstanceOf[Char] == 'A'", 65.asInstanceOf[Char], 'A')
    // boolean
    check_success("(65 : Byte) == 'A'", (65: Byte) == 'A', true) // contrib #176
    // int
    check_success("0X01 == 1", 0X01, 1)
    check_success("0x01 == 1", 0x01, 1)
    check_success("0x10 == 16", 0x10, 16)
    check_success("0xa == 10", 0xa, 10)
    check_success("0x0a == 10", 0x0a, 10)
    check_success("+0x01 == 1", +0x01, 1)
    check_success("+0x10 == 16", +0x10, 16)
    check_success("+0xa == 10", +0xa, 10)
    check_success("+0x0a == 10", +0x0a, 10)
    check_success("-0x01 == -1", -0x01, -1)
    check_success("-0x10 == -16", -0x10, -16)
    check_success("-0xa == -10", -0xa, -10)
    check_success("-0x0a == -10", -0x0a, -10)
    check_success("0x7fffffff == 2147483647", 0x7fffffff, 2147483647)
    // hex literals at the Int boundary wrap into negative values
    check_success("0x80000000 == -2147483648", 0x80000000, -2147483648)
    check_success("0xffffffff == -1", 0xffffffff, -1)
    // long
    check_success("1l == 1L", 1l, 1L)
    check_success("1L == 1l", 1L, 1l)
    check_success("1.asInstanceOf[Long] == 1l", 1.asInstanceOf[Long], 1l)
    check_success("0x7fffffffffffffffL == 9223372036854775807L",
      0x7fffffffffffffffL, 9223372036854775807L)
    check_success("0x8000000000000000L == -9223372036854775808L",
      0x8000000000000000L, -9223372036854775808L)
    check_success("0xffffffffffffffffL == -1L",
      0xffffffffffffffffL, -1L)
    // see JLS at address:
    // https://java.sun.com/docs/books/jls/second_edition/html/lexical.doc.html#230798
    // float
    check_success("1e1f == 10.0f", 1e1f, 10.0f)
    check_success(".3f == 0.3f", .3f, 0.3f)
    check_success("0f == 0.0f", 0f, 0.0f)
    check_success("0f == -0.000000000000000000e+00f", 0f, -0.000000000000000000e+00f)
    check_success("0f == -0.000000000000000000e+00F", 0f, -0.000000000000000000e+00F)
    check_success("0f == -0.0000000000000000e14f", 0f, -0.0000000000000000e14f)
    check_success("01.23f == 1.23f", 01.23f, 1.23f)
    check_success("3.14f == 3.14f", 3.14f, 3.14f)
    check_success("6.022e23f == 6.022e23f", 6.022e23f, 6.022e23f)
    check_success("09f == 9.0f", 09f, 9.0f)
    check_success("1.00000017881393421514957253748434595763683319091796875001f == 1.0000001f",
      1.00000017881393421514957253748434595763683319091796875001f,
      1.0000001f)
    check_success("3.4028235E38f == Float.MaxValue", 3.4028235E38f, Float.MaxValue)
    check_success("1.asInstanceOf[Float] == 1.0", 1.asInstanceOf[Float], 1.0f)
    check_success("1L.asInstanceOf[Float] == 1.0", 1L.asInstanceOf[Float], 1.0f)
    // double
    check_success("1e1 == 10.0", 1e1, 10.0)
    check_success(".3 == 0.3", .3, 0.3)
    check_success("0.0 == 0.0", 0.0, 0.0)
    check_success("0d == 0.0", 0d, 0.0)
    check_success("0d == 0.000000000000000000e+00d", 0d, 0.000000000000000000e+00d)
    check_success("0d == -0.000000000000000000e+00d", 0d, -0.000000000000000000e+00d)
    check_success("0d == -0.000000000000000000e+00D", 0d, -0.000000000000000000e+00D)
    check_success("0.0 == 0.000000000000000000e+00", 0.0, 0.000000000000000000e+00)
    check_success("0.0 == -0.000000000000000000e+00", 0.0, -0.000000000000000000e+00)
    check_success("01.23 == 1.23", 01.23, 1.23)
    check_success("01.23d == 1.23d", 01.23d, 1.23d)
    check_success("3.14 == 3.14", 3.14, 3.14)
    check_success("1e-9d == 1.0e-9", 1e-9d, 1.0e-9)
    check_success("1e137 == 1.0e137", 1e137, 1.0e137)
    check_success("1.7976931348623157e308d == Double.MaxValue", 1.7976931348623157e308d, Double.MaxValue)
    check_success("1.asInstanceOf[Double] == 1.0", 1.asInstanceOf[Double], 1.0)
    check_success("1l.asInstanceOf[Double] == 1.0", 1l.asInstanceOf[Double], 1.0)
    check_success("\\"\\".length()", "\\u001a".length(), 1)
  }
}
//############################################################################
| scala/scala | test/files/run/literals.scala | Scala | apache-2.0 | 5,010 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.common
import kafka.integration.KafkaServerTestHarness
import kafka.server.KafkaConfig
import kafka.utils.{TestUtils, ZkUtils}
import org.junit.Test
/** Verifies that sequential ZK nodes written under the watched root are
 *  delivered to the registered NotificationHandler. */
class ZkNodeChangeNotificationListenerTest extends KafkaServerTestHarness {

  override def generateConfigs() =
    List(KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, zkConnect)))

  @Test
  def testProcessNotification() {
    @volatile var lastNotification: String = null
    @volatile var handlerInvocations = 0

    // Handler under test: remembers the most recent message and how often it fired.
    val handler = new NotificationHandler {
      override def processNotification(notificationMessage: String): Unit = {
        lastNotification = notificationMessage
        handlerInvocations += 1
      }
    }

    val root = "/root"
    val prefix = "prefix"
    val nodePath = root + "/" + prefix
    val firstMessage = "message1"
    val secondMessage = "message2"
    val expirationMs = 100

    val listener = new ZkNodeChangeNotificationListener(zkUtils, root, prefix, handler, expirationMs)
    listener.init()

    zkUtils.createSequentialPersistentPath(nodePath, firstMessage)
    TestUtils.waitUntilTrue(
      () => handlerInvocations == 1 && lastNotification == firstMessage,
      "failed to send/process notification message in the timeout period.")

    // Purging is deliberately not asserted: it compares wall-clock time against
    // the timestamp in the zookeeper stat, and the embedded zookeeper server
    // offers no way to mock time. Sleeping past the expiration and counting
    // children would race with deletion of the second node itself, so we only
    // verify that a second notification is delivered.
    zkUtils.createSequentialPersistentPath(nodePath, secondMessage)
    TestUtils.waitUntilTrue(
      () => handlerInvocations == 2 && lastNotification == secondMessage,
      "failed to send/process notification message in the timeout period.")
  }
}
| airbnb/kafka | core/src/test/scala/unit/kafka/common/ZkNodeChangeNotificationListenerTest.scala | Scala | apache-2.0 | 2,961 |
package dev.budget.reconciler.finagle.filter
import com.twitter.finagle.{Filter, Service}
import com.twitter.io.Charsets
import com.twitter.util.Future
import org.jboss.netty.buffer.ChannelBuffers
import org.jboss.netty.handler.codec.http.{DefaultHttpResponse, HttpRequest, HttpResponse, HttpResponseStatus}
/**
 * Finagle filter that adapts a service producing a plain string body into one
 * producing a full HTTP response.
 *
 * The response echoes the request's HTTP protocol version, carries status
 * 200 OK, and holds the string UTF-8 encoded as its content.
 */
class StringToHttpResponseFilter extends Filter[HttpRequest, HttpResponse, HttpRequest, String] {

  override def apply(request: HttpRequest, service: Service[HttpRequest, String]): Future[HttpResponse] = {
    // The transformation is synchronous, so `map` suffices; the previous
    // `flatMap` + `Future.value` round-trip allocated an extra Future per call.
    service.apply(request).map { str =>
      val response: DefaultHttpResponse = new DefaultHttpResponse(request.getProtocolVersion, HttpResponseStatus.OK)
      response.setContent(ChannelBuffers.copiedBuffer(str, Charsets.Utf8))
      response
    }
  }
}
| jhungerford/MintYnabReconciler | src/main/scala/dev/budget/reconciler/finagle/filter/StringToHttpResponseFilter.scala | Scala | apache-2.0 | 793 |
package org.openapitools.client.api
import argonaut._
import argonaut.EncodeJson._
import argonaut.DecodeJson._
import org.http4s.{EntityDecoder, EntityEncoder}
import org.http4s.argonaut._
import org.joda.time.DateTime
import GithubScmlinks._
// Generated API model holding two optional link-metadata fields.
// `class` is backticked because it collides with the Scala keyword.
case class GithubScmlinks (
  self: Option[Link],
  `class`: Option[String])
object GithubScmlinks {
  import DateTimeCodecs._
  // Argonaut codec derived from the case class, plus http4s entity
  // decoder/encoder instances so the model can serve directly as a
  // request/response body.
  implicit val GithubScmlinksCodecJson: CodecJson[GithubScmlinks] = CodecJson.derive[GithubScmlinks]
  implicit val GithubScmlinksDecoder: EntityDecoder[GithubScmlinks] = jsonOf[GithubScmlinks]
  implicit val GithubScmlinksEncoder: EntityEncoder[GithubScmlinks] = jsonEncoderOf[GithubScmlinks]
}
| cliffano/swaggy-jenkins | clients/scalaz/generated/src/main/scala/org/openapitools/client/api/GithubScmlinks.scala | Scala | mit | 672 |
package org.ciroque.ccr.datastores
import java.util.UUID
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.commons.{MongoDBList, MongoDBObject}
import com.mongodb.casbah.commons.conversions.scala.RegisterJodaTimeConversionHelpers
import com.mongodb.casbah.{MongoClient, MongoCollection}
import com.mongodb.{BasicDBList, BasicDBObject, DBObject}
import org.bson.types.ObjectId
import org.ciroque.ccr.core.Commons
import org.ciroque.ccr.datastores.DataStoreResults.{DataStoreResult, Deleted, Found, NotFound}
import org.ciroque.ccr.logging.ImplicitLogging._
import org.ciroque.ccr.models.ConfigurationFactory
import org.ciroque.ccr.models.ConfigurationFactory.{AuditHistory, AuditEntry, Configuration}
import org.joda.time.DateTime
import org.slf4j.Logger
import spray.json._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/** Bidirectional conversions between the domain model (Configuration,
 *  AuditHistory, spray-json values) and the casbah/MongoDB document layout. */
object MongoConversions {

  /** Serializes a [[Configuration]] into the MongoDB document shape used by the
   *  configuration collection: `_id`, a nested key sub-document, the raw value,
   *  and a temporalization sub-document (effectiveAt / expiresAt / ttl).
   */
  def convertConfigurationToMongoDBObject(configuration: Configuration): MongoDBObject = {
    import org.ciroque.ccr.core.Commons
    RegisterJodaTimeConversionHelpers()
    val coreKeys = List((Commons.KeyStrings.EnvironmentKey, configuration.key.environment)
      , (Commons.KeyStrings.ApplicationKey, configuration.key.application)
      , (Commons.KeyStrings.ScopeKey, configuration.key.scope)
      , (Commons.KeyStrings.SettingKey, configuration.key.setting))

    // The sourceId field is optional and only written when present on the key.
    val keyValues = configuration.key.sourceId match {
      case None => coreKeys
      case Some(sourceId) => coreKeys :+ Commons.KeyStrings.SourceIdKey -> sourceId
    }

    // Recursively lowers a spray-json AST into plain Scala values that casbah
    // can persist (String / Double / Boolean / Map / Seq / null).
    def buildMongoDbObjectGraph(js: JsValue): Any = {
      js match {
        case JsString(s) ⇒ s
        // NOTE(review): every number is stored as a Double, which can lose
        // precision for large integral values — confirm this is intended.
        case JsNumber(n) ⇒ n.doubleValue()
        case JsBoolean(b) ⇒ b
        case JsObject(m) ⇒ m.map { case (k, v) ⇒ (k, buildMongoDbObjectGraph(v)) }
        case JsArray(e) ⇒ e.map(j ⇒ buildMongoDbObjectGraph(j))
        // Removed the former `case JsFalse => "false"` / `case JsTrue ⇒ "true"`
        // branches: they were unreachable because the JsBoolean(b) extractor
        // above already matches both singletons — and had they been reachable
        // they would wrongly have stored booleans as strings.
        case JsNull ⇒ null
      }
    }
    val value = buildMongoDbObjectGraph(configuration.value)

    val mdbo = MongoDBObject(
      Commons.KeyStrings.IdKey -> configuration._id,
      Commons.KeyStrings.KeyKey -> MongoDBObject(keyValues),
      Commons.KeyStrings.ValueKey -> value,
      Commons.KeyStrings.TemporalizationKey -> MongoDBObject(
        Commons.KeyStrings.EffectiveAtKey -> configuration.temporality.effectiveAt,
        Commons.KeyStrings.ExpiresAtKey -> configuration.temporality.expiresAt,
        Commons.KeyStrings.TtlKey -> configuration.temporality.ttl
      )
    )
    mdbo
  }

  /** Deserializes a MongoDB document (as written above) back into a Configuration. */
  def convertMongoDBObjectToConfiguration(dbo: DBObject): Configuration = {
    import org.ciroque.ccr.core.Commons
    val db = dbo.toMap
    val key = db.get(Commons.KeyStrings.KeyKey).asInstanceOf[DBObject]
    val temporalization = db.get(Commons.KeyStrings.TemporalizationKey).asInstanceOf[DBObject]
    // sourceId is optional in the stored key sub-document.
    val sourceId = if (key.containsField(Commons.KeyStrings.SourceIdKey))
      Some(key.get(Commons.KeyStrings.SourceIdKey).toString)
    else
      None
    val mongoValue = db.get(Commons.KeyStrings.ValueKey)
    val value = toJsValue(mongoValue)
    ConfigurationFactory(
      UUID.fromString(db.get(Commons.KeyStrings.IdKey).toString),
      key.get(Commons.KeyStrings.EnvironmentKey).toString,
      key.get(Commons.KeyStrings.ApplicationKey).toString,
      key.get(Commons.KeyStrings.ScopeKey).toString,
      key.get(Commons.KeyStrings.SettingKey).toString,
      sourceId,
      value,
      temporalization.get(Commons.KeyStrings.EffectiveAtKey).asInstanceOf[DateTime],
      temporalization.get(Commons.KeyStrings.ExpiresAtKey).asInstanceOf[DateTime],
      temporalization.get(Commons.KeyStrings.TtlKey).asInstanceOf[Long]
    )
  }

  /** Rehydrates an audit document (`_id` + `history` array) into an AuditHistory. */
  def toAuditHistory(dbo: DBObject): AuditHistory = {
    val dboMap = dbo.toMap
    // Converts the raw BasicDBList stored under `history` into typed entries.
    def toAuditEntryList(a: Any): List[AuditEntry] = {
      a match {
        case dbl: BasicDBList =>
          dbl.toList.map {
            case dbo: BasicDBObject => dbObjectToAuditEntry(dbo)
          }
      }
    }
    def dbObjectToAuditEntry(dbo: DBObject): AuditEntry = {
      val date = dbo.get("date").asInstanceOf[DateTime]
      val original = convertMongoDBObjectToConfiguration(dbo.get(Commons.KeyStrings.OriginalKey).asInstanceOf[DBObject])
      // `updated` is absent for insert-audit records (see insertAuditRecord).
      val updated = if(dbo.isDefinedAt(Commons.KeyStrings.UpdatedKey))
        Some(convertMongoDBObjectToConfiguration(dbo.get(Commons.KeyStrings.UpdatedKey).asInstanceOf[DBObject]))
      else
        None
      AuditEntry(date, original, updated)
    }
    val uuid = dboMap.get(Commons.KeyStrings.IdKey).asInstanceOf[UUID]
    val dboHistory = dboMap.get(Commons.KeyStrings.HistoryKey)
    val history = toAuditEntryList(dboHistory)
    AuditHistory(uuid, history)
  }

  /** Converts a MongoDBObject into a spray-json JsObject, field by field. */
  def convertMongoDBObjectToJsObject(obj: MongoDBObject): JsObject = {
    JsObject(
      obj.toSeq.map { case (key, value) =>
        key -> toJsValue(value)
      }.toMap)
  }

  /** Converts a MongoDBList into a spray-json JsArray. */
  def convertMongoDBListToJsArray(list: MongoDBList): JsArray = {
    JsArray(list.map(toJsValue).toVector)
  }

  /** Maps an arbitrary value read from MongoDB onto the spray-json AST. */
  def toJsValue(a: Any): JsValue = a match {
    case uuid: UUID => JsString(uuid.toString)
    case id: ObjectId => JsString(id.toString)
    case list: BasicDBList => convertMongoDBListToJsArray(list)
    case obj: DBObject => convertMongoDBObjectToJsObject(obj)
    case long: Long => JsNumber(long)
    case int: Int => JsNumber(int)
    case float: Float => JsNumber(float)
    case double: Double => JsNumber(double)
    case decimal: java.math.BigDecimal => JsNumber(decimal)
    case decimal: scala.BigDecimal => JsNumber(decimal)
    case string: String => JsString(string)
    case boolean: Boolean => JsBoolean(boolean)
    case null => JsNull
    case jdt: DateTime => JsString(jdt.toString)
    // NOTE(review): erasure makes List[Any] an unchecked match; it acts as a
    // catch-all for any remaining scala List value.
    case list: List[Any] => JsArray(list.map(e => toJsValue(e)).toVector)
  }
}
object MongoSettingsDataStore {
  // Standard MongoDB port, used when DataStoreParams supplies none.
  val defaultPort = 27017
  RegisterJodaTimeConversionHelpers()
}
/** MongoDB-backed implementation of SettingsDataStore. Uses two casbah
 *  collections: one for configurations and one for per-configuration audit
 *  history, both resolved from the supplied DataStoreParams. */
class MongoSettingsDataStore(settings: DataStoreParams)(implicit val logger: Logger) extends SettingsDataStore {
  val mongoClient = MongoClient(settings.hostname, settings.port.getOrElse(MongoSettingsDataStore.defaultPort))
  val configurationCollection = mongoClient(settings.database)(settings.catalog)
  val auditingCollection = mongoClient(settings.database)(settings.auditCatalog)
  RegisterJodaTimeConversionHelpers()
  // NOTE(review): this never touches the datastore — it only echoes a Deleted
  // result. Confirm whether deletion is implemented elsewhere or still pending.
  override def deleteConfiguration(configuration: Configuration): Future[DataStoreResult] = Future.successful(Deleted(configuration))
  // Inserts a configuration, writes an insert-audit record, and reports the result.
  override def insertConfiguration(configuration: Configuration): Future[DataStoreResult] = {
    val validatedConfiguration = configuration.copy(key = validateKey(configuration.key))
    withImplicitLogging("MongoSettingsDataStore::insertConfiguration") {
      recordValue("given-configuration", configuration.toJson.toString())
      recordValue("added-configuration", validatedConfiguration.toJson.toString())
      executeInCollection { collection =>
        collection.insert(MongoConversions.convertConfigurationToMongoDBObject(validatedConfiguration))
        val dsr = DataStoreResults.Added(validatedConfiguration)
        // Audit failures are logged but do not undo the insert itself.
        insertAuditRecord(DateTime.now, validatedConfiguration, None).recoverWith {
          case t: Throwable =>
            recordValue("RecordAuditFailure::INSERT", t.toString)
            Future.successful(DataStoreResults.Errored(validatedConfiguration, t.getMessage))
        }
        dsr
      }.recoverWith {
        case t: Throwable =>
          // setResultException(t)
          // NOTE(review): any failure here is reported as a duplicate-key error,
          // regardless of the actual cause — confirm this blanket mapping.
          Future.successful(DataStoreResults.Errored(validatedConfiguration, Commons.DatastoreErrorMessages.DuplicateKeyError))
      }
    }
  }
  // Replaces the document matched by _id, audits the change, and returns
  // Updated(previous, new) or NotFound when no document matched.
  override def updateConfiguration(configuration: Configuration): Future[DataStoreResult] = {
    val validatedConfiguration = configuration.copy(key = validateKey(configuration.key))
    withImplicitLogging("MongoSettingsDataStore::updateConfiguration") {
      recordValue("original-configuration", configuration.toJson.toString())
      recordValue("validated-configuration", validatedConfiguration.toJson.toString())
      val queryDoc = new BasicDBObject("_id", validatedConfiguration._id)
      executeInCollection { collection =>
        // findAndModify yields the pre-modification document, used below as
        // the "previous" side of the Updated result and the audit entry.
        collection.findAndModify(queryDoc, MongoConversions.convertConfigurationToMongoDBObject(validatedConfiguration)) match {
          case Some(foundDocument) =>
            val previousConfiguration = MongoConversions.convertMongoDBObjectToConfiguration(foundDocument)
            val dsr = DataStoreResults.Updated(previousConfiguration, validatedConfiguration)
            insertAuditRecord(DateTime.now, previousConfiguration, Some(validatedConfiguration)).recoverWith {
              case t: Throwable =>
                recordValue("RecordAuditFailure::UPDATE", t.toString)
                Future.successful(DataStoreResults.Errored(validatedConfiguration, t.getMessage))
            }
            dsr
          case None => DataStoreResults.NotFound(Some(validatedConfiguration), Commons.DatastoreErrorMessages.NotFoundError)
        }
      }
    }
  }
  // Lists the distinct scopes for an environment/application pair.
  override def retrieveScopes(environment: String, application: String): Future[DataStoreResult] = {
    withImplicitLogging("MongoSettingsDataStore.retrieveScopes") {
      import org.ciroque.ccr.core.Commons
      recordValue(Commons.KeyStrings.EnvironmentKey, environment)
      recordValue(Commons.KeyStrings.ApplicationKey, application)
      executeInCollection { collection =>
        val results = collection.distinct("key.scope", MongoDBObject("key.environment" -> checkWildcards(environment), "key.application" -> checkWildcards(application)))
        results.map(res => res.asInstanceOf[String]).sortBy(app => app).toList match {
          case Nil => DataStoreResults.NotFound(None, s"environment '$environment' / application '$application' combination was not found")
          case list: List[String] => DataStoreResults.Found(list.toList)
        }
      }
    }
  }
  // Lists the distinct settings for an environment/application/scope triple.
  override def retrieveSettings(environment: String, application: String, scope: String): Future[DataStoreResult] = {
    withImplicitLogging("MongoSettingsDataStore.retrieveSettings") {
      import org.ciroque.ccr.core.Commons
      recordValue(Commons.KeyStrings.EnvironmentKey, environment)
      recordValue(Commons.KeyStrings.ApplicationKey, application)
      recordValue(Commons.KeyStrings.ScopeKey, scope)
      executeInCollection { collection =>
        val results = collection.distinct(
          "key.setting",
          MongoDBObject(
            "key.environment" -> checkWildcards(environment),
            "key.application" -> checkWildcards(application),
            "key.scope" -> checkWildcards(scope)))
        results.map(res => res.asInstanceOf[String]).sortBy(app => app).toList match {
          case Nil => DataStoreResults.NotFound(None, s"environment '$environment' / application '$application' / scope '$scope' combination was not found")
          case list: List[String] => DataStoreResults.Found(list.toList)
        }
      }
    }
  }
  // Lists the distinct applications registered under an environment.
  override def retrieveApplications(environment: String): Future[DataStoreResult] = {
    withImplicitLogging("MongoSettingsDataStore.retrieveApplications") {
      import org.ciroque.ccr.core.Commons
      recordValue(Commons.KeyStrings.EnvironmentKey, environment)
      executeInCollection { collection =>
        val searchTerm = checkWildcards(environment)
        val results = collection.distinct("key.application", MongoDBObject("key.environment" -> searchTerm))
        results.map(res => res.asInstanceOf[String]).sortBy(app => app).toList match {
          case Nil => DataStoreResults.NotFound(None, s"environment '$environment' was not found")
          case list: List[String] => DataStoreResults.Found(list.toList)
        }
      }
    }
  }
  // Lists every distinct environment present in the collection.
  override def retrieveEnvironments(): Future[DataStoreResult] = {
    withImplicitLogging("MongoSettingsDataStore.retrieveEnvironments") {
      executeInCollection { collection =>
        val results = collection.distinct("key.environment")
        val environments = results.map(result => result.asInstanceOf[String]).sortBy(environment => environment)
        DataStoreResults.Found(environments.toList)
      }
    }
  }
  // Finds the currently-active configuration(s) for a fully-qualified key,
  // optionally narrowed by sourceId.
  override def retrieveConfiguration(environment: String, application: String, scope: String, setting: String, sourceId: Option[String] = None): Future[DataStoreResult] = {
    withImplicitLogging("MongoSettingsDataStore.retrieveConfiguration") {
      import org.ciroque.ccr.core.Commons
      recordValue(Commons.KeyStrings.EnvironmentKey, environment)
      recordValue(Commons.KeyStrings.ApplicationKey, application)
      recordValue(Commons.KeyStrings.ScopeKey, scope)
      recordValue(Commons.KeyStrings.SettingKey, setting)
      recordValue(Commons.KeyStrings.SourceIdKey, sourceId.toString)
      queryConfigurations(environment, application, scope, setting) flatMap {
        configurations =>
          val dataStoreResult = configurations match {
            case Nil => NotFound(None, s"environment '$environment' / application '$application' / scope '$scope' / setting '$setting' combination was not found")
            // Distinguish "key unknown" from "key known but nothing active now".
            case list => list.filter(_.isActive) match {
              case Nil => NotFound(None, s"environment '$environment' / application '$application' / scope '$scope' / setting '$setting' found no active configuration")
              case found: Seq[Configuration] => Found(filterBySourceId(found, sourceId))
            }
          }
          Future.successful(dataStoreResult)
      }
    }
  }
  // Returns every configuration for the key (active or not), temporally ordered.
  override def retrieveConfigurationSchedule(environment: String, application: String, scope: String, setting: String): Future[DataStoreResult] = {
    import org.ciroque.ccr.models.ConfigurationFactory.ConfigurationOrdering._
    withImplicitLogging("MongoSettingsDataStore.retrieveConfigurationSchedule") {
      recordValue(Commons.KeyStrings.EnvironmentKey, environment)
      recordValue(Commons.KeyStrings.ApplicationKey, application)
      recordValue(Commons.KeyStrings.ScopeKey, scope)
      recordValue(Commons.KeyStrings.SettingKey, setting)
      queryConfigurations(environment, application, scope, setting) flatMap {
        configurations =>
          val dataStoreResult = configurations match {
            case Nil => NotFound(None, s"environment '$environment' / application '$application' / scope '$scope' / setting '$setting' combination was not found")
            case list => Found(list.sortBy(c => c))
          }
          Future.successful(dataStoreResult)
      }
    }
  }
  // Appends an audit entry ({date, original[, updated]}) to the configuration's
  // history document; the third `update` argument enables upsert so the history
  // document is created on first write.
  private def insertAuditRecord(when: DateTime, original: Configuration, updated: Option[Configuration]): Future[DataStoreResult] = {
    Future {
      val queryDoc = MongoDBObject("_id" -> original._id)
      val baseAuditEntry = List(("date", when), ("original", MongoConversions.convertConfigurationToMongoDBObject(original)))
      // `updated` is present only for update audits, not inserts.
      val auditEntry = updated match {
        case None => baseAuditEntry
        case Some(config) => baseAuditEntry :+ ("updated", MongoConversions.convertConfigurationToMongoDBObject(config))
      }
      val updateDoc = MongoDBObject("$push" -> MongoDBObject("history" -> MongoDBObject(auditEntry)))
      val result = auditingCollection.update(queryDoc, updateDoc, true)
      if(result.isUpdateOfExisting) {
        DataStoreResults.Updated(original, updated.getOrElse(ConfigurationFactory.EmptyConfiguration))
      } else {
        DataStoreResults.Added(original)
      }
    }
  }
  // Fetches all configuration documents matching the (wildcard-aware) key.
  private def queryConfigurations(environment: String, application: String, scope: String, setting: String): Future[List[Configuration]] = {
    val configurationQuery: DBObject = buildConfigurationQuery(environment, application, scope, setting)
    executeInCollection { collection =>
      collection.find(configurationQuery).toList match {
        case Nil => Nil
        case list => list.map(MongoConversions.convertMongoDBObjectToConfiguration)
      }
    }
  }
  // Builds the query: app/scope/setting must match, and the environment matches
  // either the requested one or the default environment.
  private def buildConfigurationQuery(environment: String, application: String, scope: String, setting: String) = {
    val environmentQuery = checkWildcards(environment)
    val applicationQuery = checkWildcards(application)
    val scopeQuery = checkWildcards(scope)
    val settingQuery = checkWildcards(setting)
    val assQuery = $and("key.application" $eq applicationQuery, "key.scope" $eq scopeQuery, "key.setting" $eq settingQuery)
    $or(("key.environment" $eq environmentQuery) :: ("key.environment" $eq ConfigurationFactory.DefaultEnvironment)) ++ assQuery
  }
  // Runs a blocking casbah operation on the configuration collection off-thread.
  private def executeInCollection[T](fx: (MongoCollection) => T): Future[T] = {
    Future { fx(configurationCollection) }
  }
  // Loads the audit history document for the given configuration id.
  override def retrieveAuditHistory(uuid: UUID): Future[DataStoreResult] = {
    withImplicitLogging("MongoSettingsDataStore::retrieveAuditHistory") {
      val mongoObjects = auditingCollection.find(MongoDBObject("_id" -> uuid)).toList
      // NOTE(review): `.head` throws on an unknown uuid instead of returning
      // NotFound — confirm whether callers guarantee the id exists.
      val auditHistories = MongoConversions.toAuditHistory(mongoObjects.head)
      Future.successful(DataStoreResults.Found(List(auditHistories)))
    }
  }
}
| ciroque/central-configuration-repository | src/main/scala/org/ciroque/ccr/datastores/MongoSettingsDataStore.scala | Scala | mit | 17,020 |
package inloopio.math.random
import java.io.BufferedOutputStream
import java.io.DataOutputStream
import java.io.File
import java.io.FileOutputStream
import java.io.IOException
import java.util.Random
/**
* Utility to generate an input file for the
* <a href="http://stat.fsu.edu/pub/diehard/" target="_top">DIEHARD</a> suite of statistical
* tests for random number generators.
* @author Daniel Dyer
*/
/**
 * Utility to generate an input file for the
 * <a href="http://stat.fsu.edu/pub/diehard/" target="_top">DIEHARD</a> suite of
 * statistical tests for random number generators.
 * @author Daniel Dyer
 */
object DiehardInputGenerator {

  // DIEHARD requires exactly this many 32-bit values in its input file.
  private val INT_COUNT = 3000000

  /**
   * Entry point: first argument is the RNG class name, second the output file.
   * @throws Exception if the RNG cannot be instantiated or the file written.
   */
  @throws(classOf[Exception])
  def main(args: Array[String]) {
    if (args.length != 2) {
      System.out.println("Expected arguments:")
      System.out.println("\\t<Fully-qualified RNG class name> <Output file>")
      System.exit(1)
    }
    val rngClass = Class.forName(args(0)).asInstanceOf[Class[_ <: Random]]
    generateOutputFile(rngClass.newInstance, new File(args(1)))
  }

  /**
   * Writes INT_COUNT random 32-bit integers drawn from `rng` to `outputFile`
   * in the binary format the DIEHARD suite consumes.
   * @throws IOException if the file cannot be written.
   */
  @throws(classOf[IOException])
  def generateOutputFile(rng: Random, outputFile: File) {
    val out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(outputFile)))
    try {
      var remaining = INT_COUNT
      while (remaining > 0) {
        out.writeInt(rng.nextInt)
        remaining -= 1
      }
      out.flush
    } finally {
      out.close
    }
  }
}
| dcaoyuan/inloopio-libs | inloopio-math/src/main/scala/inloopio/math/random/DiehardInputGenerator.scala | Scala | bsd-3-clause | 1,965 |
package com.coiney.akka.rabbit.example
import akka.actor.{Props, ActorSystem}
import com.coiney.akka.rabbit.{QueueConfig, RabbitSystem}
import com.coiney.akka.rabbit.protocol._
/** Minimal end-to-end example: connects to RabbitMQ, attaches a consumer backed
 *  by a ConsumeActor, consumes "my_queue", then shuts the actor system down. */
object ConsumerExample1 extends App {
  implicit val system = ActorSystem("ConsumerSystem")
  val rabbitSystem = RabbitSystem()
  // create the connection keeper and wait for it to be connected
  val connectionKeeper = rabbitSystem waitFor rabbitSystem.createConnection("connection")
  // // alternatively, you can also
  // val connectionKeeper = rabbitSystem.createConnection("connection")
  // rabbitSystem.waitForConnection(connectionKeeper)
  // create the producer and wait for it to be connected
  val consumeActor = system.actorOf(Props(classOf[ConsumeActor]))
  val consumer = rabbitSystem waitFor rabbitSystem.createConsumer(connectionKeeper, consumeActor, "consumer")
  // // Same here: alternatively, you can also
  // val consumer = rabbitSystem.createConsumer(connectionKeeper, consumeActor, "consumer")
  // rabbitSystem.waitForConnection(consumer)
  // consume the queue
  consumer ! ConsumeQueue(QueueConfig("my_queue"))
  // shutdown the system (the sleep gives the consumer a moment to start)
  Thread.sleep(1000)
  system.shutdown()
}
| Coiney/akka-rabbit | akka-rabbit-example/src/main/scala/com/coiney/akka/rabbit/example/ConsumerExample1.scala | Scala | bsd-3-clause | 1,193 |
package me.yingrui.segment.util
import org.junit.Assert
import org.junit.Test
/** Unit tests for RomanNumeral symbol lookup and full-numeral conversion. */
class RomanNumberTest {

  /** Each basic Roman numeral symbol maps to its fixed value. */
  @Test
  def should_return_number_when_input_is_single_roman_numeral_character() {
    val symbolValues = Seq(
      "I" -> 1, "V" -> 5, "X" -> 10, "L" -> 50,
      "C" -> 100, "D" -> 500, "M" -> 1000)
    for ((symbol, expected) <- symbolValues)
      Assert.assertEquals(expected, RomanNumeral.getBasicSymbol(symbol))
  }

  /** Full numerals, including subtractive forms, convert correctly. */
  @Test
  def should_get_number_of_roman_numeral() {
    val conversions = Seq("I" -> 1, "XXXIX" -> 39, "MCMXLIV" -> 1944)
    for ((numeral, expected) <- conversions)
      Assert.assertEquals(expected, RomanNumeral.convert(numeral))
  }
}
package com.adendamedia.cornucopia
import akka.stream.scaladsl.Source
import akka.actor.{ActorRef, ActorSystem}
import akka.stream.{ActorMaterializer, ActorMaterializerSettings, Supervision}
import com.adendamedia.cornucopia.actors.CornucopiaSource
import com.typesafe.config.ConfigFactory
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
import com.adendamedia.cornucopia.actors.SharedActorSystem.sharedActorSystem
/** Application-level wiring for the Cornucopia stream: typesafe-config values,
 *  the shared actor system, a resume-on-failure materializer, and the
 *  actor-published task source. Note: evaluating this object has side effects
 *  (it materializes the shared ActorSystem). */
object Config {
  object Cornucopia {
    private val config = ConfigFactory.load().getConfig("cornucopia")
    // Minimum wait between reshard operations.
    val minReshardWait: FiniteDuration = config.getInt("reshard.interval").seconds
    // Configured in seconds; converted to milliseconds here.
    val refreshTimeout: Int = config.getInt("refresh.timeout") * 1000
    val batchPeriod: FiniteDuration = config.getInt("batch.period").seconds
  }
  implicit val actorSystem: ActorSystem = sharedActorSystem
  // Log failures and resume processing
  private val decider: Supervision.Decider = { e =>
    LoggerFactory.getLogger(this.getClass).error("Failed to process event", e)
    Supervision.Resume
  }
  private val materializerSettings: ActorMaterializerSettings =
    ActorMaterializerSettings(actorSystem).withSupervisionStrategy(decider)
  implicit val materializer: ActorMaterializer = ActorMaterializer(materializerSettings)(actorSystem)
  private val cornucopiaActorProps = CornucopiaSource.props
  // Materializes an ActorRef to which Task messages can be sent.
  val cornucopiaActorSource: Source[CornucopiaSource.Task, ActorRef] =
    Source.actorPublisher[CornucopiaSource.Task](cornucopiaActorProps)
  object ReshardTableConfig {
    // 16384 — presumably the Redis cluster slot space; confirm against usage.
    final implicit val ExpectedTotalNumberSlots: Int = 16384
  }
  val reshardTimeout: Int = ConfigFactory.load().getConfig("cornucopia").getInt("reshard.timeout")
  val migrateSlotTimeout: Int = ConfigFactory.load().getConfig("cornucopia").getInt("reshard.migrate.slot.timeout")
}
| sjking/cornucopia | src/main/scala/com/adendamedia/cornucopia/Config.scala | Scala | lgpl-3.0 | 1,800 |
import sbt._
import Keys._
// sbt 0.13-style build definition.
object MyBuild extends Build {
  // Shared settings; the Scala version can be overridden via -Dscala.version=...
  // NOTE(review): buildSettings is not applied to the `clairvoyant` project
  // below — confirm whether it should be included in its settings.
  lazy val buildSettings = Defaults.defaultSettings ++ Seq(
    version := "0.1-SNAPSHOT",
    organization := "com.simba",
    scalaVersion := Option(System.getProperty("scala.version")).getOrElse("2.11.6")
  )
  // Root project; bundles sbt-assembly settings for building a fat jar.
  lazy val clairvoyant = Project(
    id = "clairvoyant",
    base = file("."),
    settings = Defaults.defaultSettings ++
      sbtassembly.Plugin.assemblySettings
  )
}
| ShiZhan/clairvoyant | project/Build.scala | Scala | apache-2.0 | 441 |
package com.outr.arango.api
import com.outr.arango.api.model._
import io.youi.client.HttpClient
import io.youi.http.HttpMethod
import io.youi.net._
import io.circe.Json
import scala.concurrent.{ExecutionContext, Future}
object APICollectionCollectionNameTruncate {
def put(client: HttpClient, collectionName: String)(implicit ec: ExecutionContext): Future[Json] = client
.method(HttpMethod.Put)
.path(path"/_api/collection/{collection-name}/truncate".withArguments(Map("collection-name" -> collectionName)), append = true)
.call[Json]
} | outr/arangodb-scala | api/src/main/scala/com/outr/arango/api/APICollectionCollectionNameTruncate.scala | Scala | mit | 559 |
package synereo.client.handlers
import diode.ActionResult.ModelUpdate
import diode.RootModelRW
import shared.dtos.Connection
import shared.models.ConnectionsModel
import synereo.client.UnitTest
import synereo.client.rootmodels.ConnectionsRootModel
/**
* Created by shubham.k on 29-08-2016.
*/
/**
 * Unit tests for [[ConnectionHandler]]: verifies that handling an
 * [[UpdateConnections]] action produces a model update.
 */
class ConnectionHandlerTest extends UnitTest("ConnectionHandlerTest") {
  // Handler under test, wired to a read/write lens over an empty connections root model.
  val handler = new ConnectionHandler(new RootModelRW(ConnectionsRootModel()))
  // Sample connections used to build the update payload.
  val newCnxnSeq = Seq(
    Connection("newSource1", "newLabel1", "newTarget1"),
    Connection("newSource2", "newLabel2", "newTarget2"),
    Connection("newSource3", "newLabel3", "newTarget3"))
  // Connection models wrapping the sample connections, one session URI each.
  val newConnectionModelSeq = Seq(
    ConnectionsModel("sessionUri1", newCnxnSeq(0), "name1", ""),
    ConnectionsModel("sessionUri2", newCnxnSeq(1), "name2", ""),
    ConnectionsModel("sessionUri3", newCnxnSeq(2), "name3", ""))
  // Fixed typo in the test description ("Updated ... responce" -> "update ... response").
  "UpdateConnections" should "update connections as per response" in {
    val result = handler.handle(UpdateConnections(newConnectionModelSeq))
    result match {
      case ModelUpdate(newValue) =>
        // The handler must yield an updated (non-null) root model.
        assert(newValue != null)
      case other =>
        // Include the unexpected result in the failure message for easier diagnosis.
        assert(false, s"expected ModelUpdate but got $other")
    }
  }
}
| LivelyGig/ProductWebUI | sclient/src/test/scala/synereo/client/handlers/ConnectionHandlerTest.scala | Scala | apache-2.0 | 1,163 |
package mesosphere.marathon
package api.validation
import com.wix.accord.validate
import mesosphere.UnitTest
import mesosphere.marathon.api.v2.AppNormalization
import mesosphere.marathon.api.v2.validation.AppValidation
import mesosphere.marathon.core.plugin.PluginManager
import mesosphere.marathon.raml.{ App, AppCContainer, AppUpdate, ContainerPortMapping, EngineType, Raml }
import mesosphere.marathon.state.AppDefinition
import org.scalatest.Matchers
import play.api.libs.json.Json
/**
 * Validation tests for canonical app updates: container field validation and
 * a regression test for changing an app's network type via an update.
 */
class AppUpdateValidatorTest extends UnitTest with Matchers {
  // Validators under test: update-level validation and full app-definition validation
  // (no enabled features, no plugins).
  implicit val appUpdateValidator = AppValidation.validateCanonicalAppUpdateAPI(Set.empty)
  implicit val validAppDefinition = AppDefinition.validAppDefinition(Set.empty)(PluginManager.None)
  "validation that considers container types" should {
    "test that Docker container is validated" in {
      val f = new Fixture
      val update = AppUpdate(
        id = Some("/test"),
        container = Some(f.invalidDockerContainer))
      // Negative port numbers in the container must make validation fail.
      assert(validate(update).isFailure)
    }
    "test that AppC container is validated" in {
      val f = new Fixture
      val update = AppUpdate(
        id = Some("/test"),
        container = Some(f.invalidAppCContainer))
      assert(validate(update).isFailure)
    }
  }
  "validation for network type changes" should {
    // regression test for DCOS-10641
    "allow updating from HOST to USER network for an app using a Docker container" in {
      // Original app: Docker container on HOST networking with one port definition.
      val originalApp = Json.parse(
        """
          | {
          |  "id": "/sleepy-moby",
          |  "cmd": "sleep 1000",
          |  "instances": 1,
          |  "cpus": 1,
          |  "mem": 128,
          |  "disk": 0,
          |  "gpus": 0,
          |  "backoffSeconds": 1,
          |  "backoffFactor": 1.15,
          |  "maxLaunchDelaySeconds": 3600,
          |  "container": {
          |    "docker": {
          |      "image": "alpine",
          |      "forcePullImage": false,
          |      "privileged": false,
          |      "network": "HOST"
          |    }
          |  },
          |  "upgradeStrategy": {
          |    "minimumHealthCapacity": 0.5,
          |    "maximumOverCapacity": 0
          |  },
          |  "portDefinitions": [
          |    {
          |      "protocol": "tcp",
          |      "port": 10004
          |    }
          |  ],
          |  "requirePorts": false
          |}
        """.stripMargin).as[App]
      val config = AppNormalization.Configure(None, "mesos-bridge-name")
      // Normalize the deprecated-format app, then convert RAML -> state model.
      val appDef = Raml.fromRaml(
        AppNormalization.apply(config)
          .normalized(AppNormalization.forDeprecated(config).normalized(originalApp)))
      // Update: same app switched to USER networking via the deprecated ipAddress field.
      val appUpdate = AppNormalization.forUpdates(config).normalized(
        AppNormalization.forDeprecatedUpdates(config).normalized(Json.parse(
          """
            |{
            |  "id": "/sleepy-moby",
            |  "cmd": "sleep 1000",
            |  "instances": 1,
            |  "cpus": 1,
            |  "mem": 128,
            |  "disk": 0,
            |  "gpus": 0,
            |  "backoffSeconds": 1,
            |  "backoffFactor": 1.15,
            |  "maxLaunchDelaySeconds": 3600,
            |  "container": {
            |    "docker": {
            |      "image": "alpine",
            |      "forcePullImage": false,
            |      "privileged": false,
            |      "network": "USER"
            |    }
            |  },
            |  "upgradeStrategy": {
            |    "minimumHealthCapacity": 0.5,
            |    "maximumOverCapacity": 0
            |  },
            |  "portDefinitions": [],
            |  "ipAddress": {
            |    "networkName": "dcos"
            |  },
            |  "requirePorts": false
            |}
          """.stripMargin).as[AppUpdate]))
      // Applying the update onto the app definition must still produce a valid app.
      assert(validate(Raml.fromRaml(Raml.fromRaml(appUpdate -> appDef))).isSuccess)
    }
  }
  // Invalid container fixtures shared by the tests above.
  class Fixture {
    def invalidDockerContainer: raml.Container = raml.Container(
      EngineType.Docker,
      portMappings = Option(Seq(
        ContainerPortMapping(
          // Invalid (negative) port numbers
          containerPort = -1, hostPort = Some(-1), servicePort = -1)
      ))
    )
    def invalidAppCContainer: raml.Container = raml.Container(EngineType.Mesos, appc = Some(AppCContainer(
      image = "anImage",
      id = Some("invalidID")))
    )
  }
}
| natemurthy/marathon | src/test/scala/mesosphere/marathon/api/validation/AppUpdateValidatorTest.scala | Scala | apache-2.0 | 4,250 |
package se.gigurra.leavu3.interfaces
import java.util.UUID
import com.twitter.finagle.FailedFastException
import com.twitter.util.{Await, Duration, Future}
import com.github.gigurra.heisenberg.MapData.SourceData
import com.github.gigurra.heisenberg._
import se.gigurra.leavu3.datamodel.DlinkData._
import se.gigurra.leavu3.datamodel.{Configuration, DcsRemoteRemoteConfig, Leavu3Instance}
import se.gigurra.leavu3.util.{DefaultTimer, Throttled, RestClient}
import com.github.gigurra.serviceutils.json.JSON
import com.github.gigurra.serviceutils.twitter.logging.Logging
import scala.language.implicitConversions
import scala.util.control.NonFatal
/**
 * Client for the local "Dcs Remote" HTTP service. Periodically refreshes the
 * remote configuration, registers this Leavu3 instance as a heartbeat, caches
 * downloaded categories, and decides whether this instance acts as master or
 * slave among all registered instances.
 */
case class DcsRemote private(config: Configuration) extends Logging {
  import DcsRemote._
  // When true this instance never acts as master.
  private val forcedSlave = config.forcedSlave
  // Unique id identifying this process to the remote service.
  private val ownInstanceId = UUID.randomUUID().toString
  private val client = RestClient(config.dcsRemoteAddress, config.dcsRemotePort, "Dcs Remote")
  // Last successfully downloaded data, keyed by category then item id.
  private val cache = new scala.collection.concurrent.TrieMap[String, Map[String, Stored[_]]]
  // Blocks at construction until the first config download succeeds (see initialDownloadConfig).
  @volatile var remoteConfig = initialDownloadConfig()
  @volatile var ownPriority: Int = 0
  // Refresh the remote configuration once per second; throttling and fast-fail
  // errors are expected and silently ignored.
  DefaultTimer.fps(1) {
    downloadUpdatedConfig()
      .onSuccess { staticData =>
        remoteConfig = staticData
      }
      .onFailure {
        case e: Throttled =>
        case e: FailedFastException =>
        case e => logger.warning(s"Unable to download configuration from Dcs Remote: $e")
      }
  }
  // Re-register this instance ten times per second (acts as a liveness heartbeat).
  DefaultTimer.fps(10) {
    registerLeavu3Instance()
      .onFailure {
        case e: Throttled =>
        case e: FailedFastException =>
        case e => logger.warning(s"Unable to register Leavu3 instance on Dcs Remote: $e")
      }
  }
  // Store a serialisable value under <category>/<id> on the remote.
  def store[T: MapProducer](category: String, id: String, data: => T): Future[Unit] = {
    client.put(s"$category/$id")(JSON.write(data))
  }
  // Store a raw (pre-serialised) string under <category>/<id>.
  def store(category: String, id: String, data: => String): Future[Unit] = {
    client.put(s"$category/$id")(data)
  }
  def delete(category: String, id: String): Future[Unit] = {
    client.delete(s"$category/$id")
  }
  // Download a category from the remote, parse each entry into Stored[T],
  // update the local cache, and fall back to the cached value on any failure.
  def loadFromSource[T: MapParser](category: String, maxAge: Option[Duration], minTimeDelta: Option[Duration] = None): Future[Map[String, Stored[T]]] = {
    client.get(category, maxAge = maxAge, minTimeDelta = minTimeDelta).map { data =>
      val out = JSON.readMap(data).asInstanceOf[Map[String, SourceData]].map {
        case (k, v) => k ->
          Stored[T](
            timestamp = v("timestamp").asInstanceOf[Double],
            age = v("age").asInstanceOf[Double],
            item = MapParser.parse[T](v("data").asInstanceOf[Map[String, SourceData]])
          )
      }
      cache.put(category, out)
      out
    }.onFailure {
      case e: Throttled =>
      case e: FailedFastException =>
      case NonFatal(e) => logger.warning(s"Unable to download and process category $category from local Dcs Remote: $e")
    }.rescue {
      case NonFatal(e) => Future.value(getCached(category))
    }
  }
  /**
   * Load will always be empty on the first attempt,
   * since it triggers the actual download from the Dcs Remote
   */
  def loadFromCache[T: MapParser](category: String, maxAge: Option[Duration], minTimeDelta: Option[Duration] = None): Map[String, Stored[T]] = {
    // Fire-and-forget refresh; the returned value is whatever the cache currently holds.
    loadFromSource[T](category, maxAge, minTimeDelta)
    getCached(category)
  }
  private def getCached[T](category: String): Map[String, Stored[T]] = {
    cache.get(category).map(_.asInstanceOf[Map[String, Stored[T]]]).getOrElse(Map.empty[String, Stored[T]])
  }
  // Synchronous first download; construction fails if the remote is unreachable.
  private def initialDownloadConfig(): DcsRemoteRemoteConfig = {
    try Await.result(downloadUpdatedConfig()) catch {
      case NonFatal(e) => throw new RuntimeException(s"Failed to communicate with dcs remote!", e)
    }
  }
  private def downloadUpdatedConfig(): Future[DcsRemoteRemoteConfig] = {
    client.get(s"static-data").map(JSON.read[DcsRemoteRemoteConfig])
  }
  // Publish this instance (id, priority, master flag); forced slaves report priority -1.
  private def registerLeavu3Instance(): Future[Unit] = {
    store("leavu3-instances", ownInstanceId, Leavu3Instance(ownInstanceId, if (forcedSlave) -1 else ownPriority, isActingMaster))
  }
  def isActingSlave: Boolean = {
    !isActingMaster
  }
  // Master election: the single instance with the highest priority wins; ties are
  // broken by the lowest instance id. An instance that cannot see itself in the
  // registry assumes master. Forced slaves never act as master.
  def isActingMaster: Boolean = {
    if (forcedSlave) {
      false
    }
    else {
      val instanceLkup = loadFromCache[Leavu3Instance]("leavu3-instances", maxAge = Some(Duration.fromSeconds(1)), minTimeDelta = Some(Duration.fromMilliseconds(20)))
      instanceLkup.get(ownInstanceId) match {
        case None => true
        case Some(myInstance) =>
          val instances: Seq[Leavu3Instance] = instanceLkup.values.map(_.item).toSeq.sortBy(_.id)
          val highestPriority = instances.map(_.priority).max
          val instancesWithHighestPrio = instances.filter(_.priority == highestPriority)
          if (instancesWithHighestPrio.size == 1) {
            instancesWithHighestPrio.head.id == ownInstanceId
          } else {
            instances.head.id == ownInstanceId
          }
      }
    }
  }
}
/**
 * Process-wide singleton access to the [[DcsRemote]] client.
 * [[DcsRemote.init]] must be called exactly once before any other member is used.
 */
object DcsRemote {
  // Singleton instance; null until init() has been called.
  private var instance: DcsRemote = null
  def init(appCfg: Configuration): Unit = {
    require(instance == null, "Cannot call DcsRemote.init twice!")
    instance = DcsRemote(appCfg)
  }
  // Let the DcsRemote companion itself be used where a RestClient / DcsRemote is expected.
  // NOTE(review): these throw a NullPointerException if used before init() — confirm callers' ordering.
  implicit def remote2client(r: DcsRemote.type): RestClient = instance.client
  implicit def remote2remote(r: DcsRemote.type): DcsRemote = instance
  def remoteConfig: DcsRemoteRemoteConfig = instance.remoteConfig
  /** A value read from the remote, with the remote-reported timestamp and age. */
  case class Stored[T](timestamp: Double, age: Double, item: T)
}
| GiGurra/leavu3 | src/main/scala/se/gigurra/leavu3/interfaces/DcsRemote.scala | Scala | mit | 5,503 |
package lila.mailer
import akka.actor.ActorSystem
import io.methvin.play.autoconfig._
import play.api.i18n.Lang
import play.api.libs.mailer.{ Email, SMTPConfiguration, SMTPMailer }
import scala.concurrent.duration.{ span => _, _ }
import scala.concurrent.{ blocking, Future }
import scalatags.Text.all.{ html => htmlTag, _ }
import scalatags.Text.tags2.{ title => titleTag }
import lila.common.String.html.{ nl2br }
import lila.common.{ Chronometer, EmailAddress, ThreadLocalRandom }
import lila.i18n.I18nKeys.{ emails => trans }
/**
 * Sends transactional emails over SMTP, splitting traffic between a primary
 * and a secondary endpoint.
 *
 * @param config               primary and secondary SMTP endpoint configuration
 * @param getSecondaryPermille share (per mille, 0-1000) of messages routed to the secondary endpoint
 */
final class Mailer(
    config: Mailer.Config,
    getSecondaryPermille: () => Int
)(implicit system: ActorSystem) {
  // SMTP delivery blocks, so run it on a dedicated dispatcher rather than the default one.
  implicit private val blockingExecutionContext = system.dispatchers.lookup("blocking-smtp-dispatcher")
  private val primaryClient = new SMTPMailer(config.primary.toClientConfig)
  private val secondaryClient = new SMTPMailer(config.secondary.toClientConfig)
  // Pick the secondary endpoint with probability getSecondaryPermille()/1000, else the primary.
  private def randomClient(): (SMTPMailer, Mailer.Smtp) =
    if (ThreadLocalRandom.nextInt(1000) < getSecondaryPermille()) (secondaryClient, config.secondary)
    else (primaryClient, config.primary)
  // Send a message; refuses (with a warning) to send to noreply addresses.
  // Delivery time is recorded via Chronometer under the email.send metric.
  def send(msg: Mailer.Message): Funit =
    if (msg.to.isNoReply) {
      logger.warn(s"Can't send ${msg.subject} to noreply email ${msg.to}")
      funit
    } else
      Future {
        Chronometer.syncMon(_.email.send.time) {
          blocking {
            val (client, config) = randomClient()
            client
              .send(
                Email(
                  subject = msg.subject,
                  from = config.sender,
                  to = Seq(msg.to.value),
                  bodyText = msg.text.some,
                  // HTML body, when present, is wrapped in a full document shell.
                  bodyHtml = msg.htmlBody map { body => Mailer.html.wrap(msg.subject, body).render }
                )
              )
              .unit
          }
        }
      }
}
object Mailer {
  // Socket timeout applied to all SMTP operations.
  private val timeout = 5 seconds
  /** Configuration for one SMTP endpoint; `mock = true` disables actual delivery. */
  case class Smtp(
      mock: Boolean,
      host: String,
      port: Int,
      tls: Boolean,
      user: String,
      sender: String,
      password: String
  ) {
    // Convert to the play-mailer SMTPConfiguration consumed by SMTPMailer.
    def toClientConfig = SMTPConfiguration(
      host = host,
      port = port,
      tlsRequired = tls,
      user = user.some,
      password = password.some,
      mock = mock,
      timeout = Mailer.timeout.toMillis.toInt.some
    )
  }
  implicit val smtpLoader = AutoConfig.loader[Smtp]
  /** Primary and secondary SMTP endpoints; traffic split is decided in [[Mailer]]. */
  case class Config(
      primary: Smtp,
      secondary: Smtp
  )
  implicit val configLoader = AutoConfig.loader[Config]
  /** An outgoing email: recipient, subject, plain-text body, optional HTML body. */
  case class Message(
      to: EmailAddress,
      subject: String,
      text: String,
      htmlBody: Option[Frag] = none
  )
  /** Helpers for plain-text email bodies. */
  object txt {
    // Localized footer pointing to the site and its contact page.
    private def serviceNote(implicit lang: Lang): String = s"""
${trans.common_note("https://lichess.org").render}
${trans.common_contact("https://lichess.org/contact").render}"""
    def addServiceNote(body: String)(implicit lang: Lang) = s"""$body
$serviceNote"""
  }
  /** Helpers for HTML email bodies, annotated with schema.org microdata. */
  object html {
    // schema.org microdata attributes (not part of the standard scalatags attribute set).
    private val itemscope = attr("itemscope").empty
    private val itemtype = attr("itemtype")
    private val itemprop = attr("itemprop")
    val emailMessage = div(itemscope, itemtype := "http://schema.org/EmailMessage")
    val pDesc = p(itemprop := "description")
    val potentialAction =
      div(itemprop := "potentialAction", itemscope, itemtype := "http://schema.org/ViewAction")
    def metaName(cont: String) = meta(itemprop := "name", content := cont)
    val publisher = div(itemprop := "publisher", itemscope, itemtype := "http://schema.org/Organization")
    val noteContact = a(itemprop := "url", href := "https://lichess.org/contact")(
      span(itemprop := "name")("lichess.org/contact")
    )
    private val noteLink = a(
      itemprop := "url",
      href := "https://lichess.org/"
    )(span(itemprop := "name")("lichess.org"))
    // Localized footer: site note, contact link, and a privacy-policy pointer.
    def serviceNote(implicit lang: Lang) =
      publisher(
        small(
          trans.common_note(Mailer.html.noteLink),
          " ",
          trans.common_contact(noteContact),
          " ",
          lila.i18n.I18nKeys.readAboutOur(
            a(href := "https://lichess.org/privacy")(
              lila.i18n.I18nKeys.privacyPolicy()
            )
          )
        )
      )
    // Plain body (newlines converted to <br>) followed by the service footer.
    def standardEmail(body: String)(implicit lang: Lang): Frag =
      emailMessage(
        pDesc(nl2br(body)),
        serviceNote
      )
    // Render a URL both as microdata and as a clickable link, with a copy-paste hint.
    def url(u: String)(implicit lang: Lang) =
      frag(
        meta(itemprop := "url", content := u),
        p(a(itemprop := "target", href := u)(u)),
        p(trans.common_orPaste(lang))
      )
    // Wrap an HTML body into a complete, minimal HTML document with the given title.
    private[Mailer] def wrap(subject: String, htmlBody: Frag): Frag =
      frag(
        raw("<!doctype html>"),
        htmlTag(
          head(
            meta(httpEquiv := "Content-Type", content := "text/html; charset=utf-8"),
            meta(name := "viewport", content := "width=device-width"),
            titleTag(subject)
          ),
          body(htmlBody)
        )
      )
  }
}
| luanlv/lila | modules/mailer/src/main/Mailer.scala | Scala | mit | 4,926 |
package com.seanshubin.template.scala.web.domain
import java.time.ZonedDateTime
/** Production [[Clock]] implementation backed by the system clock. */
class ClockIntegration extends Clock {
  /** The current date-time from the system clock in the default time zone. */
  override def zonedDateTimeNow: ZonedDateTime = {
    ZonedDateTime.now()
  }
}
| SeanShubin/template-scala-web | domain/src/main/scala/com/seanshubin/template/scala/web/domain/ClockIntegration.scala | Scala | unlicense | 192 |
/*
* Copyright 2001-2012 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import scala.language.experimental.macros
import scala.reflect.macros.{ Context, TypecheckException, ParseException }
import org.scalatest.exceptions.StackDepthException._
import org.scalatest.exceptions.StackDepthExceptionHelper._
import org.scalatest.words.{TypeCheckWord, CompileWord}
/**
 * Macro implementations behind ScalaTest's compile-time assertions
 * (`assertTypeError`, `assertDoesNotCompile`, `assertCompiles`) and the
 * `should`/`must` `compile`/`typeCheck` matcher syntax.
 *
 * Each macro extracts a code snippet from a String literal, parses and type
 * checks it at the caller's compile time, and generates code that either does
 * nothing or throws a TestFailedException at runtime, depending on whether the
 * observed compile outcome matches the expected one.
 */
private[scalatest] object CompileMacro {

  // extract the code string from the AST: accepts a plain String literal or a
  // """...""".stripMargin literal, and aborts compilation for anything else
  def getCodeStringFromCodeExpression(c: Context)(methodName: String, code: c.Expr[String]): String = {
    import c.universe._
    code.tree match {
      case Literal(Constant(codeStr)) => codeStr.toString // normal string literal
      case Select(
        Apply(
          Select(
            _,
            augmentStringTermName
          ),
          List(
            Literal(Constant(codeStr))
          )
        ),
        stripMarginTermName
      ) if augmentStringTermName.decoded == "augmentString" && stripMarginTermName.decoded == "stripMargin" => codeStr.toString.stripMargin // """xxx""".stripMargin string literal
      case _ => c.abort(c.enclosingPosition, methodName + " only works with String literals.")
    }
  }

  // parse and type check a code snippet, generate code to throw TestFailedException when type check passes or parse error
  def assertTypeErrorImpl(c: Context)(code: c.Expr[String]): c.Expr[Unit] = {
    import c.universe._

    // extract code snippet
    // (fix: the error message previously named the method "assertNoTypeError")
    val codeStr = getCodeStringFromCodeExpression(c)("assertTypeError", code)

    try {
      c.typeCheck(c.parse("{ " + codeStr + " }")) // parse and type check code snippet
      // If reach here, type check passes, let's generate code to throw TestFailedException
      val messageExpr = c.literal(Resources("expectedTypeErrorButGotNone", codeStr))
      reify {
        throw new exceptions.TestFailedException(messageExpr.splice, 0)
      }
    } catch {
      case e: TypecheckException =>
        reify {
          // type check failed as expected, generate code to do nothing
        }
      case e: ParseException =>
        // parse error, generate code to throw TestFailedException
        // (fix: use the qualified exceptions.TestFailedException, consistent with every other branch)
        val messageExpr = c.literal(Resources("expectedTypeErrorButGotParseError", e.getMessage, codeStr))
        reify {
          throw new exceptions.TestFailedException(messageExpr.splice, 0)
        }
    }
  }

  // parse and type check a code snippet, generate code to throw TestFailedException when both parse and type check succeeded
  def assertDoesNotCompileImpl(c: Context)(code: c.Expr[String]): c.Expr[Unit] = {
    import c.universe._

    // extract code snippet
    val codeStr = getCodeStringFromCodeExpression(c)("assertDoesNotCompile", code)

    try {
      c.typeCheck(c.parse("{ " + codeStr + " }")) // parse and type check code snippet
      // Both parse and type check succeeded, the code snippet compiles unexpectedly, let's generate code to throw TestFailedException
      val messageExpr = c.literal(Resources("expectedCompileErrorButGotNone", codeStr))
      reify {
        throw new exceptions.TestFailedException(messageExpr.splice, 0)
      }
    } catch {
      case e: TypecheckException =>
        reify {
          // type check error, code snippet does not compile as expected, generate code to do nothing
        }
      case e: ParseException =>
        reify {
          // parse error, code snippet does not compile as expected, generate code to do nothing
        }
    }
  }

  // parse and type check a code snippet, generate code to throw TestFailedException when either parse or type check fails.
  def assertCompilesImpl(c: Context)(code: c.Expr[String]): c.Expr[Unit] = {
    import c.universe._

    // extract code snippet
    val codeStr = getCodeStringFromCodeExpression(c)("assertCompiles", code)

    try {
      c.typeCheck(c.parse("{ " + codeStr + " }")) // parse and type check code snippet
      // Both parse and type check succeeded, the code snippet compiles as expected, generate code to do nothing
      reify {
        // Do nothing
      }
    } catch {
      case e: TypecheckException =>
        // type check error, compile fails, generate code to throw TestFailedException
        val messageExpr = c.literal(Resources("expectedNoErrorButGotTypeError", e.getMessage, codeStr))
        reify {
          throw new exceptions.TestFailedException(messageExpr.splice, 0)
        }
      case e: ParseException =>
        // parse error, compile fails, generate code to throw TestFailedException
        val messageExpr = c.literal(Resources("expectedNoErrorButGotParseError", e.getMessage, codeStr))
        reify {
          throw new exceptions.TestFailedException(messageExpr.splice, 0)
        }
    }
  }

  // check that a code snippet does not compile; shared by the should/must variants
  def notCompileImpl(c: Context)(compileWord: c.Expr[CompileWord])(shouldOrMust: String): c.Expr[Unit] = {

    import c.universe._

    // parse and type check a code snippet, generate code to throw TestFailedException if both parse and type check succeeded
    def checkNotCompile(code: String): c.Expr[Unit] = {
      try {
        c.typeCheck(c.parse("{ " + code + " }")) // parse and type check code snippet
        // both parse and type check succeeded, compile succeeded unexpectedly, generate code to throw TestFailedException
        val messageExpr = c.literal(Resources("expectedCompileErrorButGotNone", code))
        reify {
          throw new exceptions.TestFailedException(messageExpr.splice, 0)
        }
      } catch {
        case e: TypecheckException =>
          reify {
            // type check error, compile fails as expected, generate code to do nothing
          }
        case e: ParseException =>
          reify {
            // parse error, compile fails as expected, generate code to do nothing
          }
      }
    }

    val methodName = shouldOrMust + "Not"

    c.macroApplication match {
      case Apply(
        Select(
          Apply(
            _,
            List(
              Literal(Constant(code))
            )
          ),
          methodNameTermName
        ),
        _
      ) if methodNameTermName.decoded == methodName =>
        // LHS is a normal string literal, call checkNotCompile with the extracted code string to generate code
        val codeStr = code.toString
        checkNotCompile(codeStr)

      case Apply(
        Select(
          Apply(
            _,
            List(
              Select(
                Apply(
                  Select(
                    _,
                    augmentStringTermName
                  ),
                  List(
                    Literal(
                      Constant(code)
                    )
                  )
                ),
                stripMarginTermName
              )
            )
          ),
          methodNameTermName
        ),
        _
      ) if augmentStringTermName.decoded == "augmentString" && stripMarginTermName.decoded == "stripMargin" && methodNameTermName.decoded == methodName =>
        // LHS is a """xxx""".stripMargin string literal, call checkNotCompile with the extracted code string to generate code
        val codeStr = code.toString.stripMargin
        checkNotCompile(codeStr)

      case _ => c.abort(c.enclosingPosition, "The '" + shouldOrMust + "Not compile' syntax only works with String literals.")
    }
  }

  // used by shouldNot compile syntax, delegate to notCompileImpl to generate code
  def shouldNotCompileImpl(c: Context)(compileWord: c.Expr[CompileWord]): c.Expr[Unit] =
    notCompileImpl(c)(compileWord)("should")

  // used by mustNot compile syntax, delegate to notCompileImpl to generate code
  def mustNotCompileImpl(c: Context)(compileWord: c.Expr[CompileWord]): c.Expr[Unit] =
    notCompileImpl(c)(compileWord)("must")

  // check that a code snippet does not type check; shared by the should/must variants
  def notTypeCheckImpl(c: Context)(typeCheckWord: c.Expr[TypeCheckWord])(shouldOrMust: String): c.Expr[Unit] = {

    import c.universe._

    // parse and type check a code snippet, generate code to throw TestFailedException if parse error or both parse and type check succeeded
    def checkNotTypeCheck(code: String): c.Expr[Unit] = {
      try {
        c.typeCheck(c.parse("{ " + code + " }")) // parse and type check code snippet
        // both parse and type check succeeded unexpectedly, generate code to throw TestFailedException
        val messageExpr = c.literal(Resources("expectedTypeErrorButGotNone", code))
        reify {
          throw new exceptions.TestFailedException(messageExpr.splice, 0)
        }
      } catch {
        case e: TypecheckException =>
          reify {
            // type check error as expected, generate code to do nothing
          }
        case e: ParseException =>
          // expect type check error but got parse error, generate code to throw TestFailedException
          // (fix: use the qualified exceptions.TestFailedException, consistent with every other branch)
          val messageExpr = c.literal(Resources("expectedTypeErrorButGotParseError", e.getMessage, code))
          reify {
            throw new exceptions.TestFailedException(messageExpr.splice, 0)
          }
      }
    }

    val methodName = shouldOrMust + "Not"

    c.macroApplication match {
      case Apply(
        Select(
          Apply(
            _,
            List(
              Literal(Constant(code))
            )
          ),
          methodNameTermName
        ),
        _
      ) if methodNameTermName.decoded == methodName =>
        // LHS is a normal string literal, call checkNotTypeCheck with the extracted code string to generate code
        val codeStr = code.toString
        checkNotTypeCheck(codeStr)

      case Apply(
        Select(
          Apply(
            _,
            List(
              Select(
                Apply(
                  Select(
                    _,
                    augmentStringTermName
                  ),
                  List(
                    Literal(
                      Constant(code)
                    )
                  )
                ),
                stripMarginTermName
              )
            )
          ),
          methodNameTermName
        ),
        _
      ) if augmentStringTermName.decoded == "augmentString" && stripMarginTermName.decoded == "stripMargin" && methodNameTermName.decoded == methodName =>
        // LHS is a """xxx""".stripMargin string literal, call checkNotTypeCheck with the extracted code string to generate code
        val codeStr = code.toString.stripMargin
        checkNotTypeCheck(codeStr)

      case _ => c.abort(c.enclosingPosition, "The '" + shouldOrMust + "Not typeCheck' syntax only works with String literals.")
    }
  }

  // used by shouldNot typeCheck syntax, delegate to notTypeCheckImpl to generate code
  def shouldNotTypeCheckImpl(c: Context)(typeCheckWord: c.Expr[TypeCheckWord]): c.Expr[Unit] =
    notTypeCheckImpl(c)(typeCheckWord)("should")

  // used by mustNot typeCheck syntax, delegate to notTypeCheckImpl to generate code
  def mustNotTypeCheckImpl(c: Context)(typeCheckWord: c.Expr[TypeCheckWord]): c.Expr[Unit] =
    notTypeCheckImpl(c)(typeCheckWord)("must")

  // check that a code snippet compiles; shared by the should/must variants
  def compileImpl(c: Context)(compileWord: c.Expr[CompileWord])(shouldOrMust: String): c.Expr[Unit] = {
    import c.universe._

    // parse and type check a code snippet, generate code to throw TestFailedException if either parse error or type check error
    def checkCompile(code: String): c.Expr[Unit] = {
      try {
        c.typeCheck(c.parse("{ " + code + " }")) // parse and type check code snippet
        // both parse and type check succeeded, compile succeeded expectedly, generate code to do nothing
        reify {
          // Do nothing
        }
      } catch {
        case e: TypecheckException =>
          // type check error, compile fails unexpectedly, generate code to throw TestFailedException
          val messageExpr = c.literal(Resources("expectedNoErrorButGotTypeError", e.getMessage, code))
          reify {
            throw new exceptions.TestFailedException(messageExpr.splice, 0)
          }
        case e: ParseException =>
          // parse error, compile fails unexpectedly, generate code to throw TestFailedException
          val messageExpr = c.literal(Resources("expectedNoErrorButGotParseError", e.getMessage, code))
          reify {
            throw new exceptions.TestFailedException(messageExpr.splice, 0)
          }
      }
    }

    c.macroApplication match {
      case Apply(
        Select(
          Apply(
            _,
            List(
              Literal(
                Constant(code)
              )
            )
          ),
          shouldOrMustTermName
        ),
        _
      ) if shouldOrMustTermName.decoded == shouldOrMust =>
        // LHS is a normal string literal, call checkCompile with the extracted code string to generate code
        val codeStr = code.toString
        checkCompile(codeStr)

      case Apply(
        Select(
          Apply(
            _,
            List(
              Select(
                Apply(
                  Select(
                    _,
                    augmentStringTermName
                  ),
                  List(
                    Literal(
                      Constant(code)
                    )
                  )
                ),
                stripMarginTermName
              )
            )
          ),
          shouldOrMustTermName
        ),
        _
      ) if augmentStringTermName.decoded == "augmentString" && stripMarginTermName.decoded == "stripMargin" && shouldOrMustTermName.decoded == shouldOrMust =>
        // LHS is a """xxx""".stripMargin string literal, call checkCompile with the extracted code string to generate code
        val codeStr = code.toString.stripMargin
        checkCompile(codeStr)

      case _ => c.abort(c.enclosingPosition, "The '" + shouldOrMust + " compile' syntax only works with String literals.")
    }
  }

  // used by should compile syntax, delegate to compileImpl to generate code
  def shouldCompileImpl(c: Context)(compileWord: c.Expr[CompileWord]): c.Expr[Unit] =
    compileImpl(c)(compileWord)("should")

  // used by must compile syntax, delegate to compileImpl to generate code
  def mustCompileImpl(c: Context)(compileWord: c.Expr[CompileWord]): c.Expr[Unit] =
    compileImpl(c)(compileWord)("must")
}
| travisbrown/scalatest | src/main/scala/org/scalatest/CompileMacro.scala | Scala | apache-2.0 | 15,375 |
package zoey
import scala.annotation.tailrec
import scala.concurrent.{ Await, ExecutionContext, Future, Promise }
import scala.concurrent.duration.{ Duration, FiniteDuration }
import org.apache.zookeeper.{ ZooKeeper, Watcher, WatchedEvent }
import java.util.concurrent.TimeoutException
import java.util.concurrent.atomic.AtomicReference
/** A Connector that lazily opens, caches, and closes a single ZooKeeper connection. */
case class NativeConnector(
  connectString: String,
  connectTimeout: Option[FiniteDuration],
  sessionTimeout: FiniteDuration,
  authInfo: Option[AuthInfo])
  (implicit ec: ExecutionContext)
  extends Connector {

  /** A `cell` containing a reference to a Connection if one was resolved */
  @volatile private[this] var connection:
    Option[NativeConnector.Connection] = None

  // Build a fresh underlying connection carrying the currently registered listeners.
  protected [this] def connect() =
    new NativeConnector.Connection(
      connectString,
      connectTimeout,
      sessionTimeout,
      listeners.get(),
      authInfo)

  // register a session event listener for this Connector:
  // when the session expires, drop and close the cached connection so the
  // next apply() establishes a new one.
  // NOTE(review): Await blocks the notifying thread until close completes — confirm acceptable.
  onSessionEvent {
    case StateEvent.Expired =>
      Await.result(close(), Duration.Inf)
  }

  /** lazily resolves a cached zookeeper connection; on connect timeout the
   * cached connection is closed before the failure is propagated */
  def apply(): Future[ZooKeeper] =
    connection.getOrElse {
      val c = connect()
      connection = Some(c)
      c
    }.apply().recoverWith {
      case e: NativeConnector.ConnectTimeoutException =>
        close() flatMap { _ => Future.failed(e) }
      case e =>
        Future.failed(e)
    }

  // Drop the cached connection (if any) and close it.
  def close(): Future[Unit] =
    connection match {
      case None =>
        Connector.Closed
      case Some(ref) =>
        connection = None
        ref.close()
    }
}
object NativeConnector {

  /** Raised when the optional connect timeout elapses before the session is established. */
  case class ConnectTimeoutException(
    connectString: String, timeout: FiniteDuration)
    extends TimeoutException(s"timeout connecting to $connectString after $timeout")

  /** Raised when apply() is invoked after the connection has been closed. */
  case object ClosedException
    extends RuntimeException("This connection was already closed")

  /** One actual ZooKeeper client lifecycle: create on first apply(), complete
   * `connected` once the session is up, complete `closed` on close(). */
  protected class Connection(
    connectString: String,
    connectTimeout: Option[FiniteDuration],
    sessionTimeout: FiniteDuration,
    sessionListeners: List[Connector.EventHandler],
    authInfo: Option[AuthInfo])
    (implicit val ec: ExecutionContext) {

    // Underlying client; created lazily by apply().
    @volatile protected[this] var zookeeper: Option[ZooKeeper] = None

    override def toString =
      s"${getClass.getName}(${zookeeper.getOrElse("(disconnected)")})"

    /** defer some behavior until after we receive a session state event
     * http://zookeeper.apache.org/doc/trunk/zookeeperProgrammers.html#ch_zkSessions */
    private class ConnectionWatch(
      andThen: (StateEvent, ZooKeeper) => Unit) extends Watcher {
      // Holds the client once the constructor has returned it; the watcher may
      // fire before the ZooKeeper constructor completes, hence the spin-wait.
      private [this] val ref = new AtomicReference[ZooKeeper]
      def process(e: WatchedEvent) {
        @tailrec
        def await(zk: ZooKeeper): ZooKeeper =
          if (zk == null) await(ref.get()) else zk
        val zk = await(ref.get())
        StateEvent(e) match {
          case e @ StateEvent.Connected =>
            andThen(e, zk)
          case _ =>
            // we capture session expired events in session listener
            // the underlying client handles disconnects
        }
      }
      // Must be called exactly once, right after constructing the client.
      def set(zk: ZooKeeper) =
        if (!ref.compareAndSet(null, zk)) sys.error(
          "ref already set!")
    }

    protected[this] val connectPromise = Promise[ZooKeeper]()
    protected[this] val closePromise = Promise[Unit]()

    /** if connectTimeout is defined, a secondary future will be scheduled
     * to fail at this time. If this failure happens before the connection
     * is promise is satisfied, the future returned with be that of the
     * failure
     */
    lazy val connected: Future[ZooKeeper] = connectTimeout.map {
      undelay.Complete(connectPromise.future).within(
        _, ConnectTimeoutException(connectString, _))
    }.getOrElse(connectPromise.future)

    lazy val closed: Future[Unit] = closePromise.future

    // First call creates the client; subsequent calls return the same `connected` future.
    def apply(): Future[ZooKeeper] =
      if (closed.isCompleted) Future.failed(ClosedException) else {
        zookeeper = zookeeper orElse Some(mkZooKeeper)
        connected
      }

    // Close the client (if created) and complete the `closed` future.
    def close(): Future[Unit] = Future {
      zookeeper.foreach { zk =>
        zk.close()
        zookeeper = None
        closePromise.success(())
      }
    }

    // Construct the client; on the Connected event, notify session listeners,
    // apply auth info, and fulfil the connect promise.
    protected[this] def mkZooKeeper: ZooKeeper = {
      val onConnect = new ConnectionWatch({
        case (ev, client) =>
          sessionListeners.foreach {
            _.lift(ev)
          }
          authInfo.foreach { info =>
            client.addAuthInfo(info.scheme, info.data)
          }
          connectPromise.success(client)
      })
      val zk = new ZooKeeper(
        connectString, sessionTimeout.toMillis.toInt, onConnect)
      onConnect.set(zk)
      zk
    }
  }
}
| softprops/zoey | zoey-core/src/main/scala/NativeConnector.scala | Scala | mit | 4,761 |
package org.example.scalacommonslang
/** Minimal application entry point that prints a greeting. */
object App {
  /** Program entry point; command-line arguments are ignored. */
  def main(args: Array[String]): Unit = {
    print("Hello org.example.scala-commons-lang!")
  }
}
| rshindo/scala-commons-lang | src/main/scala/App.scala | Scala | apache-2.0 | 149 |
package uk.gov.digital.ho.proving.financialstatus.api.validation
import java.time.LocalDate
import org.springframework.http.HttpStatus
import uk.gov.digital.ho.proving.financialstatus.domain._
trait ThresholdParameterValidator {
val serviceMessages: ServiceMessages
protected def validateInputs(studentType: StudentType,
inLondon: Option[Boolean],
tuitionFees: Option[BigDecimal],
tuitionFeesPaid: Option[BigDecimal],
accommodationFeesPaid: Option[BigDecimal],
dependants: Option[Int],
courseStartDate: Option[LocalDate],
courseEndDate: Option[LocalDate],
originalCourseStartDate: Option[LocalDate],
courseType: CourseType,
dependantsOnly: Option[Boolean]
): Either[Seq[(String, String, HttpStatus)], ValidatedInputs] = {
var errorList = Vector.empty[(String, String, HttpStatus)]
val (validCourseStartDate, validCourseEndDate, validOriginalCourseStartDate) = validateDates(courseStartDate, courseEndDate, originalCourseStartDate)
val isContinuation = validOriginalCourseStartDate && originalCourseStartDate.isDefined
val validDependants = validateDependants(dependants)
val validTuitionFees = validateTuitionFees(tuitionFees)
val validTuitionFeesPaid = validateTuitionFeesPaid(tuitionFeesPaid)
val validAccommodationFeesPaid = validateAccommodationFeesPaid(accommodationFeesPaid)
val validInLondon = validateInnerLondon(inLondon)
val validDependantsOnly = validateDependantsOnly(dependantsOnly)
studentType match {
case GeneralStudent =>
if (courseStartDate.isEmpty) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_COURSE_START_DATE, HttpStatus.BAD_REQUEST))
} else if (courseEndDate.isEmpty) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_COURSE_END_DATE, HttpStatus.BAD_REQUEST))
} else if (!validCourseStartDate) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_COURSE_START_DATE_VALUE, HttpStatus.BAD_REQUEST))
} else if (!validCourseEndDate) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_COURSE_END_DATE_VALUE, HttpStatus.BAD_REQUEST))
} else if (!validOriginalCourseStartDate) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_ORIGINAL_COURSE_START_DATE_VALUE, HttpStatus.BAD_REQUEST))
} else if (validTuitionFees.isEmpty) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_TUITION_FEES, HttpStatus.BAD_REQUEST))
} else if (validTuitionFeesPaid.isEmpty) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_TUITION_FEES_PAID, HttpStatus.BAD_REQUEST))
} else if (validDependants.isEmpty) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_DEPENDANTS, HttpStatus.BAD_REQUEST))
}
case StudentUnionSabbaticalOfficerStudent | PostGraduateDoctorDentistStudent =>
if (courseStartDate.isEmpty) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_COURSE_START_DATE, HttpStatus.BAD_REQUEST))
} else if (courseEndDate.isEmpty) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_COURSE_END_DATE, HttpStatus.BAD_REQUEST))
} else if (!validCourseStartDate) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_COURSE_START_DATE_VALUE, HttpStatus.BAD_REQUEST))
} else if (!validCourseEndDate) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_COURSE_END_DATE_VALUE, HttpStatus.BAD_REQUEST))
} else if (!validOriginalCourseStartDate) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_ORIGINAL_COURSE_START_DATE_VALUE, HttpStatus.BAD_REQUEST))
} else if (validDependants.isEmpty) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_DEPENDANTS, HttpStatus.BAD_REQUEST))
}
case DoctorateExtensionStudent =>
case UnknownStudent(unknownStudentType) => errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_STUDENT_TYPE(unknownStudentType), HttpStatus.BAD_REQUEST))
}
if (validAccommodationFeesPaid.isEmpty) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_ACCOMMODATION_FEES_PAID, HttpStatus.BAD_REQUEST))
} else if (validDependants.isEmpty) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_DEPENDANTS, HttpStatus.BAD_REQUEST))
} else if (validInLondon.isEmpty) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_IN_LONDON, HttpStatus.BAD_REQUEST))
} else if (validDependantsOnly.isEmpty) {
errorList = errorList :+ ((serviceMessages.REST_INVALID_PARAMETER_VALUE, serviceMessages.INVALID_IN_DEPENDANTS_ONLY, HttpStatus.BAD_REQUEST))
}
if (errorList.isEmpty) Right(ValidatedInputs(validDependants, validTuitionFees, validTuitionFeesPaid,
validAccommodationFeesPaid, validInLondon, courseStartDate, courseEndDate, originalCourseStartDate, isContinuation, courseType == PreSessionalCourse, validDependantsOnly))
else Left(errorList)
}
private def validateDependants(dependants: Option[Int]) = dependants.filter(_ >= 0)
private def validateTuitionFees(tuitionFees: Option[BigDecimal]) = tuitionFees.filter(_ >= 0)
private def validateTuitionFeesPaid(tuitionFeesPaid: Option[BigDecimal]) = tuitionFeesPaid.filter(_ >= 0)
private def validateAccommodationFeesPaid(accommodationFeesPaid: Option[BigDecimal]) = accommodationFeesPaid.filter(_ >= 0)
private def validateInnerLondon(inLondon: Option[Boolean]) = inLondon
private def validateDependantsOnly(dependantsOnly: Option[Boolean]) = dependantsOnly
private def validateDates(courseStartDate: Option[LocalDate], courseEndDate: Option[LocalDate], originalCourseStartDate: Option[LocalDate]): (Boolean, Boolean, Boolean) = {
val validation = for {
startDate <- courseStartDate
endDate <- courseEndDate
} yield {
val (startOK, endOK) = startDate.isBefore(endDate) match {
case true => (true, true)
case false => (false, false)
}
val originalStartOk = originalCourseStartDate match {
case None => true
case Some(date) => date.isBefore(startDate) && date.isBefore(endDate)
}
(startOK, endOK, originalStartOk)
}
validation.getOrElse((false, false, false))
}
case class ValidatedInputs(dependants: Option[Int], tuitionFees: Option[BigDecimal],
tuitionFeesPaid: Option[BigDecimal], accommodationFeesPaid: Option[BigDecimal],
inLondon: Option[Boolean],
courseStartDate: Option[LocalDate],
courseEndDate: Option[LocalDate],
originalCourseStartDate: Option[LocalDate],
isContinuation: Boolean,
isPreSessional: Boolean,
dependantsOnly: Option[Boolean])
}
| UKHomeOffice/pttg-fs-api | src/main/scala/uk/gov/digital/ho/proving/financialstatus/api/validation/ThresholdParameterValidator.scala | Scala | mit | 7,948 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import java.nio.ByteOrder
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.Activity
import com.intel.analytics.bigdl.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.File
import com.intel.analytics.bigdl.utils.caffe.CaffeLoader
import com.intel.analytics.bigdl.utils.tf.{TensorflowDataFormat, TensorflowLoader}
import scala.reflect.ClassTag
object Module {
def load[T: ClassTag](path : String) : AbstractModule[Activity, Activity, T] = {
File.load[AbstractModule[Activity, Activity, T]](path)
}
def loadTorch[T: ClassTag](path : String) : AbstractModule[Activity, Activity, T] = {
File.loadTorch[AbstractModule[Activity, Activity, T]](path)
}
@deprecated
def loadCaffe[T: ClassTag](model: AbstractModule[Activity, Activity, T],
defPath: String, modelPath: String, matchAll: Boolean = true)(
implicit ev: TensorNumeric[T]): AbstractModule[Activity, Activity, T] = {
CaffeLoader.load[T](model, defPath, modelPath, matchAll)
}
def loadCaffeDynamic[T: ClassTag](defPath: String, modelPath: String, matchAll: Boolean = true)(
implicit ev: TensorNumeric[T]): AbstractModule[Activity, Activity, T] = {
CaffeLoader.loadCaffe[T](defPath, modelPath, matchAll)._1
}
/**
* Load tensorflow model from its saved protobuf file.
* @param file where is the protobuf model file
* @param inputs input node names
* @param outputs output node names, the output tensor order is same with the node order
* @param byteOrder byte order in the tensorflow file. The default value is little endian
* @return BigDL model
*/
def loadTF[T: ClassTag](file: String, inputs: Seq[String], outputs: Seq[String],
byteOrder: ByteOrder = ByteOrder.LITTLE_ENDIAN)(
implicit ev: TensorNumeric[T]): Module[T] = {
TensorflowLoader.load(file, inputs, outputs, byteOrder)
}
def flatten[@specialized(Float, Double) T: ClassTag](parameters: Array[Tensor[T]])(
implicit ev: TensorNumeric[T]): Tensor[T] = {
val compactedTensor = isCompact(parameters)
if (compactedTensor != null) {
return compactedTensor
}
var i = 0
var length = 0
while (i < parameters.length) {
require(parameters(i).isContiguous())
length += parameters(i).nElement()
i += 1
}
val result = Tensor[T](length)
val resultStorage = result.storage()
i = 0
var offset = 0
while (i < parameters.length) {
System.arraycopy(parameters(i).storage().array(), parameters(i).storageOffset() - 1,
resultStorage.array(), offset, parameters(i).nElement())
parameters(i).set(resultStorage, offset + 1, parameters(i).size(), parameters(i).stride())
offset += parameters(i).nElement()
i += 1
}
result
}
def isCompact[@specialized(Float, Double) T: ClassTag](paramters: Array[Tensor[T]])(
implicit ev: TensorNumeric[T]): Tensor[T] = {
require(paramters.length > 0)
var i = 1
val storage = paramters(0).storage()
var length = paramters(0).nElement()
while (i < paramters.length) {
if (!storage.eq(paramters(i).storage())) {
return null
}
length += paramters(i).nElement()
i += 1
}
if (length != storage.array().length) {
return null
}
return Tensor(storage)
}
}
| 122689305/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/Module.scala | Scala | apache-2.0 | 4,087 |
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.wegtam.tensei.agent.generators
import akka.actor.Props
import akka.util.ByteString
import com.wegtam.tensei.agent.generators.BaseGenerator.BaseGeneratorMessages.{
GeneratorResponse,
PrepareToGenerate,
ReadyToGenerate,
StartGenerator
}
import scala.annotation.tailrec
import scala.collection.mutable
object DrupalVanCodeGenerator {
val name = "DrupalVanCodeGenerator"
def props: Props = Props(classOf[DrupalVanCodeGenerator])
}
/**
 * A generator which creates Drupal's vancode from commentID, articleID and parent commentID.
* Example:
* commentID = 1, articleID = 1, parent = 0 -> vancode = 01/
* commentID = 2, articleID = 1, parent = 1 -> vancode = 01.00/
* commentID = 3, articleID = 2, parent = 0 -> vancode = 01/
*
* The generator accepts the following parameters:
* - article (required) - The article ID of the related article
* - commentid (required) - The origin comment ID.
* - parent - The comment ID of the parent. Default value is 0.
*/
class DrupalVanCodeGenerator extends BaseGenerator {
val ids = mutable.HashMap[Long, (Long, Long, Long)]() //id -> (article,parent,vancodeid)
override def receive: Receive = {
case PrepareToGenerate =>
context become generate
sender() ! ReadyToGenerate
}
override def generate: Receive = {
case msg: StartGenerator => //Nachricht vom Transformator
log.debug("Generating new vancode")
val params = msg.data
val article = if (params.exists(p => p.asInstanceOf[(String, String)]._1 == "article")) {
params
.find(p => p.asInstanceOf[(String, String)]._1 == "article")
.get
.asInstanceOf[(String, String)]
._2
.toLong
} else {
log.error("Missing field name in {}", params.mkString(", "))
throw new NoSuchElementException("Vancode transformer couldn't find the article field.")
}
val id = if (params.exists(p => p.asInstanceOf[(String, String)]._1 == "commentid")) {
params
.find(p => p.asInstanceOf[(String, String)]._1 == "commentid")
.get
.asInstanceOf[(String, String)]
._2
.toLong
} else {
log.error("Missing field name in {}", params.mkString(", "))
throw new NoSuchElementException("Vancode transformer couldn't find the commentid field.")
}
val parent = if (params.exists(p => p.asInstanceOf[(String, String)]._1 == "parent")) {
params
.find(p => p.asInstanceOf[(String, String)]._1 == "parent")
.get
.asInstanceOf[(String, String)]
._2
.toLong
} else {
0
}
val max = if (ids.nonEmpty) {
ids
.map(
e =>
if (e._2._1 == article && e._2._2 == parent) {
e._2._3
} else if (parent != 0) {
-1L
} else {
0L
}
)
.max
} else {
0
}
val newid = max + 1
ids.put(id, (article, parent, newid))
val vancode = ByteString(s"${getparentcode(parent)("")}${toBase36(newid.toInt)}/")
sender() ! GeneratorResponse(List(vancode))
}
/**
* look for the vancodes of all parents and creates the new vancode
*
* @param parent the comment ID of the parent comment
* @return the new vancode
*/
@tailrec
private def getparentcode(parent: Long)(acc: String): String =
if (parent == 0)
acc
else {
val (_, parentId, vancodeId) = ids(parent)
getparentcode(parentId)(s"$acc${toBase36(vancodeId.toInt)}.")
}
/**
* Converts a base 10 number to a base 36 number
*
* @param number a base 10 number
* @return a base 36 number
*/
private def toBase36(number: Int): String = {
val b36 = Integer.toString(number, 36)
s"${b36.length - 1}$b36"
}
}
| Tensei-Data/tensei-agent | src/main/scala/com/wegtam/tensei/agent/generators/DrupalVanCodeGenerator.scala | Scala | agpl-3.0 | 4,687 |
package mesosphere.marathon
package integration
import java.io.File
import java.net.URL
import java.nio.file.Files
import akka.actor.{ActorSystem, Scheduler}
import akka.http.scaladsl.client.RequestBuilding.Get
import akka.stream.Materializer
import mesosphere.marathon.core.pod.{HostNetwork, MesosContainer, PodDefinition}
import mesosphere.marathon.integration.facades.{AppMockFacade, ITEnrichedTask}
import mesosphere.marathon.integration.setup._
import mesosphere.marathon.io.IO
import mesosphere.marathon.state.{PathId, PersistentVolume, PersistentVolumeInfo, VolumeMount}
import mesosphere.marathon.util.ZookeeperServerTest
import mesosphere.{AkkaIntegrationTest, WhenEnvSet}
import org.apache.commons.io.FileUtils
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.{HavePropertyMatchResult, HavePropertyMatcher}
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.sys.process.Process
/**
* This integration test starts older Marathon versions one after another and finishes this upgrade procedure with the
* current build. In each step we verfiy that all apps are still up and running.
*/
class UpgradeIntegrationTest extends AkkaIntegrationTest with MesosClusterTest with ZookeeperServerTest with MarathonAppFixtures with Eventually {
import PathId._
val zkURLBase = s"zk://${zkServer.connectUri}/marathon-$suiteName"
val marathon149Artifact = MarathonArtifact("1.4.9", "marathon-1.4.9.tgz")
val marathon156Artifact = MarathonArtifact("1.5.6", "marathon-1.5.6.tgz")
val marathon16549Artifact = MarathonArtifact("1.6.549", "marathon-1.6.549-aabf74302.tgz")
// Configure Mesos to provide the Mesos containerizer with Docker image support.
override lazy val mesosConfig = MesosConfig(
launcher = "linux",
isolation = Some("filesystem/linux,docker/runtime"),
imageProviders = Some("docker"))
override def beforeAll(): Unit = {
super.beforeAll()
// Download Marathon releases
marathon149Artifact.downloadAndExtract()
marathon156Artifact.downloadAndExtract()
marathon16549Artifact.downloadAndExtract()
marathon149Artifact.marathonPackage.deleteOnExit()
marathon156Artifact.marathonPackage.deleteOnExit()
marathon16549Artifact.marathonPackage.deleteOnExit()
}
case class MarathonArtifact(marathonVersion: String, tarballName: String) {
val marathonPackage: File = Files.createTempDirectory(s"marathon-$marathonVersion").toFile
val downloadURL: URL = new URL(s"https://downloads.mesosphere.com/marathon/releases/$marathonVersion/$tarballName")
def downloadAndExtract() = {
val tarball = new File(marathonPackage, tarballName)
logger.info(s"Downloading $tarballName to ${tarball.getCanonicalPath}")
FileUtils.copyURLToFile(downloadURL, tarball)
IO.extractTGZip(tarball, marathonPackage)
}
}
case class Marathon149(marathonPackage: File, suiteName: String, masterUrl: String, zkUrl: String)(
implicit
val system: ActorSystem, val mat: Materializer, val ctx: ExecutionContext, val scheduler: Scheduler) extends BaseMarathon {
override val processBuilder = {
val java = sys.props.get("java.home").fold("java")(_ + "/bin/java")
val jar = new File(marathonPackage, "marathon-1.4.9/target/scala-2.11/marathon-assembly-1.4.9.jar").getCanonicalPath
val cmd = Seq(java, "-Xmx1024m", "-Xms256m", "-XX:+UseConcMarkSweepGC", "-XX:ConcGCThreads=2") ++ akkaJvmArgs ++
Seq(s"-DmarathonUUID=$uuid -DtestSuite=$suiteName", "-client", "-jar", jar) ++ args
Process(cmd, workDir, sys.env.toSeq: _*)
}
}
case class Marathon156(marathonPackage: File, suiteName: String, masterUrl: String, zkUrl: String)(
implicit
val system: ActorSystem, val mat: Materializer, val ctx: ExecutionContext, val scheduler: Scheduler) extends BaseMarathon {
override val processBuilder = {
val bin = new File(marathonPackage, "marathon-1.5.6/bin/marathon").getCanonicalPath
val cmd = Seq("bash", bin, "-J-Xmx1024m", "-J-Xms256m", "-J-XX:+UseConcMarkSweepGC", "-J-XX:ConcGCThreads=2") ++ akkaJvmArgs ++
Seq(s"-DmarathonUUID=$uuid -DtestSuite=$suiteName") ++ args
Process(cmd, workDir, sys.env.toSeq: _*)
}
}
case class Marathon16549(marathonPackage: File, suiteName: String, masterUrl: String, zkUrl: String)(
implicit
val system: ActorSystem, val mat: Materializer, val ctx: ExecutionContext, val scheduler: Scheduler) extends BaseMarathon {
override val processBuilder = {
val bin = new File(marathonPackage, "marathon-1.6.549-aabf74302/bin/marathon").getCanonicalPath
val cmd = Seq("bash", bin, "-J-Xmx1024m", "-J-Xms256m", "-J-XX:+UseConcMarkSweepGC", "-J-XX:ConcGCThreads=2") ++ akkaJvmArgs ++
Seq(s"-DmarathonUUID=$uuid -DtestSuite=$suiteName") ++ args
Process(cmd, workDir, sys.env.toSeq: _*)
}
}
"Ephemeral and persistent apps and pods" should {
"survive an upgrade cycle" taggedAs WhenEnvSet(envVarRunMesosTests, default = "true") in {
val zkUrl = s"$zkURLBase-upgrade-cycle"
// Start apps in 1.4.9
Given("A Marathon 1.4.9 is running")
val marathon149 = Marathon149(marathon149Artifact.marathonPackage, suiteName = s"$suiteName-1-4-9", mesosMasterUrl, zkUrl)
marathon149.start().futureValue
(marathon149.client.info.entityJson \ "version").as[String] should be("1.4.9")
And("new running apps in Marathon 1.4.9")
val app_149_fail = appProxy(testBasePath / "app-149-fail", "v1", instances = 1, healthCheck = None)
marathon149.client.createAppV2(app_149_fail) should be(Created)
val app_149 = appProxy(testBasePath / "app-149", "v1", instances = 1, healthCheck = None)
marathon149.client.createAppV2(app_149) should be(Created)
patienceConfig
eventually { marathon149 should have (runningTasksFor(app_149.id.toPath, 1)) }
eventually { marathon149 should have (runningTasksFor(app_149_fail.id.toPath, 1)) }
val originalApp149Tasks = marathon149.client.tasks(app_149.id.toPath).value
val originalApp149FailedTasks = marathon149.client.tasks(app_149_fail.id.toPath).value
When("Marathon 1.4.9 is shut down")
marathon149.stop().futureValue
And(s"App ${app_149_fail.id} fails")
AppMockFacade.suicideAll(originalApp149FailedTasks)
// Pass upgrade to 1.5.6
And("Marathon is upgraded to 1.5.6")
val marathon156 = Marathon156(marathon156Artifact.marathonPackage, s"$suiteName-1-5-6", mesosMasterUrl, zkUrl)
marathon156.start().futureValue
(marathon156.client.info.entityJson \ "version").as[String] should be("1.5.6")
And("new apps in Marathon 1.5.6 are added")
val app_156 = appProxy(testBasePath / "app-156", "v1", instances = 1, healthCheck = None)
marathon156.client.createAppV2(app_156) should be(Created)
val app_156_fail = appProxy(testBasePath / "app-156-fail", "v1", instances = 1, healthCheck = None)
marathon156.client.createAppV2(app_156_fail) should be(Created)
Then("All apps from 1.5.6 are running")
eventually { marathon156 should have (runningTasksFor(app_156.id.toPath, 1)) }
eventually { marathon156 should have (runningTasksFor(app_156_fail.id.toPath, 1)) }
val originalApp156Tasks = marathon156.client.tasks(app_156.id.toPath).value
val originalApp156FailedTasks = marathon156.client.tasks(app_156_fail.id.toPath).value
And("All apps from 1.4.9 are still running")
marathon156.client.tasks(app_149.id.toPath).value should contain theSameElementsAs (originalApp149Tasks)
When("Marathon 1.5.6 is shut down")
marathon156.stop().futureValue
And(s"App ${app_156_fail.id} fails")
AppMockFacade.suicideAll(originalApp156FailedTasks)
// Pass upgrade to 1.6.549
And("Marathon is upgraded to 1.6.549")
val marathon16549 = Marathon16549(marathon16549Artifact.marathonPackage, s"$suiteName-1-6-549", mesosMasterUrl, zkUrl)
marathon16549.start().futureValue
(marathon16549.client.info.entityJson \ "version").as[String] should be("1.6.549")
And("new pods in Marathon 1.6.549 are added")
val resident_pod_16549 = PodDefinition(
id = testBasePath / "resident-pod-16549",
containers = Seq(
MesosContainer(
name = "task1",
exec = Some(raml.MesosExec(raml.ShellCommand("cd $MESOS_SANDBOX && echo 'start' >> pst1/foo && python -m SimpleHTTPServer $ENDPOINT_TASK1"))),
resources = raml.Resources(cpus = 0.1, mem = 32.0),
endpoints = Seq(raml.Endpoint(name = "task1", hostPort = Some(0))),
volumeMounts = Seq(VolumeMount(Some("pst"), "pst1", true))
)
),
volumes = Seq(PersistentVolume(name = Some("pst"), persistent = PersistentVolumeInfo(size = 10L))),
networks = Seq(HostNetwork),
instances = 1,
unreachableStrategy = state.UnreachableDisabled,
upgradeStrategy = state.UpgradeStrategy(0.0, 0.0)
)
marathon16549.client.createPodV2(resident_pod_16549) should be(Created)
val (resident_pod_16549_port, resident_pod_16549_address) = eventually {
val status = marathon16549.client.status(resident_pod_16549.id)
status should be(Stable)
status.value.instances(0).containers(0).endpoints(0).allocatedHostPort should be('defined)
val port = status.value.instances(0).containers(0).endpoints(0).allocatedHostPort.get
(port, status.value.instances(0).networks(0).addresses(0))
}
Then(s"pod ${resident_pod_16549.id} can be queried on http://$resident_pod_16549_address:$resident_pod_16549_port")
implicit val requestTimeout = 30.seconds
eventually { AkkaHttpResponse.request(Get(s"http://$resident_pod_16549_address:$resident_pod_16549_port/pst1/foo")).futureValue.entityString should be("start\n") }
Then("All apps from 1.4.9 and 1.5.6 are still running")
marathon16549.client.tasks(app_149.id.toPath).value should contain theSameElementsAs (originalApp149Tasks)
marathon16549.client.tasks(app_156.id.toPath).value should contain theSameElementsAs (originalApp156Tasks)
// Pass upgrade to current
When("Marathon is upgraded to the current version")
marathon16549.stop().futureValue
val marathonCurrent = LocalMarathon(suiteName = s"$suiteName-current", masterUrl = mesosMasterUrl, zkUrl = zkUrl)
marathonCurrent.start().futureValue
(marathonCurrent.client.info.entityJson \ "version").as[String] should be(BuildInfo.version.toString)
Then("All apps from 1.4.9 and 1.5.6 are still running")
marathonCurrent.client.tasks(app_149.id.toPath).value should contain theSameElementsAs (originalApp149Tasks)
marathonCurrent.client.tasks(app_156.id.toPath).value should contain theSameElementsAs (originalApp156Tasks)
And("All apps from 1.4.9 and 1.5.6 are recovered and running again")
eventually { marathonCurrent should have(runningTasksFor(app_149_fail.id.toPath, 1)) }
marathonCurrent.client.tasks(app_149_fail.id.toPath).value should not contain theSameElementsAs(originalApp149FailedTasks)
eventually { marathonCurrent should have(runningTasksFor(app_156_fail.id.toPath, 1)) }
marathonCurrent.client.tasks(app_156_fail.id.toPath).value should not contain theSameElementsAs(originalApp156FailedTasks)
And("All pods from 1.6.549 are still running")
eventually { marathonCurrent.client.status(resident_pod_16549.id) should be(Stable) }
eventually { AkkaHttpResponse.request(Get(s"http://$resident_pod_16549_address:$resident_pod_16549_port/pst1/foo")).futureValue.entityString should be("start\n") }
marathonCurrent.close()
}
}
"upgrade from 1.6.549 to the latest" in {
val zkUrl = s"$zkURLBase-to-latest"
val marathon16549 = Marathon16549(marathon16549Artifact.marathonPackage, suiteName = s"$suiteName-1-6-549", mesosMasterUrl, zkUrl)
// Start apps in 1.6.549
Given("A Marathon 1.6.549 is running")
marathon16549.start().futureValue
(marathon16549.client.info.entityJson \ "version").as[String] should be("1.6.549")
And("new running apps in Marathon 1.6.549")
val app_16549_fail = appProxy(testBasePath / "app-16549-fail", "v1", instances = 1, healthCheck = None)
marathon16549.client.createAppV2(app_16549_fail) should be(Created)
val app_16549 = appProxy(testBasePath / "app-16549", "v1", instances = 1, healthCheck = None)
marathon16549.client.createAppV2(app_16549) should be(Created)
patienceConfig
eventually { marathon16549 should have (runningTasksFor(app_16549.id.toPath, 1)) }
eventually { marathon16549 should have (runningTasksFor(app_16549_fail.id.toPath, 1)) }
val originalApp16549Tasks = marathon16549.client.tasks(app_16549.id.toPath).value
val originalApp16549FailedTasks = marathon16549.client.tasks(app_16549_fail.id.toPath).value
When("Marathon 1.6.549 is shut down")
marathon16549.stop().futureValue
AppMockFacade.suicideAll(originalApp16549FailedTasks)
// Pass upgrade to current
When("Marathon is upgraded to the current version")
val marathonCurrent = LocalMarathon(suiteName = s"$suiteName-current", masterUrl = mesosMasterUrl, zkUrl = zkUrl)
marathonCurrent.start().futureValue
(marathonCurrent.client.info.entityJson \ "version").as[String] should be(BuildInfo.version.toString)
Then("All apps from 1.6.549 are still running")
marathonCurrent.client.tasks(app_16549.id.toPath).value should contain theSameElementsAs (originalApp16549Tasks)
And("All apps from 1.6.549 are recovered and running again")
eventually { marathonCurrent should have(runningTasksFor(app_16549_fail.id.toPath, 1)) }
marathonCurrent.close()
}
"resident app can be restarted after upgrade from 1.6.549" in {
val zkUrl = s"$zkURLBase-resident-apps"
val marathon16549 = Marathon16549(marathon16549Artifact.marathonPackage, suiteName = s"$suiteName-1-6-549", mesosMasterUrl, zkUrl)
// Start apps in 1.6.549
Given("A Marathon 1.6.549 is running")
marathon16549.start().futureValue
(marathon16549.client.info.entityJson \ "version").as[String] should be("1.6.549")
And("new running apps in Marathon 1.6.549")
val containerPath = "persistent-volume"
val residentApp_16549 = residentApp(
id = testBasePath / "resident-app-16549",
containerPath = containerPath,
cmd = s"""echo "data" >> $containerPath/data && sleep 1000""")
marathon16549.client.createAppV2(residentApp_16549) should be(Created)
patienceConfig
eventually { marathon16549 should have (runningTasksFor(residentApp_16549.id.toPath, 1)) }
val originalApp16549Tasks = marathon16549.client.tasks(residentApp_16549.id.toPath).value
When("We restart the app")
marathon16549.client.restartApp(residentApp_16549.id.toPath) should be(OK)
Then("We have new running tasks")
eventually {
marathon16549.client.tasks(residentApp_16549.id.toPath).value should not contain theSameElementsAs(originalApp16549Tasks)
marathon16549 should have (runningTasksFor(residentApp_16549.id.toPath, 1))
}
// Pass upgrade to current
When("Marathon is upgraded to the current version")
marathon16549.stop().futureValue
val marathonCurrent = LocalMarathon(suiteName = s"$suiteName-current", masterUrl = mesosMasterUrl, zkUrl = zkUrl)
marathonCurrent.start().futureValue
(marathonCurrent.client.info.entityJson \ "version").as[String] should be(BuildInfo.version.toString)
Then("All apps from 1.6.549 are still running")
marathonCurrent should have (runningTasksFor(residentApp_16549.id.toPath, 1))
val restartedApp16549Tasks = marathonCurrent.client.tasks(residentApp_16549.id.toPath).value
When("We restart the app again")
marathonCurrent.client.restartApp(residentApp_16549.id.toPath) should be(OK)
Then("We have new running tasks")
eventually {
marathonCurrent.client.tasks(residentApp_16549.id.toPath).value should not contain theSameElementsAs(restartedApp16549Tasks)
marathonCurrent should have (runningTasksFor(residentApp_16549.id.toPath, 1))
}
marathonCurrent.close()
}
/**
* Scala [[HavePropertyMatcher]] that checks that numberOfTasks are in running state for app appId on given Marathon.
*
* Do not use the class directly but [[UpgradeIntegrationTest.runningTasksFor]]:
*
* {{{
* marathon149 should have(runningTasksFor(app_149.id.toPath, 2))
* }}}
*
* @param appId The app the is checked for running tasks.
* @param numberOfTasks The number of tasks that should be running.
*/
class RunningTasksMatcher(appId: PathId, numberOfTasks: Int) extends HavePropertyMatcher[BaseMarathon, List[ITEnrichedTask]] {
def apply(marathon: BaseMarathon): HavePropertyMatchResult[List[ITEnrichedTask]] = {
val tasks = marathon.client.tasks(appId).value
val notRunningTasks = tasks.filter(_.state != "TASK_RUNNING")
val matches = tasks.size == numberOfTasks && notRunningTasks.size == 0
HavePropertyMatchResult(matches, "runningTasks", List.empty, notRunningTasks)
}
}
def runningTasksFor(appId: PathId, numberOfTasks: Int) = new RunningTasksMatcher(appId, numberOfTasks)
override val testBasePath = PathId("/")
override val healthCheckPort: Int = 0
}
| gsantovena/marathon | tests/integration/src/test/scala/mesosphere/marathon/integration/UpgradeIntegrationTest.scala | Scala | apache-2.0 | 17,451 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.monitoring.metrics
import java.net.ServerSocket
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.typesafe.config.Config
import kamon.prometheus.PrometheusReporter
import org.apache.openwhisk.core.monitoring.metrics.OpenWhiskEvents.MetricConfig
import pureconfig.loadConfigOrThrow
trait EventsTestHelper {

  /**
   * Creates an [[EventConsumer]] reading from a Kafka broker on `localhost:kport`.
   *
   * @param kport        port of the local Kafka broker to bootstrap against
   * @param globalConfig configuration supplying both the consumer settings and
   *                     the `user-events` [[MetricConfig]]
   * @param recorder     metric recorder fed with the consumed events; defaults
   *                     to a Prometheus-backed recorder
   */
  protected def createConsumer(kport: Int,
                               globalConfig: Config,
                               recorder: MetricRecorder = PrometheusRecorder(new PrometheusReporter))(
    implicit system: ActorSystem,
    materializer: ActorMaterializer) = {
    val settings = OpenWhiskEvents
      .eventConsumerSettings(OpenWhiskEvents.defaultConsumerConfig(globalConfig))
      .withBootstrapServers(s"localhost:$kport")
    val metricConfig = loadConfigOrThrow[MetricConfig](globalConfig, "user-events")
    EventConsumer(settings, Seq(recorder), metricConfig)
  }

  /**
   * Returns a currently unused local TCP port by binding an ephemeral server
   * socket and releasing it right away.
   */
  protected def freePort(): Int = {
    // `new ServerSocket(0)` either returns a bound socket or throws, so the
    // previous `if (socket != null)` guard before close() was dead code.
    val socket = new ServerSocket(0)
    try socket.getLocalPort
    finally socket.close()
  }
}
| openwhisk/openwhisk | core/monitoring/user-events/src/test/scala/org/apache/openwhisk/core/monitoring/metrics/EventsTestHelper.scala | Scala | apache-2.0 | 1,915 |
/*
* Copyright (c) 2014 Contributor. All rights reserved.
*/
package org.scalaide.debug.internal.expression
package context
import scala.collection.JavaConversions._
import com.sun.jdi.ClassObjectReference
import com.sun.jdi.ClassType
import com.sun.jdi.ObjectReference
import javax.xml.bind.DatatypeConverter
/**
 * Part of `JdiContext` responsible for loading classes on debugged jvm.
 */
trait JdiClassLoader {
  self: JdiContext =>
  /**
   * Load class on debugged jvm.
   * Needed for JDI to work sometimes.
   *
   * Implemented by remotely invoking `java.lang.Class.forName(name)` on the
   * debugged VM's current thread, which forces that VM to resolve the class.
   */
  final def loadClass(name: String): Unit = {
    val classObj = classByName("java.lang.Class")
    val byName = methodOn(classObj, "forName", arity = 1)
    val classMirror = jvm.mirrorOf(name)
    classObj.invokeMethod(currentThread, byName, List(classMirror))
  }
  /**
   * Load given class (bytes) on debugged JVM.
   * Class is sent as string encoded with base64.
   *
   * The bytes are base64-encoded locally, mirrored to the debugged VM as a
   * string, decoded there via `javax.xml.bind.DatatypeConverter.parseBase64Binary`
   * and passed to the remote class loader's `defineClass`.
   *
   * @param name name of loaded class (note: currently unused by this
   *             implementation — `defineClass` takes only the bytes here)
   * @param code bytes of class to load
   */
  final def loadClass(name: String, code: Array[Byte]): Unit = {
    // both vals create method java.lang.Class defineClass(code: byte[], off: Int, len: Int)
    val methodSignature = "([BII)Ljava/lang/Class;"
    val methodName = "defineClass"
    // obtain class loader from this class for top stackframe
    val classLoaderRef = currentFrame.thisObject.referenceType.classLoader
    val defineClassMethod = classLoaderRef.referenceType.methodsByName(methodName, methodSignature).head
    // encode with base64
    val localByteString = DatatypeConverter.printBase64Binary(code)
    // send to JDI
    val remoteByteStrings = jvm.mirrorOf(localByteString)
    val dateTypeConverterClazzReference = classByName("javax.xml.bind.DatatypeConverter")
    val parseMetod = methodOn(dateTypeConverterClazzReference, "parseBase64Binary", arity = 1)
    // encoded: decode the base64 string remotely into a byte[] on the debugged VM
    val remoteByteArray = dateTypeConverterClazzReference.invokeMethod(currentThread, parseMetod, List(remoteByteStrings))
    // load the class; the cast result is discarded since this method returns Unit
    val args = List(remoteByteArray, jvm.mirrorOf(0), jvm.mirrorOf(code.length))
    classLoaderRef.invokeMethod(currentThread, defineClassMethod, args).asInstanceOf[ClassObjectReference]
  }
}
| Kwestor/scala-ide | org.scala-ide.sdt.debug.expression/src/org/scalaide/debug/internal/expression/context/JdiClassLoader.scala | Scala | bsd-3-clause | 2,191 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.apache.spark.sql.jts
import org.locationtech.jts.geom.Geometry
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.GenericInternalRow
import org.apache.spark.sql.types._
import org.locationtech.geomesa.spark.jts.util.WKBUtils
import scala.reflect._
/**
 * Common parent for the JTS user-defined types. Values are stored in Catalyst
 * rows as a single binary column holding the geometry encoded as WKB.
 *
 * @param simpleString short name, like "point"
 * @tparam T Concrete JTS type represented by this UDT
 */
abstract class AbstractGeometryUDT[T >: Null <: Geometry: ClassTag](override val simpleString: String)
  extends UserDefinedType[T] {
  override def pyUDT: String = s"geomesa_pyspark.types.${getClass.getSimpleName}"
  // One binary column carrying the well-known-binary form of the geometry.
  override def sqlType: DataType =
    StructType(Seq(StructField("wkb", DataTypes.BinaryType)))
  override def userClass: Class[T] =
    classTag[T].runtimeClass.asInstanceOf[Class[T]]
  // Encode the geometry as WKB bytes wrapped in a single-column internal row.
  override def serialize(obj: T): InternalRow =
    new GenericInternalRow(Array[Any](WKBUtils.write(obj)))
  // Decode the WKB bytes stored in column 0 back into a concrete geometry.
  override def deserialize(datum: Any): T = {
    val row = datum.asInstanceOf[InternalRow]
    WKBUtils.read(row.getBinary(0)).asInstanceOf[T]
  }
}
| locationtech/geomesa | geomesa-spark/geomesa-spark-jts/src/main/scala/org/apache/spark/sql/jts/AbstractGeometryUDT.scala | Scala | apache-2.0 | 1,631 |
package com.example
import akka.actor.{Actor,ActorLogging,PoisonPill,Props}
import spray.routing._
import spray.http._
import MediaTypes._
import spray.can.Http
import scala.concurrent.duration._
import akka.util.Timeout
import spray.routing.RequestContext
import spray.http.StatusCodes
// we don't implement our route structure directly in the service actor because
// we want to be able to test it independently, without having to spin up an actor
class RestInterface extends Actor with RestApi with ActorLogging {
  // the HttpService trait defines only one abstract member, which
  // connects the services environment to the enclosing actor or test
  def actorRefFactory = context
  // serve the routes defined in RestApi from this actor's mailbox
  def receive = runRoute(routes)
}
// this trait defines our service behavior independently from the service actor
trait RestApi extends HttpService {
  import akka.pattern.ask
  import akka.pattern.pipe
  import PingPongProtocol._
  // we use the enclosing ActorContext's or ActorSystem's dispatcher for our Futures and Scheduler
  implicit def executionContext = actorRefFactory.dispatcher
  // a single PingPong actor shared by all incoming requests
  val pingPong = actorRefFactory.actorOf(Props[PingPong])
  // upper bound for the ask-pattern calls below
  implicit val timeout = Timeout(10.seconds)
  val routes =
    path("") {
      get {
        // XML is marshalled to `text/xml` by default, so we simply override here
        respondWithMediaType(`text/html`) {
          complete(indexContent())
        }
      }
    } ~
    path("ping") {
      get { requestContext =>
        // ask PingPong for an answer and let a per-request responder actor
        // complete the HTTP request when the reply (or failure) is piped to it
        val responder = createPingPongResponder(requestContext)
        pingPong.ask(PongRequest).pipeTo(responder)
      }
    } ~
    path("pingfuture") {
      get { requestContext =>
        // same as /ping but the PingPong actor answers via a Future internally
        val responder = createPingPongResponder(requestContext)
        pingPong.ask(PongFutureRequest).pipeTo(responder)
      }
    } ~
    path("pang") {
      get {
        complete("PANG!")
      }
    }
  // static landing page served at "/"
  def indexContent() = {
    <html>
      <body>
        <h1>Say hello to <i>spray-routing</i> on <i>spray-can</i>!</h1>
      </body>
    </html>
  }
  // spawns one short-lived responder actor per request
  def createPingPongResponder(requestContext:RequestContext) = {
    actorRefFactory.actorOf(Props(new PingPongResponder(requestContext)))
  }
}
/**
 * Short-lived per-request actor: completes the given spray `RequestContext`
 * with the first message it receives, then stops itself via `PoisonPill`.
 */
class PingPongResponder(requestContext:RequestContext) extends Actor with ActorLogging {
  import PingPongProtocol._
  import spray.httpx.SprayJsonSupport._
  def receive = {
    case PongResponse(message) =>
      requestContext.complete(StatusCodes.OK, message)
      self ! PoisonPill
    case _ =>
      // anything else (e.g. an ask failure piped here) is reported as a 500
      log.error("received unknown PingPong message")
      requestContext.complete(StatusCodes.InternalServerError)
      self ! PoisonPill
  }
}
| wtfleming/spray-template | src/main/scala/com/example/RestInterface.scala | Scala | mit | 2,614 |
package monocle.std
import monocle.{Iso, PIso, PPrism, Prism}
import scalaz.syntax.std.option._
import scalaz.{-\\/, Maybe, \\/-}
object maybe extends MaybeOptics
trait MaybeOptics {
final def pMaybeToOption[A, B]: PIso[Maybe[A], Maybe[B], Option[A], Option[B]] =
PIso((_: Maybe[A]).toOption)((_: Option[B]).toMaybe)
final def maybeToOption[A]: Iso[Maybe[A], Option[A]] =
pMaybeToOption[A, A]
final def pJust[A, B]: PPrism[Maybe[A], Maybe[B], A, B] =
PPrism[Maybe[A], Maybe[B], A, B](_.cata(\\/-(_), -\\/(Maybe.empty)))(Maybe.just[B])
final def just[A]: Prism[Maybe[A], A] =
pJust[A, A]
final def nothing[A]: Prism[Maybe[A], Unit] =
Prism[Maybe[A], Unit](m => if(m.isEmpty) Some(()) else None)(_ => Maybe.empty)
} | rperry/Monocle | core/shared/src/main/scala/monocle/std/Maybe.scala | Scala | mit | 747 |
package com.twitter.finagle.builder
import com.twitter.conversions.DurationOps._
import com.twitter.finagle.ChannelClosedException
import com.twitter.finagle.Service
import com.twitter.finagle.client.utils.StringClient
import com.twitter.finagle.server.utils.StringServer
import com.twitter.util.{Await, Future}
import java.net.{InetAddress, InetSocketAddress}
import org.scalatest.funsuite.AnyFunSuite
class ServerChannelConfigurationTest extends AnyFunSuite {

  // Service that simply echoes each request back to the caller.
  val identityService = Service.mk[String, String] { req => Future.value(req) }

  /**
   * Connects a client to `addr` and issues a request which is NOT
   * newline-delimited, so the server never sees a complete message and is
   * expected to close the connection once its configured session timeout
   * (`timeoutMillis`) elapses. The client-side wait is bounded at 3x the
   * timeout to avoid hanging the test on failure.
   */
  private def assertServerClosesConnection(addr: InetSocketAddress, timeoutMillis: Long): Unit = {
    val client: Service[String, String] = ClientBuilder()
      .stack(StringClient.Client(appendDelimiter = false))
      .daemon(true) // don't create an exit guard
      .hosts(addr)
      .hostConnectionLimit(1)
      .build()
    intercept[ChannelClosedException] { Await.result(client("123"), (timeoutMillis * 3).millis) }
  }

  test("close connection after max life time duration") {
    val lifeTimeMillis = 100L
    val address = new InetSocketAddress(InetAddress.getLoopbackAddress, 0)
    val server = StringServer
      .Server()
      .withSession.maxLifeTime(lifeTimeMillis.millis)
      .withLabel("FinagleServer")
      .serve(address, identityService)
    assertServerClosesConnection(server.boundAddress.asInstanceOf[InetSocketAddress], lifeTimeMillis)
    server.close()
  }

  test("close connection after max idle time duration") {
    val idleTimeMillis = 100L
    val address = new InetSocketAddress(InetAddress.getLoopbackAddress, 0)
    val server = StringServer
      .Server()
      .withSession.maxIdleTime(idleTimeMillis.millis)
      .withLabel("FinagleServer")
      .serve(address, identityService)
    assertServerClosesConnection(server.boundAddress.asInstanceOf[InetSocketAddress], idleTimeMillis)
    server.close()
  }
}
| twitter/finagle | finagle-core/src/test/scala/com/twitter/finagle/builder/ServerChannelConfigurationTest.scala | Scala | apache-2.0 | 2,235 |
package beam.calibration.utils
import com.sigopt.exception.APIConnectionError
object SigOptApiToken {
  /**
   * Reads the SigOpt client API token from the `SIGOPT_API_TOKEN` environment
   * variable.
   *
   * @throws APIConnectionError when the variable is not set
   */
  def getClientAPIToken: String = {
    // System.getenv returns null for an unset variable; Option(...) maps that to None
    Option(System.getenv("SIGOPT_API_TOKEN")) match {
      case Some(token) => token
      case None =>
        throw new APIConnectionError(
          "Correct developer client token must be present in environment as SIGOPT_DEV_API Token"
        )
    }
  }
}
| colinsheppard/beam | src/main/scala/beam/calibration/utils/SigOptApiToken.scala | Scala | gpl-3.0 | 358 |
// Case class with a `protected` primary constructor — checks the modifier parses/compiles.
case class Foo protected(a: Int)
case class Bar private(a: Int) | som-snytt/dotty | tests/pos/i5827.scala | Scala | apache-2.0 | 64 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package top
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.parsing.params.TypeParamClause
/**
* @author Alexander Podkhalyuzin
* Date: 06.02.2008
*/
/*
* TraitDef ::= id [TypeParamClause] TraitTemplateOpt
*/
object TraitDef extends TraitDef {
override protected def templateOpt = TraitTemplateOpt
override protected def typeParamClause = TypeParamClause
}
trait TraitDef {
protected def templateOpt: TemplateOpt
protected def typeParamClause: TypeParamClause
def parse(builder: ScalaPsiBuilder): Boolean = builder.getTokenType match {
case ScalaTokenTypes.tIDENTIFIER =>
builder.advanceLexer() //Ate identifier
typeParamClause.parse(builder)
templateOpt.parse(builder)
true
case _ =>
builder.error(ErrMsg("identifier.expected"))
false
}
} | ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/parser/parsing/top/TraitDef.scala | Scala | apache-2.0 | 1,030 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import java.io.IOException
import java.util.{List => JList}
import javax.security.auth.login.LoginException
import scala.collection.JavaConverters._
import scala.util.control.NonFatal
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hadoop.hive.shims.Utils
import org.apache.hadoop.security.{SecurityUtil, UserGroupInformation}
import org.apache.hive.service.{AbstractService, CompositeService, Service}
import org.apache.hive.service.Service.STATE
import org.apache.hive.service.auth.HiveAuthFactory
import org.apache.hive.service.cli._
import org.apache.hive.service.server.HiveServer2
import org.slf4j.Logger
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._
private[hive] class SparkSQLCLIService(hiveServer: HiveServer2, sqlContext: SQLContext)
extends CLIService(hiveServer)
with ReflectedCompositeService {
override def init(hiveConf: HiveConf): Unit = {
setSuperField(this, "hiveConf", hiveConf)
val sparkSqlSessionManager = new SparkSQLSessionManager(hiveServer, sqlContext)
setSuperField(this, "sessionManager", sparkSqlSessionManager)
addService(sparkSqlSessionManager)
var sparkServiceUGI: UserGroupInformation = null
var httpUGI: UserGroupInformation = null
if (UserGroupInformation.isSecurityEnabled) {
try {
val principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL)
val keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB)
if (principal.isEmpty || keyTabFile.isEmpty) {
throw QueryExecutionErrors.invalidKerberosConfigForHiveServer2Error()
}
val originalUgi = UserGroupInformation.getCurrentUser
sparkServiceUGI = if (HiveAuthFactory.needUgiLogin(originalUgi,
SecurityUtil.getServerPrincipal(principal, "0.0.0.0"), keyTabFile)) {
HiveAuthFactory.loginFromKeytab(hiveConf)
Utils.getUGI()
} else {
originalUgi
}
setSuperField(this, "serviceUGI", sparkServiceUGI)
} catch {
case e @ (_: IOException | _: LoginException) =>
throw HiveThriftServerErrors.cannotLoginToKerberosError(e)
}
// Try creating spnego UGI if it is configured.
val principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_PRINCIPAL).trim
val keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_KEYTAB).trim
if (principal.nonEmpty && keyTabFile.nonEmpty) {
try {
httpUGI = HiveAuthFactory.loginFromSpnegoKeytabAndReturnUGI(hiveConf)
setSuperField(this, "httpUGI", httpUGI)
} catch {
case e: IOException =>
throw HiveThriftServerErrors.cannotLoginToSpnegoError(principal, keyTabFile, e)
}
}
}
initCompositeService(hiveConf)
}
/**
* the super class [[CLIService#start]] starts a useless dummy metastore client, skip it and call
* the ancestor [[CompositeService#start]] directly.
*/
override def start(): Unit = startCompositeService()
override def getInfo(sessionHandle: SessionHandle, getInfoType: GetInfoType): GetInfoValue = {
getInfoType match {
case GetInfoType.CLI_SERVER_NAME => new GetInfoValue("Spark SQL")
case GetInfoType.CLI_DBMS_NAME => new GetInfoValue("Spark SQL")
case GetInfoType.CLI_DBMS_VER => new GetInfoValue(sqlContext.sparkContext.version)
case GetInfoType.CLI_ODBC_KEYWORDS => new GetInfoValue("Unimplemented")
case _ => super.getInfo(sessionHandle, getInfoType)
}
}
}
private[thriftserver] trait ReflectedCompositeService { this: AbstractService =>
private val logInfo = (msg: String) => getAncestorField[Logger](this, 3, "LOG").info(msg)
private val logError = (msg: String, e: Throwable) =>
getAncestorField[Logger](this, 3, "LOG").error(msg, e)
def initCompositeService(hiveConf: HiveConf): Unit = {
// Emulating `CompositeService.init(hiveConf)`
val serviceList = getAncestorField[JList[Service]](this, 2, "serviceList")
serviceList.asScala.foreach(_.init(hiveConf))
// Emulating `AbstractService.init(hiveConf)`
invoke(classOf[AbstractService], this, "ensureCurrentState", classOf[STATE] -> STATE.NOTINITED)
setAncestorField(this, 3, "hiveConf", hiveConf)
invoke(classOf[AbstractService], this, "changeState", classOf[STATE] -> STATE.INITED)
logInfo(s"Service: $getName is inited.")
}
def startCompositeService(): Unit = {
// Emulating `CompositeService.start`
val serviceList = getAncestorField[JList[Service]](this, 2, "serviceList")
var serviceStartCount = 0
try {
serviceList.asScala.foreach { service =>
service.start()
serviceStartCount += 1
}
// Emulating `AbstractService.start`
val startTime = java.lang.Long.valueOf(System.currentTimeMillis())
setAncestorField(this, 3, "startTime", startTime)
invoke(classOf[AbstractService], this, "ensureCurrentState", classOf[STATE] -> STATE.INITED)
invoke(classOf[AbstractService], this, "changeState", classOf[STATE] -> STATE.STARTED)
logInfo(s"Service: $getName is started.")
} catch {
case NonFatal(e) =>
logError(s"Error starting services $getName", e)
invoke(classOf[CompositeService], this, "stop",
classOf[Int] -> Integer.valueOf(serviceStartCount))
throw HiveThriftServerErrors.failedToStartServiceError(getName, e)
}
}
}
| ueshin/apache-spark | sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIService.scala | Scala | apache-2.0 | 6,391 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.utils
import org.apache.flink.table.data.binary.BinaryRowData
import org.apache.flink.table.planner.JDouble
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.expressions.PlannerNamedWindowProperty
import org.apache.flink.table.planner.plan.nodes.calcite.{Expand, Rank, WindowAggregate}
import org.apache.flink.table.planner.plan.nodes.physical.batch.{BatchPhysicalGroupAggregateBase, BatchPhysicalLocalHashWindowAggregate, BatchPhysicalLocalSortWindowAggregate, BatchPhysicalWindowAggregateBase}
import org.apache.flink.table.runtime.operators.rank.{ConstantRankRange, RankRange}
import org.apache.flink.table.runtime.operators.sort.BinaryIndexedSortable
import org.apache.flink.table.runtime.typeutils.BinaryRowDataSerializer.LENGTH_SIZE_IN_BYTES
import com.google.common.collect.ImmutableList
import org.apache.calcite.avatica.util.TimeUnitRange._
import org.apache.calcite.plan.RelOptUtil
import org.apache.calcite.rel.core._
import org.apache.calcite.rel.metadata.{RelMdUtil, RelMetadataQuery}
import org.apache.calcite.rel.{RelNode, SingleRel}
import org.apache.calcite.rex._
import org.apache.calcite.sql.SqlKind
import org.apache.calcite.sql.`type`.SqlTypeName.{TIME, TIMESTAMP}
import org.apache.calcite.util.{ImmutableBitSet, NumberUtil}
import java.math.BigDecimal
import java.util
import scala.collection.JavaConversions._
import scala.collection.mutable
/**
* FlinkRelMdUtil provides utility methods used by the metadata provider methods.
*/
object FlinkRelMdUtil {
/** Returns an estimate of the number of rows returned by a SEMI/ANTI [[Join]]. */
def getSemiAntiJoinRowCount(mq: RelMetadataQuery, left: RelNode, right: RelNode,
joinType: JoinRelType, condition: RexNode, isAnti: Boolean): JDouble = {
val leftCount = mq.getRowCount(left)
if (leftCount == null) {
return null
}
var selectivity = RexUtil.getSelectivity(condition)
if (isAnti) {
selectivity = 1d - selectivity
}
leftCount * selectivity
}
  /**
   * Creates a RexNode that stores a selectivity value corresponding to the
   * selectivity of a semi-join/anti-join. This can be added to a filter to simulate the
   * effect of the semi-join/anti-join during costing, but should never appear in a real
   * plan since it has no physical implementation.
   *
   * @param mq instance of metadata query
   * @param rel the SEMI/ANTI join of interest
   * @return constructed rexNode
   */
  def makeSemiAntiJoinSelectivityRexNode(mq: RelMetadataQuery, rel: Join): RexNode = {
    require(rel.getJoinType == JoinRelType.SEMI || rel.getJoinType == JoinRelType.ANTI)
    val joinInfo = rel.analyzeCondition()
    val rexBuilder = rel.getCluster.getRexBuilder
    // delegate to the private overload; ANTI joins use the complement selectivity
    makeSemiAntiJoinSelectivityRexNode(
      mq, joinInfo, rel.getLeft, rel.getRight, rel.getJoinType == JoinRelType.ANTI, rexBuilder)
  }
  /**
   * Builds the artificial-selectivity RexNode for a SEMI/ANTI join.
   *
   * The SEMI selectivity is the product of the selectivity computed on the
   * equi-join keys (1.0 when there are none) and a guessed selectivity for the
   * remaining non-equi condition; ANTI joins use the complement of that value.
   */
  private def makeSemiAntiJoinSelectivityRexNode(
      mq: RelMetadataQuery,
      joinInfo: JoinInfo,
      left: RelNode,
      right: RelNode,
      isAnti: Boolean,
      rexBuilder: RexBuilder): RexNode = {
    val equiSelectivity: JDouble = if (!joinInfo.leftKeys.isEmpty) {
      RelMdUtil.computeSemiJoinSelectivity(mq, left, right, joinInfo.leftKeys, joinInfo.rightKeys)
    } else {
      1D
    }
    val nonEquiSelectivity = RelMdUtil.guessSelectivity(joinInfo.getRemaining(rexBuilder))
    val semiJoinSelectivity = equiSelectivity * nonEquiSelectivity
    val selectivity = if (isAnti) {
      val antiJoinSelectivity = 1.0 - semiJoinSelectivity
      if (antiJoinSelectivity == 0.0) {
        // we don't expect that anti-join's selectivity is 0.0, so choose a default value 0.1
        0.1
      } else {
        antiJoinSelectivity
      }
    } else {
      semiJoinSelectivity
    }
    // wrap the numeric selectivity in the ARTIFICIAL_SELECTIVITY marker function
    rexBuilder.makeCall(
      RelMdUtil.ARTIFICIAL_SELECTIVITY_FUNC,
      rexBuilder.makeApproxLiteral(new BigDecimal(selectivity)))
  }
/**
* Estimates new distinctRowCount of currentNode after it applies a condition.
* The estimation based on one assumption:
* even distribution of all distinct data
*
* @param rowCount rowcount of node.
* @param distinctRowCount distinct rowcount of node.
* @param selectivity selectivity of condition expression.
* @return new distinctRowCount
*/
def adaptNdvBasedOnSelectivity(
rowCount: JDouble,
distinctRowCount: JDouble,
selectivity: JDouble): JDouble = {
val ndv = Math.min(distinctRowCount, rowCount)
Math.max((1 - Math.pow(1 - selectivity, rowCount / ndv)) * ndv, 1.0)
}
/**
* Estimates ratio outputRowCount/ inputRowCount of agg when ndv of groupKeys is unavailable.
*
* the value of `1.0 - math.exp(-0.1 * groupCount)` increases with groupCount
* from 0.095 until close to 1.0. when groupCount is 1, the formula result is 0.095,
* when groupCount is 2, the formula result is 0.18,
* when groupCount is 3, the formula result is 0.25.
* ...
*
* @param groupingLength grouping keys length of aggregate
* @return the ratio outputRowCount/ inputRowCount of agg when ndv of groupKeys is unavailable.
*/
def getAggregationRatioIfNdvUnavailable(groupingLength: Int): JDouble =
1.0 - math.exp(-0.1 * groupingLength)
  /**
   * Creates a RexNode that stores a selectivity value corresponding to the
   * selectivity of a NamedProperties predicate.
   *
   * @param winAgg window aggregate node
   * @param predicate a RexNode
   * @return constructed rexNode including non-NamedProperties predicates and
   *         a predicate that stores NamedProperties predicate's selectivity
   */
  def makeNamePropertiesSelectivityRexNode(
      winAgg: WindowAggregate,
      predicate: RexNode): RexNode = {
    // the named window properties sit after the full group set in the output row
    val fullGroupSet = AggregateUtil.checkAndGetFullGroupSet(winAgg)
    makeNamePropertiesSelectivityRexNode(winAgg, fullGroupSet, winAgg.getNamedProperties, predicate)
  }
  /**
   * Creates a RexNode that stores a selectivity value corresponding to the
   * selectivity of a NamedProperties predicate.
   *
   * @param globalWinAgg global window aggregate node; must be a final (global)
   *                     aggregate since only those carry NamedProperties
   * @param predicate a RexNode
   * @return constructed rexNode including non-NamedProperties predicates and
   *         a predicate that stores NamedProperties predicate's selectivity
   */
  def makeNamePropertiesSelectivityRexNode(
      globalWinAgg: BatchPhysicalWindowAggregateBase,
      predicate: RexNode): RexNode = {
    require(globalWinAgg.isFinal, "local window agg does not contain NamedProperties!")
    val fullGrouping = globalWinAgg.grouping ++ globalWinAgg.auxGrouping
    makeNamePropertiesSelectivityRexNode(
      globalWinAgg, fullGrouping, globalWinAgg.namedWindowProperties, predicate)
  }
  /**
   * Creates a RexNode that stores a selectivity value corresponding to the
   * selectivity of a NamedProperties predicate.
   *
   * @param winAgg window aggregate node
   * @param fullGrouping full groupSets
   * @param namedProperties NamedWindowProperty list
   * @param predicate a RexNode
   * @return constructed rexNode including non-NamedProperties predicates and
   *         a predicate that stores NamedProperties predicate's selectivity
   */
  def makeNamePropertiesSelectivityRexNode(
      winAgg: SingleRel,
      fullGrouping: Array[Int],
      namedProperties: Seq[PlannerNamedWindowProperty],
      predicate: RexNode): RexNode = {
    // nothing to rewrite when there is no real predicate or no named properties
    if (predicate == null || predicate.isAlwaysTrue || namedProperties.isEmpty) {
      return predicate
    }
    val rexBuilder = winAgg.getCluster.getRexBuilder
    // named properties occupy the trailing fields of the aggregate's row type
    val namePropertiesStartIdx = winAgg.getRowType.getFieldCount - namedProperties.size
    // split non-nameProperties predicates and nameProperties predicates
    val pushable = new util.ArrayList[RexNode]
    val notPushable = new util.ArrayList[RexNode]
    RelOptUtil.splitFilters(
      ImmutableBitSet.range(0, namePropertiesStartIdx),
      predicate,
      pushable,
      notPushable)
    if (notPushable.nonEmpty) {
      // replace the NamedProperties conjuncts with a single artificial-selectivity call
      val pred = RexUtil.composeConjunction(rexBuilder, notPushable, true)
      val selectivity = RelMdUtil.guessSelectivity(pred)
      val fun = rexBuilder.makeCall(
        RelMdUtil.ARTIFICIAL_SELECTIVITY_FUNC,
        rexBuilder.makeApproxLiteral(new BigDecimal(selectivity)))
      pushable.add(fun)
    }
    RexUtil.composeConjunction(rexBuilder, pushable, true)
  }
  /**
   * Returns the number of distinct values provided numSelected are selected
   * where there are domainSize distinct values.
   *
   * <p>Current implementation of RelMdUtil#numDistinctVals in Calcite 1.26
   * has precision problem, so we treat small and large inputs differently
   * here and handle large inputs with the old implementation of
   * RelMdUtil#numDistinctVals in Calcite 1.22.
   *
   * <p>This method should be removed once CALCITE-4351 is fixed. See CALCITE-4351
   * and FLINK-19780.
   */
  def numDistinctVals(domainSize: Double, numSelected: Double): Double = {
    val EPS = 1e-9
    // small-domain path: use the ln(1+x) ~= x approximation directly
    if (Math.abs(1 / domainSize) < EPS || domainSize < 1) {
      // ln(1+x) ~= x for small x
      val dSize = RelMdUtil.capInfinity(domainSize)
      val numSel = RelMdUtil.capInfinity(numSelected)
      val res = if (dSize > 0) (1.0 - Math.exp(-1 * numSel / dSize)) * dSize else 0
      // fix the boundary cases
      Math.max(0, Math.min(res, Math.min(dSize, numSel)))
    } else {
      // large-domain path: defer to Calcite's implementation
      RelMdUtil.numDistinctVals(domainSize, numSelected)
    }
  }
/**
* Estimates outputRowCount of local aggregate.
*
* output rowcount of local agg is (1 - pow((1 - 1/x) , n/m)) * m * x, based on two assumption:
* 1. even distribution of all distinct data
* 2. even distribution of all data in each concurrent local agg worker
*
* @param parallelism number of concurrent worker of local aggregate
* @param inputRowCount rowcount of input node of aggregate.
* @param globalAggRowCount rowcount of output of global aggregate.
* @return outputRowCount of local aggregate.
*/
def getRowCountOfLocalAgg(
parallelism: Int,
inputRowCount: JDouble,
globalAggRowCount: JDouble): JDouble =
Math.min((1 - math.pow(1 - 1.0 / parallelism, inputRowCount / globalAggRowCount))
* globalAggRowCount * parallelism, inputRowCount)
  /**
   * Takes a bitmap representing a set of input references and extracts the
   * ones that reference the group by columns in an aggregate.
   *
   * Bits below the full group set are mapped onto the aggregate's input
   * columns; bits beyond it refer to aggregate calls and are collected
   * separately (only when the bitmap does not already cover the whole
   * group set).
   *
   * @param groupKey the original bitmap
   * @param aggRel the aggregate
   * @return mapped child keys plus the referenced aggregate calls
   */
  def setAggChildKeys(
      groupKey: ImmutableBitSet,
      aggRel: Aggregate): (ImmutableBitSet, Array[AggregateCall]) = {
    val childKeyBuilder = ImmutableBitSet.builder
    val aggCalls = new mutable.ArrayBuffer[AggregateCall]()
    val groupSet = aggRel.getGroupSet.toArray
    val (auxGroupSet, otherAggCalls) = AggregateUtil.checkAndSplitAggCalls(aggRel)
    val fullGroupSet = groupSet ++ auxGroupSet
    // does not need to take keys in aggregate call into consideration if groupKey contains all
    // groupSet element in aggregate
    val containsAllAggGroupKeys = fullGroupSet.indices.forall(groupKey.get)
    groupKey.foreach(
      bit =>
        if (bit < fullGroupSet.length) {
          childKeyBuilder.set(fullGroupSet(bit))
        } else if (!containsAllAggGroupKeys) {
          // getIndicatorCount return 0 if auxGroupSet is not empty
          val agg = otherAggCalls.get(bit - (fullGroupSet.length + aggRel.getIndicatorCount))
          aggCalls += agg
        }
    )
    (childKeyBuilder.build(), aggCalls.toArray)
  }
  /**
   * Takes a bitmap representing a set of input references and extracts the
   * ones that reference the group by columns in an aggregate.
   *
   * Only non-merge aggregates are supported: a global agg on top of a local
   * agg has a different input layout.
   *
   * @param groupKey the original bitmap
   * @param aggRel the aggregate
   */
  def setAggChildKeys(
      groupKey: ImmutableBitSet,
      aggRel: BatchPhysicalGroupAggregateBase): (ImmutableBitSet, Array[AggregateCall]) = {
    require(!aggRel.isFinal || !aggRel.isMerge, "Cannot handle global agg which has local agg!")
    setChildKeysOfAgg(groupKey, aggRel)
  }
  /**
   * Takes a bitmap representing a set of input references and extracts the
   * ones that reference the group by columns in an aggregate.
   *
   * Only non-merge window aggregates are supported: a global agg on top of a
   * local agg has a different input layout (see [[setChildKeysOfWinAgg]]).
   *
   * @param groupKey the original bitmap
   * @param aggRel the aggregate
   */
  def setAggChildKeys(
      groupKey: ImmutableBitSet,
      aggRel: BatchPhysicalWindowAggregateBase): (ImmutableBitSet, Array[AggregateCall]) = {
    require(!aggRel.isFinal || !aggRel.isMerge, "Cannot handle global agg which has local agg!")
    setChildKeysOfAgg(groupKey, aggRel)
  }
  /**
   * Shared implementation for the physical-aggregate variants of
   * [[setAggChildKeys]]: computes the full group set (including `assignTs`
   * for local window aggregates), then maps bits below it onto input columns
   * and collects aggregate calls referenced by the remaining bits.
   */
  private def setChildKeysOfAgg(
      groupKey: ImmutableBitSet,
      agg: SingleRel): (ImmutableBitSet, Array[AggregateCall]) = {
    val (aggCalls, fullGroupSet) = agg match {
      case agg: BatchPhysicalLocalSortWindowAggregate =>
        // grouping + assignTs + auxGrouping
        (agg.getAggCallList, agg.grouping ++ Array(agg.inputTimeFieldIndex) ++ agg.auxGrouping)
      case agg: BatchPhysicalLocalHashWindowAggregate =>
        // grouping + assignTs + auxGrouping
        (agg.getAggCallList, agg.grouping ++ Array(agg.inputTimeFieldIndex) ++ agg.auxGrouping)
      case agg: BatchPhysicalWindowAggregateBase =>
        (agg.getAggCallList, agg.grouping ++ agg.auxGrouping)
      case agg: BatchPhysicalGroupAggregateBase =>
        (agg.getAggCallList, agg.grouping ++ agg.auxGrouping)
      case _ => throw new IllegalArgumentException(s"Unknown aggregate: ${agg.getRelTypeName}")
    }
    // does not need to take keys in aggregate call into consideration if groupKey contains all
    // groupSet element in aggregate
    val containsAllAggGroupKeys = fullGroupSet.indices.forall(groupKey.get)
    val childKeyBuilder = ImmutableBitSet.builder
    val aggs = new mutable.ArrayBuffer[AggregateCall]()
    groupKey.foreach { bit =>
      if (bit < fullGroupSet.length) {
        // bit refers to a grouping column: translate to the input column index
        childKeyBuilder.set(fullGroupSet(bit))
      } else if (!containsAllAggGroupKeys) {
        // bit refers to an aggregate call output
        val agg = aggCalls.get(bit - fullGroupSet.length)
        aggs += agg
      }
    }
    (childKeyBuilder.build(), aggs.toArray)
  }
/**
* Takes a bitmap representing a set of local window aggregate references.
*
* global win-agg output type: groupSet + auxGroupSet + aggCall + namedProperties
* local win-agg output type: groupSet + assignTs + auxGroupSet + aggCalls
*
* Skips `assignTs` when mapping `groupKey` to `childKey`.
*
* @param groupKey the original bitmap
* @param globalWinAgg the global window aggregate
*/
def setChildKeysOfWinAgg(
groupKey: ImmutableBitSet,
globalWinAgg: BatchPhysicalWindowAggregateBase): ImmutableBitSet = {
require(globalWinAgg.isMerge, "Cannot handle global agg which does not have local window agg!")
val childKeyBuilder = ImmutableBitSet.builder
groupKey.toArray.foreach { key =>
if (key < globalWinAgg.grouping.length) {
childKeyBuilder.set(key)
} else {
// skips `assignTs`
childKeyBuilder.set(key + 1)
}
}
childKeyBuilder.build()
}
  /**
   * Split groupKeys on Aggregate/ BatchPhysicalGroupAggregateBase/ BatchPhysicalWindowAggregateBase
   * into keys on aggregate's groupKey and aggregate's aggregateCalls.
   *
   * @param agg the aggregate
   * @param groupKey the original bitmap
   * @return child-input keys (with auxGroupSet bits dropped when the full
   *         groupSet is covered) plus the referenced aggregate calls
   */
  def splitGroupKeysOnAggregate(
      agg: SingleRel,
      groupKey: ImmutableBitSet): (ImmutableBitSet, Array[AggregateCall]) = {
    // Drops auxGroupSet bits when groupKey already covers the full groupSet,
    // since the aux keys are functionally determined by the group keys then.
    def removeAuxKey(
        groupKey: ImmutableBitSet,
        groupSet: Array[Int],
        auxGroupSet: Array[Int]): ImmutableBitSet = {
      if (groupKey.contains(ImmutableBitSet.of(groupSet: _*))) {
        // remove auxGroupSet from groupKey if groupKey contain both full-groupSet
        // and (partial-)auxGroupSet
        groupKey.except(ImmutableBitSet.of(auxGroupSet: _*))
      } else {
        groupKey
      }
    }
    agg match {
      case rel: Aggregate =>
        val (auxGroupSet, _) = AggregateUtil.checkAndSplitAggCalls(rel)
        val (childKeys, aggCalls) = setAggChildKeys(groupKey, rel)
        val childKeyExcludeAuxKey = removeAuxKey(childKeys, rel.getGroupSet.toArray, auxGroupSet)
        (childKeyExcludeAuxKey, aggCalls)
      case rel: BatchPhysicalGroupAggregateBase =>
        // set the bits as they correspond to the child input
        val (childKeys, aggCalls) = setAggChildKeys(groupKey, rel)
        val childKeyExcludeAuxKey = removeAuxKey(childKeys, rel.grouping, rel.auxGrouping)
        (childKeyExcludeAuxKey, aggCalls)
      case rel: BatchPhysicalWindowAggregateBase =>
        val (childKeys, aggCalls) = setAggChildKeys(groupKey, rel)
        val childKeyExcludeAuxKey = removeAuxKey(childKeys, rel.grouping, rel.auxGrouping)
        (childKeyExcludeAuxKey, aggCalls)
      case _ => throw new IllegalArgumentException(s"Unknown aggregate: ${agg.getRelTypeName}.")
    }
  }
/**
  * Splits a predicate on [[Aggregate]] into a pushable part and a rest part.
  *
  * @param agg Aggregate to analyze
  * @param predicate Predicate to analyze
  * @return a tuple of (pushable part, rest part); the pushable condition is
  *         rewritten in terms of the aggregate's input field positions
  */
def splitPredicateOnAggregate(
    agg: Aggregate,
    predicate: RexNode): (Option[RexNode], Option[RexNode]) =
  splitPredicateOnAgg(AggregateUtil.checkAndGetFullGroupSet(agg), agg, predicate)
/**
  * Splits a predicate on [[BatchPhysicalGroupAggregateBase]] into a pushable
  * part and a rest part.
  *
  * @param agg Aggregate to analyze
  * @param predicate Predicate to analyze
  * @return a tuple of (pushable part, rest part); the pushable condition is
  *         rewritten in terms of the aggregate's input field positions
  */
def splitPredicateOnAggregate(
    agg: BatchPhysicalGroupAggregateBase,
    predicate: RexNode): (Option[RexNode], Option[RexNode]) = {
  // only grouping and auxiliary grouping columns may appear below the aggregate
  val pushableColumns = agg.grouping ++ agg.auxGrouping
  splitPredicateOnAgg(pushableColumns, agg, predicate)
}
/**
  * Splits a predicate on [[BatchPhysicalWindowAggregateBase]] into a pushable
  * part and a rest part.
  *
  * @param agg Aggregate to analyze
  * @param predicate Predicate to analyze
  * @return a tuple of (pushable part, rest part); the pushable condition is
  *         rewritten in terms of the aggregate's input field positions
  */
def splitPredicateOnAggregate(
    agg: BatchPhysicalWindowAggregateBase,
    predicate: RexNode): (Option[RexNode], Option[RexNode]) = {
  // only grouping and auxiliary grouping columns may appear below the aggregate
  val pushableColumns = agg.grouping ++ agg.auxGrouping
  splitPredicateOnAgg(pushableColumns, agg, predicate)
}
/**
  * Shifts every [[RexInputRef]] in an expression whose index is at or beyond the
  * full grouping length, so the expression matches the local window aggregate's
  * layout (which interposes `assignTs`).
  *
  * global win-agg output type: groupSet + auxGroupSet + aggCall + namedProperties
  * local win-agg output type: groupSet + assignTs + auxGroupSet + aggCalls
  *
  * @param predicate a RexNode, may be null
  * @param globalWinAgg the global window aggregate
  */
def setChildPredicateOfWinAgg(
    predicate: RexNode,
    globalWinAgg: BatchPhysicalWindowAggregateBase): RexNode = {
  require(globalWinAgg.isMerge, "Cannot handle global agg which does not have local window agg!")
  if (predicate == null) {
    null
  } else {
    // grouping + auxGrouping; input refs past this length must skip `assignTs`
    val fullGrouping = globalWinAgg.grouping ++ globalWinAgg.auxGrouping
    RexUtil.shift(predicate, fullGrouping.length, 1)
  }
}
/**
  * Splits `predicate` into a part that only references the given group-key
  * columns (and thus can be pushed below the aggregate) and the remaining part.
  *
  * @param grouping  output positions of the aggregate's group keys
  * @param agg       the aggregate rel node
  * @param predicate predicate on the aggregate's output
  * @return (pushable part rewritten on input fields, non-pushable rest part);
  *         each element is None when its conjunct list is empty
  */
private def splitPredicateOnAgg(
    grouping: Array[Int],
    agg: SingleRel,
    predicate: RexNode): (Option[RexNode], Option[RexNode]) = {
  val notPushable = new util.ArrayList[RexNode]
  val pushable = new util.ArrayList[RexNode]
  val numOfGroupKey = grouping.length
  // conjuncts referencing only fields [0, numOfGroupKey) go into `pushable`
  RelOptUtil.splitFilters(
    ImmutableBitSet.range(0, numOfGroupKey),
    predicate,
    pushable,
    notPushable)
  val rexBuilder = agg.getCluster.getRexBuilder
  val childPred = if (pushable.isEmpty) {
    None
  } else {
    // Converts a list of expressions that are based on the output fields of a
    // Aggregate to equivalent expressions on the Aggregate's input fields.
    val aggOutputFields = agg.getRowType.getFieldList
    val aggInputFields = agg.getInput.getRowType.getFieldList
    // adjustments(i) = offset to add to output index i to reach the input index;
    // group key i in the output maps to input column grouping(i)
    val adjustments = new Array[Int](aggOutputFields.size)
    grouping.zipWithIndex foreach {
      case (bit, index) => adjustments(index) = bit - index
    }
    val pushableConditions = pushable map {
      pushCondition =>
        pushCondition.accept(
          new RelOptUtil.RexInputConverter(
            rexBuilder,
            aggOutputFields,
            aggInputFields,
            adjustments))
    }
    Option(RexUtil.composeConjunction(rexBuilder, pushableConditions, true))
  }
  val restPred = if (notPushable.isEmpty) {
    None
  } else {
    Option(RexUtil.composeConjunction(rexBuilder, notPushable, true))
  }
  (childPred, restPred)
}
/**
  * Estimates the average serialized size in bytes of a row of `rel` when stored
  * in Flink's binary row format ([[BinaryRowData]]).
  *
  * @param rel the rel node whose row type is measured
  * @return estimated average row size in bytes
  */
def binaryRowAverageSize(rel: RelNode): JDouble = {
  val binaryType = FlinkTypeFactory.toLogicalRowType(rel.getRowType)
  // TODO reuse FlinkRelMetadataQuery here
  val mq = rel.getCluster.getMetadataQuery
  val columnSizes = mq.getAverageColumnSizes(rel)
  var length = 0d
  columnSizes.zip(binaryType.getChildren).foreach {
    case (columnSize, internalType) =>
      if (BinaryRowData.isInFixedLengthPart(internalType)) {
        // fixed-length fields occupy one 8-byte slot in the fixed part
        length += 8
      } else {
        if (columnSize == null) {
          // find a better way of computing generic type field variable-length
          // right now we use a small value assumption
          length += 16
        } else {
          // the 8 bytes is used store the length and offset of variable-length part.
          length += columnSize + 8
        }
      }
  }
  // header bitmap (null bits) at the front of every binary row
  length += BinaryRowData.calculateBitSetWidthInBytes(columnSizes.size())
  length
}
/**
  * Estimates the memory (in bytes) needed to sort all rows of `inputOfSort`:
  * the record area plus the normalized-key index area.
  *
  * @param mq metadata query instance
  * @param inputOfSort the input rel of the sort
  * @return estimated sort memory in bytes
  */
def computeSortMemory(mq: RelMetadataQuery, inputOfSort: RelNode): JDouble = {
  // TODO: the normalized key length cannot be determined precisely at optimize
  // time; use SortCodeGenerator.MAX_NORMALIZED_KEY_LEN instead of the fixed 16.
  val normalizedKeyBytes = 16
  val rowCount = mq.getRowCount(inputOfSort)
  val recordAreaInBytes = rowCount * (binaryRowAverageSize(inputOfSort) + LENGTH_SIZE_IN_BYTES)
  val indexAreaInBytes = rowCount * (normalizedKeyBytes + BinaryIndexedSortable.OFFSET_LEN)
  recordAreaInBytes + indexAreaInBytes
}
/**
  * Splits a predicate on a [[Rank]] into the part that does not reference the
  * rank-number column and the part that does.
  *
  * @param rank the rank rel node
  * @param predicate predicate on the rank's output, may be null
  * @return (non-rank part, rank part); when there is no rank-number column or
  *         the predicate is trivial, the whole predicate is returned as-is
  */
def splitPredicateOnRank(
    rank: Rank,
    predicate: RexNode): (Option[RexNode], Option[RexNode]) = {
  val rankFunColumnIndex = RankUtil.getRankNumberColumnIndex(rank).getOrElse(-1)
  if (predicate == null || predicate.isAlwaysTrue || rankFunColumnIndex < 0) {
    (Some(predicate), None)
  } else {
    val rankNodes = new util.ArrayList[RexNode]
    val nonRankNodes = new util.ArrayList[RexNode]
    // conjuncts referencing only columns before the rank-number column are non-rank
    RelOptUtil.splitFilters(
      ImmutableBitSet.range(0, rankFunColumnIndex),
      predicate,
      nonRankNodes,
      rankNodes)
    val rexBuilder = rank.getCluster.getRexBuilder
    def compose(nodes: util.List[RexNode]): Option[RexNode] =
      if (nodes.isEmpty) None
      else Option(RexUtil.composeConjunction(rexBuilder, nodes, true))
    (compose(nonRankNodes), compose(rankNodes))
  }
}
/** Estimates the number of distinct rank values produced by `rankRange`. */
def getRankRangeNdv(rankRange: RankRange): JDouble = {
  rankRange match {
    case constant: ConstantRankRange =>
      // a constant range [start, end] yields exactly end - start + 1 rank values
      (constant.getRankEnd - constant.getRankStart + 1).toDouble
    case _ =>
      // no bounds known: fall back to a fixed default estimate
      100D
  }
}
/**
  * Returns the set of [[RexInputRef]] indices that the given output column maps
  * to across all projection rows of the expand. A value of -1 is recorded for
  * any projection whose expression at that column is not a [[RexInputRef]].
  */
def getInputRefIndices(index: Int, expand: Expand): util.Set[Int] = {
  val result = new util.HashSet[Int]()
  expand.projects.foreach { project =>
    project.get(index) match {
      case ref: RexInputRef => result.add(ref.getIndex)
      case _ => result.add(-1)
    }
  }
  result
}
/**
  * Splits a column set of a binary rel into the columns belonging to the left
  * input and those belonging to the right input (re-based to start at 0).
  */
def splitColumnsIntoLeftAndRight(
    leftCount: Int,
    columns: ImmutableBitSet): (ImmutableBitSet, ImmutableBitSet) = {
  val left = ImmutableBitSet.builder
  val right = ImmutableBitSet.builder
  columns.foreach { bit =>
    if (bit < leftCount) {
      left.set(bit)
    } else {
      // shift right-side bits so they index directly into the right input
      right.set(bit - leftCount)
    }
  }
  (left.build, right.build)
}
/**
  * Computes the cardinality of a particular expression from the projection
  * list.
  *
  * @param mq metadata query instance
  * @param calc calc RelNode
  * @param expr projection expression
  * @return cardinality
  */
def cardOfCalcExpr(mq: RelMetadataQuery, calc: Calc, expr: RexNode): JDouble = {
  val visitor = new CardOfCalcExpr(mq, calc)
  expr.accept(visitor)
}
/**
  * Visitor that walks over a scalar expression and computes the
  * cardinality (estimated number of distinct values) of its result.
  * The code is borrowed from RelMdUtil.
  *
  * @param mq metadata query instance
  * @param calc calc relnode
  */
private class CardOfCalcExpr(
    mq: RelMetadataQuery,
    calc: Calc)
  extends RexVisitorImpl[JDouble](true) {

  private val program = calc.getProgram

  // The calc's filter condition with local refs expanded, or null if there is
  // none; passed to getDistinctRowCount so the NDV reflects surviving rows only.
  private val condition = if (program.getCondition != null) {
    program.expandLocalRef(program.getCondition)
  } else {
    null
  }

  override def visitInputRef(inputRef: RexInputRef): JDouble = {
    val col = ImmutableBitSet.of(inputRef.getIndex)
    val distinctRowCount = mq.getDistinctRowCount(calc.getInput, col, condition)
    if (distinctRowCount == null) {
      null
    } else {
      // NOTE(review): the second argument here is the average row size, not a
      // row count — confirm against FlinkRelMdUtil.numDistinctVals's contract.
      FlinkRelMdUtil.numDistinctVals(distinctRowCount, mq.getAverageRowSize(calc))
    }
  }

  // A literal contributes exactly one distinct value.
  override def visitLiteral(literal: RexLiteral): JDouble = {
    FlinkRelMdUtil.numDistinctVals(1D, mq.getAverageRowSize(calc))
  }

  override def visitCall(call: RexCall): JDouble = {
    val rowCount = mq.getRowCount(calc)
    val distinctRowCount: JDouble = if (call.isA(SqlKind.MINUS_PREFIX)) {
      // unary minus preserves the operand's cardinality
      cardOfCalcExpr(mq, calc, call.getOperands.get(0))
    } else if (call.isA(ImmutableList.of(SqlKind.PLUS, SqlKind.MINUS))) {
      // a +/- b has at most max(card(a), card(b)) distinct values (estimate)
      val card0 = cardOfCalcExpr(mq, calc, call.getOperands.get(0))
      if (card0 == null) {
        null
      } else {
        val card1 = cardOfCalcExpr(mq, calc, call.getOperands.get(1))
        if (card1 == null) {
          null
        } else {
          Math.max(card0, card1)
        }
      }
    } else if (call.isA(ImmutableList.of(SqlKind.TIMES, SqlKind.DIVIDE))) {
      // a * b / a / b: estimated as the product of the operand cardinalities
      NumberUtil.multiply(
        cardOfCalcExpr(mq, calc, call.getOperands.get(0)),
        cardOfCalcExpr(mq, calc, call.getOperands.get(1)))
    } else if (call.isA(SqlKind.EXTRACT)) {
      val extractUnit = call.getOperands.get(0)
      val timeOperand = call.getOperands.get(1)
      extractUnit match {
        // go https://www.postgresql.org/docs/9.1/static/
        // functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT to get the definitions of timeunits
        case unit: RexLiteral =>
          val unitValue = unit.getValue
          val timeOperandType = timeOperand.getType.getSqlTypeName
          // assume min time is 1970-01-01 00:00:00, max time is 2100-12-31 21:59:59
          unitValue match {
            case YEAR => 130D // [1970, 2100]
            case MONTH => 12D
            case DAY => 31D
            case HOUR => 24D
            case MINUTE => 60D
            case SECOND => timeOperandType match {
              // timestamps/times carry millisecond precision for SECOND
              case TIMESTAMP | TIME => 60 * 1000D // [0.000, 59.999]
              case _ => 60D // [0, 59]
            }
            case QUARTER => 4D
            case WEEK => 53D // [1, 53]
            case MILLISECOND => timeOperandType match {
              case TIMESTAMP | TIME => 60 * 1000D // [0.000, 59.999]
              case _ => 60D // [0, 59]
            }
            case MICROSECOND => timeOperandType match {
              case TIMESTAMP | TIME => 60 * 1000D * 1000D // [0.000, 59.999]
              case _ => 60D // [0, 59]
            }
            case DOW => 7D // [0, 6]
            case DOY => 366D // [1, 366]
            case EPOCH => timeOperandType match {
              // the number of seconds since 1970-01-01 00:00:00 UTC
              case TIMESTAMP | TIME => 130 * 24 * 60 * 60 * 1000D
              case _ => 130 * 24 * 60 * 60D
            }
            case DECADE => 13D // The year field divided by 10
            case CENTURY => 2D
            case MILLENNIUM => 2D
            // unknown unit: fall back to the cardinality of the time operand
            case _ => cardOfCalcExpr(mq, calc, timeOperand)
          }
        case _ => cardOfCalcExpr(mq, calc, timeOperand)
      }
    } else if (call.getOperands.size() == 1) {
      // any other unary call: assume it preserves the operand's cardinality
      cardOfCalcExpr(mq, calc, call.getOperands.get(0))
    } else {
      // unknown multi-operand call: crude default of 10% of the row count
      if (rowCount != null) rowCount / 10 else null
    }
    if (distinctRowCount == null) {
      null
    } else {
      FlinkRelMdUtil.numDistinctVals(distinctRowCount, rowCount)
    }
  }
}
}
| StephanEwen/incubator-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/utils/FlinkRelMdUtil.scala | Scala | apache-2.0 | 30,350 |
package mesosphere.marathon.health
import mesosphere.marathon.state.Timestamp
/**
  * Outcome of a single health check for a task.
  * Concrete results are [[Healthy]] and [[Unhealthy]].
  */
sealed trait HealthResult {
  // Id of the task that was checked
  def taskId: String
  // Version associated with the task — NOTE(review): presumably the app
  // config version the task runs; confirm against callers.
  def version: String
  // When this result was produced
  def time: Timestamp
}
/** Result of a health check that passed. */
case class Healthy(
  taskId: String,
  version: String,
  // default is evaluated per construction, so it captures the creation time
  time: Timestamp = Timestamp.now()) extends HealthResult
/** Result of a health check that failed, with a human-readable cause. */
case class Unhealthy(
  taskId: String,
  version: String,
  // description of why the check failed
  cause: String,
  // default is evaluated per construction, so it captures the creation time
  time: Timestamp = Timestamp.now()) extends HealthResult
| HardikDR/marathon | src/main/scala/mesosphere/marathon/health/HealthResult.scala | Scala | apache-2.0 | 426 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.ops
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import org.scalatest.{FlatSpec, Matchers}
import scala.util.Random
/**
  * Tests for the Rank operation: for each supported tensor element type,
  * forwarding a 1-D tensor must yield a scalar tensor holding 1 (the input's
  * number of dimensions).
  */
class RankSpec extends FlatSpec with Matchers {
  // NOTE(review): `Rank[Float]()` is used for every element type below;
  // presumably the op's type parameter is independent of the input tensor's
  // element type — confirm against the Rank implementation.
  "Rank Float operation" should "works correctly" in {
    val input =
      Tensor[Float](T(1f, 2f, 2f))
    val expectOutput = Tensor.scalar(1)
    val output = Rank[Float]().forward(input)
    output should be(expectOutput)
  }
  "Rank Boolean operation" should "works correctly" in {
    val input =
      Tensor[Boolean](T(true, true, false))
    val expectOutput = Tensor.scalar(1)
    val output = Rank[Float]().forward(input)
    output should be(expectOutput)
  }
  "Rank Double operation" should "works correctly" in {
    val input =
      Tensor[Double](T(2.0, 3.0, 2.0))
    val expectOutput = Tensor.scalar(1)
    val output = Rank[Float]().forward(input)
    output should be(expectOutput)
  }
  "Rank Char operation" should "works correctly" in {
    val input =
      Tensor[Char](T('b', 'c', 'a'))
    val expectOutput = Tensor.scalar(1)
    val output = Rank[Float]().forward(input)
    output should be(expectOutput)
  }
  "Rank Long operation" should "works correctly" in {
    val input =
      Tensor[Long](T(2L, 3L, 2L))
    val expectOutput = Tensor.scalar(1)
    val output = Rank[Float]().forward(input)
    output should be(expectOutput)
  }
  "Rank String operation" should "works correctly" in {
    val input =
      Tensor[String](T("aaa", "ccc", "aaa"))
    val expectOutput = Tensor.scalar(1)
    val output = Rank[Float]().forward(input)
    output should be(expectOutput)
  }
  "Rank Short operation" should "works correctly" in {
    val input =
      Tensor[Short](T(2: Short, 3: Short, 2: Short))
    val expectOutput = Tensor.scalar(1)
    val output = Rank[Float]().forward(input)
    output should be(expectOutput)
  }
  "Rank Int operation" should "works correctly" in {
    val input =
      Tensor[Int](T(2, 3, 2))
    val expectOutput = Tensor.scalar(1)
    val output = Rank[Float]().forward(input)
    output should be(expectOutput)
  }
}
/** Serialization round-trip test for the Rank op using the shared test harness. */
class RankSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val rank = Rank[Float].setName("rank")
    // random 3x3 input is sufficient; only (de)serialization is exercised here
    val input = Tensor[Float](3, 3).apply1(_ => Random.nextFloat())
    runSerializationTest(rank, input, rank.
      asInstanceOf[ModuleToOperation[Float]].module.getClass)
  }
}
| wzhongyuan/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/ops/RankSpec.scala | Scala | apache-2.0 | 3,160 |
package org.jetbrains.plugins.scala
package lang
package psi
import com.intellij.codeInsight.hint.HintManager
import com.intellij.lang.ASTNode
import com.intellij.openapi.progress.ProgressManager
import com.intellij.psi._
import com.intellij.psi.impl.source.codeStyle.CodeEditUtil
import com.intellij.psi.scope._
import com.intellij.psi.stubs.StubElement
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.editor.importOptimizer.ScalaImportOptimizer
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScTypeAliasDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.usages.{ImportExprUsed, ImportSelectorUsed, ImportUsed, ImportWildcardSelectorUsed}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.{ScImportExpr, ScImportSelector, ScImportSelectors, ScImportStmt}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.packaging.ScPackaging
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
import org.jetbrains.plugins.scala.lang.psi.api.{ScalaFile, ScalaRecursiveElementVisitor}
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.ScSyntheticPackage
import org.jetbrains.plugins.scala.lang.psi.impl.{ScPackageImpl, ScalaPsiElementFactory}
import org.jetbrains.plugins.scala.lang.psi.types.ScDesignatorType
import org.jetbrains.plugins.scala.lang.psi.types.result.TypingContext
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
import org.jetbrains.plugins.scala.lang.resolve.processor.{CompletionProcessor, ResolveProcessor}
import org.jetbrains.plugins.scala.lang.resolve.{ScalaResolveResult, StdKinds}
import scala.annotation.tailrec
import scala.collection.immutable.HashSet
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
trait ScImportsHolder extends ScalaPsiElement {
/** All import statements that are direct children of this holder; reads from
  * the stub tree when one is available to avoid building the full AST. */
def getImportStatements: Seq[ScImportStmt] = {
  val fromStub = this match {
    case stubbed: ScalaStubBasedElementImpl[_] => Option(stubbed.getStub)
    case _ => None
  }
  fromStub match {
    case Some(stub) =>
      stub.getChildrenByType(ScalaElementTypes.IMPORT_STMT, JavaArrayFactoryUtil.ScImportStmtFactory).toSeq
    case None =>
      findChildrenByClassScala(classOf[ScImportStmt]).toSeq
  }
}
/** Feeds every import statement that lexically precedes `lastParent` to the
  * processor, walking backwards; stops early when the processor says stop. */
override def processDeclarations(processor: PsiScopeProcessor,
                                 state: ResolveState,
                                 lastParent: PsiElement,
                                 place: PsiElement): Boolean = {
  if (lastParent != null) {
    var current = ScalaPsiUtil.getPrevStubOrPsiElement(lastParent)
    while (current != null) {
      ProgressManager.checkCanceled()
      current match {
        case imp: ScImportStmt if !imp.processDeclarations(processor, state, lastParent, place) =>
          return false
        case _ =>
      }
      current = ScalaPsiUtil.getPrevStubOrPsiElement(current)
    }
  }
  true
}
/** Collects the import statements that lexically precede `lastParent`,
  * in reverse document order; empty when `lastParent` is null. */
def getImportsForLastParent(lastParent: PsiElement): Seq[ScImportStmt] = {
  val collected = ArrayBuffer.empty[ScImportStmt]
  if (lastParent != null) {
    var current = ScalaPsiUtil.getPrevStubOrPsiElement(lastParent)
    while (current != null) {
      ProgressManager.checkCanceled()
      current match {
        case imp: ScImportStmt => collected += imp
        case _ =>
      }
      current = ScalaPsiUtil.getPrevStubOrPsiElement(current)
    }
  }
  collected.toSeq
}
/** Recursively collects an [[ImportUsed]] entry for every import expression,
  * wildcard, and selector anywhere under this holder. */
def getAllImportUsed: mutable.Set[ImportUsed] = {
  val collected = new mutable.HashSet[ImportUsed]
  def collect(element: PsiElement) {
    element.getChildren.foreach {
      case imp: ScImportExpr =>
        if (imp.selectorSet.isEmpty) {
          collected += ImportExprUsed(imp)
        } else if (imp.singleWildcard) {
          collected += ImportWildcardSelectorUsed(imp)
        }
        imp.selectors.foreach(selector => collected += ImportSelectorUsed(selector))
      case other =>
        collect(other)
    }
  }
  collect(this)
  collected
}
/** Import statements in the header of this holder: everything up to the first
  * type definition or explicit packaging. Delegates into an implicit packaging
  * when it appears before any import. */
def importStatementsInHeader: Seq[ScImportStmt] = {
  val collected = new ArrayBuffer[ScImportStmt]
  val children = getChildren
  var i = 0
  while (i < children.length) {
    children(i) match {
      case imp: ScImportStmt =>
        collected += imp
      case pack: ScPackaging if !pack.isExplicit && collected.isEmpty =>
        // implicit packaging before any import: its header is our header
        return pack.importStatementsInHeader
      case _: ScTypeDefinition | _: ScPackaging =>
        // first definition ends the header
        return collected.toSeq
      case _ =>
    }
    i += 1
  }
  collected.toSeq
}
/** Finds the first import statement of this holder, but only when it appears
  * before the top-level child containing `ref` (i.e. it is in the header). */
private def findFirstImportStmt(ref: PsiElement): Option[PsiElement] = {
  def precedesRef(imp: PsiElement): Boolean = {
    // climb from `ref` to the direct child of `this` that contains it
    var refAncestor = ref
    while (refAncestor != null && refAncestor.getParent != this) refAncestor = refAncestor.getParent
    // walk backwards from the import; meeting that ancestor means the import is after `ref`
    var sibling: PsiElement = imp.getPrevSibling
    while (sibling != null && sibling != refAncestor) sibling = sibling.getPrevSibling
    sibling == null
  }
  findChild(classOf[ScImportStmt]).filter(precedesRef)
}
/**
  * Adds an import for `clazz` unless `ref` already resolves to it (directly,
  * or through a non-parameterized type alias whose aliased type is `clazz`).
  *
  * @param clazz the class to import
  * @param ref   optional reference that triggered the import; may be null
  */
def addImportForClass(clazz: PsiClass, ref: PsiElement = null) {
  ref match {
    case ref: ScReferenceElement =>
      // nothing to do for a broken reference or one that already resolves here
      if (!ref.isValid || ref.isReferenceTo(clazz)) return
      ref.bind() match {
        case Some(ScalaResolveResult(t: ScTypeAliasDefinition, subst)) if t.typeParameters.isEmpty =>
          for (tp <- t.aliasedType(TypingContext.empty)) {
            tp match {
              // alias already denotes exactly this class: nonlocal return exits the method
              case ScDesignatorType(c: PsiClass) if c == clazz => return
              case _ =>
            }
          }
        case _ =>
      }
    case _ =>
  }
  addImportForPath(clazz.qualifiedName, ref)
}
/** Adds an import for `elem` (when it has a qualified name) unless `ref`
  * already resolves to it. `cClass` is accepted for API compatibility but
  * unused here. */
def addImportForPsiNamedElement(elem: PsiNamedElement, ref: PsiElement, cClass: Option[PsiClass] = None) {
  ScalaNamesUtil.qualifiedName(elem) match {
    case Some(qualified) =>
      // import is needed when there is no reference yet, or the reference is
      // valid but does not already point at `elem`
      val needed = ref match {
        case null => true
        case r: ScReferenceElement => r.isValid && !r.isReferenceTo(elem)
        case _ => false
      }
      if (needed) addImportForPath(qualified)
    case None =>
  }
}
//todo: Code now looks overcomplicated and logic is separated from ScalaImportOptimizer, rewrite?
def addImportForPath(path: String, ref: PsiElement = null, explicitly: Boolean = false) {
val selectors = new ArrayBuffer[String]
val renamedSelectors = new ArrayBuffer[String]()
val qualifiedName = path
val index = qualifiedName.lastIndexOf('.')
if (index == -1) return //cannot import anything
var classPackageQualifier = qualifiedName.substring(0, index)
val pathQualifier = classPackageQualifier
//collecting selectors to add into new import statement
var firstPossibleGoodPlace: Option[ScImportExpr] = None
val toDelete: ArrayBuffer[ScImportExpr] = new ArrayBuffer[ScImportExpr]()
for (imp <- importStatementsInHeader if !explicitly) {
for (expr: ScImportExpr <- imp.importExprs) {
val qualifier = expr.qualifier
if (qualifier != null) { //in case "import scala" it can be null
val qn = qualifier.resolve() match {
case named: PsiNamedElement => ScalaNamesUtil.qualifiedName(named).getOrElse("")
case _ => ""
}
if (qn == classPackageQualifier) {
expr.getLastChild match {
case s: ScImportSelectors =>
for (selector <- s.selectors) {
if (selector.importedName != selector.reference.refName) {
renamedSelectors += selector.getText
} else {
selectors += selector.getText
}
}
if (s.hasWildcard) selectors += "_"
case _ => selectors ++= expr.getNames
}
firstPossibleGoodPlace match {
case Some(_) =>
toDelete += expr
case _ =>
firstPossibleGoodPlace = Some(expr)
}
}
}
}
}
def getSplitQualifierElement(s: String) = {
val index = s.lastIndexOf('.')
if (index == -1) ("", s)
else (s.substring(0, index), s.substring(index + 1))
}
val settings: ScalaCodeStyleSettings = ScalaCodeStyleSettings.getInstance(getProject)
if (!settings.isCollectImports &&
selectors.length < settings.getClassCountToUseImportOnDemand - 1) {
toDelete.clear()
firstPossibleGoodPlace = None
selectors.clear()
}
//creating selectors string (after last '.' in import expression)
var isPlaceHolderImport = false
val simpleName = path.substring(path.lastIndexOf('.') + 1)
simpleName +=: selectors
val wildcardImport: Boolean = selectors.contains("_") ||
selectors.length >= settings.getClassCountToUseImportOnDemand
if (wildcardImport) {
selectors.clear()
selectors += "_"
isPlaceHolderImport = true
}
val place = getLastChild
@tailrec
def treeWalkUp(completionProcessor: CompletionProcessor, p: PsiElement, lastParent: PsiElement) {
p match {
case null =>
case _ =>
if (!p.processDeclarations(completionProcessor, ResolveState.initial, lastParent, place)) return
treeWalkUp(completionProcessor, p.getContext, p)
}
}
def collectAllCandidates(): mutable.HashMap[String, HashSet[PsiNamedElement]] = {
val candidates = new mutable.HashMap[String, HashSet[PsiNamedElement]]
val everythingProcessor = new CompletionProcessor(StdKinds.stableImportSelector, getLastChild, includePrefixImports = false)
treeWalkUp(everythingProcessor, this, place)
for (candidate <- everythingProcessor.candidates) {
val set = candidates.getOrElse(candidate.name, HashSet.empty[PsiNamedElement])
candidates.update(candidate.name, set + candidate.getElement)
}
candidates
}
val candidatesBefore = collectAllCandidates()
val usedNames = new mutable.HashSet[String]()
this.accept(new ScalaRecursiveElementVisitor {
override def visitReference(reference: ScReferenceElement) {
if (reference == ref) {
super.visitReference(reference)
return
}
reference.qualifier match {
case None if !reference.getContext.isInstanceOf[ScImportSelector] => usedNames += reference.refName
case _ =>
}
super.visitReference(reference)
}
})
for (expr <- toDelete) {
expr.deleteExpr()
}
var importString = (renamedSelectors ++ selectors).distinct match {
case Seq(s) => s
case ss => ss.mkString("{", ", ", "}")
}
val completionProcessor = new CompletionProcessor(StdKinds.packageRef, place, includePrefixImports = false)
treeWalkUp(completionProcessor, this, place)
val names: mutable.HashSet[String] = new mutable.HashSet
val packs: ArrayBuffer[PsiPackage] = new ArrayBuffer
val renamedPackages: mutable.HashMap[PsiPackage, String] = new mutable.HashMap[PsiPackage, String]()
for (candidate <- completionProcessor.candidatesS) {
candidate match {
case r@ScalaResolveResult(pack: PsiPackage, _) =>
if (names.contains(pack.name)) {
var index = packs.indexWhere(_.name == pack.name)
while(index != -1) {
packs.remove(index)
index = packs.indexWhere(_.name == pack.name)
}
} else {
names += pack.name
packs += pack
r.isRenamed match {
case Some(otherName) => renamedPackages += ((pack, otherName))
case _ =>
}
}
case _ =>
}
}
val packages = packs.map(_.getQualifiedName)
val packagesName = packs.map(_.name)
var importSt: ScImportStmt = null
while (importSt == null) {
val (pre, last) = getSplitQualifierElement(classPackageQualifier)
def updateImportStringWith(s: String) {
if (ScalaNamesUtil.isKeyword(s)) importString = "`" + s + "`" + "." + importString
else importString = s + "." + importString
}
if ((!settings.isAddFullQualifiedImports ||
classPackageQualifier.indexOf(".") == -1) &&
packages.contains(classPackageQualifier)) {
val s = packs.find(_.getQualifiedName == classPackageQualifier) match {
case Some(qual) => renamedPackages.get(qual) match {
case Some(r) => r
case _ => last
}
case _ => last
}
updateImportStringWith(s)
importSt = ScalaPsiElementFactory.createImportFromText("import " + importString, getManager)
} else {
updateImportStringWith(last)
if (pre == "") {
if (ScSyntheticPackage.get(classPackageQualifier, getProject) == null ||
packagesName.contains(classPackageQualifier))
importString = "_root_." + importString
importSt = ScalaPsiElementFactory.createImportFromText("import " + importString, getManager)
}
classPackageQualifier = pre
}
}
//cheek all imports under new import to fix problems
if (isPlaceHolderImport) {
val syntheticPackage = ScSyntheticPackage.get(getSplitQualifierElement(qualifiedName)._1, getProject)
val subPackages = if (syntheticPackage != null)
syntheticPackage.getSubPackages(getResolveScope)
else {
val psiPack = ScPackageImpl.findPackage(getProject, getSplitQualifierElement(qualifiedName)._1)
if (psiPack != null) psiPack.getSubPackages(getResolveScope)
else Array[PsiPackage]()
}
def checkImports(element: PsiElement) {
element match {
case expr: ScImportExpr =>
@tailrec
def iterateExpr() {
val qualifier = expr.qualifier
var firstQualifier = qualifier
if (firstQualifier == null || firstQualifier.getText == "_root_") return
while (firstQualifier.qualifier.isDefined) firstQualifier = firstQualifier.qualifier.get
if (subPackages.map(_.name).contains(firstQualifier.getText)) {
var classPackageQualifier = getSplitQualifierElement(firstQualifier.resolve() match {
case pack: PsiPackage => pack.getQualifiedName
case cl: PsiClass => cl.qualifiedName
case _ => return
})._1
var importString = qualifier.getText
var break = true
while (break) {
val (pre, last) = getSplitQualifierElement(classPackageQualifier)
if (last != "") importString = last + "." + importString
if (packages.contains(classPackageQualifier)) {
break = false
} else {
classPackageQualifier = pre
if (classPackageQualifier == "") {
importString = "_root_." + importString
break = false
}
}
}
val newQualifier = ScalaPsiElementFactory.createReferenceFromText(importString, getManager)
qualifier.replace(newQualifier)
iterateExpr()
}
}
iterateExpr()
case _ => for (child <- element.getChildren) checkImports(child)
}
}
if (subPackages.length > 0) {
checkImports(this)
}
}
def tail() {
if (!explicitly) {
val candidatesAfter = collectAllCandidates()
def checkName(s: String) {
if (candidatesBefore.getOrElse(s, HashSet.empty).size < candidatesAfter.getOrElse(s, HashSet.empty).size) {
val pathes = new mutable.HashSet[String]()
//let's try to fix it by adding all before imports explicitly
candidatesBefore.getOrElse(s, HashSet.empty[PsiNamedElement]).foreach {
case c: PsiClass => pathes += c.qualifiedName
case c: PsiNamedElement => pathes ++= ScalaNamesUtil.qualifiedName(c)
}
for (path <- pathes) {
addImportForPath(path, ref, explicitly = true)
}
}
}
if (wildcardImport) {
//check all names
for (name <- usedNames) checkName(name)
} else {
//check only newly imported name
val name = path.split('.').last
if (usedNames.contains(name)) {
checkName(name)
}
}
}
HintManager.getInstance.hideAllHints()
}
firstPossibleGoodPlace match {
case Some(expr) if ref == null || expr.getTextOffset < ref.getTextOffset =>
expr.replace(importSt.importExprs(0))
tail()
return
case _ =>
}
//looking for td import statement to find place which we will use for new import statement
findFirstImportStmt(ref) match {
case Some(x: ScImportStmt) =>
//now we walking throw forward siblings, and seeking appropriate place (lexicographical)
var stmt: PsiElement = x
var prevStmt: ScImportStmt = null
def addImportAfterPrevStmt(ourIndex: Int, prevIndex: Int) {
val before = addImportAfter(importSt, prevStmt)
if (ourIndex > prevIndex) {
var blankLines = ""
var currentGroupIndex = prevIndex
val groups = ScalaCodeStyleSettings.getInstance(getProject).getImportLayout
def iteration() {
currentGroupIndex += 1
while (groups(currentGroupIndex) == ScalaCodeStyleSettings.BLANK_LINE) {
blankLines += "\\n"
currentGroupIndex += 1
}
}
while (currentGroupIndex != -1 && blankLines.isEmpty && currentGroupIndex < ourIndex) iteration()
if (!blankLines.isEmpty) {
val newline = ScalaPsiElementFactory.createNewLineNode(getManager, blankLines)
before.getParent.getNode.addChild(newline, before.getNode)
}
}
}
//this is flag to stop walking when we add import before more big lexicographically import statement
var added = false
def getImportPrefixQualifier(stmt: ScImportStmt): String = {
val importExpr: ScImportExpr = stmt.importExprs.headOption.getOrElse(return "")
ScalaImportOptimizer.getImportInfo(importExpr, _ => true).
fold(Option(importExpr.qualifier).fold("")(_.getText))(_.prefixQualifier)
}
while (!added && stmt != null && (stmt.isInstanceOf[ScImportStmt]
|| stmt.isInstanceOf[PsiWhiteSpace]
|| stmt.getNode.getElementType == ScalaTokenTypes.tSEMICOLON)) {
stmt match {
case im: ScImportStmt =>
def processPackage(elem: PsiElement): Boolean = {
if (classPackageQualifier == "") return true
val completionProcessor = new ResolveProcessor(StdKinds.packageRef, elem,
getSplitQualifierElement(classPackageQualifier)._2)
val place = getLastChild
@tailrec
def treeWalkUp(place: PsiElement, lastParent: PsiElement) {
place match {
case null =>
case p =>
if (!p.processDeclarations(completionProcessor,
ResolveState.initial,
lastParent, place)) return
treeWalkUp(place.getContext, place)
}
}
treeWalkUp(this, place)
completionProcessor.candidatesS.nonEmpty
}
val nextImportContainsRef =
if (ref != null) PsiTreeUtil.isAncestor(im, ref, false) // See SCL-2925
else false
def compare: Boolean = {
val l: String = getImportPrefixQualifier(im)
ScalaImportOptimizer.greater(l, pathQualifier, im.getText, importSt.getText, getProject)
}
val cond2 = compare && processPackage(im)
if (nextImportContainsRef || cond2) {
added = true
val ourIndex = ScalaImportOptimizer.findGroupIndex(pathQualifier, getProject)
val imIndex = ScalaImportOptimizer.findGroupIndex(getImportPrefixQualifier(im), getProject)
val prevIndex =
if (prevStmt == null) -1
else ScalaImportOptimizer.findGroupIndex(getImportPrefixQualifier(prevStmt), getProject)
if (prevIndex != ourIndex) {
addImportBefore(importSt, im)
if (ourIndex < imIndex) {
var blankLines = ""
var currentGroupIndex = ourIndex
val groups = ScalaCodeStyleSettings.getInstance(getProject).getImportLayout
def iteration() {
currentGroupIndex += 1
while (groups(currentGroupIndex) == ScalaCodeStyleSettings.BLANK_LINE) {
blankLines += "\\n"
currentGroupIndex += 1
}
}
while (currentGroupIndex != -1 && blankLines.isEmpty && currentGroupIndex < imIndex) iteration()
if (!blankLines.isEmpty) {
val newline = ScalaPsiElementFactory.createNewLineNode(getManager, blankLines)
im.getParent.getNode.addChild(newline, im.getNode)
}
}
} else addImportAfterPrevStmt(ourIndex, prevIndex)
}
prevStmt = im
case _ =>
}
stmt = stmt.getNextSibling
}
//if our stmt is the biggest lexicographically import statement we add this to the end
if (!added) {
if (prevStmt != null) {
val ourIndex = ScalaImportOptimizer.findGroupIndex(pathQualifier, getProject)
val prevIndex = ScalaImportOptimizer.findGroupIndex(getImportPrefixQualifier(prevStmt), getProject)
addImportAfterPrevStmt(ourIndex, prevIndex)
} else {
addImportAfter(importSt, getLastChild)
}
}
case _ =>
def updateFirst() {
getFirstChild match {
case pack: ScPackaging if !pack.isExplicit => pack.addImportForPath(path, ref, explicitly)
case elem if elem != null => insertFirstImport(importSt, elem)
case _ => addImport(importSt)
}
}
getNode.findChildByType(ScalaTokenTypes.tLBRACE) match {
case null if this.isInstanceOf[ScalaFile] =>
updateFirst()
case null =>
val reference = getNode.findChildByType(ScalaElementTypes.REFERENCE)
if (reference != null) {
reference.getPsi.getNextSibling
addImportAfter(importSt, reference.getPsi)
} else {
updateFirst()
}
case node =>
this match {
case tb: ScTemplateBody => tb.selfTypeElement match {
case Some(te) => addImportAfter(importSt, te)
case _ =>
addImportAfter(importSt, node.getPsi)
}
case _ =>
addImportAfter(importSt, node.getPsi)
}
}
}
tail()
}
  /** Inserts `importSt` before `first`; overridable hook for placing the very first import of a holder. */
  protected def insertFirstImport(importSt: ScImportStmt, first: PsiElement): PsiElement = addBefore(importSt, first)
def addImport(element: PsiElement): PsiElement = {
CodeEditUtil.addChildren(getNode, element.getNode, element.getNode, null).getPsi
}
def addImportBefore(element: PsiElement, anchor: PsiElement): PsiElement = {
val anchorNode = anchor.getNode
CodeEditUtil.addChildren(getNode, element.getNode, element.getNode, anchorNode).getPsi
}
def addImportAfter(element: PsiElement, anchor: PsiElement): PsiElement = {
if (anchor.getNode == getNode.getLastChildNode) return addImport(element)
addImportBefore(element, anchor.getNode.getTreeNext.getPsi)
}
  /** Deletes a single import expression from its statement (e.g. one entry of `import a.b, c.d`). */
  def plainDeleteImport(stmt: ScImportExpr) {
    stmt.deleteExpr()
  }
  /** Deletes a single selector from an import statement (e.g. `b` in `import a.{b, c}`). */
  def plainDeleteSelector(sel: ScImportSelector) {
    sel.deleteSelector()
  }
  /**
   * Removes a whole import statement from this holder, cleaning up the surrounding
   * whitespace and a trailing semicolon so no empty line is left behind.
   */
  def deleteImportStmt(stmt: ScImportStmt) {
    def remove(node: ASTNode) = getNode.removeChild(node)
    // If a whitespace node spans two or more newlines, drop the first newline so
    // that deleting the import does not leave a blank line.
    def shortenWhitespace(node: ASTNode) {
      if (node == null) return
      if (node.getText.count(_ == '\\n') >= 2) {
        val nl = ScalaPsiElementFactory.createNewLine(getManager, node.getText.replaceFirst("[\\n]", ""))
        getNode.replaceChild(node, nl.getNode)
      }
    }
    // Remove a trailing whitespace node entirely when it holds at most one newline,
    // otherwise just shorten it (see above).
    def removeWhitespace(node: ASTNode) {
      if (node == null) return
      if (node.getPsi.isInstanceOf[PsiWhiteSpace]) {
        if (node.getText.count(_ == '\\n') < 2) remove(node)
        else shortenWhitespace(node)
      }
    }
    // An import may be terminated by `;` — delete the semicolon together with the
    // whitespace that follows it.
    def removeSemicolonAndWhitespace(node: ASTNode) {
      if (node == null) return
      if (node.getElementType == ScalaTokenTypes.tSEMICOLON) {
        removeWhitespace(node.getTreeNext)
        remove(node)
      }
      else removeWhitespace(node)
    }
    val node = stmt.getNode
    val next = node.getTreeNext
    val prev = node.getTreePrev
    // Order matters: clean up after the statement, remove it, then tidy what precedes it.
    removeSemicolonAndWhitespace(next)
    remove(node)
    shortenWhitespace(prev)
  }
}
| double-y/translation-idea-plugin | src/org/jetbrains/plugins/scala/lang/psi/ScImportsHolder.scala | Scala | apache-2.0 | 25,883 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.expressions.{Expression, PlanExpression}
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanExec, AdaptiveSparkPlanHelper, QueryStageExec}
object ExplainUtils extends AdaptiveSparkPlanHelper {
  /**
   * Given an input physical plan, performs the following tasks.
   *   1. Computes the operator id for current operator and records it in the operator
   *      by setting a tag.
   *   2. Computes the whole stage codegen id for current operator and records it in the
   *      operator by setting a tag.
   *   3. Generate the two part explain output for this plan.
   *      1. First part explains the operator tree with each operator tagged with an unique
   *         identifier.
   *      2. Second part explains each operator in a verbose manner.
   *
   * Note : This function skips over subqueries. They are handled by its caller.
   *
   * @param plan Input query plan to process
   * @param append function used to append the explain output
   * @param startOperatorID The start value of operation id. The subsequent operations will
   *                        be assigned higher value.
   *
   * @return The last generated operation id for this input plan. This is to ensure we
   *         always assign incrementing unique id to each operator.
   */
  private def processPlanSkippingSubqueries[T <: QueryPlan[T]](
      plan: => QueryPlan[T],
      append: String => Unit,
      startOperatorID: Int): Int = {
    val operationIDs = new mutable.ArrayBuffer[(Int, QueryPlan[_])]()
    var currentOperatorID = startOperatorID
    try {
      currentOperatorID = generateOperatorIDs(plan, currentOperatorID, operationIDs)
      generateWholeStageCodegenIds(plan)
      // First part: the tagged operator tree.
      QueryPlan.append(
        plan,
        append,
        verbose = false,
        addSuffix = false,
        printOperatorId = true)
      append("\\n")
      // Second part: per-operator details, in the order the ids were assigned.
      // The id itself is embedded in verboseStringWithOperatorId, so the tuple's
      // first element is unused here.
      for ((_, curPlan) <- operationIDs) {
        append(curPlan.verboseStringWithOperatorId())
      }
    } catch {
      case e: AnalysisException => append(e.toString)
    }
    currentOperatorID
  }
  /**
   * Given an input physical plan, performs the following tasks.
   *   1. Generates the explain output for the input plan excluding the subquery plans.
   *   2. Generates the explain output for each subquery referenced in the plan.
   */
  def processPlan[T <: QueryPlan[T]](
      plan: => QueryPlan[T],
      append: String => Unit): Unit = {
    try {
      val subqueries = ArrayBuffer.empty[(SparkPlan, Expression, BaseSubqueryExec)]
      var currentOperatorID = 0
      currentOperatorID = processPlanSkippingSubqueries(plan, append, currentOperatorID)
      getSubqueries(plan, subqueries)
      // `i` numbers the subqueries in the output, starting at 1.
      var i = 0
      for (sub <- subqueries) {
        if (i == 0) {
          append("\\n===== Subqueries =====\\n\\n")
        }
        i = i + 1
        append(s"Subquery:$i Hosting operator id = " +
          s"${getOpId(sub._1)} Hosting Expression = ${sub._2}\\n")
        // For each subquery expression in the parent plan, process its child plan to compute
        // the explain output. In case of subquery reuse, we don't print subquery plan more
        // than once. So we skip [[ReusedSubqueryExec]] here.
        if (!sub._3.isInstanceOf[ReusedSubqueryExec]) {
          currentOperatorID = processPlanSkippingSubqueries(
            sub._3.child,
            append,
            currentOperatorID)
        }
        append("\\n")
      }
    } finally {
      // Always clear the tags we set, even if explain generation failed.
      removeTags(plan)
    }
  }
  /**
   * Traverses the supplied input plan in a bottom-up fashion and does the following :
   *    1. produces a map : operator identifier -> operator
   *    2. Records the operator id via setting a tag in the operator.
   * Note :
   *    1. Operator such as WholeStageCodegenExec and InputAdapter are skipped as they don't
   *       appear in the explain output.
   *    2. operator identifier starts at startOperatorID + 1
   * @param plan Input query plan to process
   * @param startOperatorID The start value of operation id. The subsequent operations will
   *                         be assigned higher value.
   * @param operatorIDs A output parameter that contains a map of operator id and query plan. This
   *                    is used by caller to print the detail portion of the plan.
   * @return The last generated operation id for this input plan. This is to ensure we
   *         always assign incrementing unique id to each operator.
   */
  private def generateOperatorIDs(
      plan: QueryPlan[_],
      startOperatorID: Int,
      operatorIDs: mutable.ArrayBuffer[(Int, QueryPlan[_])]): Int = {
    var currentOperationID = startOperatorID
    // Skip the subqueries as they are not printed as part of main query block.
    if (plan.isInstanceOf[BaseSubqueryExec]) {
      return currentOperationID
    }
    plan.foreachUp {
      // These wrappers never appear in the explain output, so they get no id.
      case _: WholeStageCodegenExec =>
      case _: InputAdapter =>
      case other: QueryPlan[_] =>
        def setOpId(): Unit = if (other.getTagValue(QueryPlan.OP_ID_TAG).isEmpty) {
          currentOperationID += 1
          other.setTagValue(QueryPlan.OP_ID_TAG, currentOperationID)
          operatorIDs += ((currentOperationID, other))
        }
        other match {
          case p: AdaptiveSparkPlanExec =>
            currentOperationID =
              generateOperatorIDs(p.executedPlan, currentOperationID, operatorIDs)
            setOpId()
          case p: QueryStageExec =>
            currentOperationID = generateOperatorIDs(p.plan, currentOperationID, operatorIDs)
            setOpId()
          case _ =>
            setOpId()
            // Propagate the ids consumed by inner children; discarding the fold
            // result would let later operators reuse the same ids.
            currentOperationID = other.innerChildren.foldLeft(currentOperationID) {
              (curId, plan) => generateOperatorIDs(plan, curId, operatorIDs)
            }
        }
    }
    currentOperationID
  }
  /**
   * Traverses the supplied input plan in a top-down fashion and records the
   * whole stage code gen id in the plan via setting a tag.
   */
  private def generateWholeStageCodegenIds(plan: QueryPlan[_]): Unit = {
    // -1 means "not currently inside a whole-stage-codegen region".
    var currentCodegenId = -1
    def setCodegenId(p: QueryPlan[_], children: Seq[QueryPlan[_]]): Unit = {
      if (currentCodegenId != -1) {
        p.setTagValue(QueryPlan.CODEGEN_ID_TAG, currentCodegenId)
      }
      children.foreach(generateWholeStageCodegenIds)
    }
    // Skip the subqueries as they are not printed as part of main query block.
    if (plan.isInstanceOf[BaseSubqueryExec]) {
      return
    }
    plan.foreach {
      case p: WholeStageCodegenExec => currentCodegenId = p.codegenStageId
      case _: InputAdapter => currentCodegenId = -1
      case p: AdaptiveSparkPlanExec => setCodegenId(p, Seq(p.executedPlan))
      case p: QueryStageExec => setCodegenId(p, Seq(p.plan))
      case other: QueryPlan[_] => setCodegenId(other, other.innerChildren)
    }
  }
  /**
   * Generate detailed field string with different format based on type of input value.
   * Null and empty strings render as "None"; empty iterables as "[]".
   */
  def generateFieldString(fieldName: String, values: Any): String = values match {
    case iter: Iterable[_] if iter.isEmpty => s"${fieldName}: []"
    case iter: Iterable[_] => s"${fieldName} [${iter.size}]: ${iter.mkString("[", ", ", "]")}"
    // A type pattern (`case str: String`) never matches null, so null needs its own
    // case for the "None" rendering to apply.
    case null => s"${fieldName}: None"
    case str: String if str.isEmpty => s"${fieldName}: None"
    case str: String => s"${fieldName}: ${str}"
    case _ => throw new IllegalArgumentException(s"Unsupported type for argument values: $values")
  }
  /**
   * Given an input plan, returns an array of tuples comprising of :
   *  1. Hosting operator id.
   *  2. Hosting expression
   *  3. Subquery plan
   */
  private def getSubqueries(
      plan: => QueryPlan[_],
      subqueries: ArrayBuffer[(SparkPlan, Expression, BaseSubqueryExec)]): Unit = {
    plan.foreach {
      case a: AdaptiveSparkPlanExec =>
        getSubqueries(a.executedPlan, subqueries)
      case p: SparkPlan =>
        p.expressions.foreach (_.collect {
          case e: PlanExpression[_] =>
            e.plan match {
              case s: BaseSubqueryExec =>
                subqueries += ((p, e, s))
                // Recurse: subqueries may themselves host subqueries.
                getSubqueries(s, subqueries)
              case _ =>
            }
        })
    }
  }
  /**
   * Returns the operator identifier for the supplied plan by retrieving the
   * `operationId` tag value, or "unknown" if the plan was never tagged.
   */
  def getOpId(plan: QueryPlan[_]): String = {
    plan.getTagValue(QueryPlan.OP_ID_TAG).map(v => s"$v").getOrElse("unknown")
  }
  /** Recursively clears the OP_ID / CODEGEN_ID tags set during explain generation. */
  def removeTags(plan: QueryPlan[_]): Unit = {
    def remove(p: QueryPlan[_], children: Seq[QueryPlan[_]]): Unit = {
      p.unsetTagValue(QueryPlan.OP_ID_TAG)
      p.unsetTagValue(QueryPlan.CODEGEN_ID_TAG)
      children.foreach(removeTags)
    }
    plan foreach {
      case p: AdaptiveSparkPlanExec => remove(p, Seq(p.executedPlan))
      case p: QueryStageExec => remove(p, Seq(p.plan))
      case plan: QueryPlan[_] => remove(plan, plan.innerChildren)
    }
  }
}
| witgo/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/ExplainUtils.scala | Scala | apache-2.0 | 9,901 |
/*
* Copyright 2014-2020 Rik van der Kleij
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package intellij.haskell.cabal
import com.intellij.extapi.psi.PsiFileBase
import com.intellij.openapi.fileTypes.FileType
import com.intellij.psi.FileViewProvider
import javax.swing._
import org.jetbrains.annotations.NotNull
/** PSI file representing a `.cabal` package description, parsed with [[CabalLanguage]]. */
class CabalFile(viewProvider: FileViewProvider) extends PsiFileBase(viewProvider, CabalLanguage.Instance) {

  @NotNull
  def getFileType: FileType = CabalFileType.INSTANCE

  override def toString: String = "Cabal file"

  override def getIcon(flags: Int): Icon = super.getIcon(flags)
}
}
| rikvdkleij/intellij-haskell | src/main/scala/intellij/haskell/cabal/CabalFile.scala | Scala | apache-2.0 | 1,147 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.models.lenet
import java.nio.ByteBuffer
import java.nio.file.{Files, Path, Paths}
import com.intel.analytics.bigdl.dataset.ByteRecord
import com.intel.analytics.bigdl.utils.File
import scopt.OptionParser
object Utils {
  // Per-channel mean/std of the MNIST training set, used for input normalization.
  val trainMean = 0.13066047740239506
  val trainStd = 0.3081078

  // Per-channel mean/std of the MNIST test set.
  val testMean = 0.13251460696903547
  val testStd = 0.31048024

  /** Options accepted by the LeNet training example. */
  case class TrainParams(
    folder: String = "./",
    checkpoint: Option[String] = None,
    modelSnapshot: Option[String] = None,
    stateSnapshot: Option[String] = None,
    batchSize: Int = 12,
    learningRate: Double = 0.05,
    learningRateDecay: Double = 0.0,
    maxEpoch: Int = 15,
    coreNumber: Int = -1,
    nodeNumber: Int = -1,
    overWriteCheckpoint: Boolean = false,
    graphModel: Boolean = false
  )

  val trainParser = new OptionParser[TrainParams]("BigDL Lenet Train Example") {
    opt[String]('f', "folder")
      .text("where you put the MNIST data")
      .action((x, c) => c.copy(folder = x))
    // NOTE: declared exactly once — the original registered -b/--batchSize twice.
    opt[Int]('b', "batchSize")
      .text("batch size")
      .action((x, c) => c.copy(batchSize = x))
    opt[String]("model")
      .text("model snapshot location")
      .action((x, c) => c.copy(modelSnapshot = Some(x)))
    opt[String]("state")
      .text("state snapshot location")
      .action((x, c) => c.copy(stateSnapshot = Some(x)))
    opt[String]("checkpoint")
      .text("where to cache the model")
      .action((x, c) => c.copy(checkpoint = Some(x)))
    opt[Double]('r', "learningRate")
      .text("learning rate")
      .action((x, c) => c.copy(learningRate = x))
    opt[Double]('d', "learningRateDecay")
      .text("learning rate decay")
      .action((x, c) => c.copy(learningRateDecay = x))
    opt[Int]('e', "maxEpoch")
      .text("epoch numbers")
      .action((x, c) => c.copy(maxEpoch = x))
    opt[Unit]("overWrite")
      .text("overwrite checkpoint files")
      .action((_, c) => c.copy(overWriteCheckpoint = true))
    opt[Unit]('g', "graphModel")
      .text("use graph model")
      .action((_, c) => c.copy(graphModel = true))
  }

  /** Options accepted by the LeNet evaluation example. */
  case class TestParams(
    folder: String = "./",
    model: String = "",
    batchSize: Int = 128
  )

  val testParser = new OptionParser[TestParams]("BigDL Lenet Test Example") {
    opt[String]('f', "folder")
      .text("where you put the MNIST data")
      .action((x, c) => c.copy(folder = x))
    opt[String]("model")
      .text("model snapshot location")
      .action((x, c) => c.copy(model = x))
      .required()
    opt[Int]('b', "batchSize")
      .text("batch size")
      .action((x, c) => c.copy(batchSize = x))
  }

  /**
   * Load MNIST data in IDX format.
   * Each file is read from HDFS when its own path starts with the hdfs prefix,
   * otherwise from the local file system.
   *
   * @param featureFile path to the IDX image file (magic number 2051)
   * @param labelFile path to the IDX label file (magic number 2049)
   * @return one ByteRecord per image; labels are shifted to be 1-based
   */
  private[bigdl] def load(featureFile: String, labelFile: String): Array[ByteRecord] = {
    def readBytes(path: String): ByteBuffer =
      if (path.startsWith(File.hdfsPrefix)) {
        ByteBuffer.wrap(File.readHdfsByte(path))
      } else {
        ByteBuffer.wrap(Files.readAllBytes(Paths.get(path)))
      }

    val featureBuffer = readBytes(featureFile)
    // Fix: the original chose how to read the *label* file based on the
    // *feature* file's path prefix.
    val labelBuffer = readBytes(labelFile)

    val labelMagicNumber = labelBuffer.getInt()
    require(labelMagicNumber == 2049, s"Invalid label file magic number: $labelMagicNumber")
    val featureMagicNumber = featureBuffer.getInt()
    require(featureMagicNumber == 2051, s"Invalid feature file magic number: $featureMagicNumber")

    val labelCount = labelBuffer.getInt()
    val featureCount = featureBuffer.getInt()
    require(labelCount == featureCount,
      s"Label count $labelCount does not match feature count $featureCount")

    val rowNum = featureBuffer.getInt()
    val colNum = featureBuffer.getInt()

    val result = new Array[ByteRecord](featureCount)
    var i = 0
    while (i < featureCount) {
      val img = new Array[Byte]((rowNum * colNum))
      var y = 0
      while (y < rowNum) {
        var x = 0
        while (x < colNum) {
          img(x + y * colNum) = featureBuffer.get()
          x += 1
        }
        y += 1
      }
      // Labels are shifted from [0, 9] to [1, 10]: BigDL class labels are 1-based.
      result(i) = ByteRecord(img, labelBuffer.get().toFloat + 1.0f)
      i += 1
    }
    result
  }
}
| yiheng/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/models/lenet/Utils.scala | Scala | apache-2.0 | 4,872 |
package core
import io.apibuilder.spec.v0.models.Method
import org.scalatest.{FunSpec, Matchers}
/** Regression specs: array types such as "[string]" must be accepted for fields and parameters. */
class BrokenSpec extends FunSpec with Matchers {

  it("support arrays as types in fields") {
    val apiJson = """
    {
      "name": "API Builder",
      "apidoc": { "version": "0.9.6" },
      "models": {
        "vendor": {
          "fields": [
            { "name": "guid", "type": "uuid" },
            { "name": "tags", "type": "[string]" }
          ]
        }
      }
    }
  """

    val validator = TestHelper.serviceValidatorFromApiJson(apiJson)
    validator.errors().mkString should be("")

    val vendorFields = validator.service().models.head.fields
    vendorFields.find(_.name == "guid").get.`type` should be("uuid")
    vendorFields.find(_.name == "tags").get.`type` should be("[string]")
  }

  it("support arrays as types in operations") {
    val apiJson = """
    {
      "name": "API Builder",
      "apidoc": { "version": "0.9.6" },
      "models": {
        "vendor": {
          "fields": [
            { "name": "guid", "type": "uuid" }
          ]
        }
      },
      "resources": {
        "vendor": {
          "operations": [
            {
              "method": "POST",
              "parameters": [
                { "name": "guid", "type": "uuid" },
                { "name": "tag", "type": "[string]", "required": false }
              ],
              "responses": {
                "200": { "type": "vendor" }
              }
            }
          ]
        }
      }
    }
  """

    val validator = TestHelper.serviceValidatorFromApiJson(apiJson)
    validator.errors().mkString should be("")

    val operation = validator.service().resources.head.operations.head
    operation.method should be(Method.Post)

    val guidParam = operation.parameters.find(_.name == "guid").get
    guidParam.`type` should be("uuid")
    guidParam.required should be(true)

    val tagParam = operation.parameters.find(_.name == "tag").get
    tagParam.`type` should be("[string]")
    tagParam.required should be(false)
  }
}
| gheine/apidoc | core/src/test/scala/core/BrokenSpec.scala | Scala | mit | 2,097 |
package sprawler.crawler.url
import sprawler.crawler.CrawlerSession
import sprawler.CrawlerExceptions.{ UnprocessableUrlException, RedirectLimitReachedException }
import scala.util.{ Success, Failure, Try }
import spray.http.Uri.Empty
import java.util.concurrent.ConcurrentHashMap
trait CheckUrlCrawlability { this: CrawlerUrl =>

  /**
   * Required for figuring out which URLs have been crawled during this crawling
   * session.
   * See [[sprawler.crawler.CrawlerSession.visitedUrls]].
   */
  def session: CrawlerSession

  /**
   * Tests whether this url is on the same domain (host) as the url it was found from.
   * A url with no originating uri is trivially considered same-domain.
   * {{{
   *  val url = CrawlerUrl("https://www.github.com/some/path", "github.com")
   *  url.sameDomain
   *  => true
   *
   *  val url = CrawlerUrl("https://www.github.com/some/path", "google.com")
   *  url.sameDomain
   *  => false
   * }}}
   */
  def sameDomain: Boolean =
    this.fromUri == Empty || this.fromUri.authority.host == this.uri.authority.host

  /** True while redirects remain; a limit of `None` means "unlimited redirects". */
  val hasRedirectsLeft: Boolean = redirectsLeft.forall(_ > 0)

  /** True while the crawl depth budget has not been exhausted. */
  val isWithinDepth: Boolean = depth >= 0

  /** Only http and https URLs are crawlable. */
  val hasValidScheme: Boolean = {
    val scheme = this.uri.scheme
    scheme == "http" || scheme == "https"
  }

  /** A URL without a host part cannot be crawled. */
  val hasValidDomain: Boolean = this.uri.authority.host.toString != ""

  /**
   * May contain false negatives: [[java.util.concurrent.ConcurrentHashMap]] offers no
   * atomic check-then-mark here, so a URL being visited concurrently can briefly
   * report as unvisited.
   */
  def isVisited: Boolean = session.visitedUrls.containsKey(uri.toString())

  /**
   * This method determines whether or not this url can be crawled.
   *
   * A url is crawlable if:
   *  - Domain/Hostnames match
   *  - Scheme is http or https
   *  - The url hasn't already been crawled
   *
   * @return `Success(())` if crawlable, otherwise a `Failure` wrapping a
   *         [[sprawler.CrawlerExceptions.UnprocessableUrlException]] (or a
   *         [[sprawler.CrawlerExceptions.RedirectLimitReachedException]]) describing
   *         why the URL couldn't be crawled.
   */
  def isCrawlable: Try[Unit] = {
    def generateUrlError(message: String): Try[Unit] =
      Failure(
        UnprocessableUrlException(
          fromUrl = this.fromUri.toString(),
          toUrl = this.uri.toString(),
          message = message
        )
      )

    if (!hasValidScheme) {
      generateUrlError(UnprocessableUrlException.MissingHttpPrefix)
    } else if (!hasValidDomain) {
      generateUrlError(UnprocessableUrlException.InvalidDomain)
    } else if (!isWithinDepth) {
      generateUrlError(UnprocessableUrlException.MaxDepthReached)
    } else if (!hasRedirectsLeft) {
      Failure(RedirectLimitReachedException(this.fromUri.toString(), this.uri.toString()))
    } else if (!sameDomain) {
      generateUrlError(UnprocessableUrlException.NotSameOrigin)
    } else if (isVisited) {
      generateUrlError(UnprocessableUrlException.UrlAlreadyCrawled)
    } else {
      // Fix: `Success(())` — the unit value — not `Success(Unit)`, which wraps the
      // `Unit` companion object.
      Success(())
    }
  }
} | daniel-trinh/sprawler | src/main/scala/sprawler/crawler/url/CheckUrlCrawlability.scala | Scala | mit | 3,250 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.local
import java.io.File
import java.net.URL
import java.nio.ByteBuffer
import org.apache.spark.{SparkConf, SparkContext, SparkEnv, TaskState}
import org.apache.spark.TaskState.TaskState
import org.apache.spark.executor.{Executor, ExecutorBackend}
import org.apache.spark.internal.Logging
import org.apache.spark.launcher.{LauncherBackend, SparkAppHandle}
import org.apache.spark.rpc.{RpcCallContext, RpcEndpointRef, RpcEnv, ThreadSafeRpcEndpoint}
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.ExecutorInfo
// Internal messages exchanged with LocalEndpoint. ReviveOffers and StopExecutor carry
// no data and are only ever sent/matched as their companion (`send(ReviveOffers)`,
// `case ReviveOffers =>`), so they are modeled as case objects rather than
// zero-argument case classes.
private case object ReviveOffers

private case class StatusUpdate(taskId: Long, state: TaskState, serializedData: ByteBuffer)

private case class KillTask(taskId: Long, interruptThread: Boolean, reason: String)

private case object StopExecutor
/**
 * Calls to [[LocalSchedulerBackend]] are all serialized through LocalEndpoint. Using an
 * RpcEndpoint makes the calls on [[LocalSchedulerBackend]] asynchronous, which is necessary
 * to prevent deadlock between [[LocalSchedulerBackend]] and the [[TaskSchedulerImpl]].
 */
private[spark] class LocalEndpoint(
    override val rpcEnv: RpcEnv,
    userClassPath: Seq[URL],
    scheduler: TaskSchedulerImpl,
    executorBackend: LocalSchedulerBackend,
    private val totalCores: Int)
  extends ThreadSafeRpcEndpoint with Logging {

  // Number of cores not currently occupied by running tasks.
  private var freeCores = totalCores

  val localExecutorId = SparkContext.DRIVER_IDENTIFIER
  val localExecutorHostname = "localhost"

  // The single in-process executor backing local mode.
  private val executor = new Executor(
    localExecutorId, localExecutorHostname, SparkEnv.get, userClassPath, isLocal = true)

  override def receive: PartialFunction[Any, Unit] = {
    case ReviveOffers =>
      reviveOffers()

    case StatusUpdate(taskId, state, serializedData) =>
      scheduler.statusUpdate(taskId, state, serializedData)
      // A finished task frees its cores, which may allow more tasks to launch.
      if (TaskState.isFinished(state)) {
        freeCores += scheduler.CPUS_PER_TASK
        reviveOffers()
      }

    case KillTask(taskId, interruptThread, reason) =>
      executor.killTask(taskId, interruptThread, reason)
  }

  override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
    case StopExecutor =>
      executor.stop()
      context.reply(true)
  }

  /** Offers the free cores of the local executor to the scheduler and launches granted tasks. */
  def reviveOffers(): Unit = {
    val offer = new WorkerOffer(
      localExecutorId, localExecutorHostname, freeCores, Some(rpcEnv.address.hostPort))
    scheduler.resourceOffers(IndexedSeq(offer)).flatten.foreach { task =>
      freeCores -= scheduler.CPUS_PER_TASK
      executor.launchTask(executorBackend, task)
    }
  }
}
/**
 * Used when running a local version of Spark where the executor, backend, and master all run in
 * the same JVM. It sits behind a [[TaskSchedulerImpl]] and handles launching tasks on a single
 * Executor (created by the [[LocalSchedulerBackend]]) running locally.
 */
private[spark] class LocalSchedulerBackend(
    conf: SparkConf,
    scheduler: TaskSchedulerImpl,
    val totalCores: Int)
  extends SchedulerBackend with ExecutorBackend with Logging {
  // Application id for this local run, e.g. "local-1612345678901".
  private val appId = "local-" + System.currentTimeMillis
  // Endpoint serializing all calls; assigned in start().
  private var localEndpoint: RpcEndpointRef = null
  private val userClassPath = getUserClasspath(conf)
  private val listenerBus = scheduler.sc.listenerBus
  // Bridge to an external launcher process (if any), used to report app id and state.
  private val launcherBackend = new LauncherBackend() {
    override def conf: SparkConf = LocalSchedulerBackend.this.conf
    override def onStopRequest(): Unit = stop(SparkAppHandle.State.KILLED)
  }
  /**
   * Returns a list of URLs representing the user classpath.
   *
   * @param conf Spark configuration.
   */
  def getUserClasspath(conf: SparkConf): Seq[URL] = {
    val userClassPathStr = conf.getOption("spark.executor.extraClassPath")
    userClassPathStr.map(_.split(File.pathSeparator)).toSeq.flatten.map(new File(_).toURI.toURL)
  }
  // Runs during construction, after the fields above have been initialized.
  launcherBackend.connect()
  override def start() {
    val rpcEnv = SparkEnv.get.rpcEnv
    val executorEndpoint = new LocalEndpoint(rpcEnv, userClassPath, scheduler, this, totalCores)
    localEndpoint = rpcEnv.setupEndpoint("LocalSchedulerBackendEndpoint", executorEndpoint)
    // Announce the single local "executor" (the driver itself) to listeners.
    listenerBus.post(SparkListenerExecutorAdded(
      System.currentTimeMillis,
      executorEndpoint.localExecutorId,
      new ExecutorInfo(executorEndpoint.localExecutorHostname, totalCores, Map.empty)))
    launcherBackend.setAppId(appId)
    launcherBackend.setState(SparkAppHandle.State.RUNNING)
  }
  override def stop() {
    stop(SparkAppHandle.State.FINISHED)
  }
  override def reviveOffers() {
    localEndpoint.send(ReviveOffers)
  }
  // Defaults to the number of local cores unless explicitly configured.
  override def defaultParallelism(): Int =
    scheduler.conf.getInt("spark.default.parallelism", totalCores)
  override def killTask(
      taskId: Long, executorId: String, interruptThread: Boolean, reason: String) {
    localEndpoint.send(KillTask(taskId, interruptThread, reason))
  }
  override def statusUpdate(taskId: Long, state: TaskState, serializedData: ByteBuffer) {
    localEndpoint.send(StatusUpdate(taskId, state, serializedData))
  }
  override def applicationId(): String = appId
  override def maxNumConcurrentTasks(): Int = totalCores / scheduler.CPUS_PER_TASK
  // Stops the executor (synchronously via ask) and reports the final state to the launcher.
  private def stop(finalState: SparkAppHandle.State): Unit = {
    localEndpoint.ask(StopExecutor)
    try {
      launcherBackend.setState(finalState)
    } finally {
      launcherBackend.close()
    }
  }
}
| ahnqirage/spark | core/src/main/scala/org/apache/spark/scheduler/local/LocalSchedulerBackend.scala | Scala | apache-2.0 | 6,123 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.canvas
import wvlet.airspec.AirSpec
/**
*/
/**
 * Verifies that the off-heap allocator tracks the total allocated size across
 * allocate/release and frees everything on close.
 */
class OffHeapMemoryAllocatorTest extends AirSpec {
  test("allocate and release memory") {
    val allocator = new OffHeapMemoryAllocator

    val first = allocator.allocate(10)
    allocator.allocatedMemorySize shouldBe 10

    allocator.allocate(100)
    allocator.allocatedMemorySize shouldBe 110

    allocator.release(first)
    allocator.allocatedMemorySize shouldBe 100

    allocator.close()
    allocator.allocatedMemorySize shouldBe 0
  }
}
| wvlet/airframe | airframe-canvas/src/test/scala/wvlet/airframe/canvas/OffHeapMemoryAllocatorTest.scala | Scala | apache-2.0 | 1,010 |
/*
* Distributed as part of Scalala, a linear algebra library.
*
* Copyright (C) 2008- Daniel Ramage
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110 USA
*/
package scalala;
package tensor;
package mutable;
import domain.{Domain1,IndexDomain};
import scalala.scalar.Scalar;
/**
 * Implementation trait for mutable Tensor1Col instances.
 *
 * @tparam K key (index) type; specialized for Int and Long
 * @tparam V value type; specialized for the primitive numeric types
 * @tparam D domain type describing the set of valid keys
 * @tparam This self type of the concrete implementation
 *
 * @author dramage
 */
trait Tensor1ColLike
[@specialized(Int,Long) K, @specialized(Int,Long,Float,Double) V,
 +D<:Domain1[K], +This<:Tensor1Col[K,V]]
extends tensor.Tensor1ColLike[K,V,D,This] with Tensor1Like[K,V,D,This] {
  // Transposing a column yields a row *view* over the same underlying data (no copy).
  override def t : Tensor1Row[K,V] =
    new Tensor1Row.View[K,V](repr);
}
/**
 * Mutable tensor.Tensor1Col.
 *
 * A mutable one-dimensional tensor oriented as a column vector over an
 * arbitrary Domain1 of keys.
 *
 * @author dramage
 */
trait Tensor1Col
[@specialized(Int,Long) K, @specialized(Int,Long,Float,Double) V]
extends tensor.Tensor1Col[K,V] with Tensor1[K,V]
with Tensor1ColLike[K,V,Domain1[K],Tensor1Col[K,V]];
object Tensor1Col {
  /** Constructs a closed-domain tensor for the given domain. */
  def apply[K,V:Scalar](domain : Domain1[K]) : Tensor1Col[K,V] = domain match {
    // Index domains are dense and integer-keyed, so use the array-backed vector.
    case d : IndexDomain => VectorCol(d);
    // Any other domain falls back to the map-backed implementation.
    case _ => new Impl(domain, scala.collection.mutable.Map[K,V]());
  }
  /** Default map-backed implementation over an arbitrary key domain. */
  class Impl[K,V:Scalar](
    override val domain : Domain1[K],
    override protected val data : scala.collection.mutable.Map[K,V])
  extends Tensor.Impl[K,V](domain, data) with Tensor1Col[K,V];
  /** Column view of a row tensor; transposition simply unwraps the inner row. */
  class View[K,V](override val inner : Tensor1Row[K,V])
  extends Tensor1Proxy[K,V,Tensor1Row[K,V]] with Tensor1Col[K,V]
  with Tensor1Like[K,V,Domain1[K],View[K,V]] {
    override def repr : View[K,V] = this;
    override def t : Tensor1Row[K,V] = inner;
  }
}
| scalala/Scalala | src/main/scala/scalala/tensor/mutable/Tensor1Col.scala | Scala | lgpl-2.1 | 2,318 |
package de.zalando.model
import de.zalando.apifirst.Application._
import de.zalando.apifirst.Domain._
import de.zalando.apifirst.ParameterPlace
import de.zalando.apifirst.naming._
import de.zalando.apifirst.Hypermedia._
import de.zalando.apifirst.Http._
import de.zalando.apifirst.Security
import java.net.URL
import Security._
//noinspection ScalaStyle
// Generated API-first model for form_data.yaml (three POST operations:
// /multipart, /url-encoded, /both). References use U+233F "⌿" as a path
// separator. Hand edits are likely to be overwritten by the generator.
object form_data_yaml extends WithModel {
  // Named model types (parameter payloads and 200-response bodies),
  // keyed by their canonical ⌿-separated reference path.
  def types = Map[Reference, Type](
    Reference("⌿paths⌿/multipart⌿post⌿avatar") →
      Opt(File(TypeMeta(None, List())), TypeMeta(None, List())),
    Reference("⌿paths⌿/multipart⌿post⌿name") →
      Str(None, TypeMeta(None, List())),
    Reference("⌿paths⌿/both⌿post⌿ringtone") →
      File(TypeMeta(None, List())),
    Reference("⌿paths⌿/both⌿post⌿year") →
      Opt(BInt(TypeMeta(None, List())), TypeMeta(None, List())),
    Reference("⌿paths⌿/multipart⌿post⌿responses⌿200") →
      TypeDef(Reference("⌿paths⌿/multipart⌿post⌿responses⌿200"),
        Seq(
          Field(Reference("⌿paths⌿/multipart⌿post⌿responses⌿200⌿name"), TypeRef(Reference("⌿paths⌿/both⌿post⌿responses⌿200⌿name"))),
          Field(Reference("⌿paths⌿/multipart⌿post⌿responses⌿200⌿year"), TypeRef(Reference("⌿paths⌿/both⌿post⌿year"))),
          Field(Reference("⌿paths⌿/multipart⌿post⌿responses⌿200⌿fileSize"), TypeRef(Reference("⌿paths⌿/both⌿post⌿year"))),
          Field(Reference("⌿paths⌿/multipart⌿post⌿responses⌿200⌿fileName"), TypeRef(Reference("⌿paths⌿/both⌿post⌿responses⌿200⌿name")))
        ), TypeMeta(Some("Named types: 4"), List())),
    Reference("⌿paths⌿/both⌿post⌿responses⌿200") →
      TypeDef(Reference("⌿paths⌿/both⌿post⌿responses⌿200"),
        Seq(
          Field(Reference("⌿paths⌿/both⌿post⌿responses⌿200⌿name"), TypeRef(Reference("⌿paths⌿/both⌿post⌿responses⌿200⌿name"))),
          Field(Reference("⌿paths⌿/both⌿post⌿responses⌿200⌿year"), TypeRef(Reference("⌿paths⌿/both⌿post⌿year"))),
          Field(Reference("⌿paths⌿/both⌿post⌿responses⌿200⌿avatarSize"), TypeRef(Reference("⌿paths⌿/both⌿post⌿year"))),
          Field(Reference("⌿paths⌿/both⌿post⌿responses⌿200⌿ringtoneSize"), TypeRef(Reference("⌿paths⌿/both⌿post⌿year")))
        ), TypeMeta(Some("Named types: 4"), List())),
    Reference("⌿paths⌿/both⌿post⌿responses⌿200⌿name") →
      Opt(Str(None, TypeMeta(None, List())), TypeMeta(None, List()))
  )
  // formData parameters for all three operations; every parameter uses the
  // ".+" default constraint pattern and url-encoding enabled.
  def parameters = Map[ParameterRef, Parameter](
    ParameterRef( Reference("⌿paths⌿/url-encoded⌿post⌿name")) → Parameter("name", Str(None, TypeMeta(None, List())), None, None, ".+", encode = true, ParameterPlace.withName("formData")),
    ParameterRef( Reference("⌿paths⌿/both⌿post⌿year")) → Parameter("year", TypeRef(Reference("⌿paths⌿/both⌿post⌿year")), None, None, ".+", encode = true, ParameterPlace.withName("formData")),
    ParameterRef( Reference("⌿paths⌿/multipart⌿post⌿name")) → Parameter("name", Str(None, TypeMeta(None, List())), None, None, ".+", encode = true, ParameterPlace.withName("formData")),
    ParameterRef( Reference("⌿paths⌿/url-encoded⌿post⌿year")) → Parameter("year", TypeRef(Reference("⌿paths⌿/both⌿post⌿year")), None, None, ".+", encode = true, ParameterPlace.withName("formData")),
    ParameterRef( Reference("⌿paths⌿/url-encoded⌿post⌿avatar")) → Parameter("avatar", File(TypeMeta(None, List())), None, None, ".+", encode = true, ParameterPlace.withName("formData")),
    ParameterRef( Reference("⌿paths⌿/both⌿post⌿avatar")) → Parameter("avatar", TypeRef(Reference("⌿paths⌿/multipart⌿post⌿avatar")), None, None, ".+", encode = true, ParameterPlace.withName("formData")),
    ParameterRef( Reference("⌿paths⌿/both⌿post⌿ringtone")) → Parameter("ringtone", File(TypeMeta(None, List())), None, None, ".+", encode = true, ParameterPlace.withName("formData")),
    ParameterRef( Reference("⌿paths⌿/both⌿post⌿name")) → Parameter("name", Str(None, TypeMeta(None, List())), None, None, ".+", encode = true, ParameterPlace.withName("formData")),
    ParameterRef( Reference("⌿paths⌿/multipart⌿post⌿avatar")) → Parameter("avatar", TypeRef(Reference("⌿paths⌿/multipart⌿post⌿avatar")), None, None, ".+", encode = true, ParameterPlace.withName("formData")),
    ParameterRef( Reference("⌿paths⌿/multipart⌿post⌿year")) → Parameter("year", TypeRef(Reference("⌿paths⌿/both⌿post⌿year")), None, None, ".+", encode = true, ParameterPlace.withName("formData"))
  )
  def basePath: String = "/form_data"
  // No discriminators, security definitions, or state transitions in this API.
  def discriminators: DiscriminatorLookupTable = Map[Reference, Reference](
  )
  def securityDefinitions: SecurityDefinitionsTable = Map[String, Security.Definition](
  )
  def stateTransitions: StateTransitionsTable = Map[State, Map[State, TransitionProperties]]()
  // One ApiCall per operation; each maps to a handler method on Form_dataYaml
  // and declares consumed/produced mime types plus 200-response typing.
  def calls: Seq[ApiCall] = Seq(
    ApiCall(POST, Path(Reference("⌿multipart")),
      HandlerCall(
        "form_data.yaml",
        "Form_dataYaml",
        instantiate = false,
        "postmultipart",parameters =
        Seq(
          ParameterRef(Reference("⌿paths⌿/multipart⌿post⌿name")),
          ParameterRef(Reference("⌿paths⌿/multipart⌿post⌿year")),
          ParameterRef(Reference("⌿paths⌿/multipart⌿post⌿avatar"))
          )
        ),
      Set(MimeType("multipart/form-data")),
      Set(MimeType("application/json")),
      Map.empty[String, Seq[Class[Exception]]],
      TypesResponseInfo(
        Map[Int, ParameterRef](
          200 -> ParameterRef(Reference("⌿paths⌿/multipart⌿post⌿responses⌿200"))
        ), None),
      StateResponseInfo(
        Map[Int, State](
          200 -> Self
        ), None),
      Set.empty[Security.Constraint]),
    ApiCall(POST, Path(Reference("⌿url-encoded")),
      HandlerCall(
        "form_data.yaml",
        "Form_dataYaml",
        instantiate = false,
        "posturl_encoded",parameters =
        Seq(
          ParameterRef(Reference("⌿paths⌿/url-encoded⌿post⌿name")),
          ParameterRef(Reference("⌿paths⌿/url-encoded⌿post⌿year")),
          ParameterRef(Reference("⌿paths⌿/url-encoded⌿post⌿avatar"))
          )
        ),
      Set(MimeType("application/x-www-form-urlencoded")),
      Set(MimeType("application/json")),
      Map.empty[String, Seq[Class[Exception]]],
      TypesResponseInfo(
        Map[Int, ParameterRef](
          // NOTE(review): reuses the /multipart 200-response type — it appears
          // intentional (both operations return the same shape); confirm
          // against the source form_data.yaml spec.
          200 -> ParameterRef(Reference("⌿paths⌿/multipart⌿post⌿responses⌿200"))
        ), None),
      StateResponseInfo(
        Map[Int, State](
          200 -> Self
        ), None),
      Set.empty[Security.Constraint]),
    ApiCall(POST, Path(Reference("⌿both")),
      HandlerCall(
        "form_data.yaml",
        "Form_dataYaml",
        instantiate = false,
        "postboth",parameters =
        Seq(
          ParameterRef(Reference("⌿paths⌿/both⌿post⌿name")),
          ParameterRef(Reference("⌿paths⌿/both⌿post⌿year")),
          ParameterRef(Reference("⌿paths⌿/both⌿post⌿avatar")),
          ParameterRef(Reference("⌿paths⌿/both⌿post⌿ringtone"))
          )
        ),
      Set(MimeType("application/x-www-form-urlencoded"), MimeType("multipart/form-data")),
      Set(MimeType("application/json")),
      Map.empty[String, Seq[Class[Exception]]],
      TypesResponseInfo(
        Map[Int, ParameterRef](
          200 -> ParameterRef(Reference("⌿paths⌿/both⌿post⌿responses⌿200"))
        ), None),
      StateResponseInfo(
        Map[Int, State](
          200 -> Self
        ), None),
      Set.empty[Security.Constraint]))
  def packageName: Option[String] = Some("form_data.yaml")
  // Assembles the immutable model consumed by the code generator.
  def model = new StrictModel(calls, types, parameters, discriminators, basePath, packageName, stateTransitions, securityDefinitions)
}
} | zalando/play-swagger | play-scala-generator/src/test/scala/model/resources.form_data_yaml.scala | Scala | mit | 7,504 |
/*
* Artificial Intelligence for Humans
* Volume 2: Nature Inspired Algorithms
* Java Version
* http://www.aifh.org
* http://www.jeffheaton.com
*
* Code repository:
* https://github.com/jeffheaton/aifh
*
* Copyright 2014 by Jeff Heaton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more information on Heaton Research copyrights, licenses
* and trademarks visit:
* http://www.heatonresearch.com/copyright
*/
package com.heatonresearch.aifh.examples.selection
import com.heatonresearch.aifh.evolutionary.genome.Genome
import com.heatonresearch.aifh.evolutionary.opp.selection.TournamentSelection
import com.heatonresearch.aifh.evolutionary.population.BasicPopulation
import com.heatonresearch.aifh.evolutionary.train.EvolutionaryAlgorithm
import com.heatonresearch.aifh.evolutionary.train.basic.BasicEA
import com.heatonresearch.aifh.genetic.genome.IntegerArrayGenome
import com.heatonresearch.aifh.learning.MLMethod
import com.heatonresearch.aifh.learning.score.ScoreFunction
import com.heatonresearch.aifh.randomize.GenerateRandom
import com.heatonresearch.aifh.randomize.MersenneTwisterGenerateRandom
/**
* This example shows how the number of rounds affects the average score of the genome selected by
* the tournament selection operator. A population of 1000 genomes is created with each genome having a
* score between 0 and 999. There is one genome for each score. Round counts are tried between one and ten.
* The average score over 100k selections is reported. As the number of rounds increases, so does the average
* score selected.
* <p/>
* Sample output is shown here:
* <p/>
* Rounds: 1, Avg Score: 665
* Rounds: 2, Avg Score: 749
* Rounds: 3, Avg Score: 800
* Rounds: 4, Avg Score: 833
* Rounds: 5, Avg Score: 856
* Rounds: 6, Avg Score: 874
* Rounds: 7, Avg Score: 888
* Rounds: 8, Avg Score: 899
* Rounds: 9, Avg Score: 908
* Rounds: 10, Avg Score: 915
*/
object TournamentCompareExample {
  /**
   * Entry point: builds a population of 1000 genomes with adjusted scores
   * 0..999 (one genome per score), then for tournament round counts 1..10
   * reports the average score of the winner over 100,000 selections.
   *
   * Fixes vs. the previous version: uses explicit `: Unit =` instead of the
   * deprecated procedure syntax, adds genomes through the `species` handle
   * consistently (the original mixed `species` with `pop.speciesList(0)`;
   * they refer to the same species, since the original selected from
   * `species` after populating `pop.speciesList(0)`), and accumulates in a
   * Long to rule out overflow if counts are ever increased.
   */
  def main(args: Array[String]): Unit = {
    val pop = new BasicPopulation
    val species = pop.createSpecies
    for (i <- 0 until 1000) {
      val genome: Genome = new IntegerArrayGenome(1)
      genome.score = i
      genome.adjustedScore = i
      species.add(genome)
    }
    val rnd: GenerateRandom = new MersenneTwisterGenerateRandom
    // The trainer exists only to parameterize TournamentSelection; its score
    // function is never invoked here, hence the trivial implementation.
    val train: EvolutionaryAlgorithm = new BasicEA(pop, new ScoreFunction {
      def calculateScore(method: MLMethod): Double = 0
      def shouldMinimize: Boolean = false
    })
    val trials = 100000
    for (roundCount <- 1 to 10) {
      val selection = new TournamentSelection(train, roundCount)
      var total = 0L // Long: max possible sum 999 * 100000 fits Int, but stays safe if scaled up
      for (_ <- 0 until trials) {
        val genomeID = selection.performSelection(rnd, species)
        total += species.members(genomeID).adjustedScore.toInt
      }
      // Truncating integer division matches the original report format.
      println("Rounds: " + roundCount + ", Avg Score: " + (total / trials))
    }
  }
}
/*
* Copyright (c) 2014 Contributor. All rights reserved.
*/
package methodsInheritance
// Fixture trait exposing three member kinds: a concrete val, a
// parameterless def, and an empty-paren def.
trait Base {
  val foo = 1   // concrete val member
  def bar = 2   // parameterless method
  def baz() = 3 // empty-paren method
}
class Methods extends Base // inherits foo/bar/baz from Base without overriding
// Overrides every member of Base, preserving each member kind.
class Overrides extends Base {
  override val foo = 11
  override def bar = 12
  override def baz() = 13
}
// Accesses inherited and overridden members; all results are discarded.
// NOTE(review): these bare accesses appear to exist purely to exercise
// tooling (e.g. semantic highlighting of inherited vs. overridden
// members) — confirm before "cleaning up" the discarded values.
object MethodsInheritance {
  val a = new Methods
  a.foo
  a.bar
  a.baz()
  val b = new Overrides
  b.foo
  b.bar
  b.baz()
}
package scife
package enumeration
package common.enumdef
import scife.{ enumeration => e }
import util._
import scife.util.logging._
import scife.util._
import org.scalatest._
import org.scalatest.prop._
import org.scalatest.Matchers._
import org.scalacheck.Gen
import scala.language.postfixOps
object RedBlackTreeEnum {
import Checks._
import structures._
import RedBlackTrees._
import memoization.MemoizationScope
  // constructs enumerator for "simple" red-black trees
  /**
   * Memoized dependent enumerator of red-black trees indexed by
   * (node count, key range, allowed root colors, black height).
   * Color `true` acts as black: a true root decrements the remaining black
   * height and allows children of either color, while a false (red) root
   * keeps the height and forces black children.
   */
  def constructEnumeratorBenchmarkVersion_1 = {
    import RedBlackTrees._
    import dependent._
    val colorsProducer = Depend.memoized(
      (set: Set[Boolean]) => { e.WrapArray(set.toArray) })
    val treesOfSize: Depend[(Int, Range, Set[Boolean], Int), Tree] = Depend.memoized(
      (self: Depend[(Int, Range, Set[Boolean], Int), Tree], pair: (Int, Range, Set[Boolean], Int)) => {
        val (size, range, colors, blackHeight) = pair
        // NOTE(review): `range.size >= size && range.size < 0` can never hold
        // (Range.size is non-negative), so only `blackHeight < 0` prunes here;
        // likely intended `range.size < size` — confirm.
        if (range.size >= size && range.size < 0 || blackHeight < 0) e.Empty
        // An empty tree is a single black leaf contributing black height 1.
        else if (size == 0 && blackHeight == 1 && colors.contains(true)) e.Singleton(Leaf)
        // else if (size == 1 && blackHeight == 1 && colors.contains(false)) e.WrapArray(range map { v => Node(Leaf, v, Leaf, false) })
        // else if (size == 1 && blackHeight == 2 && colors.contains(true)) e.WrapArray(range map { v => Node(Leaf, v, Leaf, true) })
        // else if (size == 1) e.WrapArray(range map { v => Node(Leaf, v, Leaf, true) })
        else if (size > 0 && blackHeight >= 1) {
          // Enumerate (left subtree size, root key, root color) combinations.
          val roots = e.Enum(range)
          val leftSizes = e.WrapArray(0 until size)
          val rootColors = colorsProducer(colors)
          val rootLeftSizePairs = e.Product(leftSizes, roots)
          val rootLeftSizeColorTuples = e.Product(rootLeftSizePairs, rootColors)
          // Left subtree: keys strictly below the root key.
          val leftTrees: Depend[((Int, Int), Boolean), Tree] = InMap(self, { (par: ((Int, Int), Boolean)) =>
            val ((leftSize, median), rootColor) = par
            val childColors = if (rootColor) Set(true, false) else Set(true)
            val childBlackHeight = if (rootColor) blackHeight - 1 else blackHeight
            (leftSize, range.start to (median - 1), childColors, childBlackHeight)
          })
          // Right subtree: remaining nodes, keys strictly above the root key.
          val rightTrees: Depend[((Int, Int), Boolean), Tree] = InMap(self, { (par: ((Int, Int), Boolean)) =>
            val ((leftSize, median), rootColor) = par
            val childColors = if (rootColor) Set(true, false) else Set(true)
            val childBlackHeight = if (rootColor) blackHeight - 1 else blackHeight
            (size - leftSize - 1, (median + 1) to range.end, childColors, childBlackHeight)
          })
          val leftRightPairs: Depend[((Int, Int), Boolean), (Tree, Tree)] =
            Product(leftTrees, rightTrees)
          // Join each (leftSize, root, color) choice with its subtree pairs.
          val allNodes =
            memoization.Chain[((Int, Int), Boolean), (Tree, Tree), Node](rootLeftSizeColorTuples, leftRightPairs,
              (p1: ((Int, Int), Boolean), p2: (Tree, Tree)) => {
                val (((leftSize, currRoot), rootColor), (leftTree, rightTree)) = (p1, p2)
                Node(leftTree, currRoot, rightTree, rootColor)
              })
          allNodes
        } else e.Empty
      })
    treesOfSize
  }
  /**
   * Same enumerator as [[constructEnumeratorBenchmarkVersion_1]] but built in
   * an implicit [[MemoizationScope]] and with internal sanity assertions on
   * the constructed nodes (used for testing rather than timing).
   */
  def constructEnumeratorBenchmarkTest(implicit ms: MemoizationScope) = {
    import dependent._
    import memoization._
    // val rootProducer = Depend(
    // (range: Range) => {
    // e.WrapArray(range)
    // })
    val colorsProducer = Depend.memoized(
      (set: Set[Boolean]) => { e.WrapArray(set.toArray) })
    // val sizeProducer = Depend(
    // (size: Int) => {
    // e.WrapArray(0 until size)
    // })
    val treesOfSize: Depend[(Int, Range, Set[Boolean], Int), Tree] = Depend.memoized(
      (self: Depend[(Int, Range, Set[Boolean], Int), Tree], pair: (Int, Range, Set[Boolean], Int)) => {
        val (size, range, colors, blackHeight) = pair
        // NOTE(review): `range.size >= size && range.size < 0` can never hold
        // (Range.size is non-negative); guard effectively reduces to
        // `blackHeight < 0` — likely intended `range.size < size`. Confirm.
        if (range.size >= size && range.size < 0 || blackHeight < 0) e.Empty
        // Empty tree = single black leaf of black height 1.
        else if (size == 0 && blackHeight == 1 && colors.contains(true)) e.Singleton(Leaf)
        // else if (size == 1 && blackHeight == 1 && colors.contains(false)) e.WrapArray(range map { v => Node(Leaf, v, Leaf, false) })
        // else if (size == 1 && blackHeight == 2 && colors.contains(true)) e.WrapArray(range map { v => Node(Leaf, v, Leaf, true) })
        // else if (size == 1) e.WrapArray(range map { v => Node(Leaf, v, Leaf, true) })
        else if (size > 0 && blackHeight >= 1) {
          val roots = e.Enum(range)
          val leftSizes = e.WrapArray(0 until size)
          val rootColors = colorsProducer(colors)
          val rootLeftSizePairs = e.Product(leftSizes, roots)
          val rootLeftSizeColorTuples = e.Product(rootLeftSizePairs, rootColors)
          // Black root (true) lowers remaining black height and allows any
          // child color; red root keeps height and forces black children.
          val leftTrees: Depend[((Int, Int), Boolean), Tree] = InMap(self, { (par: ((Int, Int), Boolean)) =>
            val ((leftSize, median), rootColor) = par
            val childColors = if (rootColor) Set(true, false) else Set(true)
            val childBlackHeight = if (rootColor) blackHeight - 1 else blackHeight
            (leftSize, range.start to (median - 1), childColors, childBlackHeight)
          })
          val rightTrees: Depend[((Int, Int), Boolean), Tree] = InMap(self, { (par: ((Int, Int), Boolean)) =>
            val ((leftSize, median), rootColor) = par
            val childColors = if (rootColor) Set(true, false) else Set(true)
            val childBlackHeight = if (rootColor) blackHeight - 1 else blackHeight
            (size - leftSize - 1, (median + 1) to range.end, childColors, childBlackHeight)
          })
          val leftRightPairs: Depend[((Int, Int), Boolean), (Tree, Tree)] =
            Product(leftTrees, rightTrees)
          val allNodes =
            memoization.Chain[((Int, Int), Boolean), (Tree, Tree), Node](rootLeftSizeColorTuples, leftRightPairs,
              (p1: ((Int, Int), Boolean), p2: (Tree, Tree)) => {
                val (((leftSize, currRoot), rootColor), (leftTree, rightTree)) = (p1, p2)
                // Sanity checks: subtree sizes must be consistent with `size`.
                assert(!(size >= 2 && leftSize == 0 && size - leftSize - 1 == 0))
                assert(!(size >= 2 && leftTree == Leaf && rightTree == Leaf))
                assert(!(leftSize > 0 && leftTree == Leaf), "leftSize=%d, leftTree=Leaf".format(leftSize))
                Node(leftTree, currRoot, rightTree, rootColor)
              })
          allNodes
        } else e.Empty
      })
    treesOfSize
  }
  /**
   * Current benchmark variant: identical structure to
   * [[constructEnumeratorBenchmarkTest]] but without the per-node assertions
   * and using `e.dependent._` combinators directly.
   */
  def constructEnumerator_currentBenchmark(implicit ms: MemoizationScope) = {
    import e.dependent._
    val colorsProducer = Depend.memoized(
      (set: Set[Boolean]) => { e.WrapArray(set.toArray) })
    val treesOfSize: Depend[(Int, Range, Set[Boolean], Int), Tree] = Depend.memoized(
      (self: Depend[(Int, Range, Set[Boolean], Int), Tree], pair: (Int, Range, Set[Boolean], Int)) => {
        val (size, range, colors, blackHeight) = pair
        // NOTE(review): `range.size >= size && range.size < 0` is always false
        // (Range.size >= 0), so only `blackHeight < 0` prunes here; likely
        // intended `range.size < size` — confirm (same in sibling variants).
        if (range.size >= size && range.size < 0 || blackHeight < 0) e.Empty
        // Empty tree = single black leaf of black height 1.
        else if (size == 0 && blackHeight == 1 && colors.contains(true)) e.Singleton(Leaf)
        // else if (size == 1 && blackHeight == 1 && colors.contains(false)) e.WrapArray(range map { v => Node(Leaf, v, Leaf, false) })
        // else if (size == 1 && blackHeight == 2 && colors.contains(true)) e.WrapArray(range map { v => Node(Leaf, v, Leaf, true) })
        // else if (size == 1) e.WrapArray(range map { v => Node(Leaf, v, Leaf, true) })
        else if (size > 0 && blackHeight >= 1) {
          val roots = e.Enum(range)
          val leftSizes = e.WrapArray(0 until size)
          val rootColors = colorsProducer(colors)
          val rootLeftSizePairs = e.Product(leftSizes, roots)
          val rootLeftSizeColorTuples = e.Product(rootLeftSizePairs, rootColors)
          // Black root (true): children may be either color, black height - 1.
          // Red root (false): children must be black, black height unchanged.
          val leftTrees: Depend[((Int, Int), Boolean), Tree] = InMap(self, { (par: ((Int, Int), Boolean)) =>
            val ((leftSize, median), rootColor) = par
            val childColors = if (rootColor) Set(true, false) else Set(true)
            val childBlackHeight = if (rootColor) blackHeight - 1 else blackHeight
            (leftSize, range.start to (median - 1), childColors, childBlackHeight)
          })
          val rightTrees: Depend[((Int, Int), Boolean), Tree] = InMap(self, { (par: ((Int, Int), Boolean)) =>
            val ((leftSize, median), rootColor) = par
            val childColors = if (rootColor) Set(true, false) else Set(true)
            val childBlackHeight = if (rootColor) blackHeight - 1 else blackHeight
            (size - leftSize - 1, (median + 1) to range.end, childColors, childBlackHeight)
          })
          val leftRightPairs: Depend[((Int, Int), Boolean), (Tree, Tree)] =
            Product(leftTrees, rightTrees)
          val allNodes =
            memoization.Chain[((Int, Int), Boolean), (Tree, Tree), Node](rootLeftSizeColorTuples, leftRightPairs,
              (p1: ((Int, Int), Boolean), p2: (Tree, Tree)) => {
                val (((leftSize, currRoot), rootColor), (leftTree, rightTree)) = (p1, p2)
                Node(leftTree, currRoot, rightTree, rootColor)
              })
          allNodes
        } else e.Empty
      })
    treesOfSize
  }
  /**
   * Variant encoding colors as an Int Range instead of Set[Boolean]:
   * 1 behaves as black, 0 as red; `0 to 1` means "either color" and
   * `1 to 1` means "black only". Avoids the colorsProducer indirection.
   */
  def constructEnumerator_new(implicit ms: MemoizationScope) = {
    import e.dependent._
    val treesOfSize: Depend[(Int, Range, Range, Int), Tree] = Depend.memoized(
      (self: Depend[(Int, Range, Range, Int), Tree], pair: (Int, Range, Range, Int)) => {
        val (size, range, colors, blackHeight) = pair
        // NOTE(review): `range.size >= size && range.size < 0` is always false
        // (Range.size >= 0); likely intended `range.size < size` — confirm.
        if (range.size >= size && range.size < 0 || blackHeight < 0) e.Empty
        // colors.end >= 1 means black (1) is among the allowed colors.
        else if (size == 0 && blackHeight == 1 && colors.end >= 1) e.Singleton(Leaf)
        else if (size > 0 && blackHeight >= 1) {
          val roots: Finite[Int] = e.Enum(range)
          val leftSizes: Finite[Int] = e.WrapArray(0 until size)
          val rootColors: Finite[Int] = e.WrapArray(colors.toArray)
          val rootLeftSizePairs = e.Product(leftSizes, roots)
          val rootLeftSizeColorTuples: Finite[((Int, Int), Int)] = e.Product(rootLeftSizePairs, rootColors)
          // Black root (1): children either color (0 to 1), black height - 1.
          // Red root (0): children black only (1 to 1), black height unchanged.
          val leftTrees: Depend[((Int, Int), Int), Tree] = InMap(self, { (par: ((Int, Int), Int)) =>
            val ((leftSize, median), rootColor) = par
            val childColors = if (rootColor == 1) 0 to 1 else 1 to 1
            val childBlackHeight = if (rootColor == 1) blackHeight - 1 else blackHeight
            (leftSize, range.start to (median - 1), childColors, childBlackHeight)
          })
          val rightTrees: Depend[((Int, Int), Int), Tree] = InMap(self, { (par: ((Int, Int), Int)) =>
            val ((leftSize, median), rootColor) = par
            val childColors = if (rootColor == 1) 0 to 1 else 1 to 1
            val childBlackHeight = if (rootColor == 1) blackHeight - 1 else blackHeight
            (size - leftSize - 1, (median + 1) to range.end, childColors, childBlackHeight)
          })
          val leftRightPairs: Depend[((Int, Int), Int), (Tree, Tree)] =
            Product(leftTrees, rightTrees)
          val allNodes =
            memoization.Chain[((Int, Int), Int), (Tree, Tree), Node](rootLeftSizeColorTuples, leftRightPairs,
              (p1: ((Int, Int), Int), p2: (Tree, Tree)) => {
                val (((leftSize, currRoot), rootColor), (leftTree, rightTree)) = (p1, p2)
                Node(leftTree, currRoot, rightTree, rootColor == 1)
              })
          allNodes
        } else e.Empty
      })
    treesOfSize
  }
  /**
   * Condensed form of [[constructEnumerator_new]]: same Int-encoded colors
   * (1 = black, 0 = red) and identical enumeration, written with positional
   * tuple accessors instead of destructuring vals.
   */
  def constructEnumerator_concise(implicit ms: MemoizationScope) = {
    import e.dependent._
    val treesOfSize: Depend[(Int, Range, Range, Int), Tree] = Depend.memoized(
      (self: Depend[(Int, Range, Range, Int), Tree], pair: (Int, Range, Range, Int)) => {
        val (size, range, colors, blackHeight) = pair
        // NOTE(review): `range.size >= size && range.size < 0` is always false
        // (Range.size >= 0); likely intended `range.size < size` — confirm.
        if (range.size >= size && range.size < 0 || blackHeight < 0) e.Empty
        // colors.end >= 1: black (1) is an allowed color for the empty tree.
        else if (size == 0 && blackHeight == 1 && colors.end >= 1) e.Singleton(Leaf)
        else if (size > 0 && blackHeight >= 1) {
          val roots = e.Enum(range)
          val leftSizes = e.WrapArray(0 until size)
          val rootColors = e.WrapArray(colors.toArray)
          // Tuples: ((leftSubtreeSize, rootKey), rootColor).
          val rootLeftSizeColorTuples = e.Product(e.Product(leftSizes, roots), rootColors)
          // par._1._1 = left size, par._1._2 = root key, par._2 = root color.
          val leftTrees = InMap(self, { (par: ((Int, Int), Int)) =>
            val childColors = if (par._2 == 1) 0 to 1 else 1 to 1
            val childBlackHeight = if (par._2 == 1) blackHeight - 1 else blackHeight
            (par._1._1, range.start to (par._1._2 - 1), childColors, childBlackHeight)
          })
          val rightTrees = InMap(self, { (par: ((Int, Int), Int)) =>
            val childColors = if (par._2 == 1) 0 to 1 else 1 to 1
            val childBlackHeight = if (par._2 == 1) blackHeight - 1 else blackHeight
            (size - par._1._1 - 1, (par._1._2 + 1) to range.end, childColors, childBlackHeight)
          })
          val leftRightPairs = Product(leftTrees, rightTrees)
          memoization.Chain[((Int, Int), Int), (Tree, Tree), Node](rootLeftSizeColorTuples, leftRightPairs,
            (p1: ((Int, Int), Int), p2: (Tree, Tree)) => Node(p2._1, p1._1._2, p2._2, p1._2 == 1)
          )
        } else e.Empty
      })
    treesOfSize
  }
//
// // constructs enumerator for red-black trees with operations
// def constructEnumeratorOtherType = {
// import RedBlackTreeWithOperations._
//
// val colorsProducer = new WrapFunctionFin(
// (set: Set[Boolean]) => { new WrapArray(set.toArray) })
//
// val treesOfSize = new WrapFunctionFin(
// (self: MemberDependFinite[(Int, Range, Set[Boolean], Int), Tree], pair: (Int, Range, Set[Boolean], Int)) => {
// val (size, range, colors, blackHeight) = pair
//
// if (range.size >= size && range.size < 0 || blackHeight < 0) new Empty: MemberFinite[Tree]
// else if (size == 0 && blackHeight == 1 && colors.contains(true)) new Singleton(Leaf): MemberFinite[Tree]
// else if (size > 0 && blackHeight >= 1) {
// val roots = new WrapRange(range)
// val leftSizes = new WrapArray(0 until size toArray)
// val rootColors = colorsProducer(colors)
//
// val rootLeftSizePairs = new member.ProductFinite(leftSizes, roots)
// val rootLeftSizeColorTuples = new member.ProductFinite(rootLeftSizePairs, rootColors)
//
// val leftTrees = new InMapFin(self, { (par: ((Int, Int), Boolean)) =>
// val ((leftSize, median), rootColor) = par
// val childColors = if (rootColor) Set(true, false) else Set(true)
// val childBlackHeight = if (rootColor) blackHeight - 1 else blackHeight
// (leftSize, range.start to (median - 1), childColors, childBlackHeight)
// })
//
// val rightTrees = new InMapFin(self, { (par: ((Int, Int), Boolean)) =>
// val ((leftSize, median), rootColor) = par
// val childColors = if (rootColor) Set(true, false) else Set(true)
// val childBlackHeight = if (rootColor) blackHeight - 1 else blackHeight
// (size - leftSize - 1, (median + 1) to range.end, childColors, childBlackHeight)
// })
//
// val leftRightPairs =
// Product(leftTrees, rightTrees)
//
// val allNodes = new ChainFinite(rootLeftSizeColorTuples, leftRightPairs)
//
// val makeTree =
// (p: (((Int, Int), Boolean), (Tree, Tree))) => {
// val (((leftSize, currRoot), rootColor), (leftTree, rightTree)) = p
//
// assert(!(size >= 2 && leftSize == 0 && size - leftSize - 1 == 0))
// assert(!(size >= 2 && leftTree == Leaf && rightTree == Leaf))
// assert(!(leftSize > 0 && leftTree == Leaf), "leftSize=%d, leftTree=Leaf".format(leftSize))
// Node(rootColor, leftTree, currRoot, rightTree)
// }
//
// val invertTree = {
// (p: Tree) =>
// {
// val Node(rootColor, leftTree, currRoot, rightTree) = p.asInstanceOf[Node]
//
// (((RedBlackTrees.size(leftTree), currRoot), rootColor), (leftTree, rightTree))
// }
// }
//
// new Map[(((Int, Int), Boolean), (Tree, Tree)), Tree](allNodes, makeTree, invertTree) with MemberFinite[Tree]: MemberFinite[Tree]
// } else new Empty: MemberFinite[Tree]
// })
//
// treesOfSize
// }
//
// def constructEnumeratorOtherTypeMemoized = {
// import RedBlackTreeWithOperations._
// import dependent._
//
// val colorsProducer = new WrapFunctionFin(
// (set: Set[Boolean]) => { new WrapArray(set.toArray) })
//
// val treesOfSize = new WrapFunctionFin(
// (self: MemberDependFinite[(Int, Range, Set[Boolean], Int), Tree],
// pair: (Int, Range, Set[Boolean], Int)) => {
// val (size, range, colors, blackHeight) = pair
//
// if (range.size >= size && range.size < 0 || blackHeight < 0) new Empty: MemberFinite[Tree]
// else if (size == 0 && blackHeight == 1 && colors.contains(true)) new Singleton(Leaf): MemberFinite[Tree]
// else if (size > 0 && blackHeight >= 1) {
// val roots = new WrapRange(range)
// val leftSizes = new WrapArray(0 until size toArray)
// val rootColors = colorsProducer(colors)
//
// val rootLeftSizePairs = new member.ProductFinite(leftSizes, roots)
// val rootLeftSizeColorTuples = new member.ProductFinite(rootLeftSizePairs, rootColors)
//
// val leftTrees = new InMapFin(self, { (par: ((Int, Int), Boolean)) =>
// val ((leftSize, median), rootColor) = par
// val childColors = if (rootColor) Set(true, false) else Set(true)
// val childBlackHeight = if (rootColor) blackHeight - 1 else blackHeight
// (leftSize, range.start to (median - 1), childColors, childBlackHeight)
// })
//
// val rightTrees = new InMapFin(self, { (par: ((Int, Int), Boolean)) =>
// val ((leftSize, median), rootColor) = par
// val childColors = if (rootColor) Set(true, false) else Set(true)
// val childBlackHeight = if (rootColor) blackHeight - 1 else blackHeight
// (size - leftSize - 1, (median + 1) to range.end, childColors, childBlackHeight)
// })
//
// val leftRightPairs =
// Product(leftTrees, rightTrees)
//
// val allNodes = new ChainFinite(rootLeftSizeColorTuples, leftRightPairs)
//
// val makeTree =
// (p: (((Int, Int), Boolean), (Tree, Tree))) => {
// val (((leftSize, currRoot), rootColor), (leftTree, rightTree)) = p
//
// assert(!(size >= 2 && leftSize == 0 && size - leftSize - 1 == 0))
// assert(!(size >= 2 && leftTree == Leaf && rightTree == Leaf))
// assert(!(leftSize > 0 && leftTree == Leaf), "leftSize=%d, leftTree=Leaf".format(leftSize))
// Node(rootColor, leftTree, currRoot, rightTree)
// }
//
// val invertTree = {
// (p: Tree) =>
// {
// val Node(rootColor, leftTree, currRoot, rightTree) = p.asInstanceOf[Node]
//
// (((RedBlackTrees.size(leftTree), currRoot), rootColor), (leftTree, rightTree))
// }
// }
//
// new Map[(((Int, Int), Boolean), (Tree, Tree)), Tree](allNodes, makeTree, invertTree) with member.memoization.Memoized[Tree] with e.memoization.Memoized[Tree] with MemberFinite[Tree]: MemberFinite[Tree]
// } else new Empty: MemberFinite[Tree]
// }) with e.memoization.dependent.Memoized[(Int, Range, Set[Boolean], Int), Tree]
//
// treesOfSize
// }
// def constructEnumeratorOtherTypeMemoizedBlackHeight = {
// import RedBlackTreeWithOperations._
//
// val colorsProducer = new WrapFunctionFin(
// (set: Set[Boolean]) => { new WrapArray(set.toArray) })
//
// val treesOfSize = new WrapFunctionFin(
// (self: MemberDependFinite[(Int, Range, Set[Boolean], Int), Tree], pair: (Int, Range, Set[Boolean], Int)) => {
// val (size, range, colors, blackHeight) = pair
//
// if (range.size >= size && range.size < 0 || blackHeight < 0) new Empty: MemberFinite[Tree]
// else if (size == 0 && blackHeight == 1 && colors.contains(true)) new Singleton(Leaf): MemberFinite[Tree]
// else if (size > 0 && blackHeight >= 1) {
// val roots = new WrapRange(range)
// val leftSizes = new WrapArray(0 until size toArray)
// val rootColors = colorsProducer(colors)
//
// val rootLeftSizePairs = new member.ProductFinite(leftSizes, roots)
// val rootLeftSizeColorTuples = new member.ProductFinite(rootLeftSizePairs, rootColors)
//
// val leftTrees = new InMapFin(self, { (par: ((Int, Int), Boolean)) =>
// val ((leftSize, median), rootColor) = par
// val childColors = if (rootColor) Set(true, false) else Set(true)
// val childBlackHeight = if (rootColor) blackHeight - 1 else blackHeight
// (leftSize, range.start to (median - 1), childColors, childBlackHeight)
// })
//
// val rightTrees = new InMapFin(self, { (par: ((Int, Int), Boolean)) =>
// val ((leftSize, median), rootColor) = par
// val childColors = if (rootColor) Set(true, false) else Set(true)
// val childBlackHeight = if (rootColor) blackHeight - 1 else blackHeight
// (size - leftSize - 1, (median + 1) to range.end, childColors, childBlackHeight)
// })
//
// val leftRightPairs =
// Product(leftTrees, rightTrees)
//
// val allNodes = new ChainFinite(rootLeftSizeColorTuples, leftRightPairs)
//
// val makeTree =
// (p: (((Int, Int), Boolean), (Tree, Tree))) => {
// val (((leftSize, currRoot), rootColor), (leftTree, rightTree)) = p
//
// assert(!(size >= 2 && leftSize == 0 && size - leftSize - 1 == 0))
// assert(!(size >= 2 && leftTree == Leaf && rightTree == Leaf))
// assert(!(leftSize > 0 && leftTree == Leaf), "leftSize=%d, leftTree=Leaf".format(leftSize))
// Node(rootColor, leftTree, currRoot, rightTree)
// }
//
// val invertTree = {
// (p: Tree) =>
// {
// val Node(rootColor, leftTree, currRoot, rightTree) = p.asInstanceOf[Node]
//
// (((RedBlackTrees.size(leftTree), currRoot), rootColor), (leftTree, rightTree))
// }
// }
//
// new Map[(((Int, Int), Boolean), (Tree, Tree)), Tree](allNodes, makeTree, invertTree) with MemberFinite[Tree] with e.memoization.Memoized[Tree] with Memoized[Tree]: MemberFinite[Tree]
// } else new Empty: MemberFinite[Tree]
// }) with e.memoization.dependent.Memoized[(Int, Range, Set[Boolean], Int), Tree]
//
// treesOfSize
// }
//
// def constructEnumeratorNormal = {
// import e.dependent._
//
// val colorsProducer = Depend(
// (set: Set[Boolean]) => { e.WrapArray(set.toArray) })
//
// val treesOfSize: Depend[(Int, Range, Set[Boolean], Int), Tree] = Depend(
// (self: Depend[(Int, Range, Set[Boolean], Int), Tree], pair: (Int, Range, Set[Boolean], Int)) => {
// val (size, range, colors, blackHeight) = pair
//
// if (range.size >= size && range.size < 0 || blackHeight < 0) e.Empty
// else if (size == 0 && blackHeight == 1 && colors.contains(true)) e.Singleton(Leaf)
// // else if (size == 1 && blackHeight == 1 && colors.contains(false)) e.WrapArray(range map { v => Node(Leaf, v, Leaf, false) })
// // else if (size == 1 && blackHeight == 2 && colors.contains(true)) e.WrapArray(range map { v => Node(Leaf, v, Leaf, true) })
// // else if (size == 1) e.WrapArray(range map { v => Node(Leaf, v, Leaf, true) })
// else if (size > 0 && blackHeight >= 1) {
// val roots = e.Enum(range)
// val leftSizes = e.WrapArray(0 until size)
// val rootColors = colorsProducer(colors)
//
// val rootLeftSizePairs = e.Product(leftSizes, roots)
// val rootLeftSizeColorTuples = e.Product(rootLeftSizePairs, rootColors)
//
// val leftTrees: Depend[((Int, Int), Boolean), Tree] = InMap(self, { (par: ((Int, Int), Boolean)) =>
// val ((leftSize, median), rootColor) = par
// val childColors = if (rootColor) Set(true, false) else Set(true)
// val childBlackHeight = if (rootColor) blackHeight - 1 else blackHeight
// (leftSize, range.start to (median - 1), childColors, childBlackHeight)
// })
//
// val rightTrees: Depend[((Int, Int), Boolean), Tree] = InMap(self, { (par: ((Int, Int), Boolean)) =>
// val ((leftSize, median), rootColor) = par
// val childColors = if (rootColor) Set(true, false) else Set(true)
// val childBlackHeight = if (rootColor) blackHeight - 1 else blackHeight
// (size - leftSize - 1, (median + 1) to range.end, childColors, childBlackHeight)
// })
//
// val leftRightPairs: Depend[((Int, Int), Boolean), (Tree, Tree)] =
// Product(leftTrees, rightTrees)
//
// val allNodes =
// Chain[((Int, Int), Boolean), (Tree, Tree), Node](rootLeftSizeColorTuples, leftRightPairs,
// (p1: ((Int, Int), Boolean), p2: (Tree, Tree)) => {
// val (((leftSize, currRoot), rootColor), (leftTree, rightTree)) = (p1, p2)
//
// assert(!(size >= 2 && leftSize == 0 && size - leftSize - 1 == 0))
// assert(!(size >= 2 && leftTree == Leaf && rightTree == Leaf))
// assert(!(leftSize > 0 && leftTree == Leaf), "leftSize=%d, leftTree=Leaf".format(leftSize))
// Node(leftTree, currRoot, rightTree, rootColor)
// })
//
// allNodes
// } else e.Empty
// })
//
// treesOfSize
// }
}
| kaptoxic/SciFe | src/test/scala/scife/enumeration/common/enumdef/RedBlackTreeEnum.scala | Scala | gpl-2.0 | 26,515 |
package inloopio.math.algebra
/**
 * A matrix-like structure that can be traversed as a sequence of [[MatrixSlice]]s and
 * multiplied by a [[Vector]].
 */
trait VectorIterable extends Iterable[MatrixSlice] {
  // Iterator over every slice of the matrix (as opposed to the inherited `iterator`,
  // which implementations may restrict — NOTE(review): confirm against implementations).
  def iterateAll: Iterator[MatrixSlice]
  // Number of slices this iterable exposes.
  def numSlices: Int
  // Number of rows of the underlying matrix.
  def numRows: Int
  // Number of columns of the underlying matrix.
  def numCols: Int
  /**
   * Return a new vector with cardinality equal to getNumRows() of this matrix which is the matrix product of the
   * recipient and the argument
   *
   * @param v a vector with cardinality equal to getNumCols() of the recipient
   * @return a new vector (typically a DenseVector)
   * @throws CardinalityException if this.getNumRows() != v.size()
   */
  def times(v: Vector): Vector
  /**
   * Convenience method for producing this.transpose().times(this.times(v)), which can be implemented with only one pass
   * over the matrix, without making the transpose() call (which can be expensive if the matrix is sparse)
   *
   * @param v a vector with cardinality equal to getNumCols() of the recipient
   * @return a new vector (typically a DenseVector) with cardinality equal to that of the argument.
   * @throws CardinalityException if this.getNumCols() != v.size()
   */
  def timesSquared(v: Vector): Vector
}
| dcaoyuan/inloopio-libs | inloopio-math/src/main/scala/inloopio/math/algebra/VectorIterable.scala | Scala | bsd-3-clause | 1,113 |
package net.ruippeixotog.structs
// Runs the shared PriorityQueueSpec test suite against the SkewBinomialQueue implementation.
class SkewBinomialQueueSpec extends PriorityQueueSpec[SkewBinomialQueue] {
  // Human-readable name used by the shared spec when labelling test output.
  def queueName = "skew binomial queue"
}
| ruippeixotog/functional-brodal-queues | src/test/scala/net/ruippeixotog/structs/SkewBinomialQueueSpec.scala | Scala | mit | 151 |
/*
* Copyright 2010-2011 Vilius Normantas <code@norma.lt>
*
* This file is part of Crossbow library.
*
* Crossbow is free software: you can redistribute it and/or modify it under the terms of the GNU
* General Public License as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* Crossbow is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
* even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with Crossbow. If not,
* see <http://www.gnu.org/licenses/>.
*/
package lt.norma.crossbow.indicators
import lt.norma.crossbow.core._
import org.scalatest.FunSuite
/** Exercises the full lifecycle of a [[VariableSignal]]: name, dependencies, and direction changes. */
class VariableSignalTest extends FunSuite {
  test("Variable signal") {
    val signal = new VariableSignal
    // A freshly created signal reports its default name, no dependencies, and a flat position.
    expect("Variable Signal") { signal.name }
    expect(Set.empty) { signal.dependencies }
    assert(signal.isFlat)
    // Setting a direction is reflected immediately.
    signal.set(Direction.Long)
    assert(signal.isLong)
    signal.set(Direction.Short)
    assert(signal.isShort)
    // Unsetting returns the signal to the flat state.
    signal.unset()
    assert(signal.isFlat)
  }
}
| ViliusN/Crossbow | crossbow-core/test/lt/norma/crossbow/indicators/VariableSignalTest.scala | Scala | gpl-3.0 | 1,257 |
package com.twitter.finagle
import _root_.java.net.{InetSocketAddress, SocketAddress}
import com.twitter.concurrent.Broker
import com.twitter.conversions.time._
import com.twitter.finagle.client._
import com.twitter.finagle.dispatch.{SerialServerDispatcher, PipeliningDispatcher}
import com.twitter.finagle.memcached.protocol.text.{
MemcachedClientPipelineFactory, MemcachedServerPipelineFactory}
import com.twitter.finagle.memcached.protocol.{Command, Response}
import com.twitter.finagle.memcached.{Client => MClient, Server => MServer, _}
import com.twitter.finagle.netty3._
import com.twitter.finagle.pool.ReusingPool
import com.twitter.finagle.server._
import com.twitter.finagle.stats.{ClientStatsReceiver, StatsReceiver}
import com.twitter.finagle.util.DefaultTimer
import com.twitter.hashing.KeyHasher
import com.twitter.util.Duration
/** Adds rich-client constructors to any memcached protocol [[Client]]. */
trait MemcachedRichClient { self: Client[Command, Response] =>
  /** Builds a rich memcached client over the given group of socket addresses. */
  def newRichClient(group: Group[SocketAddress]): memcached.Client = {
    val service = newClient(group).toService
    memcached.Client(service)
  }
  /** Builds a rich memcached client from a destination string resolved to a group. */
  def newRichClient(group: String): memcached.Client = {
    val service = newClient(group).toService
    memcached.Client(service)
  }
}
/**
 * Mixin providing constructors for Ketama (consistent-hashing) memcached clients,
 * which distribute keys across a ring of cache nodes.
 */
trait MemcachedKetamaClient {
  // TODO(review): the overloads name the same flag differently (`useFailureAccrual` vs
  // `ejectFailedHost`); renaming would break named-argument callers, so only noting it here.
  /**
   * Resolves `group` (a Resolver destination string) and builds a Ketama client over it.
   *
   * @param useFailureAccrual when true, repeatedly failing hosts are temporarily ejected
   *                          from the hash ring using the default failure-accrual params
   */
  def newKetamaClient(group: String, keyHasher: KeyHasher = KeyHasher.KETAMA, useFailureAccrual: Boolean = true): memcached.Client = {
    newKetamaClient(Resolver.resolve(group)(), keyHasher, useFailureAccrual)
  }
  /**
   * Builds a Ketama client over the given group of addresses.
   *
   * @param keyHasher      hash function used to place keys on the ring
   * @param ejectFailedHost when false, failure accrual is effectively disabled
   *                        (Int.MaxValue failures / zero duration)
   */
  def newKetamaClient(
    group: Group[SocketAddress],
    keyHasher: KeyHasher,
    ejectFailedHost: Boolean
  ): memcached.Client = {
    // Normalize plain socket addresses into CacheNodes with unit weight.
    val cacheNodes = group collect {
      case node: CacheNode => node
      case addr: InetSocketAddress => new CacheNode(addr.getHostName, addr.getPort, 1)
    }
    val faParams =
      if (ejectFailedHost) MemcachedFailureAccrualClient.DefaultFailureAccrualParams
      else (Int.MaxValue, Duration.Zero)
    new KetamaClient(
      cacheNodes,
      keyHasher,
      KetamaClient.DefaultNumReps,
      faParams,
      None,
      ClientStatsReceiver.scope("memcached_client")
    )
  }
}
// Netty3 transporter configured with the memcached text-protocol client pipeline.
object MemcachedTransporter extends Netty3Transporter[Command, Response](
  "memcached", MemcachedClientPipelineFactory)
// Default memcached client: pipelining dispatcher over the memcached transporter, with a
// connection-reusing pool, plus the rich-client and Ketama construction mixins.
object MemcachedClient extends DefaultClient[Command, Response](
  name = "memcached",
  endpointer = Bridge[Command, Response, Command, Response](
    MemcachedTransporter, new PipeliningDispatcher(_)),
  pool = (sr: StatsReceiver) => new ReusingPool(_, sr)
) with MemcachedRichClient with MemcachedKetamaClient
// Factory for memcached clients wired with Ketama-aware failure accrual.
private[finagle] object MemcachedFailureAccrualClient {
  // Default: mark a node dead after 5 consecutive failures, revive after 30 seconds.
  val DefaultFailureAccrualParams = (5, 30.seconds)
  /**
   * Creates a client whose failure accrual publishes node-health events for `key`
   * on `broker`, so the Ketama ring can eject/revive the node.
   */
  def apply(
    key: KetamaClientKey,
    broker: Broker[NodeHealth],
    failureAccrualParams: (Int, Duration) = DefaultFailureAccrualParams
  ): Client[Command, Response] with MemcachedRichClient = {
    new MemcachedFailureAccrualClient(key, broker, failureAccrualParams)
  }
}
/**
 * Memcached client variant whose failure-accrual layer is a [[KetamaFailureAccrualFactory]]:
 * in addition to marking the endpoint dead, it reports node health for `key` on `broker`.
 *
 * @param failureAccrualParams (number of consecutive failures, revival delay)
 */
private[finagle] class MemcachedFailureAccrualClient(
  key: KetamaClientKey,
  broker: Broker[NodeHealth],
  failureAccrualParams: (Int, Duration)
) extends DefaultClient[Command, Response](
  name = "memcached",
  endpointer = Bridge[Command, Response, Command, Response](
    MemcachedTransporter, new PipeliningDispatcher(_)),
  pool = (sr: StatsReceiver) => new ReusingPool(_, sr),
  failureAccrual = {
    new KetamaFailureAccrualFactory(
      _,
      failureAccrualParams._1,
      failureAccrualParams._2,
      DefaultTimer.twitter, key, broker)
  }
) with MemcachedRichClient
// Netty3 listener configured with the memcached text-protocol server pipeline.
object MemcachedListener extends Netty3Listener[Response, Command](
  "memcached", MemcachedServerPipelineFactory)
// Default memcached server: serial dispatcher (one request at a time per connection).
object MemcachedServer extends DefaultServer[Command, Response, Response, Command](
  "memcached", MemcachedListener, new SerialServerDispatcher(_, _)
)
// Protocol entry point: delegates client construction to MemcachedClient and serving to
// MemcachedServer, while exposing the rich-client and Ketama construction mixins.
object Memcached extends Client[Command, Response] with MemcachedRichClient with MemcachedKetamaClient with Server[Command, Response] {
  def newClient(group: Group[SocketAddress]): ServiceFactory[Command, Response] =
    MemcachedClient.newClient(group)
  def serve(addr: SocketAddress, service: ServiceFactory[Command, Response]): ListeningServer =
    MemcachedServer.serve(addr, service)
}
| stevegury/finagle | finagle-memcached/src/main/scala/com/twitter/finagle/Memcached.scala | Scala | apache-2.0 | 4,100 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataInputStream, DataOutputStream, File, IOException}
import java.security.PrivilegedExceptionAction
import java.text.DateFormat
import java.util.{Arrays, Date, Locale}
import scala.collection.JavaConverters._
import scala.collection.immutable.Map
import scala.collection.mutable
import scala.collection.mutable.HashMap
import scala.util.control.NonFatal
import com.google.common.primitives.Longs
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.security.{Credentials, UserGroupInformation}
import org.apache.hadoop.security.token.{Token, TokenIdentifier}
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.BUFFER_SIZE
import org.apache.spark.util.Utils
/**
* Contains util methods to interact with Hadoop from Spark.
*/
private[spark] class SparkHadoopUtil extends Logging {
  // Minimal SparkConf (no defaults) populated from system properties; used only to build
  // this instance's default Hadoop configuration below.
  private val sparkConf = new SparkConf(false).loadFromSystemProperties(true)
  val conf: Configuration = newConfiguration(sparkConf)
  UserGroupInformation.setConfiguration(conf)
  /**
   * Runs the given function with a Hadoop UserGroupInformation as a thread local variable
   * (distributed to child threads), used for authenticating HDFS and YARN calls.
   *
   * IMPORTANT NOTE: If this function is going to be called repeated in the same process
   * you need to look https://issues.apache.org/jira/browse/HDFS-3545 and possibly
   * do a FileSystem.closeAllForUGI in order to avoid leaking Filesystems
   */
  def runAsSparkUser(func: () => Unit): Unit = {
    createSparkUser().doAs(new PrivilegedExceptionAction[Unit] {
      def run: Unit = func()
    })
  }
  // Builds a remote-user UGI for the current Spark user and copies the current login
  // user's credentials into it.
  def createSparkUser(): UserGroupInformation = {
    val user = Utils.getCurrentUserName()
    logDebug("creating UGI for user: " + user)
    val ugi = UserGroupInformation.createRemoteUser(user)
    transferCredentials(UserGroupInformation.getCurrentUser(), ugi)
    ugi
  }
  // Copies all credentials held by `source` into `dest`.
  def transferCredentials(source: UserGroupInformation, dest: UserGroupInformation): Unit = {
    dest.addCredentials(source.getCredentials())
  }
  /**
   * Appends S3-specific, spark.hadoop.*, and spark.buffer.size configurations to a Hadoop
   * configuration.
   */
  def appendS3AndSparkHadoopHiveConfigurations(
      conf: SparkConf,
      hadoopConf: Configuration): Unit = {
    SparkHadoopUtil.appendS3AndSparkHadoopHiveConfigurations(conf, hadoopConf)
  }
  /**
   * Appends spark.hadoop.* configurations from a [[SparkConf]] to a Hadoop
   * configuration without the spark.hadoop. prefix.
   */
  def appendSparkHadoopConfigs(conf: SparkConf, hadoopConf: Configuration): Unit = {
    SparkHadoopUtil.appendSparkHadoopConfigs(conf, hadoopConf)
  }
  /**
   * Appends spark.hadoop.* configurations from a Map to another without the spark.hadoop. prefix.
   */
  def appendSparkHadoopConfigs(
      srcMap: Map[String, String],
      destMap: HashMap[String, String]): Unit = {
    // Copy any "spark.hadoop.foo=bar" system properties into destMap as "foo=bar"
    for ((key, value) <- srcMap if key.startsWith("spark.hadoop.")) {
      destMap.put(key.substring("spark.hadoop.".length), value)
    }
  }
  // Like appendSparkHadoopConfigs, but for "spark.hive.*" keys; note only the leading
  // "spark." is stripped, so the destination keys keep their "hive." prefix.
  def appendSparkHiveConfigs(
      srcMap: Map[String, String],
      destMap: HashMap[String, String]): Unit = {
    // Copy any "spark.hive.foo=bar" system properties into destMap as "hive.foo=bar"
    for ((key, value) <- srcMap if key.startsWith("spark.hive.")) {
      destMap.put(key.substring("spark.".length), value)
    }
  }
  /**
   * Return an appropriate (subclass) of Configuration. Creating config can initialize some Hadoop
   * subsystems.
   */
  def newConfiguration(conf: SparkConf): Configuration = {
    val hadoopConf = SparkHadoopUtil.newConfiguration(conf)
    // Overlay the gateway's Hadoop config file, if the launcher created one.
    hadoopConf.addResource(SparkHadoopUtil.SPARK_HADOOP_CONF_FILE)
    hadoopConf
  }
  /**
   * Add any user credentials to the job conf which are necessary for running on a secure Hadoop
   * cluster.
   */
  def addCredentials(conf: JobConf): Unit = {
    val jobCreds = conf.getCredentials()
    jobCreds.mergeAll(UserGroupInformation.getCurrentUser().getCredentials())
  }
  // Adds the given credentials to the currently logged-in user.
  def addCurrentUserCredentials(creds: Credentials): Unit = {
    UserGroupInformation.getCurrentUser.addCredentials(creds)
  }
  // Performs a Kerberos login with the given principal and keytab; fails fast with a
  // SparkException if the keytab file does not exist locally.
  def loginUserFromKeytab(principalName: String, keytabFilename: String): Unit = {
    if (!new File(keytabFilename).exists()) {
      throw new SparkException(s"Keytab file: ${keytabFilename} does not exist")
    } else {
      logInfo("Attempting to login to Kerberos " +
        s"using principal: ${principalName} and keytab: ${keytabFilename}")
      UserGroupInformation.loginUserFromKeytab(principalName, keytabFilename)
    }
  }
  /**
   * Add or overwrite current user's credentials with serialized delegation tokens,
   * also confirms correct hadoop configuration is set.
   */
  private[spark] def addDelegationTokens(tokens: Array[Byte], sparkConf: SparkConf): Unit = {
    UserGroupInformation.setConfiguration(newConfiguration(sparkConf))
    val creds = deserialize(tokens)
    logInfo("Updating delegation tokens for current user.")
    logDebug(s"Adding/updating delegation tokens ${dumpTokens(creds)}")
    addCurrentUserCredentials(creds)
  }
  /**
   * Returns a function that can be called to find Hadoop FileSystem bytes read. If
   * getFSBytesReadOnThreadCallback is called from thread r at time t, the returned callback will
   * return the bytes read on r since t.
   */
  private[spark] def getFSBytesReadOnThreadCallback(): () => Long = {
    val f = () => FileSystem.getAllStatistics.asScala.map(_.getThreadStatistics.getBytesRead).sum
    // Remember the calling thread's id and starting byte count so later reads can be
    // reported relative to this baseline.
    val baseline = (Thread.currentThread().getId, f())
    /**
     * This function may be called in both spawned child threads and parent task thread (in
     * PythonRDD), and Hadoop FileSystem uses thread local variables to track the statistics.
     * So we need a map to track the bytes read from the child threads and parent thread,
     * summing them together to get the bytes read of this task.
     */
    new Function0[Long] {
      private val bytesReadMap = new mutable.HashMap[Long, Long]()
      override def apply(): Long = {
        bytesReadMap.synchronized {
          bytesReadMap.put(Thread.currentThread().getId, f())
          // Subtract the baseline only for the thread that created the callback.
          bytesReadMap.map { case (k, v) =>
            v - (if (k == baseline._1) baseline._2 else 0)
          }.sum
        }
      }
    }
  }
  /**
   * Returns a function that can be called to find Hadoop FileSystem bytes written. If
   * getFSBytesWrittenOnThreadCallback is called from thread r at time t, the returned callback will
   * return the bytes written on r since t.
   *
   * @return None if the required method can't be found.
   */
  private[spark] def getFSBytesWrittenOnThreadCallback(): () => Long = {
    val threadStats = FileSystem.getAllStatistics.asScala.map(_.getThreadStatistics)
    val f = () => threadStats.map(_.getBytesWritten).sum
    val baselineBytesWritten = f()
    () => f() - baselineBytesWritten
  }
  /**
   * Get [[FileStatus]] objects for all leaf children (files) under the given base path. If the
   * given path points to a file, return a single-element collection containing [[FileStatus]] of
   * that file.
   */
  def listLeafStatuses(fs: FileSystem, basePath: Path): Seq[FileStatus] = {
    listLeafStatuses(fs, fs.getFileStatus(basePath))
  }
  /**
   * Get [[FileStatus]] objects for all leaf children (files) under the given base path. If the
   * given path points to a file, return a single-element collection containing [[FileStatus]] of
   * that file.
   */
  def listLeafStatuses(fs: FileSystem, baseStatus: FileStatus): Seq[FileStatus] = {
    def recurse(status: FileStatus): Seq[FileStatus] = {
      val (directories, leaves) = fs.listStatus(status.getPath).partition(_.isDirectory)
      leaves ++ directories.flatMap(f => listLeafStatuses(fs, f))
    }
    if (baseStatus.isDirectory) recurse(baseStatus) else Seq(baseStatus)
  }
  // Convenience overload that resolves `basePath` to a FileStatus first.
  def listLeafDirStatuses(fs: FileSystem, basePath: Path): Seq[FileStatus] = {
    listLeafDirStatuses(fs, fs.getFileStatus(basePath))
  }
  // Returns the statuses of all leaf directories (directories containing no sub-directories)
  // under `baseStatus`, which must itself be a directory.
  def listLeafDirStatuses(fs: FileSystem, baseStatus: FileStatus): Seq[FileStatus] = {
    def recurse(status: FileStatus): Seq[FileStatus] = {
      // `files` is intentionally unused: only the directory structure matters here.
      val (directories, files) = fs.listStatus(status.getPath).partition(_.isDirectory)
      val leaves = if (directories.isEmpty) Seq(status) else Seq.empty[FileStatus]
      leaves ++ directories.flatMap(dir => listLeafDirStatuses(fs, dir))
    }
    assert(baseStatus.isDirectory)
    recurse(baseStatus)
  }
  // True if the path contains any glob metacharacter ({, }, [, ], *, ?, or backslash).
  def isGlobPath(pattern: Path): Boolean = {
    pattern.toString.exists("{}[]*?\\\\".toSet.contains)
  }
  // Expands a glob pattern against the pattern's own file system (derived from `conf`).
  def globPath(pattern: Path): Seq[Path] = {
    val fs = pattern.getFileSystem(conf)
    globPath(fs, pattern)
  }
  // Expands a glob pattern on the given file system, returning fully-qualified paths.
  // Returns an empty Seq when nothing matches (globStatus returns null in that case).
  def globPath(fs: FileSystem, pattern: Path): Seq[Path] = {
    Option(fs.globStatus(pattern)).map { statuses =>
      statuses.map(_.getPath.makeQualified(fs.getUri, fs.getWorkingDirectory)).toSeq
    }.getOrElse(Seq.empty[Path])
  }
  // Globs only when the path actually contains glob characters; otherwise returns it as-is.
  def globPathIfNecessary(pattern: Path): Seq[Path] = {
    if (isGlobPath(pattern)) globPath(pattern) else Seq(pattern)
  }
  def globPathIfNecessary(fs: FileSystem, pattern: Path): Seq[Path] = {
    if (isGlobPath(pattern)) globPath(fs, pattern) else Seq(pattern)
  }
  /**
   * Lists all the files in a directory with the specified prefix, and does not end with the
   * given suffix. The returned {{FileStatus}} instances are sorted by the modification times of
   * the respective files.
   */
  def listFilesSorted(
      remoteFs: FileSystem,
      dir: Path,
      prefix: String,
      exclusionSuffix: String): Array[FileStatus] = {
    try {
      val fileStatuses = remoteFs.listStatus(dir,
        new PathFilter {
          override def accept(path: Path): Boolean = {
            val name = path.getName
            name.startsWith(prefix) && !name.endsWith(exclusionSuffix)
          }
        })
      Arrays.sort(fileStatuses, (o1: FileStatus, o2: FileStatus) =>
        Longs.compare(o1.getModificationTime, o2.getModificationTime))
      fileStatuses
    } catch {
      // Best-effort listing: log and return an empty array rather than fail the caller.
      case NonFatal(e) =>
        logWarning("Error while attempting to list files from application staging dir", e)
        Array.empty
    }
  }
  // Extracts the integer counter suffix from a credentials file name of the form
  // "<name><SPARK_YARN_CREDS_COUNTER_DELIM><counter>".
  private[spark] def getSuffixForCredentialsPath(credentialsPath: Path): Int = {
    val fileName = credentialsPath.getName
    fileName.substring(
      fileName.lastIndexOf(SparkHadoopUtil.SPARK_YARN_CREDS_COUNTER_DELIM) + 1).toInt
  }
  // Unanchored pattern matching "${hadoopconf-<key>}" placeholders inside a string.
  private val HADOOP_CONF_PATTERN = "(\\\\$\\\\{hadoopconf-[^\\\\}\\\\$\\\\s]+\\\\})".r.unanchored
  /**
   * Substitute variables by looking them up in Hadoop configs. Only variables that match the
   * ${hadoopconf- .. } pattern are substituted.
   */
  def substituteHadoopVariables(text: String, hadoopConf: Configuration): String = {
    text match {
      case HADOOP_CONF_PATTERN(matched) =>
        logDebug(text + " matched " + HADOOP_CONF_PATTERN)
        val key = matched.substring(13, matched.length() - 1) // remove ${hadoopconf- .. }
        val eval = Option[String](hadoopConf.get(key))
          .map { value =>
            logDebug("Substituted " + matched + " with " + value)
            text.replace(matched, value)
          }
        if (eval.isEmpty) {
          // The variable was not found in Hadoop configs, so return text as is.
          text
        } else {
          // Continue to substitute more variables.
          substituteHadoopVariables(eval.get, hadoopConf)
        }
      case _ =>
        logDebug(text + " didn't match " + HADOOP_CONF_PATTERN)
        text
    }
  }
  /**
   * Dump the credentials' tokens to string values.
   *
   * @param credentials credentials
   * @return an iterator over the string values. If no credentials are passed in: an empty list
   */
  private[spark] def dumpTokens(credentials: Credentials): Iterable[String] = {
    if (credentials != null) {
      credentials.getAllTokens.asScala.map(tokenToString)
    } else {
      Seq.empty
    }
  }
  /**
   * Convert a token to a string for logging.
   * If its an abstract delegation token, attempt to unmarshall it and then
   * print more details, including timestamps in human-readable form.
   *
   * @param token token to convert to a string
   * @return a printable string value.
   */
  private[spark] def tokenToString(token: Token[_ <: TokenIdentifier]): String = {
    val df = DateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.SHORT, Locale.US)
    val buffer = new StringBuilder(128)
    buffer.append(token.toString)
    try {
      val ti = token.decodeIdentifier
      buffer.append("; ").append(ti)
      ti match {
        case dt: AbstractDelegationTokenIdentifier =>
          // include human times and the renewer, which the HDFS tokens toString omits
          buffer.append("; Renewer: ").append(dt.getRenewer)
          buffer.append("; Issued: ").append(df.format(new Date(dt.getIssueDate)))
          buffer.append("; Max Date: ").append(df.format(new Date(dt.getMaxDate)))
        case _ =>
      }
    } catch {
      // Decoding is best-effort; fall back to the raw toString on failure.
      case e: IOException =>
        logDebug(s"Failed to decode $token: $e", e)
    }
    buffer.toString
  }
  // Serializes a Credentials object using Hadoop's token-storage stream format.
  def serialize(creds: Credentials): Array[Byte] = {
    val byteStream = new ByteArrayOutputStream
    val dataStream = new DataOutputStream(byteStream)
    creds.writeTokenStorageToStream(dataStream)
    byteStream.toByteArray
  }
  // Inverse of serialize: reconstructs a Credentials object from its serialized bytes.
  def deserialize(tokenBytes: Array[Byte]): Credentials = {
    val tokensBuf = new ByteArrayInputStream(tokenBytes)
    val creds = new Credentials()
    creds.readTokenStorageStream(new DataInputStream(tokensBuf))
    creds
  }
  // True when the UGI authenticated via proxy (impersonation).
  def isProxyUser(ugi: UserGroupInformation): Boolean = {
    ugi.getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.PROXY
  }
}
private[spark] object SparkHadoopUtil {
  // Single shared instance, created lazily on first access via `get`.
  private lazy val instance = new SparkHadoopUtil
  val SPARK_YARN_CREDS_TEMP_EXTENSION = ".tmp"
  val SPARK_YARN_CREDS_COUNTER_DELIM = "-"
  /**
   * Number of records to update input metrics when reading from HadoopRDDs.
   *
   * Each update is potentially expensive because we need to use reflection to access the
   * Hadoop FileSystem API of interest (only available in 2.5), so we should do this sparingly.
   */
  private[spark] val UPDATE_INPUT_METRICS_INTERVAL_RECORDS = 1000
  /**
   * Name of the file containing the gateway's Hadoop configuration, to be overlayed on top of the
   * cluster's Hadoop config. It is up to the Spark code launching the application to create
   * this file if it's desired. If the file doesn't exist, it will just be ignored.
   */
  private[spark] val SPARK_HADOOP_CONF_FILE = "__spark_hadoop_conf__.xml"
  // Accessor for the shared instance.
  def get: SparkHadoopUtil = instance
  /**
   * Returns a Configuration object with Spark configuration applied on top. Unlike
   * the instance method, this will always return a Configuration instance, and not a
   * cluster manager-specific type.
   */
  private[spark] def newConfiguration(conf: SparkConf): Configuration = {
    val hadoopConf = new Configuration()
    appendS3AndSparkHadoopHiveConfigurations(conf, hadoopConf)
    hadoopConf
  }
  // Applies, in order: S3 credentials from environment variables, spark.hadoop.* keys,
  // spark.hive.* keys, and the configured I/O buffer size.
  private def appendS3AndSparkHadoopHiveConfigurations(
      conf: SparkConf,
      hadoopConf: Configuration): Unit = {
    // Note: this null check is around more than just access to the "conf" object to maintain
    // the behavior of the old implementation of this code, for backwards compatibility.
    if (conf != null) {
      // Explicitly check for S3 environment variables
      val keyId = System.getenv("AWS_ACCESS_KEY_ID")
      val accessKey = System.getenv("AWS_SECRET_ACCESS_KEY")
      if (keyId != null && accessKey != null) {
        // Populate all three S3 filesystem flavors (s3, s3n, s3a) with the same credentials.
        hadoopConf.set("fs.s3.awsAccessKeyId", keyId)
        hadoopConf.set("fs.s3n.awsAccessKeyId", keyId)
        hadoopConf.set("fs.s3a.access.key", keyId)
        hadoopConf.set("fs.s3.awsSecretAccessKey", accessKey)
        hadoopConf.set("fs.s3n.awsSecretAccessKey", accessKey)
        hadoopConf.set("fs.s3a.secret.key", accessKey)
        val sessionToken = System.getenv("AWS_SESSION_TOKEN")
        if (sessionToken != null) {
          hadoopConf.set("fs.s3a.session.token", sessionToken)
        }
      }
      appendSparkHadoopConfigs(conf, hadoopConf)
      appendSparkHiveConfigs(conf, hadoopConf)
      val bufferSize = conf.get(BUFFER_SIZE).toString
      hadoopConf.set("io.file.buffer.size", bufferSize)
    }
  }
  private def appendSparkHadoopConfigs(conf: SparkConf, hadoopConf: Configuration): Unit = {
    // Copy any "spark.hadoop.foo=bar" spark properties into conf as "foo=bar"
    for ((key, value) <- conf.getAll if key.startsWith("spark.hadoop.")) {
      hadoopConf.set(key.substring("spark.hadoop.".length), value)
    }
  }
  private def appendSparkHiveConfigs(conf: SparkConf, hadoopConf: Configuration): Unit = {
    // Copy any "spark.hive.foo=bar" spark properties into conf as "hive.foo=bar"
    for ((key, value) <- conf.getAll if key.startsWith("spark.hive.")) {
      hadoopConf.set(key.substring("spark.".length), value)
    }
  }
  // scalastyle:off line.size.limit
  /**
   * Create a file on the given file system, optionally making sure erasure coding is disabled.
   *
   * Disabling EC can be helpful as HDFS EC doesn't support hflush(), hsync(), or append().
   * https://hadoop.apache.org/docs/r3.0.0/hadoop-project-dist/hadoop-hdfs/HDFSErasureCoding.html#Limitations
   */
  // scalastyle:on line.size.limit
  def createFile(fs: FileSystem, path: Path, allowEC: Boolean): FSDataOutputStream = {
    if (allowEC) {
      fs.create(path)
    } else {
      try {
        // Use reflection as this uses APIs only available in Hadoop 3
        val builderMethod = fs.getClass().getMethod("createFile", classOf[Path])
        // the builder api does not resolve relative paths, nor does it create parent dirs, while
        // the old api does.
        if (!fs.mkdirs(path.getParent())) {
          throw new IOException(s"Failed to create parents of $path")
        }
        val qualifiedPath = fs.makeQualified(path)
        val builder = builderMethod.invoke(fs, qualifiedPath)
        val builderCls = builder.getClass()
        // this may throw a NoSuchMethodException if the path is not on hdfs
        val replicateMethod = builderCls.getMethod("replicate")
        val buildMethod = builderCls.getMethod("build")
        val b2 = replicateMethod.invoke(builder)
        buildMethod.invoke(b2).asInstanceOf[FSDataOutputStream]
      } catch {
        case _: NoSuchMethodException =>
          // No createFile() method, we're using an older hdfs client, which doesn't give us control
          // over EC vs. replication. Older hdfs doesn't have EC anyway, so just create a file with
          // old apis.
          fs.create(path)
      }
    }
  }
}
| ConeyLiu/spark | core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala | Scala | apache-2.0 | 19,854 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import java.io._
import java.lang.ref.{ReferenceQueue => JReferenceQueue, WeakReference}
import java.nio.ByteBuffer
import java.nio.channels.Channels
import java.util.Collections
import java.util.concurrent.{CompletableFuture, ConcurrentHashMap, TimeUnit}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.HashMap
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
import scala.reflect.ClassTag
import scala.util.{Failure, Random, Success, Try}
import scala.util.control.NonFatal
import com.codahale.metrics.{MetricRegistry, MetricSet}
import com.google.common.cache.CacheBuilder
import org.apache.commons.io.IOUtils
import org.apache.spark._
import org.apache.spark.executor.DataReadMethod
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config
import org.apache.spark.internal.config.Network
import org.apache.spark.memory.{MemoryManager, MemoryMode}
import org.apache.spark.metrics.source.Source
import org.apache.spark.network._
import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer}
import org.apache.spark.network.client.StreamCallbackWithID
import org.apache.spark.network.netty.SparkTransportConf
import org.apache.spark.network.shuffle._
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo
import org.apache.spark.network.util.TransportConf
import org.apache.spark.rpc.RpcEnv
import org.apache.spark.scheduler.ExecutorCacheTaskLocation
import org.apache.spark.serializer.{SerializerInstance, SerializerManager}
import org.apache.spark.shuffle.{MigratableResolver, ShuffleManager, ShuffleWriteMetricsReporter}
import org.apache.spark.storage.BlockManagerMessages.{DecommissionBlockManager, ReplicateBlock}
import org.apache.spark.storage.memory._
import org.apache.spark.unsafe.Platform
import org.apache.spark.util._
import org.apache.spark.util.io.ChunkedByteBuffer
/* Class for returning a fetched block and associated metrics. */
private[spark] class BlockResult(
    val data: Iterator[Any], // iterator over the fetched block's records
    val readMethod: DataReadMethod.Value, // how the block was read (e.g. memory vs. disk)
    val bytes: Long) // number of bytes read
/**
* Abstracts away how blocks are stored and provides different ways to read the underlying block
* data. Callers should call [[dispose()]] when they're done with the block.
*/
private[spark] trait BlockData {
  // Opens an InputStream over the block's bytes.
  def toInputStream(): InputStream
  /**
   * Returns a Netty-friendly wrapper for the block's data.
   *
   * Please see `ManagedBuffer.convertToNetty()` for more details.
   */
  def toNetty(): Object
  // Materializes the data as a ChunkedByteBuffer, allocating chunks via `allocator`.
  def toChunkedByteBuffer(allocator: Int => ByteBuffer): ChunkedByteBuffer
  // Returns the data as a single ByteBuffer.
  def toByteBuffer(): ByteBuffer
  // Size of the block data, in bytes.
  def size: Long
  // Releases any resources held by this block data (see trait-level contract above).
  def dispose(): Unit
}
// BlockData implementation backed by an in-memory ChunkedByteBuffer.
// `shouldDispose` records whether this wrapper owns the buffer's lifecycle.
private[spark] class ByteBufferBlockData(
    val buffer: ChunkedByteBuffer,
    val shouldDispose: Boolean) extends BlockData {
  // The stream must not dispose the shared buffer; disposal is handled by dispose() below.
  override def toInputStream(): InputStream = buffer.toInputStream(dispose = false)
  override def toNetty(): Object = buffer.toNetty
  override def toChunkedByteBuffer(allocator: Int => ByteBuffer): ChunkedByteBuffer = {
    // Hand out a copy so callers can't mutate or free the shared buffer.
    buffer.copy(allocator)
  }
  override def toByteBuffer(): ByteBuffer = buffer.toByteBuffer
  override def size: Long = buffer.size
  override def dispose(): Unit = {
    // Only release the underlying buffer when this wrapper owns it.
    if (shouldDispose) {
      buffer.dispose()
    }
  }
}
/**
 * Maintains a bounded cache of executor-id -> local-directories mappings fetched from a
 * [[BlockStoreClient]]. Presumably used to read blocks of same-host executors directly
 * from disk — NOTE(review): confirm against BlockManager's usage.
 */
private[spark] class HostLocalDirManager(
    futureExecutionContext: ExecutionContext, // NOTE(review): not referenced in this class body
    cacheSize: Int,
    blockStoreClient: BlockStoreClient) extends Logging {
  // Guava cache bounded at `cacheSize` entries; all access is additionally synchronized
  // on the cache object itself.
  private val executorIdToLocalDirsCache =
    CacheBuilder
      .newBuilder()
      .maximumSize(cacheSize)
      .build[String, Array[String]]()
  // Snapshot of all cached executor-id -> local-dirs mappings.
  private[spark] def getCachedHostLocalDirs: Map[String, Array[String]] =
    executorIdToLocalDirsCache.synchronized {
      executorIdToLocalDirsCache.asMap().asScala.toMap
    }
  // Cached local dirs for a single executor, or None if not cached.
  private[spark] def getCachedHostLocalDirsFor(executorId: String): Option[Array[String]] =
    executorIdToLocalDirsCache.synchronized {
      Option(executorIdToLocalDirsCache.getIfPresent(executorId))
    }
  /**
   * Asynchronously fetches the local dirs of `executorIds` from the block store client at
   * host:port. On success the result is cached and `callback` receives Success; on failure
   * `callback` receives Failure with the underlying throwable.
   */
  private[spark] def getHostLocalDirs(
      host: String,
      port: Int,
      executorIds: Array[String])(
      callback: Try[Map[String, Array[String]]] => Unit): Unit = {
    val hostLocalDirsCompletable = new CompletableFuture[java.util.Map[String, Array[String]]]
    blockStoreClient.getHostLocalDirs(
      host,
      port,
      executorIds,
      hostLocalDirsCompletable)
    hostLocalDirsCompletable.whenComplete { (hostLocalDirs, throwable) =>
      if (hostLocalDirs != null) {
        // Invoke the callback before updating the cache; both happen on the future's thread.
        callback(Success(hostLocalDirs.asScala.toMap))
        executorIdToLocalDirsCache.synchronized {
          executorIdToLocalDirsCache.putAll(hostLocalDirs)
        }
      } else {
        callback(Failure(throwable))
      }
    }
  }
}
/**
* Manager running on every node (driver and executors) which provides interfaces for putting and
* retrieving blocks both locally and remotely into various stores (memory, disk, and off-heap).
*
* Note that [[initialize()]] must be called before the BlockManager is usable.
*/
private[spark] class BlockManager(
val executorId: String,
rpcEnv: RpcEnv,
val master: BlockManagerMaster,
val serializerManager: SerializerManager,
val conf: SparkConf,
memoryManager: MemoryManager,
mapOutputTracker: MapOutputTracker,
shuffleManager: ShuffleManager,
val blockTransferService: BlockTransferService,
securityManager: SecurityManager,
externalBlockStoreClient: Option[ExternalBlockStoreClient])
extends BlockDataManager with BlockEvictionHandler with Logging {
  // same as `conf.get(config.SHUFFLE_SERVICE_ENABLED)`
  private[spark] val externalShuffleServiceEnabled: Boolean = externalBlockStoreClient.isDefined
  // Whether remote reads should convert data into NIO buffers (config-driven flag).
  private val remoteReadNioBufferConversion =
    conf.get(Network.NETWORK_REMOTE_READ_NIO_BUFFER_CONVERSION)
  // Number of sub-directories per local dir used to spread block files (config-driven).
  private[spark] val subDirsPerLocalDir = conf.get(config.DISKSTORE_SUB_DIRECTORIES)
  val diskBlockManager = {
    // Only perform cleanup if an external service is not serving our shuffle files.
    val deleteFilesOnStop =
      !externalShuffleServiceEnabled || executorId == SparkContext.DRIVER_IDENTIFIER
    new DiskBlockManager(conf, deleteFilesOnStop)
  }
  // Visible for testing
  private[storage] val blockInfoManager = new BlockInfoManager
  // Cached daemon thread pool for blocking work kicked off here (replication, async reregister).
  private val futureExecutionContext = ExecutionContext.fromExecutorService(
    ThreadUtils.newDaemonCachedThreadPool("block-manager-future", 128))
  // Actual storage of where blocks are kept
  private[spark] val memoryStore =
    new MemoryStore(conf, blockInfoManager, serializerManager, memoryManager, this)
  private[spark] val diskStore = new DiskStore(conf, diskBlockManager, securityManager)
  memoryManager.setMemoryStore(memoryStore)
  // Note: depending on the memory manager, `maxMemory` may actually vary over time.
  // However, since we use this only for reporting and logging, what we actually want here is
  // the absolute maximum value that `maxMemory` can ever possibly reach. We may need
  // to revisit whether reporting this value as the "max" is intuitive to the user.
  private val maxOnHeapMemory = memoryManager.maxOnHeapStorageMemory
  private val maxOffHeapMemory = memoryManager.maxOffHeapStorageMemory
  private[spark] val externalShuffleServicePort = StorageUtils.externalShuffleServicePort(conf)
  // Assigned in initialize(); holds the id confirmed (and possibly amended) by the master.
  var blockManagerId: BlockManagerId = _
  // Address of the server that serves this executor's shuffle files. This is either an external
  // service, or just our own Executor's BlockManager.
  private[spark] var shuffleServerId: BlockManagerId = _
  // Client to read other executors' blocks. This is either an external service, or just the
  // standard BlockTransferService to directly connect to other Executors.
  private[spark] val blockStoreClient = externalBlockStoreClient.getOrElse(blockTransferService)
  // Max number of failures before this block manager refreshes the block locations from the driver
  private val maxFailuresBeforeLocationRefresh =
    conf.get(config.BLOCK_FAILURES_BEFORE_LOCATION_REFRESH)
  private val storageEndpoint = rpcEnv.setupEndpoint(
    "BlockManagerEndpoint" + BlockManager.ID_GENERATOR.next,
    new BlockManagerStorageEndpoint(rpcEnv, this, mapOutputTracker))
  // Pending re-registration action being executed asynchronously or null if none is pending.
  // Accesses should synchronize on asyncReregisterLock.
  private var asyncReregisterTask: Future[Unit] = null
  private val asyncReregisterLock = new Object
  // Field related to peer block managers that are necessary for block replication
  @volatile private var cachedPeers: Seq[BlockManagerId] = _
  private val peerFetchLock = new Object
  private var lastPeerFetchTimeNs = 0L
  private var blockReplicationPolicy: BlockReplicationPolicy = _
  // visible for test
  // This is volatile since if it's defined we should not accept remote blocks.
  @volatile private[spark] var decommissioner: Option[BlockManagerDecommissioner] = None
  // A DownloadFileManager used to track all the files of remote blocks which are above the
  // specified memory threshold. Files will be deleted automatically based on weak reference.
  // Exposed for test
  private[storage] val remoteBlockTempFileManager =
    new BlockManager.RemoteBlockDownloadFileManager(this)
  private val maxRemoteBlockToMem = conf.get(config.MAX_REMOTE_BLOCK_SIZE_FETCH_TO_MEM)
  // Set in initialize() when host-local disk reading or push-based shuffle is enabled.
  var hostLocalDirManager: Option[HostLocalDirManager] = None
@inline final private def isDecommissioning() = {
decommissioner.isDefined
}
@inline final private def checkShouldStore(blockId: BlockId) = {
// Don't reject broadcast blocks since they may be stored during task exec and
// don't need to be migrated.
if (isDecommissioning() && !blockId.isBroadcast) {
throw new BlockSavedOnDecommissionedBlockManagerException(blockId)
}
}
  // This is a lazy val so that RDD blocks can still be migrated even when the shuffle block
  // resolver is not a MigratableResolver: the cast below only runs (and can only throw) when
  // shuffle migration is actually attempted. Used in BlockManagerDecommissioner & block puts.
  private[storage] lazy val migratableResolver: MigratableResolver = {
    shuffleManager.shuffleBlockResolver.asInstanceOf[MigratableResolver]
  }
  // Exposes this block manager's local directories so other executors on the same host can
  // read its shuffle files directly from disk.
  override def getLocalDiskDirs: Array[String] = diskBlockManager.localDirsString
/**
* Abstraction for storing blocks from bytes, whether they start in memory or on disk.
*
* @param blockSize the decrypted size of the block
*/
  private[spark] abstract class BlockStoreUpdater[T](
      blockSize: Long,
      blockId: BlockId,
      level: StorageLevel,
      classTag: ClassTag[T],
      tellMaster: Boolean,
      keepReadLock: Boolean) {
    /**
     * Reads the block content into the memory. If the update of the block store is based on a
     * temporary file this could lead to loading the whole file into a ChunkedByteBuffer.
     */
    protected def readToByteBuffer(): ChunkedByteBuffer
    // The block's content as BlockData; implementations decide whether it is memory- or
    // file-backed.
    protected def blockData(): BlockData
    // Persists the block's content into the disk store.
    protected def saveToDiskStore(): Unit
    // Deserializes the stream and stores the resulting values in the memory store. Returns true
    // on success; on failure the partially-consumed iterator is closed so the caller can fall
    // back to the disk store. Always closes `inputStream`.
    private def saveDeserializedValuesToMemoryStore(inputStream: InputStream): Boolean = {
      try {
        val values = serializerManager.dataDeserializeStream(blockId, inputStream)(classTag)
        memoryStore.putIteratorAsValues(blockId, values, level.memoryMode, classTag) match {
          case Right(_) => true
          case Left(iter) =>
            // If putting deserialized values in memory failed, we will put the bytes directly
            // to disk, so we don't need this iterator and can close it to free resources
            // earlier.
            iter.close()
            false
        }
      } finally {
        IOUtils.closeQuietly(inputStream)
      }
    }
    // Stores the already-serialized bytes in the memory store, copying on-heap chunks to direct
    // buffers first when the target memory mode is off-heap. Returns true on success.
    private def saveSerializedValuesToMemoryStore(bytes: ChunkedByteBuffer): Boolean = {
      val memoryMode = level.memoryMode
      memoryStore.putBytes(blockId, blockSize, memoryMode, () => {
        if (memoryMode == MemoryMode.OFF_HEAP && bytes.chunks.exists(!_.isDirect)) {
          bytes.copy(Platform.allocateDirectBuffer)
        } else {
          bytes
        }
      })
    }
    /**
     * Put the given data according to the given level in one of the block stores, replicating
     * the values if necessary.
     *
     * If the block already exists, this method will not overwrite it.
     *
     * If keepReadLock is true, this method will hold the read lock when it returns (even if the
     * block already exists). If false, this method will hold no locks when it returns.
     *
     * @return true if the block was already present or if the put succeeded, false otherwise.
     */
    def save(): Boolean = {
      doPut(blockId, level, classTag, tellMaster, keepReadLock) { info =>
        val startTimeNs = System.nanoTime()
        // Since we're storing bytes, initiate the replication before storing them locally.
        // This is faster as data is already serialized and ready to send.
        val replicationFuture = if (level.replication > 1) {
          Future {
            // This is a blocking action and should run in futureExecutionContext which is a cached
            // thread pool.
            replicate(blockId, blockData(), level, classTag)
          }(futureExecutionContext)
        } else {
          null
        }
        if (level.useMemory) {
          // Put it in memory first, even if it also has useDisk set to true;
          // We will drop it to disk later if the memory store can't hold it.
          val putSucceeded = if (level.deserialized) {
            saveDeserializedValuesToMemoryStore(blockData().toInputStream())
          } else {
            saveSerializedValuesToMemoryStore(readToByteBuffer())
          }
          if (!putSucceeded && level.useDisk) {
            logWarning(s"Persisting block $blockId to disk instead.")
            saveToDiskStore()
          }
        } else if (level.useDisk) {
          saveToDiskStore()
        }
        val putBlockStatus = getCurrentBlockStatus(blockId, info)
        val blockWasSuccessfullyStored = putBlockStatus.storageLevel.isValid
        if (blockWasSuccessfullyStored) {
          // Now that the block is in either the memory or disk store,
          // tell the master about it.
          info.size = blockSize
          if (tellMaster && info.tellMaster) {
            reportBlockStatus(blockId, putBlockStatus)
          }
          addUpdatedBlockStatusToTaskMetrics(blockId, putBlockStatus)
        }
        logDebug(s"Put block ${blockId} locally took ${Utils.getUsedTimeNs(startTimeNs)}")
        if (level.replication > 1) {
          // Wait for asynchronous replication to finish
          try {
            ThreadUtils.awaitReady(replicationFuture, Duration.Inf)
          } catch {
            case NonFatal(t) =>
              throw new SparkException("Error occurred while waiting for replication to finish", t)
          }
        }
        // doPut's body contract: None signals success, Some(size) signals failure;
        // `.isEmpty` below therefore maps success to true.
        if (blockWasSuccessfullyStored) {
          None
        } else {
          Some(blockSize)
        }
      }.isEmpty
    }
  }
/**
* Helper for storing a block from bytes already in memory.
* '''Important!''' Callers must not mutate or release the data buffer underlying `bytes`. Doing
* so may corrupt or change the data stored by the `BlockManager`.
*/
  private case class ByteBufferBlockStoreUpdater[T](
      blockId: BlockId,
      level: StorageLevel,
      classTag: ClassTag[T],
      bytes: ChunkedByteBuffer,
      tellMaster: Boolean = true,
      keepReadLock: Boolean = false)
    extends BlockStoreUpdater[T](bytes.size, blockId, level, classTag, tellMaster, keepReadLock) {
    // The bytes are already in memory, so no read is necessary.
    override def readToByteBuffer(): ChunkedByteBuffer = bytes
    /**
     * The ByteBufferBlockData wrapper is not disposed of to avoid releasing buffers that are
     * owned by the caller.
     */
    override def blockData(): BlockData = new ByteBufferBlockData(bytes, false)
    // Writes the in-memory bytes straight into the disk store.
    override def saveToDiskStore(): Unit = diskStore.putBytes(blockId, bytes)
  }
/**
* Helper for storing a block based from bytes already in a local temp file.
*/
  private[spark] case class TempFileBasedBlockStoreUpdater[T](
      blockId: BlockId,
      level: StorageLevel,
      classTag: ClassTag[T],
      tmpFile: File,
      blockSize: Long,
      tellMaster: Boolean = true,
      keepReadLock: Boolean = false)
    extends BlockStoreUpdater[T](blockSize, blockId, level, classTag, tellMaster, keepReadLock) {
    // Loads the whole temp file into a ChunkedByteBuffer, allocating on- or off-heap buffers
    // to match the storage level's memory mode.
    override def readToByteBuffer(): ChunkedByteBuffer = {
      val allocator = level.memoryMode match {
        case MemoryMode.ON_HEAP => ByteBuffer.allocate _
        case MemoryMode.OFF_HEAP => Platform.allocateDirectBuffer _
      }
      blockData().toChunkedByteBuffer(allocator)
    }
    // File-backed view of the block's content.
    override def blockData(): BlockData = diskStore.getBytes(tmpFile, blockSize)
    // Moving (not copying) the temp file into place avoids rewriting the data.
    override def saveToDiskStore(): Unit = diskStore.moveFileToBlock(tmpFile, blockSize, blockId)
    override def save(): Boolean = {
      val res = super.save()
      // The temp file is deleted regardless of the outcome: on a disk-store save it has already
      // been moved into place, and on failure it must not be left behind.
      tmpFile.delete()
      res
    }
  }
/**
* Initializes the BlockManager with the given appId. This is not performed in the constructor as
* the appId may not be known at BlockManager instantiation time (in particular for the driver,
* where it is only learned after registration with the TaskScheduler).
*
* This method initializes the BlockTransferService and BlockStoreClient, registers with the
* BlockManagerMaster, starts the BlockManagerWorker endpoint, and registers with a local shuffle
* service if configured.
*/
  def initialize(appId: String): Unit = {
    blockTransferService.init(this)
    externalBlockStoreClient.foreach { blockStoreClient =>
      blockStoreClient.init(appId)
    }
    // Instantiate the configured replication policy class reflectively.
    blockReplicationPolicy = {
      val priorityClass = conf.get(config.STORAGE_REPLICATION_POLICY)
      val clazz = Utils.classForName(priorityClass)
      val ret = clazz.getConstructor().newInstance().asInstanceOf[BlockReplicationPolicy]
      logInfo(s"Using $priorityClass for block replication policy")
      ret
    }
    val id =
      BlockManagerId(executorId, blockTransferService.hostName, blockTransferService.port, None)
    val idFromMaster = master.registerBlockManager(
      id,
      diskBlockManager.localDirsString,
      maxOnHeapMemory,
      maxOffHeapMemory,
      storageEndpoint)
    // The master may return an amended id (e.g. with topology info); prefer it when present.
    blockManagerId = if (idFromMaster != null) idFromMaster else id
    // Shuffle files are served either by the external shuffle service on its own port, or by
    // this block manager itself.
    shuffleServerId = if (externalShuffleServiceEnabled) {
      logInfo(s"external shuffle service port = $externalShuffleServicePort")
      BlockManagerId(executorId, blockTransferService.hostName, externalShuffleServicePort)
    } else {
      blockManagerId
    }
    // Register Executors' configuration with the local shuffle service, if one should exist.
    if (externalShuffleServiceEnabled && !blockManagerId.isDriver) {
      registerWithExternalShuffleServer()
    }
    // Host-local dir lookup is only useful with the new fetch protocol, or when push-based
    // shuffle is on.
    hostLocalDirManager = {
      if ((conf.get(config.SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED) &&
          !conf.get(config.SHUFFLE_USE_OLD_FETCH_PROTOCOL)) ||
          Utils.isPushBasedShuffleEnabled(conf)) {
        Some(new HostLocalDirManager(
          futureExecutionContext,
          conf.get(config.STORAGE_LOCAL_DISK_BY_EXECUTORS_CACHE_SIZE),
          blockStoreClient))
      } else {
        None
      }
    }
    logInfo(s"Initialized BlockManager: $blockManagerId")
  }
def shuffleMetricsSource: Source = {
import BlockManager._
if (externalShuffleServiceEnabled) {
new ShuffleMetricsSource("ExternalShuffle", blockStoreClient.shuffleMetrics())
} else {
new ShuffleMetricsSource("NettyBlockTransfer", blockStoreClient.shuffleMetrics())
}
}
  // Registers this executor's local dirs and shuffle manager with the host-local external
  // shuffle service, retrying with a fixed sleep between attempts.
  private def registerWithExternalShuffleServer(): Unit = {
    logInfo("Registering executor with local external shuffle service.")
    val shuffleConfig = new ExecutorShuffleInfo(
      diskBlockManager.localDirsString,
      diskBlockManager.subDirsPerLocalDir,
      shuffleManager.getClass.getName)
    val MAX_ATTEMPTS = conf.get(config.SHUFFLE_REGISTRATION_MAX_ATTEMPTS)
    val SLEEP_TIME_SECS = 5
    for (i <- 1 to MAX_ATTEMPTS) {
      try {
        // Synchronous and will throw an exception if we cannot connect.
        blockStoreClient.asInstanceOf[ExternalBlockStoreClient].registerWithShuffleServer(
          shuffleServerId.host, shuffleServerId.port, shuffleServerId.executorId, shuffleConfig)
        return
      } catch {
        // Any Exception before the last attempt: log, sleep, and retry.
        case e: Exception if i < MAX_ATTEMPTS =>
          logError(s"Failed to connect to external shuffle server, will retry ${MAX_ATTEMPTS - i}"
            + s" more times after waiting $SLEEP_TIME_SECS seconds...", e)
          Thread.sleep(SLEEP_TIME_SECS * 1000L)
        // Reached only on the final attempt (or for non-Exception non-fatal throwables):
        // give up and surface the failure.
        case NonFatal(e) =>
          throw new SparkException("Unable to register with external shuffle server due to : " +
            e.getMessage, e)
      }
    }
  }
/**
* Report all blocks to the BlockManager again. This may be necessary if we are dropped
* by the BlockManager and come back or if we become capable of recovering blocks on disk after
* an executor crash.
*
* This function deliberately fails silently if the master returns false (indicating that
* the storage endpoint needs to re-register). The error condition will be detected again by the
* next heart beat attempt or new block registration and another try to re-register all blocks
* will be made then.
*/
  private def reportAllBlocks(): Unit = {
    logInfo(s"Reporting ${blockInfoManager.size} blocks to the master.")
    for ((blockId, info) <- blockInfoManager.entries) {
      val status = getCurrentBlockStatus(blockId, info)
      if (info.tellMaster && !tryToReportBlockStatus(blockId, status)) {
        // Deliberately stop at the first failed report (see the scaladoc above): the next
        // heartbeat or block registration will trigger another full re-report.
        logError(s"Failed to report $blockId to master; giving up.")
        return
      }
    }
  }
/**
* Re-register with the master and report all blocks to it. This will be called by the heart beat
* thread if our heartbeat to the block manager indicates that we were not registered.
*
* Note that this method must be called without any BlockInfo locks held.
*/
  def reregister(): Unit = {
    // TODO: We might need to rate limit re-registering.
    logInfo(s"BlockManager $blockManagerId re-registering with master")
    // Re-register under the existing id, then re-report every block so the master's view of
    // this block manager is rebuilt from scratch.
    master.registerBlockManager(blockManagerId, diskBlockManager.localDirsString, maxOnHeapMemory,
      maxOffHeapMemory, storageEndpoint)
    reportAllBlocks()
  }
/**
* Re-register with the master sometime soon.
*/
  private def asyncReregister(): Unit = {
    asyncReregisterLock.synchronized {
      // Only schedule a new task when none is already pending, so concurrent callers coalesce
      // into a single re-registration.
      if (asyncReregisterTask == null) {
        asyncReregisterTask = Future[Unit] {
          // This is a blocking action and should run in futureExecutionContext which is a cached
          // thread pool
          reregister()
          // Clear the pending marker under the same lock once registration completes.
          asyncReregisterLock.synchronized {
            asyncReregisterTask = null
          }
        }(futureExecutionContext)
      }
    }
  }
/**
* For testing. Wait for any pending asynchronous re-registration; otherwise, do nothing.
*/
def waitForAsyncReregister(): Unit = {
val task = asyncReregisterTask
if (task != null) {
try {
ThreadUtils.awaitReady(task, Duration.Inf)
} catch {
case NonFatal(t) =>
throw new Exception("Error occurred while waiting for async. reregistration", t)
}
}
}
  // Serves shuffle data written by another executor on this host, resolved through that
  // executor's local directories (`dirs`).
  override def getHostLocalShuffleData(
      blockId: BlockId,
      dirs: Array[String]): ManagedBuffer = {
    shuffleManager.shuffleBlockResolver.getBlockData(blockId, Some(dirs))
  }
/**
* Interface to get local block data. Throws an exception if the block cannot be found or
* cannot be read successfully.
*/
  override def getLocalBlockData(blockId: BlockId): ManagedBuffer = {
    if (blockId.isShuffle) {
      logDebug(s"Getting local shuffle block ${blockId}")
      try {
        shuffleManager.shuffleBlockResolver.getBlockData(blockId)
      } catch {
        case e: IOException =>
          // If a fallback storage path is configured, try reading the shuffle data from there
          // before propagating the read failure.
          if (conf.get(config.STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH).isDefined) {
            FallbackStorage.read(conf, blockId)
          } else {
            throw e
          }
      }
    } else {
      getLocalBytes(blockId) match {
        case Some(blockData) =>
          new BlockManagerManagedBuffer(blockInfoManager, blockId, blockData, true)
        case None =>
          // If this block manager receives a request for a block that it doesn't have then it's
          // likely that the master has outdated block statuses for this block. Therefore, we send
          // an RPC so that this block is marked as being unavailable from this block manager.
          reportBlockStatus(blockId, BlockStatus.empty)
          throw new BlockNotFoundException(blockId.toString)
      }
    }
  }
/**
* Put the block locally, using the given storage level.
*
* '''Important!''' Callers must not mutate or release the data buffer underlying `bytes`. Doing
* so may corrupt or change the data stored by the `BlockManager`.
*/
  override def putBlockData(
      blockId: BlockId,
      data: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Boolean = {
    // Wrap the buffer's bytes and delegate to putBytes; per the scaladoc above, the caller must
    // not mutate or release the underlying buffer afterwards.
    putBytes(blockId, new ChunkedByteBuffer(data.nioByteBuffer()), level)(classTag)
  }
override def putBlockDataAsStream(
blockId: BlockId,
level: StorageLevel,
classTag: ClassTag[_]): StreamCallbackWithID = {
checkShouldStore(blockId)
if (blockId.isShuffle) {
logDebug(s"Putting shuffle block ${blockId}")
try {
return migratableResolver.putShuffleBlockAsStream(blockId, serializerManager)
} catch {
case e: ClassCastException => throw new SparkException(
s"Unexpected shuffle block ${blockId} with unsupported shuffle " +
s"resolver ${shuffleManager.shuffleBlockResolver}")
}
}
logDebug(s"Putting regular block ${blockId}")
// All other blocks
val (_, tmpFile) = diskBlockManager.createTempLocalBlock()
val channel = new CountingWritableChannel(
Channels.newChannel(serializerManager.wrapForEncryption(new FileOutputStream(tmpFile))))
logTrace(s"Streaming block $blockId to tmp file $tmpFile")
new StreamCallbackWithID {
override def getID: String = blockId.name
override def onData(streamId: String, buf: ByteBuffer): Unit = {
while (buf.hasRemaining) {
channel.write(buf)
}
}
override def onComplete(streamId: String): Unit = {
logTrace(s"Done receiving block $blockId, now putting into local blockManager")
// Note this is all happening inside the netty thread as soon as it reads the end of the
// stream.
channel.close()
val blockSize = channel.getCount
val blockStored = TempFileBasedBlockStoreUpdater(
blockId, level, classTag, tmpFile, blockSize).save()
if (!blockStored) {
throw new Exception(s"Failure while trying to store block $blockId on $blockManagerId.")
}
}
override def onFailure(streamId: String, cause: Throwable): Unit = {
// the framework handles the connection itself, we just need to do local cleanup
channel.close()
tmpFile.delete()
}
}
}
/**
* Get the local merged shuffle block data for the given block ID as multiple chunks.
* A merged shuffle file is divided into multiple chunks according to the index file.
* Instead of reading the entire file as a single block, we split it into smaller chunks
* which will be memory efficient when performing certain operations.
*/
  def getLocalMergedBlockData(
      blockId: ShuffleBlockId,
      dirs: Array[String]): Seq[ManagedBuffer] = {
    // Delegates to the shuffle resolver, which chunks the merged file per its index file.
    shuffleManager.shuffleBlockResolver.getMergedBlockData(blockId, Some(dirs))
  }
/**
* Get the local merged shuffle block meta data for the given block ID.
*/
  def getLocalMergedBlockMeta(
      blockId: ShuffleBlockId,
      dirs: Array[String]): MergedBlockMeta = {
    // Metadata for a locally merged shuffle block, resolved through the given local dirs.
    shuffleManager.shuffleBlockResolver.getMergedBlockMeta(blockId, Some(dirs))
  }
/**
* Get the BlockStatus for the block identified by the given ID, if it exists.
* NOTE: This is mainly for testing.
*/
def getStatus(blockId: BlockId): Option[BlockStatus] = {
blockInfoManager.get(blockId).map { info =>
val memSize = if (memoryStore.contains(blockId)) memoryStore.getSize(blockId) else 0L
val diskSize = if (diskStore.contains(blockId)) diskStore.getSize(blockId) else 0L
BlockStatus(info.level, memSize = memSize, diskSize = diskSize)
}
}
/**
* Get the ids of existing blocks that match the given filter. Note that this will
* query the blocks stored in the disk block manager (that the block manager
* may not know of).
*/
def getMatchingBlockIds(filter: BlockId => Boolean): Seq[BlockId] = {
// The `toArray` is necessary here in order to force the list to be materialized so that we
// don't try to serialize a lazy iterator when responding to client requests.
(blockInfoManager.entries.map(_._1) ++ diskBlockManager.getAllBlocks())
.filter(filter)
.toArray
.toSeq
}
/**
* Tell the master about the current storage status of a block. This will send a block update
* message reflecting the current status, *not* the desired storage level in its block info.
* For example, a block with MEMORY_AND_DISK set might have fallen out to be only on disk.
*
* droppedMemorySize exists to account for when the block is dropped from memory to disk (so
* it is still valid). This ensures that update in master will compensate for the increase in
* memory on the storage endpoint.
*/
private[spark] def reportBlockStatus(
blockId: BlockId,
status: BlockStatus,
droppedMemorySize: Long = 0L): Unit = {
val needReregister = !tryToReportBlockStatus(blockId, status, droppedMemorySize)
if (needReregister) {
logInfo(s"Got told to re-register updating block $blockId")
// Re-registering will report our new block for free.
asyncReregister()
}
logDebug(s"Told master about block $blockId")
}
/**
* Actually send a UpdateBlockInfo message. Returns the master's response,
* which will be true if the block was successfully recorded and false if
* the storage endpoint needs to re-register.
*/
private def tryToReportBlockStatus(
blockId: BlockId,
status: BlockStatus,
droppedMemorySize: Long = 0L): Boolean = {
val storageLevel = status.storageLevel
val inMemSize = Math.max(status.memSize, droppedMemorySize)
val onDiskSize = status.diskSize
master.updateBlockInfo(blockManagerId, blockId, storageLevel, inMemSize, onDiskSize)
}
/**
* Return the updated storage status of the block with the given ID. More specifically, if
* the block is dropped from memory and possibly added to disk, return the new storage level
* and the updated in-memory and on-disk sizes.
*/
private def getCurrentBlockStatus(blockId: BlockId, info: BlockInfo): BlockStatus = {
info.synchronized {
info.level match {
case null =>
BlockStatus.empty
case level =>
val inMem = level.useMemory && memoryStore.contains(blockId)
val onDisk = level.useDisk && diskStore.contains(blockId)
val deserialized = if (inMem) level.deserialized else false
val replication = if (inMem || onDisk) level.replication else 1
val storageLevel = StorageLevel(
useDisk = onDisk,
useMemory = inMem,
useOffHeap = level.useOffHeap,
deserialized = deserialized,
replication = replication)
val memSize = if (inMem) memoryStore.getSize(blockId) else 0L
val diskSize = if (onDisk) diskStore.getSize(blockId) else 0L
BlockStatus(storageLevel, memSize, diskSize)
}
}
}
/**
* Get locations of an array of blocks.
*/
private def getLocationBlockIds(blockIds: Array[BlockId]): Array[Seq[BlockManagerId]] = {
val startTimeNs = System.nanoTime()
val locations = master.getLocations(blockIds).toArray
logDebug(s"Got multiple block location in ${Utils.getUsedTimeNs(startTimeNs)}")
locations
}
/**
* Cleanup code run in response to a failed local read.
* Must be called while holding a read lock on the block.
*/
  private def handleLocalReadFailure(blockId: BlockId): Nothing = {
    // Order matters: drop our read lock before removing the block, then raise.
    releaseLock(blockId)
    // Remove the missing block so that its unavailability is reported to the driver
    removeBlock(blockId)
    throw new SparkException(s"Block $blockId was not found even though it's read-locked")
  }
/**
* Get block from local block manager as an iterator of Java objects.
*/
  def getLocalValues(blockId: BlockId): Option[BlockResult] = {
    logDebug(s"Getting local block $blockId")
    blockInfoManager.lockForReading(blockId) match {
      case None =>
        logDebug(s"Block $blockId was not found")
        None
      case Some(info) =>
        // We hold a read lock from here on; it is released by the CompletionIterator once the
        // returned iterator is exhausted.
        val level = info.level
        logDebug(s"Level for block $blockId is $level")
        val taskContext = Option(TaskContext.get())
        if (level.useMemory && memoryStore.contains(blockId)) {
          // Memory hit: serve values directly, or deserialize the cached bytes.
          val iter: Iterator[Any] = if (level.deserialized) {
            memoryStore.getValues(blockId).get
          } else {
            serializerManager.dataDeserializeStream(
              blockId, memoryStore.getBytes(blockId).get.toInputStream())(info.classTag)
          }
          // We need to capture the current taskId in case the iterator completion is triggered
          // from a different thread which does not have TaskContext set; see SPARK-18406 for
          // discussion.
          val ci = CompletionIterator[Any, Iterator[Any]](iter, {
            releaseLock(blockId, taskContext)
          })
          Some(new BlockResult(ci, DataReadMethod.Memory, info.size))
        } else if (level.useDisk && diskStore.contains(blockId)) {
          // Disk hit: deserialize, optionally caching the values or bytes back into memory.
          val diskData = diskStore.getBytes(blockId)
          val iterToReturn: Iterator[Any] = {
            if (level.deserialized) {
              val diskValues = serializerManager.dataDeserializeStream(
                blockId,
                diskData.toInputStream())(info.classTag)
              maybeCacheDiskValuesInMemory(info, blockId, level, diskValues)
            } else {
              val stream = maybeCacheDiskBytesInMemory(info, blockId, level, diskData)
                .map { _.toInputStream(dispose = false) }
                .getOrElse { diskData.toInputStream() }
              serializerManager.dataDeserializeStream(blockId, stream)(info.classTag)
            }
          }
          // On completion, release the lock AND dispose the disk-backed buffer.
          val ci = CompletionIterator[Any, Iterator[Any]](iterToReturn, {
            releaseLockAndDispose(blockId, diskData, taskContext)
          })
          Some(new BlockResult(ci, DataReadMethod.Disk, info.size))
        } else {
          // Read-locked but in neither store: clean up and report the inconsistency.
          handleLocalReadFailure(blockId)
        }
    }
  }
/**
* Get block from the local block manager as serialized bytes.
*/
  def getLocalBytes(blockId: BlockId): Option[BlockData] = {
    logDebug(s"Getting local block $blockId as bytes")
    // Shuffle blocks are served by the shuffle resolver, never through this path.
    assert(!blockId.isShuffle, s"Unexpected ShuffleBlockId $blockId")
    // Take a read lock first; doGetLocalBytes keeps it on success, releases it on failure.
    blockInfoManager.lockForReading(blockId).map { info => doGetLocalBytes(blockId, info) }
  }
/**
* Get block from the local block manager as serialized bytes.
*
* Must be called while holding a read lock on the block.
* Releases the read lock upon exception; keeps the read lock upon successful return.
*/
  private def doGetLocalBytes(blockId: BlockId, info: BlockInfo): BlockData = {
    val level = info.level
    logDebug(s"Level for block $blockId is $level")
    // In order, try to read the serialized bytes from memory, then from disk, then fall back to
    // serializing in-memory objects, and, finally, throw an exception if the block does not exist.
    if (level.deserialized) {
      // Try to avoid expensive serialization by reading a pre-serialized copy from disk:
      if (level.useDisk && diskStore.contains(blockId)) {
        // Note: we purposely do not try to put the block back into memory here. Since this branch
        // handles deserialized blocks, this block may only be cached in memory as objects, not
        // serialized bytes. Because the caller only requested bytes, it doesn't make sense to
        // cache the block's deserialized objects since that caching may not have a payoff.
        diskStore.getBytes(blockId)
      } else if (level.useMemory && memoryStore.contains(blockId)) {
        // The block was not found on disk, so serialize an in-memory copy:
        new ByteBufferBlockData(serializerManager.dataSerializeWithExplicitClassTag(
          blockId, memoryStore.getValues(blockId).get, info.classTag), true)
      } else {
        // Read-locked but in neither store: clean up and raise.
        handleLocalReadFailure(blockId)
      }
    } else { // storage level is serialized
      if (level.useMemory && memoryStore.contains(blockId)) {
        new ByteBufferBlockData(memoryStore.getBytes(blockId).get, false)
      } else if (level.useDisk && diskStore.contains(blockId)) {
        // Optionally pull the on-disk bytes back into memory while serving them.
        val diskData = diskStore.getBytes(blockId)
        maybeCacheDiskBytesInMemory(info, blockId, level, diskData)
          .map(new ByteBufferBlockData(_, false))
          .getOrElse(diskData)
      } else {
        handleLocalReadFailure(blockId)
      }
    }
  }
/**
* Get block from remote block managers.
*
* This does not acquire a lock on this block in this JVM.
*/
private[spark] def getRemoteValues[T: ClassTag](blockId: BlockId): Option[BlockResult] = {
val ct = implicitly[ClassTag[T]]
getRemoteBlock(blockId, (data: ManagedBuffer) => {
val values =
serializerManager.dataDeserializeStream(blockId, data.createInputStream())(ct)
new BlockResult(values, DataReadMethod.Network, data.size)
})
}
/**
* Get the remote block and transform it to the provided data type.
*
* If the block is persisted to the disk and stored at an executor running on the same host then
* first it is tried to be accessed using the local directories of the other executor directly.
* If the file is successfully identified then tried to be transformed by the provided
* transformation function which expected to open the file. If there is any exception during this
* transformation then block access falls back to fetching it from the remote executor via the
* network.
*
* @param blockId identifies the block to get
* @param bufferTransformer this transformer expected to open the file if the block is backed by a
* file by this it is guaranteed the whole content can be loaded
* @tparam T result type
*/
  private[spark] def getRemoteBlock[T](
      blockId: BlockId,
      bufferTransformer: ManagedBuffer => T): Option[T] = {
    logDebug(s"Getting remote block $blockId")
    require(blockId != null, "BlockId is null")
    // Because all the remote blocks are registered in driver, it is not necessary to ask
    // all the storage endpoints to get block status.
    val locationsAndStatusOption = master.getLocationsAndStatus(blockId, blockManagerId.host)
    if (locationsAndStatusOption.isEmpty) {
      logDebug(s"Block $blockId is unknown by block manager master")
      None
    } else {
      val locationsAndStatus = locationsAndStatusOption.get
      val blockSize = locationsAndStatus.status.diskSize.max(locationsAndStatus.status.memSize)
      // Fast path: when the master reports local dirs of a same-host executor holding the
      // block on disk, try to read the file directly before fetching over the network.
      locationsAndStatus.localDirs.flatMap { localDirs =>
        val blockDataOption =
          readDiskBlockFromSameHostExecutor(blockId, localDirs, locationsAndStatus.status.diskSize)
        val res = blockDataOption.flatMap { blockData =>
          try {
            Some(bufferTransformer(blockData))
          } catch {
            case NonFatal(e) =>
              // Any failure opening/transforming the file falls back to the network fetch.
              logDebug("Block from the same host executor cannot be opened: ", e)
              None
          }
        }
        logInfo(s"Read $blockId from the disk of a same host executor is " +
          (if (res.isDefined) "successful." else "failed."))
        res
      }.orElse {
        // Slow path: fetch from a remote block manager over the network.
        fetchRemoteManagedBuffer(blockId, blockSize, locationsAndStatus).map(bufferTransformer)
      }
    }
  }
private def preferExecutors(locations: Seq[BlockManagerId]): Seq[BlockManagerId] = {
val (executors, shuffleServers) = locations.partition(_.port != externalShuffleServicePort)
executors ++ shuffleServers
}
/**
* Return a list of locations for the given block, prioritizing the local machine since
* multiple block managers can share the same host, followed by hosts on the same rack.
*
* Within each of the above listed groups (same host, same rack and others) executors are
* preferred over the external shuffle service.
*/
  private[spark] def sortLocations(locations: Seq[BlockManagerId]): Seq[BlockManagerId] = {
    // Shuffle first so fetch load spreads across equally-preferred locations.
    val locs = Random.shuffle(locations)
    // Same-host locations come first.
    val (preferredLocs, otherLocs) = locs.partition(_.host == blockManagerId.host)
    val orderedParts = blockManagerId.topologyInfo match {
      case None => Seq(preferredLocs, otherLocs)
      case Some(_) =>
        // With topology info available, rank same-rack locations ahead of different-rack ones.
        val (sameRackLocs, differentRackLocs) = otherLocs.partition {
          loc => blockManagerId.topologyInfo == loc.topologyInfo
        }
        Seq(preferredLocs, sameRackLocs, differentRackLocs)
    }
    // Within each group, executors are preferred over external shuffle service endpoints.
    orderedParts.map(preferExecutors).reduce(_ ++ _)
  }
  /**
   * Fetch the block from remote block managers as a ManagedBuffer.
   *
   * Candidate locations are tried in priority order (see [[sortLocations]]). After
   * `maxFailuresBeforeLocationRefresh` consecutive failures the location list is re-fetched
   * from the master (stale entries are a real concern with many executors). The fetch is
   * abandoned once the total failure count reaches the number of initially known locations.
   *
   * @param blockId the block to fetch
   * @param blockSize expected block size in bytes; drives the spill-to-disk decision and the
   *                  empty-buffer sanity check below
   * @param locationsAndStatus locations (and status) of the block as reported by the master
   * @return the fetched buffer, or None if the block could not be fetched from any location
   */
  private def fetchRemoteManagedBuffer(
      blockId: BlockId,
      blockSize: Long,
      locationsAndStatus: BlockManagerMessages.BlockLocationsAndStatus): Option[ManagedBuffer] = {
    // If the block size is above the threshold, we should pass our FileManger to
    // BlockTransferService, which will leverage it to spill the block; if not, then passed-in
    // null value means the block will be persisted in memory.
    val tempFileManager = if (blockSize > maxRemoteBlockToMem) {
      remoteBlockTempFileManager
    } else {
      null
    }
    // Failures since the last location refresh vs. failures over the whole fetch attempt.
    var runningFailureCount = 0
    var totalFailureCount = 0
    val locations = sortLocations(locationsAndStatus.locations)
    // We tolerate at most one failed attempt per initially known location.
    val maxFetchFailures = locations.size
    var locationIterator = locations.iterator
    while (locationIterator.hasNext) {
      val loc = locationIterator.next()
      logDebug(s"Getting remote block $blockId from $loc")
      val data = try {
        val buf = blockTransferService.fetchBlockSync(loc.host, loc.port, loc.executorId,
          blockId.toString, tempFileManager)
        // A zero-length buffer for a block known to be non-empty indicates a corrupt fetch.
        if (blockSize > 0 && buf.size() == 0) {
          throw new IllegalStateException("Empty buffer received for non empty block")
        }
        buf
      } catch {
        case NonFatal(e) =>
          runningFailureCount += 1
          totalFailureCount += 1
          if (totalFailureCount >= maxFetchFailures) {
            // Give up trying anymore locations. Either we've tried all of the original locations,
            // or we've refreshed the list of locations from the master, and have still
            // hit failures after trying locations from the refreshed list.
            logWarning(s"Failed to fetch block after $totalFailureCount fetch failures. " +
              s"Most recent failure cause:", e)
            return None
          }
          logWarning(s"Failed to fetch remote block $blockId " +
            s"from $loc (failed attempt $runningFailureCount)", e)
          // If there is a large number of executors then locations list can contain a
          // large number of stale entries causing a large number of retries that may
          // take a significant amount of time. To get rid of these stale entries
          // we refresh the block locations after a certain number of fetch failures
          if (runningFailureCount >= maxFailuresBeforeLocationRefresh) {
            locationIterator = sortLocations(master.getLocations(blockId)).iterator
            logDebug(s"Refreshed locations from the driver " +
              s"after ${runningFailureCount} fetch failures.")
            // Only the per-refresh counter resets; totalFailureCount keeps accumulating.
            runningFailureCount = 0
          }
          // This location failed, so we retry fetch from a different one by returning null here
          null
      }
      if (data != null) {
        // If the ManagedBuffer is a BlockManagerManagedBuffer, the disposal of the
        // byte buffers backing it may need to be handled after reading the bytes.
        // In this case, since we just fetched the bytes remotely, we do not have
        // a BlockManagerManagedBuffer. The assert here is to ensure that this holds
        // true (or the disposal is handled).
        assert(!data.isInstanceOf[BlockManagerManagedBuffer])
        return Some(data)
      }
      logDebug(s"The value of block $blockId is null")
    }
    logDebug(s"Block $blockId not found")
    None
  }
/**
* Reads the block from the local directories of another executor which runs on the same host.
*/
private[spark] def readDiskBlockFromSameHostExecutor(
blockId: BlockId,
localDirs: Array[String],
blockSize: Long): Option[ManagedBuffer] = {
val file = ExecutorDiskUtils.getFile(localDirs, subDirsPerLocalDir, blockId.name)
if (file.exists()) {
val managedBuffer = securityManager.getIOEncryptionKey() match {
case Some(key) =>
// Encrypted blocks cannot be memory mapped; return a special object that does decryption
// and provides InputStream / FileRegion implementations for reading the data.
new EncryptedManagedBuffer(
new EncryptedBlockData(file, blockSize, conf, key))
case _ =>
val transportConf = SparkTransportConf.fromSparkConf(conf, "shuffle")
new FileSegmentManagedBuffer(transportConf, file, 0, file.length)
}
Some(managedBuffer)
} else {
None
}
}
/**
* Get block from remote block managers as serialized bytes.
*/
def getRemoteBytes(blockId: BlockId): Option[ChunkedByteBuffer] = {
getRemoteBlock(blockId, (data: ManagedBuffer) => {
// SPARK-24307 undocumented "escape-hatch" in case there are any issues in converting to
// ChunkedByteBuffer, to go back to old code-path. Can be removed post Spark 2.4 if
// new path is stable.
if (remoteReadNioBufferConversion) {
new ChunkedByteBuffer(data.nioByteBuffer())
} else {
ChunkedByteBuffer.fromManagedBuffer(data)
}
})
}
/**
* Get a block from the block manager (either local or remote).
*
* This acquires a read lock on the block if the block was stored locally and does not acquire
* any locks if the block was fetched from a remote block manager. The read lock will
* automatically be freed once the result's `data` iterator is fully consumed.
*/
def get[T: ClassTag](blockId: BlockId): Option[BlockResult] = {
val local = getLocalValues(blockId)
if (local.isDefined) {
logInfo(s"Found block $blockId locally")
return local
}
val remote = getRemoteValues[T](blockId)
if (remote.isDefined) {
logInfo(s"Found block $blockId remotely")
return remote
}
None
}
  /**
   * Downgrades an exclusive write lock to a shared read lock.
   *
   * @param blockId the block whose write lock (held by the current task) should be downgraded
   */
  def downgradeLock(blockId: BlockId): Unit = {
    // Pure delegation; all lock bookkeeping lives in BlockInfoManager.
    blockInfoManager.downgradeLock(blockId)
  }
/**
* Release a lock on the given block with explicit TaskContext.
* The param `taskContext` should be passed in case we can't get the correct TaskContext,
* for example, the input iterator of a cached RDD iterates to the end in a child
* thread.
*/
def releaseLock(blockId: BlockId, taskContext: Option[TaskContext] = None): Unit = {
val taskAttemptId = taskContext.map(_.taskAttemptId())
// SPARK-27666. When a task completes, Spark automatically releases all the blocks locked
// by this task. We should not release any locks for a task that is already completed.
if (taskContext.isDefined && taskContext.get.isCompleted) {
logWarning(s"Task ${taskAttemptId.get} already completed, not releasing lock for $blockId")
} else {
blockInfoManager.unlock(blockId, taskAttemptId)
}
}
  /**
   * Registers a task with the BlockManager in order to initialize per-task bookkeeping structures.
   *
   * @param taskAttemptId the attempt id of the task to register
   */
  def registerTask(taskAttemptId: Long): Unit = {
    // Delegates to BlockInfoManager, which owns the per-task lock bookkeeping.
    blockInfoManager.registerTask(taskAttemptId)
  }
  /**
   * Release all locks for the given task.
   *
   * @param taskAttemptId the attempt id of the task whose locks should be released
   * @return the blocks whose locks were released.
   */
  def releaseAllLocksForTask(taskAttemptId: Long): Seq[BlockId] = {
    blockInfoManager.releaseAllLocksForTask(taskAttemptId)
  }
  /**
   * Retrieve the given block if it exists, otherwise call the provided `makeIterator` method
   * to compute the block, persist it, and return its values.
   *
   * @param blockId the block to look up or compute
   * @param level storage level to persist a newly computed block at
   * @param classTag class tag for the block's element type
   * @param makeIterator computes the block's values if it is not already present
   * @return either a BlockResult if the block was successfully cached, or an iterator if the block
   *         could not be cached.
   */
  def getOrElseUpdate[T](
      blockId: BlockId,
      level: StorageLevel,
      classTag: ClassTag[T],
      makeIterator: () => Iterator[T]): Either[BlockResult, Iterator[T]] = {
    // Attempt to read the block from local or remote storage. If it's present, then we don't need
    // to go through the local-get-or-put path.
    get[T](blockId)(classTag) match {
      case Some(block) =>
        return Left(block)
      case _ =>
        // Need to compute the block.
    }
    // Initially we hold no locks on this block.
    // keepReadLock = true: on success, doPutIterator leaves us holding a read lock.
    doPutIterator(blockId, makeIterator, level, classTag, keepReadLock = true) match {
      case None =>
        // doPut() didn't hand work back to us, so the block already existed or was successfully
        // stored. Therefore, we now hold a read lock on the block.
        val blockResult = getLocalValues(blockId).getOrElse {
          // Since we held a read lock between the doPut() and get() calls, the block should not
          // have been evicted, so get() not returning the block indicates some internal error.
          releaseLock(blockId)
          throw new SparkException(s"get() failed for block $blockId even though we held a lock")
        }
        // We already hold a read lock on the block from the doPut() call and getLocalValues()
        // acquires the lock again, so we need to call releaseLock() here so that the net number
        // of lock acquisitions is 1 (since the caller will only call release() once).
        releaseLock(blockId)
        Left(blockResult)
      case Some(iter) =>
        // The put failed, likely because the data was too large to fit in memory and could not be
        // dropped to disk. Therefore, we need to pass the input iterator back to the caller so
        // that they can decide what to do with the values (e.g. process them without caching).
        Right(iter)
    }
  }
/**
* @return true if the block was stored or false if an error occurred.
*/
def putIterator[T: ClassTag](
blockId: BlockId,
values: Iterator[T],
level: StorageLevel,
tellMaster: Boolean = true): Boolean = {
require(values != null, "Values is null")
doPutIterator(blockId, () => values, level, implicitly[ClassTag[T]], tellMaster) match {
case None =>
true
case Some(iter) =>
// Caller doesn't care about the iterator values, so we can close the iterator here
// to free resources earlier
iter.close()
false
}
}
/**
* A short circuited method to get a block writer that can write data directly to disk.
* The Block will be appended to the File specified by filename. Callers should handle error
* cases.
*/
def getDiskWriter(
blockId: BlockId,
file: File,
serializerInstance: SerializerInstance,
bufferSize: Int,
writeMetrics: ShuffleWriteMetricsReporter): DiskBlockObjectWriter = {
val syncWrites = conf.get(config.SHUFFLE_SYNC)
new DiskBlockObjectWriter(file, serializerManager, serializerInstance, bufferSize,
syncWrites, writeMetrics, blockId)
}
/**
* Put a new block of serialized bytes to the block manager.
*
* '''Important!''' Callers must not mutate or release the data buffer underlying `bytes`. Doing
* so may corrupt or change the data stored by the `BlockManager`.
*
* @return true if the block was stored or false if an error occurred.
*/
def putBytes[T: ClassTag](
blockId: BlockId,
bytes: ChunkedByteBuffer,
level: StorageLevel,
tellMaster: Boolean = true): Boolean = {
require(bytes != null, "Bytes is null")
val blockStoreUpdater =
ByteBufferBlockStoreUpdater(blockId, level, implicitly[ClassTag[T]], bytes, tellMaster)
blockStoreUpdater.save()
}
  /**
   * Helper method used to abstract common code from [[BlockStoreUpdater.save()]]
   * and [[doPutIterator()]].
   *
   * Acquires a write lock on a fresh BlockInfo, runs `putBody` while holding it, and then
   * either downgrades/releases the lock (success) or cleans the block up (failure/exception).
   *
   * @param blockId id to store the block under
   * @param level storage level for the block
   * @param classTag class tag for the block's element type
   * @param tellMaster whether to report the block's status to the master
   * @param keepReadLock if true, hold a read lock on the block when this method returns
   * @param putBody a function which attempts the actual put() and returns None on success
   *                or Some on failure.
   * @return `putBody`'s result, or None if the block already existed
   */
  private def doPut[T](
      blockId: BlockId,
      level: StorageLevel,
      classTag: ClassTag[_],
      tellMaster: Boolean,
      keepReadLock: Boolean)(putBody: BlockInfo => Option[T]): Option[T] = {
    require(blockId != null, "BlockId is null")
    require(level != null && level.isValid, "StorageLevel is null or invalid")
    checkShouldStore(blockId)
    val putBlockInfo = {
      val newInfo = new BlockInfo(level, classTag, tellMaster)
      if (blockInfoManager.lockNewBlockForWriting(blockId, newInfo)) {
        newInfo
      } else {
        logWarning(s"Block $blockId already exists on this machine; not re-adding it")
        if (!keepReadLock) {
          // lockNewBlockForWriting returned a read lock on the existing block, so we must free it:
          releaseLock(blockId)
        }
        return None
      }
    }
    val startTimeNs = System.nanoTime()
    // Tracks whether `putBody` completed normally; drives the exception cleanup in `finally`.
    var exceptionWasThrown: Boolean = true
    val result: Option[T] = try {
      val res = putBody(putBlockInfo)
      exceptionWasThrown = false
      if (res.isEmpty) {
        // the block was successfully stored
        if (keepReadLock) {
          blockInfoManager.downgradeLock(blockId)
        } else {
          blockInfoManager.unlock(blockId)
        }
      } else {
        // Non-empty result means the put failed; remove whatever partial state exists.
        removeBlockInternal(blockId, tellMaster = false)
        logWarning(s"Putting block $blockId failed")
      }
      res
    } catch {
      // Since removeBlockInternal may throw exception,
      // we should print exception first to show root cause.
      case NonFatal(e) =>
        logWarning(s"Putting block $blockId failed due to exception $e.")
        throw e
    } finally {
      // This cleanup is performed in a finally block rather than a `catch` to avoid having to
      // catch and properly re-throw InterruptedException.
      if (exceptionWasThrown) {
        // If an exception was thrown then it's possible that the code in `putBody` has already
        // notified the master about the availability of this block, so we need to send an update
        // to remove this block location.
        removeBlockInternal(blockId, tellMaster = tellMaster)
        // The `putBody` code may have also added a new block status to TaskMetrics, so we need
        // to cancel that out by overwriting it with an empty block status. We only do this if
        // the finally block was entered via an exception because doing this unconditionally would
        // cause us to send empty block statuses for every block that failed to be cached due to
        // a memory shortage (which is an expected failure, unlike an uncaught exception).
        addUpdatedBlockStatusToTaskMetrics(blockId, BlockStatus.empty)
      }
    }
    val usedTimeMs = Utils.getUsedTimeNs(startTimeNs)
    if (level.replication > 1) {
      logDebug(s"Putting block ${blockId} with replication took $usedTimeMs")
    } else {
      logDebug(s"Putting block ${blockId} without replication took ${usedTimeMs}")
    }
    result
  }
  /**
   * Put the given block according to the given level in one of the block stores, replicating
   * the values if necessary.
   *
   * If the block already exists, this method will not overwrite it.
   *
   * Memory is tried first when the level allows it; on memory-store failure the block is
   * spilled to disk if `level.useDisk`, otherwise the partially-unrolled iterator is handed
   * back to the caller.
   *
   * @param blockId id to store the block under
   * @param iterator produces the block's values (invoked at most once)
   * @param level storage level for the block
   * @param classTag class tag for the block's element type
   * @param tellMaster whether to report the block's status to the master
   * @param keepReadLock if true, this method will hold the read lock when it returns (even if the
   *                     block already exists). If false, this method will hold no locks when it
   *                     returns.
   * @return None if the block was already present or if the put succeeded, or Some(iterator)
   *         if the put failed.
   */
  private def doPutIterator[T](
      blockId: BlockId,
      iterator: () => Iterator[T],
      level: StorageLevel,
      classTag: ClassTag[T],
      tellMaster: Boolean = true,
      keepReadLock: Boolean = false): Option[PartiallyUnrolledIterator[T]] = {
    doPut(blockId, level, classTag, tellMaster = tellMaster, keepReadLock = keepReadLock) { info =>
      val startTimeNs = System.nanoTime()
      var iteratorFromFailedMemoryStorePut: Option[PartiallyUnrolledIterator[T]] = None
      // Size of the block in bytes
      var size = 0L
      if (level.useMemory) {
        // Put it in memory first, even if it also has useDisk set to true;
        // We will drop it to disk later if the memory store can't hold it.
        if (level.deserialized) {
          memoryStore.putIteratorAsValues(blockId, iterator(), level.memoryMode, classTag) match {
            case Right(s) =>
              size = s
            case Left(iter) =>
              // Not enough space to unroll this block; drop to disk if applicable
              if (level.useDisk) {
                logWarning(s"Persisting block $blockId to disk instead.")
                diskStore.put(blockId) { channel =>
                  val out = Channels.newOutputStream(channel)
                  serializerManager.dataSerializeStream(blockId, out, iter)(classTag)
                }
                size = diskStore.getSize(blockId)
              } else {
                iteratorFromFailedMemoryStorePut = Some(iter)
              }
          }
        } else { // !level.deserialized
          memoryStore.putIteratorAsBytes(blockId, iterator(), classTag, level.memoryMode) match {
            case Right(s) =>
              size = s
            case Left(partiallySerializedValues) =>
              // Not enough space to unroll this block; drop to disk if applicable
              if (level.useDisk) {
                logWarning(s"Persisting block $blockId to disk instead.")
                diskStore.put(blockId) { channel =>
                  val out = Channels.newOutputStream(channel)
                  partiallySerializedValues.finishWritingToStream(out)
                }
                size = diskStore.getSize(blockId)
              } else {
                iteratorFromFailedMemoryStorePut = Some(partiallySerializedValues.valuesIterator)
              }
          }
        }
      } else if (level.useDisk) {
        // Disk-only level: serialize straight to the disk store.
        diskStore.put(blockId) { channel =>
          val out = Channels.newOutputStream(channel)
          serializerManager.dataSerializeStream(blockId, out, iterator())(classTag)
        }
        size = diskStore.getSize(blockId)
      }
      val putBlockStatus = getCurrentBlockStatus(blockId, info)
      val blockWasSuccessfullyStored = putBlockStatus.storageLevel.isValid
      if (blockWasSuccessfullyStored) {
        // Now that the block is in either the memory or disk store, tell the master about it.
        info.size = size
        if (tellMaster && info.tellMaster) {
          reportBlockStatus(blockId, putBlockStatus)
        }
        addUpdatedBlockStatusToTaskMetrics(blockId, putBlockStatus)
        logDebug(s"Put block $blockId locally took ${Utils.getUsedTimeNs(startTimeNs)}")
        if (level.replication > 1) {
          val remoteStartTimeNs = System.nanoTime()
          val bytesToReplicate = doGetLocalBytes(blockId, info)
          // [SPARK-16550] Erase the typed classTag when using default serialization, since
          // NettyBlockRpcServer crashes when deserializing repl-defined classes.
          // TODO(ekl) remove this once the classloader issue on the remote end is fixed.
          val remoteClassTag = if (!serializerManager.canUseKryo(classTag)) {
            scala.reflect.classTag[Any]
          } else {
            classTag
          }
          try {
            replicate(blockId, bytesToReplicate, level, remoteClassTag)
          } finally {
            // Always dispose the replication bytes, even if replication threw.
            bytesToReplicate.dispose()
          }
          logDebug(s"Put block $blockId remotely took ${Utils.getUsedTimeNs(remoteStartTimeNs)}")
        }
      }
      // Invariant: we either stored the block or we are handing the iterator back, never both.
      assert(blockWasSuccessfullyStored == iteratorFromFailedMemoryStorePut.isEmpty)
      iteratorFromFailedMemoryStorePut
    }
  }
  /**
   * Attempts to cache spilled bytes read from disk into the MemoryStore in order to speed up
   * subsequent reads. This method requires the caller to hold a read lock on the block.
   *
   * @param blockInfo info for the block; used as the monitor for the anti-race synchronization
   * @param blockId the block being read
   * @param level the block's storage level
   * @param diskData the bytes read from the disk store
   * @return a copy of the bytes from the memory store if the put succeeded, otherwise None.
   *         If this returns bytes from the memory store then the original disk store bytes will
   *         automatically be disposed and the caller should not continue to use them. Otherwise,
   *         if this returns None then the original disk store bytes will be unaffected.
   */
  private def maybeCacheDiskBytesInMemory(
      blockInfo: BlockInfo,
      blockId: BlockId,
      level: StorageLevel,
      diskData: BlockData): Option[ChunkedByteBuffer] = {
    // This path only applies to serialized levels; deserialized caching is handled elsewhere.
    require(!level.deserialized)
    if (level.useMemory) {
      // Synchronize on blockInfo to guard against a race condition where two readers both try to
      // put values read from disk into the MemoryStore.
      blockInfo.synchronized {
        if (memoryStore.contains(blockId)) {
          // Another reader won the race; use the in-memory copy and drop the disk bytes.
          diskData.dispose()
          Some(memoryStore.getBytes(blockId).get)
        } else {
          val allocator = level.memoryMode match {
            case MemoryMode.ON_HEAP => ByteBuffer.allocate _
            case MemoryMode.OFF_HEAP => Platform.allocateDirectBuffer _
          }
          val putSucceeded = memoryStore.putBytes(blockId, diskData.size, level.memoryMode, () => {
            // https://issues.apache.org/jira/browse/SPARK-6076
            // If the file size is bigger than the free memory, OOM will happen. So if we
            // cannot put it into MemoryStore, copyForMemory should not be created. That's why
            // this action is put into a `() => ChunkedByteBuffer` and created lazily.
            diskData.toChunkedByteBuffer(allocator)
          })
          if (putSucceeded) {
            diskData.dispose()
            Some(memoryStore.getBytes(blockId).get)
          } else {
            None
          }
        }
      }
    } else {
      None
    }
  }
  /**
   * Attempts to cache spilled values read from disk into the MemoryStore in order to speed up
   * subsequent reads. This method requires the caller to hold a read lock on the block.
   *
   * @param blockInfo info for the block; used as the monitor for the anti-race synchronization
   * @param blockId the block being read
   * @param level the block's storage level
   * @param diskIterator values deserialized from the disk store
   * @return a copy of the iterator. The original iterator passed this method should no longer
   *         be used after this method returns.
   */
  private def maybeCacheDiskValuesInMemory[T](
      blockInfo: BlockInfo,
      blockId: BlockId,
      level: StorageLevel,
      diskIterator: Iterator[T]): Iterator[T] = {
    // This path only applies to deserialized levels; serialized caching is handled elsewhere.
    require(level.deserialized)
    val classTag = blockInfo.classTag.asInstanceOf[ClassTag[T]]
    if (level.useMemory) {
      // Synchronize on blockInfo to guard against a race condition where two readers both try to
      // put values read from disk into the MemoryStore.
      blockInfo.synchronized {
        if (memoryStore.contains(blockId)) {
          // Note: if we had a means to discard the disk iterator, we would do that here.
          memoryStore.getValues(blockId).get
        } else {
          memoryStore.putIteratorAsValues(blockId, diskIterator, level.memoryMode, classTag) match {
            case Left(iter) =>
              // The memory store put() failed, so it returned the iterator back to us:
              iter
            case Right(_) =>
              // The put() succeeded, so we can read the values back:
              memoryStore.getValues(blockId).get
          }
        }
      }.asInstanceOf[Iterator[T]]
    } else {
      diskIterator
    }
  }
  /**
   * Get peer block managers in the system.
   *
   * Results are cached and refreshed when the TTL expires, when `forceFetch` is set, or on the
   * first call. If there are no peers and a decommission fallback storage path is configured,
   * the fallback storage's block manager id is returned instead.
   *
   * @param forceFetch if true, bypass the cache and re-fetch the peer list from the master
   */
  private[storage] def getPeers(forceFetch: Boolean): Seq[BlockManagerId] = {
    // peerFetchLock guards the cachedPeers / lastPeerFetchTimeNs pair.
    peerFetchLock.synchronized {
      val cachedPeersTtl = conf.get(config.STORAGE_CACHED_PEERS_TTL) // milliseconds
      val diff = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lastPeerFetchTimeNs)
      val timeout = diff > cachedPeersTtl
      if (cachedPeers == null || forceFetch || timeout) {
        cachedPeers = master.getPeers(blockManagerId).sortBy(_.hashCode)
        lastPeerFetchTimeNs = System.nanoTime()
        logDebug("Fetched peers from master: " + cachedPeers.mkString("[", ",", "]"))
      }
      if (cachedPeers.isEmpty &&
          conf.get(config.STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH).isDefined) {
        Seq(FallbackStorage.FALLBACK_BLOCK_MANAGER_ID)
      } else {
        cachedPeers
      }
    }
  }
  /**
   * Replicates a block to peer block managers based on existingReplicas and maxReplicas
   *
   * @param blockId blockId being replicate
   * @param existingReplicas existing block managers that have a replica
   * @param maxReplicas maximum replicas needed
   * @param maxReplicationFailures number of replication failures to tolerate before
   *                               giving up.
   * @return whether block was successfully replicated or not
   */
  def replicateBlock(
      blockId: BlockId,
      existingReplicas: Set[BlockManagerId],
      maxReplicas: Int,
      maxReplicationFailures: Option[Int] = None): Boolean = {
    logInfo(s"Using $blockManagerId to pro-actively replicate $blockId")
    // forall: if the block is not locally present (no read lock obtainable) this returns true.
    blockInfoManager.lockForReading(blockId).forall { info =>
      val data = doGetLocalBytes(blockId, info)
      // Rebuild the storage level with the requested replication factor.
      val storageLevel = StorageLevel(
        useDisk = info.level.useDisk,
        useMemory = info.level.useMemory,
        useOffHeap = info.level.useOffHeap,
        deserialized = info.level.deserialized,
        replication = maxReplicas)
      // we know we are called as a result of an executor removal or because the current executor
      // is getting decommissioned. so we refresh peer cache before trying replication, we won't
      // try to replicate to a missing executor/another decommissioning executor
      getPeers(forceFetch = true)
      try {
        replicate(
          blockId, data, storageLevel, info.classTag, existingReplicas, maxReplicationFailures)
      } finally {
        logDebug(s"Releasing lock for $blockId")
        releaseLockAndDispose(blockId, data)
      }
    }
  }
  /**
   * Replicate block to another node. Note that this is a blocking call that returns after
   * the block has been replicated.
   *
   * Peers are chosen by the configured [[blockReplicationPolicy]]. On each upload failure the
   * peer list is re-fetched from the master (excluding peers already replicated to or already
   * failed) and re-prioritized; replication stops after `maxReplicationFailures` failures.
   *
   * @param blockId block being replicated
   * @param data the block's bytes
   * @param level desired storage level; `level.replication - 1` peers are targeted
   * @param classTag class tag for the block's element type
   * @param existingReplicas peers that already hold a replica (counted, not re-targeted)
   * @param maxReplicationFailures failures to tolerate before giving up (config default if None)
   * @return true if the desired number of replicas was reached, false otherwise
   */
  private def replicate(
      blockId: BlockId,
      data: BlockData,
      level: StorageLevel,
      classTag: ClassTag[_],
      existingReplicas: Set[BlockManagerId] = Set.empty,
      maxReplicationFailures: Option[Int] = None): Boolean = {
    val maxReplicationFailureCount = maxReplicationFailures.getOrElse(
      conf.get(config.STORAGE_MAX_REPLICATION_FAILURE))
    // Remote copies are stored with replication = 1 so peers do not re-replicate.
    val tLevel = StorageLevel(
      useDisk = level.useDisk,
      useMemory = level.useMemory,
      useOffHeap = level.useOffHeap,
      deserialized = level.deserialized,
      replication = 1)
    val numPeersToReplicateTo = level.replication - 1
    val startTime = System.nanoTime
    val peersReplicatedTo = mutable.HashSet.empty ++ existingReplicas
    val peersFailedToReplicateTo = mutable.HashSet.empty[BlockManagerId]
    var numFailures = 0
    val initialPeers = getPeers(false).filterNot(existingReplicas.contains)
    var peersForReplication = blockReplicationPolicy.prioritize(
      blockManagerId,
      initialPeers,
      peersReplicatedTo,
      blockId,
      numPeersToReplicateTo)
    while(numFailures <= maxReplicationFailureCount &&
      !peersForReplication.isEmpty &&
      peersReplicatedTo.size < numPeersToReplicateTo) {
      val peer = peersForReplication.head
      try {
        val onePeerStartTime = System.nanoTime
        logTrace(s"Trying to replicate $blockId of ${data.size} bytes to $peer")
        // This thread keeps a lock on the block, so we do not want the netty thread to unlock
        // block when it finishes sending the message.
        val buffer = new BlockManagerManagedBuffer(blockInfoManager, blockId, data, false,
          unlockOnDeallocate = false)
        blockTransferService.uploadBlockSync(
          peer.host,
          peer.port,
          peer.executorId,
          blockId,
          buffer,
          tLevel,
          classTag)
        logTrace(s"Replicated $blockId of ${data.size} bytes to $peer" +
          s" in ${(System.nanoTime - onePeerStartTime).toDouble / 1e6} ms")
        peersForReplication = peersForReplication.tail
        peersReplicatedTo += peer
      } catch {
        // Rethrow interrupt exception
        case e: InterruptedException =>
          throw e
        // Everything else we may retry
        case NonFatal(e) =>
          logWarning(s"Failed to replicate $blockId to $peer, failure #$numFailures", e)
          peersFailedToReplicateTo += peer
          // we have a failed replication, so we get the list of peers again
          // we don't want peers we have already replicated to and the ones that
          // have failed previously
          val filteredPeers = getPeers(true).filter { p =>
            !peersFailedToReplicateTo.contains(p) && !peersReplicatedTo.contains(p)
          }
          numFailures += 1
          peersForReplication = blockReplicationPolicy.prioritize(
            blockManagerId,
            filteredPeers,
            peersReplicatedTo,
            blockId,
            numPeersToReplicateTo - peersReplicatedTo.size)
      }
    }
    logDebug(s"Replicating $blockId of ${data.size} bytes to " +
      s"${peersReplicatedTo.size} peer(s) took ${(System.nanoTime - startTime) / 1e6} ms")
    if (peersReplicatedTo.size < numPeersToReplicateTo) {
      logWarning(s"Block $blockId replicated to only " +
        s"${peersReplicatedTo.size} peer(s) instead of $numPeersToReplicateTo peers")
      return false
    }
    logDebug(s"block $blockId replicated to ${peersReplicatedTo.mkString(", ")}")
    return true
  }
/**
* Read a block consisting of a single object.
*/
def getSingle[T: ClassTag](blockId: BlockId): Option[T] = {
get[T](blockId).map(_.data.next().asInstanceOf[T])
}
/**
* Write a block consisting of a single object.
*
* @return true if the block was stored or false if the block was already stored or an
* error occurred.
*/
def putSingle[T: ClassTag](
blockId: BlockId,
value: T,
level: StorageLevel,
tellMaster: Boolean = true): Boolean = {
putIterator(blockId, Iterator(value), level, tellMaster)
}
  /**
   * Drop a block from memory, possibly putting it on disk if applicable. Called when the memory
   * store reaches its limit and needs to free up space.
   *
   * If `data` is not put on disk, it won't be created.
   *
   * The caller of this method must hold a write lock on the block before calling this method.
   * This method does not release the write lock.
   *
   * @param blockId block being dropped
   * @param data supplier of the block's contents; only invoked when writing to disk is needed
   * @return the block's new effective StorageLevel.
   */
  private[storage] override def dropFromMemory[T: ClassTag](
      blockId: BlockId,
      data: () => Either[Array[T], ChunkedByteBuffer]): StorageLevel = {
    logInfo(s"Dropping block $blockId from memory")
    // Fails fast if the caller does not actually hold the write lock.
    val info = blockInfoManager.assertBlockIsLockedForWriting(blockId)
    var blockIsUpdated = false
    val level = info.level
    // Drop to disk, if storage level requires
    if (level.useDisk && !diskStore.contains(blockId)) {
      logInfo(s"Writing block $blockId to disk")
      data() match {
        case Left(elements) =>
          diskStore.put(blockId) { channel =>
            val out = Channels.newOutputStream(channel)
            serializerManager.dataSerializeStream(
              blockId,
              out,
              elements.toIterator)(info.classTag.asInstanceOf[ClassTag[T]])
          }
        case Right(bytes) =>
          diskStore.putBytes(blockId, bytes)
      }
      blockIsUpdated = true
    }
    // Actually drop from memory store
    val droppedMemorySize =
      if (memoryStore.contains(blockId)) memoryStore.getSize(blockId) else 0L
    val blockIsRemoved = memoryStore.remove(blockId)
    if (blockIsRemoved) {
      blockIsUpdated = true
    } else {
      logWarning(s"Block $blockId could not be dropped from memory as it does not exist")
    }
    val status = getCurrentBlockStatus(blockId, info)
    if (info.tellMaster) {
      reportBlockStatus(blockId, status, droppedMemorySize)
    }
    if (blockIsUpdated) {
      addUpdatedBlockStatusToTaskMetrics(blockId, status)
    }
    status.storageLevel
  }
  /**
   * Remove all blocks belonging to the given RDD.
   *
   * @param rddId id of the RDD whose blocks should be removed
   * @return The number of blocks removed.
   */
  def removeRdd(rddId: Int): Int = {
    // TODO: Avoid a linear scan by creating another mapping of RDD.id to blocks.
    logInfo(s"Removing RDD $rddId")
    // NOTE(review): if `blockInfoManager.entries` returns a lazy Iterator, `foreach` would
    // exhaust it and `size` would then be 0 — confirm it returns a materialized collection.
    val blocksToRemove = blockInfoManager.entries.flatMap(_._1.asRDDId).filter(_.rddId == rddId)
    // tellMaster = false: the master initiates RDD removal and already knows about it.
    blocksToRemove.foreach { blockId => removeBlock(blockId, tellMaster = false) }
    blocksToRemove.size
  }
  /** Asks the storage endpoint to start decommissioning this block manager (asynchronous). */
  def decommissionBlockManager(): Unit = storageEndpoint.ask(DecommissionBlockManager)
private[spark] def decommissionSelf(): Unit = synchronized {
decommissioner match {
case None =>
logInfo("Starting block manager decommissioning process...")
decommissioner = Some(new BlockManagerDecommissioner(conf, this))
decommissioner.foreach(_.start())
case Some(_) =>
logDebug("Block manager already in decommissioning state")
}
}
/**
* Returns the last migration time and a boolean denoting if all the blocks have been migrated.
* If there are any tasks running since that time the boolean may be incorrect.
*/
private[spark] def lastMigrationInfo(): (Long, Boolean) = {
decommissioner.map(_.lastMigrationInfo()).getOrElse((0, false))
}
  /** Asks the master for the RDD blocks on this block manager that need to be migrated away. */
  private[storage] def getMigratableRDDBlocks(): Seq[ReplicateBlock] =
    master.getReplicateInfoForRDDBlocks(blockManagerId)
  /**
   * Remove all blocks belonging to the given broadcast.
   *
   * @param broadcastId id of the broadcast variable whose blocks should be removed
   * @param tellMaster whether to report each removal to the master
   * @return the number of blocks removed
   */
  def removeBroadcast(broadcastId: Long, tellMaster: Boolean): Int = {
    logDebug(s"Removing broadcast $broadcastId")
    // NOTE(review): if `blockInfoManager.entries` returns a lazy Iterator, `foreach` would
    // exhaust it and `size` would then be 0 — confirm it returns a materialized collection.
    val blocksToRemove = blockInfoManager.entries.map(_._1).collect {
      case bid @ BroadcastBlockId(`broadcastId`, _) => bid
    }
    blocksToRemove.foreach { blockId => removeBlock(blockId, tellMaster) }
    blocksToRemove.size
  }
/**
* Remove a block from both memory and disk.
*/
def removeBlock(blockId: BlockId, tellMaster: Boolean = true): Unit = {
logDebug(s"Removing block $blockId")
blockInfoManager.lockForWriting(blockId) match {
case None =>
// The block has already been removed; do nothing.
logWarning(s"Asked to remove block $blockId, which does not exist")
case Some(info) =>
removeBlockInternal(blockId, tellMaster = tellMaster && info.tellMaster)
addUpdatedBlockStatusToTaskMetrics(blockId, BlockStatus.empty)
}
}
  /**
   * Internal version of [[removeBlock()]] which assumes that the caller already holds a write
   * lock on the block.
   *
   * @param blockId the block to remove
   * @param tellMaster whether to report the removal to the master
   */
  private def removeBlockInternal(blockId: BlockId, tellMaster: Boolean): Unit = {
    // Capture the status before deletion so the master can compute the size delta.
    val blockStatus = if (tellMaster) {
      val blockInfo = blockInfoManager.assertBlockIsLockedForWriting(blockId)
      Some(getCurrentBlockStatus(blockId, blockInfo))
    } else None
    // Removals are idempotent in disk store and memory store. At worst, we get a warning.
    val removedFromMemory = memoryStore.remove(blockId)
    val removedFromDisk = diskStore.remove(blockId)
    if (!removedFromMemory && !removedFromDisk) {
      logWarning(s"Block $blockId could not be removed as it was not found on disk or in memory")
    }
    blockInfoManager.removeBlock(blockId)
    if (tellMaster) {
      // Only update storage level from the captured block status before deleting, so that
      // memory size and disk size are being kept for calculating delta.
      // blockStatus.get is safe: it is Some whenever tellMaster is true (see above).
      reportBlockStatus(blockId, blockStatus.get.copy(storageLevel = StorageLevel.NONE))
    }
  }
private def addUpdatedBlockStatusToTaskMetrics(blockId: BlockId, status: BlockStatus): Unit = {
if (conf.get(config.TASK_METRICS_TRACK_UPDATED_BLOCK_STATUSES)) {
Option(TaskContext.get()).foreach { c =>
c.taskMetrics().incUpdatedBlockStatuses(blockId -> status)
}
}
}
  /**
   * Releases the current lock on the block and disposes the given block data.
   *
   * @param blockId block whose lock should be released
   * @param data block data to dispose after the lock is released
   * @param taskContext explicit task context to attribute the unlock to (see [[releaseLock]])
   */
  def releaseLockAndDispose(
      blockId: BlockId,
      data: BlockData,
      taskContext: Option[TaskContext] = None): Unit = {
    releaseLock(blockId, taskContext)
    data.dispose()
  }
  /**
   * Shuts down the block manager and all of the components it owns. The shutdown order below
   * is deliberate: transfer services first, then temp-file cleanup, then the RPC endpoint,
   * and finally the in-memory bookkeeping/state.
   */
  def stop(): Unit = {
    decommissioner.foreach(_.stop())
    blockTransferService.close()
    if (blockStoreClient ne blockTransferService) {
      // Closing should be idempotent, but maybe not for the NioBlockTransferService.
      blockStoreClient.close()
    }
    remoteBlockTempFileManager.stop()
    diskBlockManager.stop()
    rpcEnv.stop(storageEndpoint)
    blockInfoManager.clear()
    memoryStore.clear()
    futureExecutionContext.shutdownNow()
    logInfo("BlockManager stopped")
  }
}
private[spark] object BlockManager {
private val ID_GENERATOR = new IdGenerator
def blockIdsToLocations(
blockIds: Array[BlockId],
env: SparkEnv,
blockManagerMaster: BlockManagerMaster = null): Map[BlockId, Seq[String]] = {
// blockManagerMaster != null is used in tests
assert(env != null || blockManagerMaster != null)
val blockLocations: Seq[Seq[BlockManagerId]] = if (blockManagerMaster == null) {
env.blockManager.getLocationBlockIds(blockIds)
} else {
blockManagerMaster.getLocations(blockIds)
}
val blockManagers = new HashMap[BlockId, Seq[String]]
for (i <- 0 until blockIds.length) {
blockManagers(blockIds(i)) = blockLocations(i).map { loc =>
ExecutorCacheTaskLocation(loc.host, loc.executorId).toString
}
}
blockManagers.toMap
}
  /**
   * A metrics [[Source]] that exposes the given set of shuffle metrics under the given
   * source name, registering the whole metric set into a fresh registry at construction.
   */
  private class ShuffleMetricsSource(
      override val sourceName: String,
      metricSet: MetricSet) extends Source {
    override val metricRegistry = new MetricRegistry
    metricRegistry.registerAll(metricSet)
  }
  /**
   * Manages temporary files used to hold remotely fetched blocks that are spilled to disk.
   *
   * Registered files are tracked through weak references; once a DownloadFile becomes
   * unreachable, a daemon cleaning thread deletes the underlying file from disk. Call
   * [[stop]] to terminate the cleaning thread.
   */
  class RemoteBlockDownloadFileManager(blockManager: BlockManager)
      extends DownloadFileManager with Logging {
    // lazy because SparkEnv is set after this
    lazy val encryptionKey = SparkEnv.get.securityManager.getIOEncryptionKey()

    // Weak reference to a DownloadFile that remembers the file's path so the file can
    // still be deleted after the referent has been garbage collected.
    private class ReferenceWithCleanup(
        file: DownloadFile,
        referenceQueue: JReferenceQueue[DownloadFile]
        ) extends WeakReference[DownloadFile](file, referenceQueue) {
      // Captured eagerly: the referent may be gone by the time cleanUp() runs.
      val filePath = file.path()

      def cleanUp(): Unit = {
        logDebug(s"Clean up file $filePath")
        if (!file.delete()) {
          logDebug(s"Fail to delete file $filePath")
        }
      }
    }

    private val referenceQueue = new JReferenceQueue[DownloadFile]
    // Keeps the ReferenceWithCleanup objects themselves strongly reachable until cleaned.
    private val referenceBuffer = Collections.newSetFromMap[ReferenceWithCleanup](
      new ConcurrentHashMap)

    // Milliseconds to block on the reference queue before re-checking the stopped flag.
    private val POLL_TIMEOUT = 1000
    @volatile private var stopped = false

    private val cleaningThread = new Thread() { override def run(): Unit = { keepCleaning() } }
    cleaningThread.setDaemon(true)
    cleaningThread.setName("RemoteBlock-temp-file-clean-thread")
    cleaningThread.start()

    override def createTempFile(transportConf: TransportConf): DownloadFile = {
      val file = blockManager.diskBlockManager.createTempLocalBlock()._2
      encryptionKey match {
        case Some(key) =>
          // encryption is enabled, so when we read the decrypted data off the network, we need to
          // encrypt it when writing to disk. Note that the data may have been encrypted when it
          // was cached on disk on the remote side, but it was already decrypted by now (see
          // EncryptedBlockData).
          new EncryptedDownloadFile(file, key)
        case None =>
          new SimpleDownloadFile(file, transportConf)
      }
    }

    override def registerTempFileToClean(file: DownloadFile): Boolean = {
      referenceBuffer.add(new ReferenceWithCleanup(file, referenceQueue))
    }

    /** Stops the cleaning thread and waits for it to finish. */
    def stop(): Unit = {
      stopped = true
      cleaningThread.interrupt()
      cleaningThread.join()
    }

    // Loop run by the daemon thread: drain GC'd references and delete their files.
    private def keepCleaning(): Unit = {
      while (!stopped) {
        try {
          Option(referenceQueue.remove(POLL_TIMEOUT))
            .map(_.asInstanceOf[ReferenceWithCleanup])
            .foreach { ref =>
              referenceBuffer.remove(ref)
              ref.cleanUp()
            }
        } catch {
          case _: InterruptedException =>
            // no-op
          case NonFatal(e) =>
            logError("Error in cleaning thread", e)
        }
      }
    }
  }
/**
* A DownloadFile that encrypts data when it is written, and decrypts when it's read.
*/
private class EncryptedDownloadFile(
file: File,
key: Array[Byte]) extends DownloadFile {
private val env = SparkEnv.get
override def delete(): Boolean = file.delete()
override def openForWriting(): DownloadFileWritableChannel = {
new EncryptedDownloadWritableChannel()
}
override def path(): String = file.getAbsolutePath
private class EncryptedDownloadWritableChannel extends DownloadFileWritableChannel {
private val countingOutput: CountingWritableChannel = new CountingWritableChannel(
Channels.newChannel(env.serializerManager.wrapForEncryption(new FileOutputStream(file))))
override def closeAndRead(): ManagedBuffer = {
countingOutput.close()
val size = countingOutput.getCount
new EncryptedManagedBuffer(new EncryptedBlockData(file, size, env.conf, key))
}
override def write(src: ByteBuffer): Int = countingOutput.write(src)
override def isOpen: Boolean = countingOutput.isOpen()
override def close(): Unit = countingOutput.close()
}
}
}
| wangmiao1981/spark | core/src/main/scala/org/apache/spark/storage/BlockManager.scala | Scala | apache-2.0 | 83,260 |
package scala.slick.test.ql.ql
import org.junit.Test
import org.junit.Assert._
import scala.slick.ql._
import scala.slick.ql.TypeMapper._
import scala.slick.driver.{ExtendedTable => Table}
import scala.slick.session._
import scala.slick.session.Database.threadLocalSession
import scala.slick.testutil._
import scala.slick.testutil.TestDB._
// One test instance is run against each configured database backend.
object ColumnDefaultTest extends DBTestObject(H2Mem, SQLiteMem, Postgres, MySQL, DerbyMem, HsqldbMem, SQLServer)

/**
 * Verifies that column defaults declared with `O Default ...` are applied when a
 * row is inserted with only the id column populated.
 */
class ColumnDefaultTest(tdb: TestDB) extends DBTest(tdb) {
  import tdb.driver.Implicit._

  // Not referenced by the test below — presumably leftover; confirm before removing.
  case class User(id: Int, first: String, last: String)

  object A extends Table[(Int, String, Option[Boolean])]("a") {
    def id = column[Int]("id")
    def a = column[String]("a", O Default "foo") // non-nullable column with default "foo"
    def b = column[Option[Boolean]]("b", O Default Some(true)) // nullable column defaulting to TRUE
    def * = id ~ a ~ b
  }

  @Test def test() {
    db withSession {
      A.ddl.createStatements foreach println
      A.ddl.create
      // Insert only the id; columns a and b must be filled from their declared defaults.
      A.id insert 42
      assertEquals(List((42, "foo", Some(true))), Query(A).list)
    }
  }
}
| szeiger/scala-query | src/test/scala/scala/slick/test/ql/ColumnDefaultTest.scala | Scala | bsd-2-clause | 1,043 |
package artisanal.pickle.maker
package stores
import tags._
import scala.reflect.internal.pickling._
import java.util.concurrent.ConcurrentHashMap
/**
 * Thread-safe registry of ThisTpe owners, keyed by their `thisTypeName`.
 */
case class ThisTypeStore() {
  // Java ConcurrentHashMap wrapped as a scala.collection.concurrent.Map.
  val owners: scala.collection.concurrent.Map[String, ThisTpe_owner_] = scala.collection.convert.Wrappers.JConcurrentMapWrapper(new ConcurrentHashMap[String, ThisTpe_owner_]())

  /**
   * Registers `owner` unless an owner with the same thisTypeName is already present.
   *
   * BUGFIX: the previous contains-then-add sequence was not atomic, so two threads
   * could both pass the contains() check and race on the insert. putIfAbsent makes
   * the check-and-insert a single atomic operation on the concurrent map, which is
   * the whole point of backing this store with a ConcurrentHashMap.
   */
  def accept(owner: ThisTpe_owner_) {
    owners.putIfAbsent(owner.thisTypeName, owner)
  }
}
| julianpeeters/artisanal-pickle-maker | src/main/scala/stores/ThisTypeStore.scala | Scala | apache-2.0 | 502 |
package org.apache.spark.mllib.treelib.core
import org.apache.spark._
import org.apache.spark.SparkContext._
import org.apache.spark.rdd._
import scala.collection.immutable.HashMap
import java.io._
import java.io.DataOutputStream
import java.io.FileOutputStream
import java.io.DataInputStream
import java.io.FileInputStream
import org.apache.spark.mllib.treelib.cart._
import scala.concurrent._
import scala.collection.immutable.Queue
/**
 * Serializable model of a random forest: a bag of decision trees whose
 * per-record predictions are combined by majority vote.
 */
class RandomForest (var numberOfTrees : Int) extends Serializable {

    /**
     * The directory which contains data of this forest
     * (a unique /tmp path; not read anywhere in this chunk — confirm usage).
     */
    @transient private var forestPlace = "/tmp/" + System.nanoTime()

    /**
     * Forest of trees
     */
    private var trees = List[TreeModel]()//new Array[TreeModel](numberOfTrees)

    /**
     * Set index-th tree
     * NOTE(review): `index` is currently ignored — the model is appended, so trees
     * are stored in arrival order, not at the requested position.
     */
    def setTree(index : Int, treeModel: TreeModel) = {
        //if (index >=0 && index < numberOfTrees)
        //trees.update(index, treeModel)
        trees = trees :+ (treeModel)
    }

    /**
     * Predicts the label of a single delimited record by majority vote.
     *
     * @param input     one record of feature values
     * @param delimeter field delimiter (default ",")
     * @return the prediction receiving the most votes, or null when there are no trees
     */
    def predictOne(input: String, delimeter: String = ",") : Any = {
        var prediction_vote = new HashMap[String, Int]()
        var maxVote = Int.MinValue
        var finalPrediction: Any = null
        var values = input.split(delimeter)
        //for (i <- 0 until numberOfTrees) {
        if (trees == null)
            println("TREE NULL")
        trees.foreach(
            tree => {
                val prediction = tree.predict(values)
                var numVote = prediction_vote.getOrElse(prediction, 0) + 1
                prediction_vote = prediction_vote.updated(prediction, numVote)
                // Track the running winner so no second pass over the votes is needed.
                if (numVote > maxVote) {
                    maxVote = numVote
                    finalPrediction = prediction
                }
            })
        finalPrediction
    }

    /**
     * Predict value of the target feature base on the values of input features
     *
     * NOTE(review): `delimiter` and `ignoreBranchIDs` are currently unused —
     * predictOne is invoked with its default "," delimiter; confirm intent.
     *
     * @param testingData the RDD of testing data
     * @return a RDD contain predicted values
     */
    def predict(testingData: RDD[String],
        delimiter : String = ",",
        ignoreBranchIDs : Set[BigInt] = Set[BigInt]()
        ) : RDD[String] = {
        testingData.map(line => RandomForest.this.predictOne(line).toString)
        //testingData.map(line => this.predict(line.split(delimiter)))
    }

    override def toString : String = {
        "number of trees:%d\\n%s".format(numberOfTrees, trees.mkString("\\n"))
    }

    /***********************************************/
    /*    REGION WRITING AND LOADING MODEL         */
    /***********************************************/

    /**
     * Write the current tree model to file
     * (plain Java serialization of the tree list).
     *
     * @param path where we want to write to
     */
    def writeToFile(path: String) = {
        val ois = new ObjectOutputStream(new FileOutputStream(path))
        ois.writeObject(trees)
        ois.close()
    }

    /**
     * Load tree model from file
     *
     * NOTE(review): this deserializes arbitrary Java-serialized data — only load
     * files from trusted locations (deserialization of untrusted input is unsafe).
     *
     * @param path the location of file which contains tree model
     */
    def loadModelFromFile(path: String) = {
        //val js = new JavaSerializer(null, null)
        // Resolve classes through this class's loader so project classes are found
        // even when the context class loader differs (e.g. inside Spark).
        val ois = new ObjectInputStream(new FileInputStream(path)) {
            override def resolveClass(desc: java.io.ObjectStreamClass): Class[_] = {
                try { Class.forName(desc.getName, false, getClass.getClassLoader) }
                catch { case ex: ClassNotFoundException => super.resolveClass(desc) }
            }
        }
        var rt = ois.readObject().asInstanceOf[List[TreeModel]]
        //treeModel = rt
        //this.featureSet = treeModel.featureSet
        //this.usefulFeatureSet = treeModel.usefulFeatureSet
        trees = rt
        this.numberOfTrees = trees.length
        ois.close()
    }
}
/**
 * This class is used for building forest model.
 * Spawns up to MAXIMUM_PARALLEL_TREES builder threads, each training one tree on
 * a bootstrap sample of the training data, driven by a polling loop in buildForest.
 */
class RandomForestBuilder {
    /**
     * The number of tree
     */
    private var numberOfTrees : Int = 100

    // Upper bound on tree-builder threads allowed to run at the same time.
    var MAXIMUM_PARALLEL_TREES = 2

    def setNumberOfTree(nTrees : Int) = {
        numberOfTrees = nTrees;
        // Replace the forest so it reflects the new tree count.
        forest = new RandomForest(numberOfTrees)
    }

    /**
     * The number of random features, which will be use in each feature split
     */
    var numberOfRandomFeatures : Int = 0

    private var minSplit : Int = 0

    /**
     * Forest of trees
     */
    var forest : RandomForest = new RandomForest(numberOfTrees)

    private var featureNames : Array[String] = null

    private var trainingData : RDD[String] = _

    /**
     * Set training data, which will be used to build the forest
     * @param trainingData the training data (without header)
     */
    def setData(trainingData: RDD[String]) {
        this.trainingData = trainingData
    }

    /**
     * Because we didn't support included header in csv file, we use this function to set the features' name
     * @param fNames the names of features
     */
    def setFeatureName(fNames : Array[String]) = {
        this.featureNames = fNames
    }

    def setMinSplit(m : Int) = {
        this.minSplit = m
    }

    /**
     * Build the forest
     * @param yFeature name of target feature, the feature which we want to predict.
     *  Default value is the name of the last feature
     * @param xFeatures set of names of features which will be used to predict the target feature
     *  Default value is all features names, except target feature
     */
    def buildForest[T <: TreeBuilder : ClassManifest](yFeature: String = "",
        xFeatures: Set[Any] = Set[Any]()) : RandomForest = {
        // Scheduling state shared with the builder threads. The threads mutate
        // finishedTrees under this.synchronized, but the driver loop below reads
        // it unsynchronized — NOTE(review): this relies on timing; confirm safety.
        var waitingTrees = Queue[Int]()
        var finishedTrees = Queue[Int]()
        var numberRunningTrees = 0
        var numberWaitingTrees = numberOfTrees

        // One runnable per tree: trains on a bootstrap sample and registers the model.
        class ThreadTreeBuilder(treeID: Int, caller : RandomForestBuilder) extends Serializable with Runnable {
            @Override
            def run() {
                println("\\n\\n ====== Build tree " + treeID + " ============= \\n\\n")
                try{
                    // Instantiate the concrete TreeBuilder via its no-arg constructor.
                    var tree: T = (implicitly[ClassManifest[T]]).erasure.newInstance.asInstanceOf[T]
                    tree.useCache = true
                    // Bootstrap sample (with replacement) of the full training set.
                    val samplingData = trainingData.sample(true, 1.0, System.nanoTime().toInt)
                    //val obb = trainingData.subtract(samplingData)
                    tree.setDataset(samplingData)
                    tree.useRandomSubsetFeature = true
                    //tree.setMinSplit(this.minSplit)
                    if (caller.featureNames != null)
                        tree.setFeatureNames(caller.featureNames)
                    val model = tree.buildTree(yFeature, xFeatures)
                    //println(model)
                    this.synchronized {
                        forest.setTree(treeID, model)
                        finishedTrees = finishedTrees.enqueue(treeID)
                    }
                    samplingData.unpersist(false)
                }
                catch {
                    // NOTE(review): catching Throwable also swallows fatal JVM errors;
                    // scala.util.control.NonFatal would be safer here.
                    case e : Throwable => {
                        println("Error when building tree " + treeID + ":\\n" + e.getStackTraceString)
                    }
                    // Still mark the tree finished so the driver loop can terminate.
                    this.synchronized {
                        finishedTrees = finishedTrees.enqueue(treeID)
                    }
                }
                System.gc()
                System.runFinalization()
            }
        } // END CLASS ThreadTreeBuilder

        // Seed the waiting queue with every tree id.
        for (i <- 0 until numberOfTrees)
            waitingTrees = waitingTrees.enqueue(i)

        // Polling scheduler: drain finished trees, launch new ones while capacity
        // remains, then sleep one second before the next iteration.
        while (numberRunningTrees != 0 || numberWaitingTrees != 0) {
            System.gc()
            System.runFinalization()
            while (!finishedTrees.isEmpty){
                finishedTrees.dequeue match {
                    case (treeID, xs) => {
                        println("Finish tree" + treeID)
                        numberRunningTrees = numberRunningTrees - 1
                        finishedTrees = xs
                    }
                }
            }
            while (!waitingTrees.isEmpty && numberRunningTrees < MAXIMUM_PARALLEL_TREES){
                println("numberRunningTrees:" + numberRunningTrees)
                waitingTrees.dequeue match {
                    case (currentTreeID, xs) => {
                        numberRunningTrees = numberRunningTrees + 1
                        numberWaitingTrees = numberWaitingTrees - 1
                        waitingTrees = xs
                        println("launch tree " + currentTreeID)
                        launchTreeBuilder(currentTreeID, trainingData)
                    }
                }
            }
            Thread sleep 1000
        }

        // Fire-and-forget launch of one builder thread. Forward-referenced from the
        // loop above, which is legal for local defs in Scala.
        def launchTreeBuilder(treeID: Int, trainingData : RDD[String]) = {
            var thread = new Thread(new ThreadTreeBuilder(treeID, this))
            thread.start()
            //thread.join()
        }

        /*
        for (i <- 0 until numberOfTrees) {
            System.gc()
            System.runFinalization()
            println("\\n\\n ====== Build tree " + i + " ============= \\n\\n")
            var tree : T = (implicitly[ClassManifest[T]]).erasure.newInstance.asInstanceOf[T]
            tree.useCache = true
            val samplingData = trainingData.sample(true, 1.0, System.nanoTime().toInt)
            val obb = trainingData.subtract(samplingData)
            tree.setDataset(samplingData)
            tree.useRandomSubsetFeature = true
            //tree.setMinSplit(this.minSplit)
            if (this.featureNames != null)
                tree.setFeatureNames(this.featureNames)
            forest.setTree(i, tree.buildTree(yFeature, xFeatures))
            samplingData.unpersist(true)
        }
        *
        *
        */
        forest
    }
}
| bigfootproject/spark-dectree | spark/mllib/src/main/scala/org/apache/spark/mllib/treelib/core/RandomForestBuilder.scala | Scala | apache-2.0 | 9,414 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools
import org.scalatest._
import javax.swing._
import java.awt.Color
import java.awt.Font
import java.awt.Component
import java.awt.BorderLayout
import java.net.URL
import javax.swing.border.EmptyBorder
import org.scalatest.events._
/**
* A ListCellRenderer for the event List in the GUI.
*
* @author Bill Venners
*/
private[tools] class IconEmbellishedListCellRenderer extends EventHolderListCellRenderer {

  private val DEEP_RED: Color = new Color(0xEE, 0x55, 0x66)
  private val UNCOMFORTABLE_GRAY: Color = new Color(0xaf, 0xaf, 0x9f)
  private val BACKGROUND_BLUE: Color = new Color(0x45, 0x76, 0xd4)

  private val myClassLoader: ClassLoader = classOf[IconEmbellishedListCellRenderer].getClassLoader

  /** Pre-loaded selected/unselected icon pairs for every event type. */
  private object Icons {

    // Unselected icon URLs
    private val purpleURL: URL = myClassLoader.getResource("images/purpledot.gif")
    private val greenURL: URL = myClassLoader.getResource("images/greendot.gif")
    private val redURL: URL = myClassLoader.getResource("images/reddot.gif")
    private val blueURL: URL = myClassLoader.getResource("images/bluedot.gif")
    private val grayURL: URL = myClassLoader.getResource("images/graydot.gif")
    private val cyanURL: URL = myClassLoader.getResource("images/cyandot.gif")
    private val yellowURL: URL = myClassLoader.getResource("images/yellowdot.gif")

    // Selected icon URLs
    private val purpleSelURL: URL = myClassLoader.getResource("images/purpledotsel.gif")
    private val greenSelURL: URL = myClassLoader.getResource("images/greendotsel.gif")
    private val redSelURL: URL = myClassLoader.getResource("images/reddotsel.gif")
    private val blueSelURL: URL = myClassLoader.getResource("images/bluedotsel.gif")
    private val graySelURL: URL = myClassLoader.getResource("images/graydotsel.gif")
    private val cyanSelURL: URL = myClassLoader.getResource("images/cyandotsel.gif")
    private val yellowSelURL: URL = myClassLoader.getResource("images/yellowdotsel.gif")

    // Unselected icon images
    private val purpleImageIcon: ImageIcon = new ImageIcon(purpleURL)
    private val greenImageIcon: ImageIcon = new ImageIcon(greenURL)
    private val redImageIcon: ImageIcon = new ImageIcon(redURL)
    private val blueImageIcon: ImageIcon = new ImageIcon(blueURL)
    private val grayImageIcon: ImageIcon = new ImageIcon(grayURL)
    private val cyanImageIcon: ImageIcon = new ImageIcon(cyanURL)
    private val yellowImageIcon: ImageIcon = new ImageIcon(yellowURL)

    // Selected icon images
    private val purpleSelImageIcon: ImageIcon = new ImageIcon(purpleSelURL)
    private val greenSelImageIcon: ImageIcon = new ImageIcon(greenSelURL)
    private val redSelImageIcon: ImageIcon = new ImageIcon(redSelURL)
    private val blueSelImageIcon: ImageIcon = new ImageIcon(blueSelURL)
    private val graySelImageIcon: ImageIcon = new ImageIcon(graySelURL)
    private val cyanSelImageIcon: ImageIcon = new ImageIcon(cyanSelURL)
    private val yellowSelImageIcon: ImageIcon = new ImageIcon(yellowSelURL)

    val runStartingIcon = grayImageIcon
    val testStartingIcon = purpleImageIcon
    val testSucceededIcon = greenImageIcon
    val testIgnoredIcon = yellowImageIcon
    val testPendingIcon = yellowImageIcon
    val testCanceledIcon = yellowImageIcon
    val testFailedIcon = redImageIcon
    val suiteStartingIcon = cyanImageIcon
    val suiteCompletedIcon = cyanImageIcon
    val suiteAbortedIcon = redImageIcon
    val infoProvidedIcon = blueImageIcon
    val scopeOpenedIcon = blueImageIcon
    val scopeClosedIcon = blueImageIcon
    val scopePendingIcon = yellowImageIcon
    val runStoppedIcon = grayImageIcon
    val runAbortedIcon = redImageIcon
    val runCompletedIcon = grayImageIcon
    val alertProvidedIcon = yellowImageIcon
    val noteProvidedIcon = greenImageIcon

    val runStartingSelIcon = graySelImageIcon
    val testStartingSelIcon = purpleSelImageIcon
    val testSucceededSelIcon = greenSelImageIcon
    val testIgnoredSelIcon = yellowSelImageIcon
    val testPendingSelIcon = yellowSelImageIcon
    val testCanceledSelIcon = yellowSelImageIcon
    val testFailedSelIcon = redSelImageIcon
    val suiteStartingSelIcon = cyanSelImageIcon
    val suiteCompletedSelIcon = cyanSelImageIcon
    val suiteAbortedSelIcon = redSelImageIcon
    val infoProvidedSelIcon = blueSelImageIcon
    val scopeOpenedSelIcon = blueSelImageIcon
    val scopeClosedSelIcon = blueSelImageIcon
    // NOTE(review): unselected scopePending is yellow but selected is blue;
    // inconsistent with the other pairs. Preserved as-is — confirm intent.
    val scopePendingSelIcon = blueSelImageIcon
    val runStoppedSelIcon = graySelImageIcon
    val runAbortedSelIcon = redSelImageIcon
    val runCompletedSelIcon = graySelImageIcon
    val alertProvidedSelIcon = yellowSelImageIcon
    val noteProvidedSelIcon = greenSelImageIcon
  }

  /** Bolds the renderer's font and applies the given foreground color. */
  private def setRendererFont(renderer: JLabel, color: Color) {
    val font: Font = renderer.getFont()
    renderer.setFont(new Font(font.getFontName(), Font.BOLD, font.getSize()))
    renderer.setForeground(color)
  }

  /**
   * Decorates the cell renderer for one event: picks the icon matching the event
   * type (selected or unselected variant), bolds/recolors failures and ignores,
   * and wraps the renderer in an indenting panel for IndentedText formatters.
   */
  protected def decorate(renderer: JLabel, value: Object, isSelected: Boolean): Component = {

    // Setting to a specific background color because that color was used to make icons that
    // look nice when the row is selected.
    if (isSelected)
      renderer.setBackground(BACKGROUND_BLUE)

    // Applies the selected or unselected variant of an event's icon, so each
    // case below does not have to repeat the isSelected branch.
    def setStateIcon(selIcon: ImageIcon, unselIcon: ImageIcon): Unit =
      renderer.setIcon(if (isSelected) selIcon else unselIcon)

    val event: Event = value.asInstanceOf[EventHolder].event

    event match {
      case _: DiscoveryStarting =>  // discovery events carry no icon
      case _: DiscoveryCompleted =>
      case _: RunStarting =>
        setStateIcon(Icons.runStartingSelIcon, Icons.runStartingIcon)
      case _: TestStarting =>
        setStateIcon(Icons.testStartingSelIcon, Icons.testStartingIcon)
      case _: TestSucceeded =>
        setStateIcon(Icons.testSucceededSelIcon, Icons.testSucceededIcon)
      case _: TestIgnored =>
        setStateIcon(Icons.testIgnoredSelIcon, Icons.testIgnoredIcon)
        setRendererFont(renderer, UNCOMFORTABLE_GRAY)
      case _: TestPending =>
        setStateIcon(Icons.testPendingSelIcon, Icons.testPendingIcon)
      case _: TestCanceled =>
        setStateIcon(Icons.testCanceledSelIcon, Icons.testCanceledIcon)
      case _: TestFailed =>
        setStateIcon(Icons.testFailedSelIcon, Icons.testFailedIcon)
        setRendererFont(renderer, DEEP_RED)
      case _: RunAborted =>
        setStateIcon(Icons.runAbortedSelIcon, Icons.runAbortedIcon)
        setRendererFont(renderer, DEEP_RED)
      case _: SuiteAborted =>
        setStateIcon(Icons.suiteAbortedSelIcon, Icons.suiteAbortedIcon)
        setRendererFont(renderer, DEEP_RED)
      case _: SuiteStarting =>
        setStateIcon(Icons.suiteStartingSelIcon, Icons.suiteStartingIcon)
      case _: SuiteCompleted =>
        setStateIcon(Icons.suiteCompletedSelIcon, Icons.suiteCompletedIcon)
      case _: InfoProvided =>
        setStateIcon(Icons.infoProvidedSelIcon, Icons.infoProvidedIcon)
      case _: MarkupProvided => // Shouldn't get here because not registering markup events
        setStateIcon(Icons.infoProvidedSelIcon, Icons.infoProvidedIcon)
      case _: ScopeOpened =>
        setStateIcon(Icons.scopeOpenedSelIcon, Icons.scopeOpenedIcon)
      case _: ScopeClosed =>
        setStateIcon(Icons.scopeClosedSelIcon, Icons.scopeClosedIcon)
      case _: ScopePending =>
        setStateIcon(Icons.scopePendingSelIcon, Icons.scopePendingIcon)
      case _: RunCompleted =>
        setStateIcon(Icons.runCompletedSelIcon, Icons.runCompletedIcon)
      case _: RunStopped =>
        setStateIcon(Icons.runStoppedSelIcon, Icons.runStoppedIcon)
      case _: AlertProvided =>
        setStateIcon(Icons.alertProvidedSelIcon, Icons.alertProvidedIcon)
      case _: NoteProvided => // Shouldn't get here because not registering note events
        setStateIcon(Icons.noteProvidedSelIcon, Icons.noteProvidedIcon)
    }

    event.formatter match {
      case Some(IndentedText(_, _, indentationLevel)) =>
        if (indentationLevel > 0) {
          // Indent by shifting the renderer right inside a wrapping panel,
          // one icon-width per indentation level.
          val panel = new JPanel(new BorderLayout)
          panel.setBackground(renderer.getBackground)
          val WidthOfIconInPixels = 12
          panel.setBorder(new EmptyBorder(0, WidthOfIconInPixels * indentationLevel, 0, 0))
          renderer.setBorder(new EmptyBorder(0, 0, 0, 0))
          panel.add(renderer, BorderLayout.CENTER)
          panel
        }
        else renderer
      case _ =>
        renderer
    }
  }
}
| travisbrown/scalatest | src/main/scala/org/scalatest/tools/IconEmbellishedListCellRenderer.scala | Scala | apache-2.0 | 10,492 |
/*
* Copyright 2013 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package geomesa.plugin.wfs
import org.apache.wicket.behavior.SimpleAttributeModifier
import org.apache.wicket.markup.html.form.validation.IFormValidator
import org.apache.wicket.markup.html.form.{FormComponent, Form}
import org.apache.wicket.model.{ResourceModel, IModel, PropertyModel}
import org.geoserver.catalog.DataStoreInfo
import org.geoserver.web.data.store.StoreEditPanel
import org.geoserver.web.data.store.panel.TextParamPanel
import org.geoserver.web.util.MapModel
import org.geotools.data.DataAccessFactory.Param
/**
 * GeoServer store-edit panel collecting the connection parameters for an
 * Accumulo-backed data store (instance id, zookeepers, credentials,
 * authorizations, table name).
 */
class AccumuloDataStoreEditPanel (componentId: String, storeEditForm: Form[_])
    extends StoreEditPanel(componentId, storeEditForm) {

  val model = storeEditForm.getModel
  setDefaultModel(model)
  val storeInfo = storeEditForm.getModelObject.asInstanceOf[DataStoreInfo]
  val paramsModel = new PropertyModel(model, "connectionParameters")

  val instanceId = addTextPanel(paramsModel, new Param("instanceId", classOf[String], "The Accumulo Instance ID", true))
  val zookeepers = addTextPanel(paramsModel, new Param("zookeepers", classOf[String], "Zookeepers", true))
  val user = addTextPanel(paramsModel, new Param("user", classOf[String], "User", true))
  val password = addTextPanel(paramsModel, new Param("password", classOf[String], "Password", true))
  val auths = addTextPanel(paramsModel, new Param("auths", classOf[String], "Authorizations", true))
  val tableName = addTextPanel(paramsModel, new Param("tableName", classOf[String], "The Accumulo Table Name", true))

  // BUGFIX: `auths` is checked by the validator below but was missing from the
  // dependent components, so Wicket did not guarantee its converted input was
  // available when the form validator ran.
  val dependentFormComponents = Array[FormComponent[_]](instanceId, zookeepers, user, password, auths, tableName)
  dependentFormComponents.map(_.setOutputMarkupId(true))

  storeEditForm.add(new IFormValidator() {
    def getDependentFormComponents = dependentFormComponents

    // All connection parameters are mandatory. NOTE(review): require() throws
    // IllegalArgumentException rather than reporting a Wicket validation error
    // on the component — confirm this is the desired user experience.
    def validate(form: Form[_]) {
      require(user.getValue != null)
      require(password.getValue != null)
      require(instanceId.getValue != null)
      require(tableName.getValue != null)
      require(zookeepers.getValue != null)
      require(auths.getValue != null)
    }
  })

  /**
   * Builds a TextParamPanel for one connection parameter: label and tooltip come
   * from the resource bundle keyed by "<PanelClass>.<paramName>" (falling back to
   * the param name / description), the panel is added to this edit panel, and the
   * underlying form component is returned.
   */
  def addTextPanel(paramsModel: IModel[_], param: Param): FormComponent[_] = {
    val paramName = param.key
    val resourceKey = getClass.getSimpleName + "." + paramName
    val required = param.required
    val textParamPanel =
      new TextParamPanel(paramName,
        new MapModel(paramsModel, paramName).asInstanceOf[IModel[_]],
        new ResourceModel(resourceKey, paramName), required)
    textParamPanel.getFormComponent.setType(classOf[String])
    val defaultTitle = String.valueOf(param.description)
    val titleModel = new ResourceModel(resourceKey + ".title", defaultTitle)
    val title = String.valueOf(titleModel.getObject)
    textParamPanel.add(new SimpleAttributeModifier("title", title))
    add(textParamPanel)
    textParamPanel.getFormComponent
  }
}
} | anthonyccri/geomesa | geomesa-plugin/src/main/scala/geomesa/plugin/wfs/AccumuloDataStoreEditPanel.scala | Scala | apache-2.0 | 3,466 |
package com.pnns.vc3
import com.pnns.AlgorithmInstance
import com.pnns.Node
import scala.collection.immutable.HashSet
/**
 * VC3 algorithm instance.
 *
 * Each instance simulates one port-numbered node playing both a "white" and a
 * "black" role (presumably the two sides of the bipartite double cover used by
 * VC3-style matching algorithms — confirm against the algorithm description).
 * On even rounds the white side sends a "proposal" to one port at a time; on odd
 * rounds the black side accepts the lowest-numbered collected proposal. States
 * UR/MR are running (unmatched / matched-but-not-announced); US/MS are the
 * stopping states (unmatched-stopped / matched-stopped).
 */
class Vc3Instance(val node: Node) extends AlgorithmInstance[(State, State), (String, String)] {
  // White-side and black-side state machines, both starting unmatched-running.
  private var wState: State = UR()
  private var bState: State = UR()
  // Current round; its parity decides which role is active (white even, black odd).
  private var round = 0
  private val pcount = node.ports.length
  // Ports from which the black side has received proposals.
  private var m: List[Int] = Nil
  // Ports on which the black side has not yet seen a "matched" message.
  private var x = HashSet((0 until pcount): _*)

  override def hasStopped() = {
    isStoppingState(wState) && isStoppingState(bState)
  }

  // US and MS are the only stopping states.
  private def isStoppingState(state: State) = {
    state match {
      case US() => true
      case MS() => true
      case _ => false
    }
  }

  override def state() = (wState, bState)

  // Each outgoing message pairs the white-role and black-role messages per port.
  override def send() = {
    wSend.zip(bSend)
  }

  /**
   * Generate outgoing messages for the black node.
   * Only active on odd rounds while still unmatched-running.
   */
  private def bSend() = {
    bState match {
      case UR() if round % 2 == 1 => burSend
      case _ => Vector.fill(pcount)(null)
    }
  }

  /**
   * Generate outgoing messages for the black node in UR state.
   * Accepts the lowest-numbered pending proposal; stops unmatched once no
   * proposals remain and every port has reported "matched".
   */
  private def burSend() = {
    if (m.isEmpty) {
      if (x.isEmpty)
        bState = US()
      Vector.fill(pcount)(null)
    } else {
      val proposal = m.min
      bState = MS()
      Vector.tabulate(pcount)(i => {
        if (i == proposal)
          "accept"
        else
          null
      })
    }
  }

  /**
   * Generate outgoing messages for the white node.
   * Only active on even rounds, in the UR or MR state.
   */
  private def wSend() = {
    wState match {
      case UR() if round % 2 == 0 => wurSend
      case MR() if round % 2 == 0 => wmrSend
      case _ => Vector.fill(pcount)(null)
    }
  }

  /**
   * Generate outgoing messages for the white node in UR state.
   * Proposes to port k = (round + 1) / 2; once all ports have been tried
   * without acceptance, the white side stops unmatched.
   */
  private def wurSend() = {
    val k = (round + 1) / 2
    if (k < pcount) {
      Vector.tabulate(pcount)(pid => {
        if (pid == k)
          "proposal"
        else
          null
      })
    } else {
      wState = US()
      Vector.fill(pcount)(null)
    }
  }

  /**
   * Generate outgoing messages for the white node in MR state.
   * Announces "matched" on all ports and stops matched.
   */
  private def wmrSend() = {
    wState = MS()
    Vector.fill(pcount)("matched")
  }

  // Incoming messages arrive as (black-role msg, white-role msg) pairs per port.
  override def receive(msgs: Vector[(String, String)]) {
    wReceive(msgs map {
      case (_, w) => w
    })
    bReceive(msgs map {
      case (b, _) => b
    })
    round += 1
  }

  /**
   * Message processing for the black node (odd rounds, UR state only).
   */
  private def bReceive(msgs: Vector[String]) {
    bState match {
      case UR() if round % 2 == 0 => burReceive(msgs)
      case _ =>
    }
  }

  /**
   * Message processing for the black node in UR state.
   */
  private def burReceive(msgs: Vector[String]) {
    msgs.zipWithIndex foreach {
      case (msg, port) => breceiveMsg(msg, port)
    }
  }

  /**
   * Helper method for black node message processing:
   * "matched" retires the port, "proposal" records a pending proposal.
   */
  private def breceiveMsg(msg: String, port: Int) {
    msg match {
      case "matched" => x -= port
      case "proposal" => m = port :: m
      case _ =>
    }
  }

  /**
   * Message processing for the white node.
   */
  private def wReceive(msgs: Vector[String]) {
    wState match {
      case UR() if round % 2 == 1 => wurReceive(msgs)
      case _ =>
    }
  }

  /**
   * Message processing for the white node in UR state.
   * Any "accept" moves the white side to matched-running (announced next round).
   */
  private def wurReceive(msgs: Vector[String]) {
    val accept = msgs.indexWhere(msg => msg == "accept")
    if (accept >= 0)
      wState = MR()
  }
}
| operutka/pn-network-simulator | src/main/scala/com/pnns/vc3/Vc3Instance.scala | Scala | mit | 3,460 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2
import uk.gov.hmrc.ct.box.{Calculated, CtBigDecimal, CtBoxIdentifier}
import uk.gov.hmrc.ct.ct600.v2.calculations.CorporationTaxCalculator
import uk.gov.hmrc.ct.ct600.v2.retriever.CT600BoxRetriever
/** CT600 (v2) box B56: a computed tax amount, carried as a decimal value. */
case class B56(value: BigDecimal) extends CtBoxIdentifier("Tax") with CtBigDecimal

object B56 extends CorporationTaxCalculator with Calculated[B56, CT600BoxRetriever] {
  // Derived from boxes B54 and B55 via the FY2 corporation-tax formula
  // (exact box semantics live in CorporationTaxCalculator — confirm there).
  override def calculate(fieldValueRetriever: CT600BoxRetriever): B56 =
    corporationTaxFy2(
      fieldValueRetriever.retrieveB54(),
      fieldValueRetriever.retrieveB55())
}
| ahudspith-equalexperts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v2/B56.scala | Scala | apache-2.0 | 1,187 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.scheduler._
private case class PRJob(jobId: Long, poolName: String, stageIds: Seq[Int])
/**
 * Mutable per-batch statistics for one pool: the window's duration in ms
 * (populated by PRJobListener — not visible here, confirm), the loss reported at
 * the end of the window, and the loss decrease relative to the previous window.
 */
class PRBatchWindow {
  var durationMs: Long = _
  var loss: Double = _
  // Previous window's loss minus this window's loss (positive = improvement).
  var dLoss: Double = _
}
/**
* Singleton entry point to SLAQ utility scheduling.
*/
object PoolReweighterLoss extends Logging {
  // Completed batch windows per pool, appended each time the pool reports a loss.
  private val batchWindows = new mutable.HashMap[String, ArrayBuffer[PRBatchWindow]]
  // Registered pools; the value starts at -1 in register() — confirm its unit/meaning.
  private val pool2numCores = new mutable.HashMap[String, Int]
  // Lazily resolved so the object can be loaded before the SparkContext exists.
  private lazy val sc = SparkContext.getOrCreate()
  val listener = new PRJobListener
  // Tokens currently assigned to each pool; written by the scheduling thread.
  val tokens = new mutable.HashMap[String, Long]
  private var batchIntervalMs = 0
  private var isFair = false
  @volatile var isRunning = false
  /**
   * Whether there are active pools registered with this object
   * (pools are added by [[register]] and removed by [[done]]).
   */
  def hasRegisteredPools: Boolean = pool2numCores.nonEmpty
  /**
   * Called by application code to report loss values for a given pool periodically.
   * This must be called after the pool has registered with this object and has run
   * at least one task.
   *
   * @param loss the pool's current loss value
   */
  def updateLoss(loss: Double): Unit = {
    // The reporting pool is identified by the caller's scheduler-pool property.
    val poolName = sc.getLocalProperty("spark.scheduler.pool")
    logInfo(s"LOGAN: $poolName curr loss: $loss")
    val bws = batchWindows(poolName)
    // Seal the listener's in-progress window by appending it to the pool's history.
    bws.append(listener.currentWindows(poolName))
    // Update loss and delta loss, if any, in most recent batch window
    bws.last.loss = loss
    if (bws.size >= 2) {
      bws.last.dLoss = bws(bws.size - 2).loss - bws.last.loss
    }
    // Start a fresh window for the listener to accumulate into.
    listener.currentWindows.put(poolName, new PRBatchWindow)
  }
  /**
   * Signal that no more losses will be reported.
   * Drops every piece of per-pool bookkeeping for the given pool, both here and
   * on the shared listener.
   */
  def done(poolName: String): Unit = {
    pool2numCores.remove(poolName)
    batchWindows.remove(poolName)
    tokens.remove(poolName)
    listener.currentWindows.remove(poolName)
    listener.avgTaskTime.remove(poolName)
  }
  /**
   * Stop the thread that assigns tokens periodically to each pool.
   * The scheduling thread observes the volatile flag and exits after its
   * current sleep/assign iteration.
   */
  def stop(): Unit = {
    isRunning = false
  }
  /**
   * Start the thread that assigns tokens periodically to each pool.
   * This must be called before [[register]].
   *
   * @param t    batch interval in seconds between token assignments
   * @param fair when true, every pool receives the same number of tokens;
   *             otherwise utility-based weighting is used
   */
  def start(t: Int = 10, fair: Boolean = false): Unit = {
    // Fail fast against wrong scheduling mode, otherwise token accounting would be incorrect
    if (sc.taskScheduler.rootPool.schedulingMode != SchedulingMode.FAIR) {
      throw new IllegalStateException("Please set 'spark.scheduler.mode' to 'FAIR'!")
    }
    sc.addSparkListener(listener)
    isFair = fair
    batchIntervalMs = t * 1000
    isRunning = true
    val thread = new Thread {
      override def run(): Unit = {
        // Reassign and log tokens once per batch interval until stop() is called.
        while (isRunning) {
          Thread.sleep(batchIntervalMs)
          assignTokens()
          printTokens()
        }
      }
    }
    thread.start()
  }
  /**
   * Register a pool with the scheduler and kick off a round of token assignment.
   * Initializes the pool's core count to -1 (unassigned) and its token balance
   * and batch-window history to empty.
   */
  def register(poolName: String): Unit = {
    pool2numCores.put(poolName, -1)
    batchWindows.put(poolName, new ArrayBuffer[PRBatchWindow])
    tokens.put(poolName, 0)
    assignTokens()
  }
/**
 * Assign tokens to all registered pools.
 *
 * This is the core of the scheduling logic. The number of tokens assigned to a
 * pool is proportional to how often tasks belonging to the pool are launched.
 * At the end of each task, the average task time across all tasks running in
 * the pool are subtracted from the number of tokens assigned.
 *
 * There are two scheduling modes. In fair scheduling, the same number of tokens
 * are assigned to all the pools. In utility scheduling, the number of tokens
 * assigned to a pool is proportional to the amount of utility increase the pool
 * is expected to have by the next scheduling batch.
 */
private def assignTokens(): Unit = {
if (pool2numCores.isEmpty) {
return
}
if (isFair) {
// This is fair scheduling. We assign the same number of tokens to each pool.
// The specific number of tokens used here is not important.
for ((poolName: String, numCores: Int) <- pool2numCores) {
tokens(poolName) = batchIntervalMs
}
} else {
// This is utility scheduling. We use a heap to rank the pools based on how much
// utility improvement each pool will experience from one additional core.
// Ordering by the utility component makes this a max-heap keyed on marginal utility.
def diff(t: (String, Double)) = t._2
val heap = new mutable.PriorityQueue[(String, Double)]()(Ordering.by(diff))
val totalCores = sc.defaultParallelism
// NOTE(review): integer division — up to (size - 1) leftover cores are never handed
// out through the "not enough losses" path below; confirm this is intended.
val fairshare = totalCores / pool2numCores.size
val minCore = 3
// Initialize all pools with min cores.
// If a pool does not have enough losses, just initialize it to the fair share.
// Note: This does not currently handle the case when number of pools * min cores
// exceeds the total number of cores in the cluster, in which case remaining cores
// may fall below zero and behavior is undefined.
var remainingCores = totalCores
for ((poolName: String, _) <- pool2numCores) {
if (batchWindows(poolName).size <= 1) {
pool2numCores(poolName) = fairshare
remainingCores -= fairshare
} else {
// Initialize heap with amount of utility increase from one additional core
val util1 = predictNormalizedDeltaLoss(poolName, minCore)
val util2 = predictNormalizedDeltaLoss(poolName, minCore + 1)
heap.enqueue((poolName, util2 - util1))
pool2numCores(poolName) = minCore
remainingCores -= minCore
}
}
// Assign num cores to pools
// Greedy water-filling: repeatedly give one core to the pool with the largest
// predicted marginal utility, then recompute that pool's next marginal utility.
while (remainingCores > 0 && heap.nonEmpty) {
val (poolName, _) = heap.dequeue()
pool2numCores(poolName) += 1
val alloc = pool2numCores(poolName)
val utilCurr = predictNormalizedDeltaLoss(poolName, alloc)
val utilNext = predictNormalizedDeltaLoss(poolName, alloc + 1)
heap.enqueue((poolName, utilNext - utilCurr))
remainingCores -= 1
}
// Convert num cores to tokens
// A pool's token budget scales with both the batch length and its core allocation.
for ((poolName: String, numCores: Int) <- pool2numCores) {
tokens(poolName) = batchIntervalMs * numCores
}
}
}
/**
 * Print number of tokens assigned to each pool, framed by hash banners so it
 * stands out in driver output.
 */
private def printTokens(): Unit = {
if (tokens.nonEmpty) {
val hashes = "##########################################################"
val timeString = s"Time = ${System.currentTimeMillis()}"
// Sort by pool name so repeated printouts are stable and easy to diff.
val tokenString = tokens.toArray
.sortBy { case (poolName, _) => poolName }
.map { case (poolName, numTokens) => s"$poolName = $numTokens" }
.mkString("\\n")
// NOTE(review): "\\n" is a literal backslash-n, not a newline; this may be an
// escaping artifact — confirm whether "\n" (a real newline) was intended.
println(s"\\n\\n\\n$hashes\\n$timeString\\n$tokenString\\n$hashes\\n\\n\\n")
}
}
/**
 * Predict the loss a pool will report after the given number of additional
 * iterations by fitting a geometric decay curve to the last two observed losses.
 * Returns 0.0 when fewer than two losses have been recorded.
 */
private def predictLoss(poolName: String, numIterations: Int): Double = {
  val windows = batchWindows(poolName)
  if (windows.size < 2) {
    0.0
  } else {
    val latest = windows(windows.size - 1).loss
    val previous = windows(windows.size - 2).loss
    // Per-iteration decay ratio, extrapolated numIterations steps forward.
    val ratio = latest / previous
    latest * math.pow(ratio, numIterations)
  }
}
/**
 * Predict delta loss reported by a pool by the next scheduling batch if it were
 * assigned the specified number of cores. The delta loss here is normalized by
 * the max delta loss observed in the pool so far.
 *
 * Returns 0.0 when there is not enough history (fewer than two recorded losses)
 * or when no iteration duration has been observed yet.
 */
private def predictNormalizedDeltaLoss(poolName: String, numCores: Int): Double = {
  val windows = batchWindows(poolName)
  val numLosses = windows.size
  if (numLosses > 1) {
    // Ignore the first iteration because it may be noisy.
    // BUG FIX: the average must divide by the (numLosses - 1) windows remaining
    // after drop(1); dividing by numLosses understated the iteration duration.
    val sampled = windows.drop(1)
    val avgIterDuration = sampled.map(_.durationMs).sum / sampled.size
    if (avgIterDuration <= 0) {
      // No task time recorded yet; avoid dividing by zero below.
      0.0
    } else {
      val numItersByNextBatch = (numCores * batchIntervalMs / avgIterDuration).toInt
      val predictedLoss = predictLoss(poolName, numItersByNextBatch)
      // NOTE(review): maxDeltaLoss can be 0 or negative if the loss never improved,
      // yielding Infinity/NaN here — confirm callers guarantee improvement first.
      val maxDeltaLoss = windows.map(_.dLoss).max
      (windows.last.loss - predictedLoss) / maxDeltaLoss
    }
  } else {
    0.0
  }
}
}
/**
 * Spark listener that keeps track of task duration statistics for each pool.
 *
 * All mutable state is guarded by this object's monitor. The original code only
 * synchronized onTaskEnd, while onJobStart/onJobEnd mutate the same currentJobs
 * map from listener callbacks — a data race on mutable.HashMap.
 */
class PRJobListener extends SparkListener with Logging {
  // Maps job id -> in-flight job; jobs running in the "default" pool are not tracked.
  private val currentJobs = new mutable.HashMap[Long, PRJob]
  // Per-pool window accumulating task time since the last loss report.
  val currentWindows = new mutable.HashMap[String, PRBatchWindow]
  // Per-pool running (total task time ms, task count) pair used to derive averages.
  val avgTaskTime = new mutable.HashMap[String, (Long, Int)]

  /** Start tracking a job unless it belongs to the untracked "default" pool. */
  override def onJobStart(jobStart: SparkListenerJobStart): Unit = synchronized {
    val poolName = jobStart.properties.getProperty("spark.scheduler.pool")
    if (poolName != null && poolName != "default") {
      currentJobs.put(jobStart.jobId, PRJob(jobStart.jobId, poolName, jobStart.stageIds.toList))
    }
  }

  /** Stop tracking a finished job; removing an unknown id is a harmless no-op. */
  override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = synchronized {
    currentJobs.remove(jobEnd.jobId)
  }

  /**
   * Update task duration statistics in the pool to which this task belongs.
   */
  override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = synchronized {
    val stageId = taskEnd.stageId
    // NOTE(review): the / 1000000 assumes all three metrics are nanoseconds; in stock
    // Spark, resultSerializationTime is reported in ms — confirm this fork's units.
    val taskDurationMs =
      (taskEnd.taskMetrics.executorCpuTime +
        taskEnd.taskMetrics.resultSerializationTime +
        taskEnd.taskMetrics.executorDeserializeCpuTime) / 1000000
    currentJobs.values.foreach { job =>
      if (job.stageIds.contains(stageId)) {
        val poolName = job.poolName
        // Accumulate the duration into the pool's current batch window.
        currentWindows.getOrElseUpdate(poolName, new PRBatchWindow).durationMs += taskDurationMs
        // Fold this task into the pool's running average-task-time accumulator.
        val (existingDurationMs, numTasks) = avgTaskTime.getOrElse(poolName, (0L, 0))
        avgTaskTime(poolName) = (existingDurationMs + taskDurationMs, numTasks + 1)
      }
    }
  }
}
| andrewor14/iolap | core/src/main/scala/org/apache/spark/PoolReweighterLoss.scala | Scala | apache-2.0 | 10,807 |
package com.ekuaibao.scalicicle
import java.io.{ByteArrayOutputStream, IOException}
import java.security.MessageDigest
import scala.collection.immutable.NumericRange
import scala.concurrent._
import scala.util.control.NoStackTrace
/**
 * Generates IDs using Redis that have strong guarantees of k-ordering, and include a timestamp that can be considered
 * issued by a time oracle so long as time is kept in check on the Redis instances used.
 *
 * This allows events to be generated in a distributed fashion, stored in a immutable data-store, and a fetch to
 * reconstruct time ordering at any point in the future with strong guarantees that the order is the intended one.
 *
 * We are generating an ID that will be comprised of the following:
 *
 * > 41 bit time + 10 bit logical shard id + 12 bit sequence id
 *
 * Note this adds to 63 bit, because the MSB is reserved in some languages and we value interoperability.
 *
 * @param redis        client used to evaluate the ID-generation Lua script
 * @param luaScript    full source of the Lua script, loaded into Redis on demand
 * @param luaScriptSha locally computed SHA-1 of the script, used with EVALSHA
 */
class IdGenerator(redis: Redis, luaScript: String, luaScriptSha: String) {
  import IdGenerator._

  /**
   * Generate a single ID.
   *
   * @return the generated ID
   */
  def generateId()(implicit executor: ExecutionContext): Future[Long] = {
    executeOrLoadLuaScript(1)
  }

  /**
   * Generate a batch of IDs.
   *
   * @param batchSize The number IDs to return.
   * @return A list of IDs. The number of IDs may be less than or equal to the batch size depending on if the sequence needs to roll in Redis.
   */
  def generateIdBatch(batchSize: Int)(implicit executor: ExecutionContext): Future[NumericRange[Long]] = {
    validateBatchSize(batchSize)
    executeOrLoadLuaScript(batchSize) map { id =>
      // The script reserves [id, id + batchSize * SEQUENCE_STEP); consecutive IDs in
      // one batch differ by SEQUENCE_STEP because the shard bits sit below the sequence.
      NumericRange(id, id + batchSize * SEQUENCE_STEP, SEQUENCE_STEP)
    }
  }

  /**
   * Try executing the Lua script using the SHA of its contents, loading the script
   * into Redis and retrying once if it has not been loaded before.
   *
   * Using EVALSHA means the script is parsed and tokenised only once per Redis
   * server, and guards against fresh servers added to the pool: the script is
   * simply loaded again on first miss.
   *
   * @param batchSize The number to increment the sequence by in Redis.
   * @return The result of executing the Lua script.
   */
  private def executeOrLoadLuaScript(batchSize: Int)(implicit executor: ExecutionContext): Future[Long] = {
    val size = batchSize.toString
    val time = System.currentTimeMillis().toString
    // Fast path: the script was already loaded, so EVALSHA succeeds directly.
    redis.evalSha(luaScriptSha, size, time).recoverWith {
      case RedisScriptNotFoundException =>
        // Load the script, then retry once, failing if it still cannot execute.
        redis.scriptLoad(luaScript).flatMap { sha =>
          // BUG FIX: retry with the SHA Redis actually reports for the loaded script.
          // The previous code ignored `sha` and reused the locally computed SHA, which
          // would fail again forever if the two ever disagreed (e.g. encoding drift).
          redis.evalSha(sha, size, time)
        }
    }
  }

  /**
   * Check that the given batch size is within the bounds that we allow. This is
   * important, as otherwise a negative batch size would corrupt the sequencing
   * in Redis.
   *
   * @param batchSize The batch size as specified by the user.
   */
  private def validateBatchSize(batchSize: Int) = {
    if (batchSize <= 0 || batchSize > MAX_BATCH_SIZE) {
      throw new InvalidBatchSizeException(s"The batch size is less than 1 or is greater than the supported maximum of $MAX_BATCH_SIZE")
    }
  }
}
object IdGenerator {
  val LUA_SCRIPT_RESOURCE_PATH = "/id-generation.lua"
  val LOGICAL_SHARD_ID_BITS = 10
  val SEQUENCE_BITS = 12
  val MAX_LOGICAL_SHARD_ID = (1 << LOGICAL_SHARD_ID_BITS) - 1
  val MIN_LOGICAL_SHARD_ID = 0L
  val MAX_BATCH_SIZE = (1 << SEQUENCE_BITS) - 1
  // Distance between consecutive IDs in a batch: the sequence sits above the shard bits.
  val SEQUENCE_STEP = 1L << LOGICAL_SHARD_ID_BITS

  /**
   * Create an ID generator that will operate using the given Redis client.
   *
   * The Lua script is read once from the classpath and its SHA-1 computed locally,
   * so that EVALSHA can be used without re-sending the script body on every call.
   * (The stale reference to a `maximumAttempts` parameter in the old doc has been
   * removed — no such parameter exists.)
   *
   * @param redis The abstract Redis client interface to use for ID generation.
   * @throws LuaScriptFailedToLoadException if the script resource is missing or unreadable
   */
  def apply(redis: Redis): IdGenerator = {
    val script = loadScriptBytes()
    val digest = MessageDigest.getInstance("SHA1")
    val sha = HexUtil.bytes2hex(digest.digest(script))
    new IdGenerator(redis, new String(script, "UTF-8").intern(), sha)
  }

  /** Read the Lua script resource fully into a byte array. */
  private def loadScriptBytes(): Array[Byte] = {
    val is = this.getClass.getResourceAsStream(LUA_SCRIPT_RESOURCE_PATH)
    // BUG FIX: getResourceAsStream returns null (not an IOException) when the resource
    // is absent; fail with the intended domain exception instead of a bare NPE.
    if (is == null) {
      throw new LuaScriptFailedToLoadException(
        "Could not load Icicle Lua script from the resources in the JAR.", null)
    }
    try {
      val out = new ByteArrayOutputStream(1024)
      val buf = Array.ofDim[Byte](512)
      var len = is.read(buf)
      while (len != -1) {
        out.write(buf, 0, len)
        len = is.read(buf)
      }
      out.toByteArray
    } catch {
      case ex: IOException =>
        throw new LuaScriptFailedToLoadException("Could not load Icicle Lua script from the resources in the JAR.", ex)
    } finally {
      // ByteArrayOutputStream.close() is a no-op, so only the input stream needs closing.
      is.close()
    }
  }
}
/**
 * Minimal Redis operations required by [[IdGenerator]].
 */
trait Redis {
/** Load a Lua script into Redis, returning the SHA-1 Redis assigns to it. */
def scriptLoad(lua: String): Future[String]
/** Execute a previously loaded script by SHA with the given string arguments. */
def evalSha(sha: String, args: String*): Future[Long]
}
object RedisScriptNotFoundException extends RuntimeException with NoStackTrace | ekuaibao/scalicicle | src/main/scala/com/ekuaibao/scalicicle/IdGenerator.scala | Scala | mit | 5,525 |
package org.reactivecouchbase.rs.scaladsl
import akka.util.ByteString
import io.circe._
import io.circe.syntax._
import io.circe.parser._
import com.couchbase.client.java.document.json._
import org.reactivecouchbase.rs.scaladsl.json._
import scala.language.implicitConversions
package object circejson {
/** Adapts circe's Either-based decode results to the library's JsonResult ADT. */
private def handleParseResult[T](result: Either[io.circe.Error, T]): JsonResult[T] = result.fold[JsonResult[T]](
failure => JsonError(List(JsonValidationError(List(failure.getMessage)))),
success => JsonSuccess(success)
)
// Compact printer that omits null-valued fields from serialized output.
private val printer: Printer = Printer.noSpaces.copy(dropNullValues = true)
// Default byte-level reads/writes for raw circe Json values.
val defaultCirceReads: JsonReads[Json] = JsonReads(bs => handleParseResult(parse(bs.utf8String)))
val defaultCirceWrites: JsonWrites[Json] = JsonWrites(jsv => ByteString(printer.pretty(jsv)))
implicit val defaultCirceFormat: JsonFormat[Json] = JsonFormat(defaultCirceReads, defaultCirceWrites)
/**
 * Converts between io.circe.Json and org.reactivecouchbase.rs.scaladsl.json objects
 * @param encoder [[https://circe.github.io/circe/codec.html#custom-encodersdecoders Encoder]] for ModelType
 * @param decoder [[https://circe.github.io/circe/codec.html#custom-encodersdecoders Decoder]] for ModelType
 * @return JsonFormat[MODELTYPE]
 * */
def createCBFormat[MODELTYPE](implicit encoder: Encoder[MODELTYPE],
decoder: Decoder[MODELTYPE]): JsonFormat[MODELTYPE] =
JsonFormat[MODELTYPE](
JsonReads[MODELTYPE](bs => handleParseResult(decode[MODELTYPE](bs.utf8String))),
JsonWrites[MODELTYPE](jsv => ByteString(printer.pretty(jsv.asJson)))
)
/** Bridges Couchbase SDK JSON values to and from circe Json. */
implicit val defaultCirceConverter: CouchbaseJsonDocConverter[Json] = new CouchbaseJsonDocConverter[Json] {
override def convertTo(ref: AnyRef): Json = convertToJson(ref)
override def convertFrom(ref: Json): Any = convertJsonValue(ref)
}
/** Query parameters backed by a circe Json object; empty when the Json is null. */
case class CirceQueryParams(query: Json) extends QueryParams {
override def isEmpty: Boolean = query.isNull
override def toJsonObject: com.couchbase.client.java.document.json.JsonObject = convertToJson(query)
}
implicit class EnhancedJsonObject(val obj: Json) extends AnyVal {
/** Wraps this Json value as Couchbase query parameters. */
def asQueryParams: CirceQueryParams = CirceQueryParams(obj)
}
import collection.JavaConverters._
// Recursively converts a circe Json value into the Couchbase SDK's Java JSON model.
// Values that cannot be extracted fall back to JsonNull.INSTANCE.
private def convertJsonValue(value: Json): Any = value match {
case x if x.isNull => JsonNull.INSTANCE
case x if x.isBoolean => x.asBoolean.getOrElse(JsonNull.INSTANCE)
case x if x.isString => x.asString.getOrElse(JsonNull.INSTANCE)
case x if x.isNumber => x.asNumber.flatMap(_.toBigDecimal.map(_.bigDecimal)).getOrElse(JsonNull.INSTANCE)
case x if x.isArray =>
x.asArray
.map(_.foldLeft(JsonArray.create())((a, b) => a.add(convertJsonValue(b))))
.getOrElse(JsonNull.INSTANCE)
case x if x.isObject =>
x.asObject
.map(
_.toList.foldLeft(com.couchbase.client.java.document.json.JsonObject.create())(
(a, b) => a.put(b._1, convertJsonValue(b._2))
)
)
.getOrElse(JsonNull.INSTANCE)
case _ => throw new RuntimeException("Unknown type")
}
// Converts a circe Json expected to be an object; any non-object becomes an empty JsonObject.
private def convertToJson(value: Json): com.couchbase.client.java.document.json.JsonObject =
value.asObject
.map { x: io.circe.JsonObject =>
x.toList.foldLeft(com.couchbase.client.java.document.json.JsonObject.create())(
(a, b) => a.put(b._1, convertJsonValue(b._2))
)
}
.getOrElse(com.couchbase.client.java.document.json.JsonObject.empty())
// Converts a Couchbase SDK value (or plain primitive) back into circe Json.
private def convertToJson(value: Any): Json = value match {
case a: com.couchbase.client.java.document.json.JsonObject =>
Json.obj(a.toMap.asScala.toMap.mapValues(convertToJson).toList: _*)
case a: JsonArray => Json.arr(a.toList.asScala.toIndexedSeq.map(convertToJson): _*)
case a: Boolean => Json.fromBoolean(a)
case a: Double => Json.fromDouble(a).getOrElse(Json.Null)
case a: Long => Json.fromLong(a)
case a: Int => Json.fromInt(a)
case a: String => Json.fromString(a)
//noinspection ScalaStyle
case null => Json.Null
case _ => throw new RuntimeException("Unknown type")
}
}
| ReactiveCouchbase/reactivecouchbase-rs-core | src/main/scala/org/reactivecouchbase/rs/scaladsl/json/circejson.scala | Scala | apache-2.0 | 4,234 |
package no.uio.musit.models
import org.scalatest.{Inside, MustMatchers, WordSpec}
class CaseNumbersSpec extends WordSpec with MustMatchers with Inside {
"CaseNumber" should {
// toDbString wraps every id in pipe delimiters ("|a|b|").
// NOTE(review): presumably so a "|id|" substring match in SQL is unambiguous — confirm.
"have pipe separator around one id" in {
CaseNumbers(Seq("a")).toDbString mustBe "|a|"
}
"have pipe separator between multiple ids" in {
CaseNumbers(Seq("a", "b")).toDbString mustBe "|a|b|"
}
// Round-trip: parsing the pipe-delimited form recovers the individual ids.
"parse multiple ids" in {
CaseNumbers("|a|b|").underlying must contain allOf ("a", "b")
}
}
}
| MUSIT-Norway/musit | musit-models/src/test/scala/no/uio/musit/models/CaseNumbersSpec.scala | Scala | gpl-2.0 | 513 |
/*
SirRre.scala
PDE approximation for a SIR epidemic model
Numerical solution of an SPDE
*/
package rd
object SirRre {
import smfsb._
import breeze.linalg.{Vector => BVec, _}
import breeze.numerics._
def main(args: Array[String]): Unit = {
// Spatial grid: r rows by c columns.
val r = 250; val c = 300
val model = sir[DoubleState]()
// 2D Euler discretisation of the model dynamics.
// NOTE(review): DenseVector(3.0, 2.0, 0.0) is assumed to hold per-species diffusion
// coefficients and 0.005 the integration time step — confirm against smfsb docs.
val step = Spatial.euler2d(model, DenseVector(3.0, 2.0, 0.0), 0.005)
// x00 fills every cell; x0 is a perturbed state placed only at the centre cell.
val x00 = DenseVector(100.0, 0.0, 0.0)
val x0 = DenseVector(50.0, 50.0, 0.0)
val xx00 = PMatrix(r, c, Vector.fill(r*c)(x00))
val xx0 = xx00.updated(c/2, r/2, x0)
// Lazily iterate the dynamics (step size 0.05) and render each state as an image.
val s = Stream.iterate(xx0)(step(_,0.0,0.05))
val si = s map (toSfxI3(_))
scalaview.SfxImageViewer(si, 1, autoStart=true)
}
}
// eof
| darrenjw/blog | reaction-diffusion/src/main/scala/rd/SirRre.scala | Scala | apache-2.0 | 720 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import java.util.concurrent.locks.ReentrantReadWriteLock
import scala.collection.JavaConverters._
import scala.collection.mutable
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{Dataset, SparkSession}
import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, ResolvedHint}
import org.apache.spark.sql.execution.columnar.InMemoryRelation
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.storage.StorageLevel.MEMORY_AND_DISK
/** Holds a cached logical plan together with its materialized in-memory representation. */
case class CachedData(plan: LogicalPlan, cachedRepresentation: InMemoryRelation)
/**
 * Provides support in a SQLContext for caching query results and automatically using these cached
 * results when subsequent queries are executed. Data is cached using byte buffers stored in an
 * InMemoryRelation. This relation is automatically substituted query plans that return the
 * `sameResult` as the originally cached query.
 *
 * Internal to Spark SQL.
 */
class CacheManager extends Logging {
@transient
private val cachedData = new java.util.LinkedList[CachedData]
// All access to cachedData is guarded by cacheLock via readLock/writeLock below.
@transient
private val cacheLock = new ReentrantReadWriteLock
/** Acquires a read lock on the cache for the duration of `f`. */
private def readLock[A](f: => A): A = {
val lock = cacheLock.readLock()
lock.lock()
try f finally {
lock.unlock()
}
}
/** Acquires a write lock on the cache for the duration of `f`. */
private def writeLock[A](f: => A): A = {
val lock = cacheLock.writeLock()
lock.lock()
try f finally {
lock.unlock()
}
}
/** Clears all cached tables. */
def clearCache(): Unit = writeLock {
cachedData.asScala.foreach(_.cachedRepresentation.cacheBuilder.clearCache())
cachedData.clear()
}
/** Checks if the cache is empty. */
def isEmpty: Boolean = readLock {
cachedData.isEmpty
}
/**
 * Caches the data produced by the logical representation of the given [[Dataset]].
 * Unlike `RDD.cache()`, the default storage level is set to be `MEMORY_AND_DISK` because
 * recomputing the in-memory columnar representation of the underlying table is expensive.
 */
def cacheQuery(
query: Dataset[_],
tableName: Option[String] = None,
storageLevel: StorageLevel = MEMORY_AND_DISK): Unit = {
val planToCache = query.logicalPlan
if (lookupCachedData(planToCache).nonEmpty) {
logWarning("Asked to cache already cached data.")
} else {
val sparkSession = query.sparkSession
val inMemoryRelation = InMemoryRelation(
sparkSession.sessionState.conf.useCompression,
sparkSession.sessionState.conf.columnBatchSize, storageLevel,
sparkSession.sessionState.executePlan(planToCache).executedPlan,
tableName,
planToCache)
writeLock {
// Re-check under the write lock: another thread may have cached the same plan
// between the optimistic lookup above and acquiring the lock.
if (lookupCachedData(planToCache).nonEmpty) {
logWarning("Data has already been cached.")
} else {
cachedData.add(CachedData(planToCache, inMemoryRelation))
}
}
}
}
/**
 * Un-cache the given plan or all the cache entries that refer to the given plan.
 * @param query The [[Dataset]] to be un-cached.
 * @param cascade If true, un-cache all the cache entries that refer to the given
 *                [[Dataset]]; otherwise un-cache the given [[Dataset]] only.
 * @param blocking Whether to block until all blocks are deleted.
 */
def uncacheQuery(
query: Dataset[_],
cascade: Boolean,
blocking: Boolean = true): Unit = {
uncacheQuery(query.sparkSession, query.logicalPlan, cascade, blocking)
}
/**
 * Un-cache the given plan or all the cache entries that refer to the given plan.
 * @param spark The Spark session.
 * @param plan The plan to be un-cached.
 * @param cascade If true, un-cache all the cache entries that refer to the given
 *                plan; otherwise un-cache the given plan only.
 * @param blocking Whether to block until all blocks are deleted.
 */
def uncacheQuery(
spark: SparkSession,
plan: LogicalPlan,
cascade: Boolean,
blocking: Boolean): Unit = {
// Cascading: drop any entry whose plan *contains* the given plan anywhere in its
// tree; non-cascading: drop only entries whose plan is equivalent to it.
val shouldRemove: LogicalPlan => Boolean =
if (cascade) {
_.find(_.sameResult(plan)).isDefined
} else {
_.sameResult(plan)
}
val plansToUncache = mutable.Buffer[CachedData]()
writeLock {
val it = cachedData.iterator()
while (it.hasNext) {
val cd = it.next()
if (shouldRemove(cd.plan)) {
plansToUncache += cd
it.remove()
}
}
}
// Buffers are cleared outside the write lock — presumably to avoid holding the
// lock during potentially slow unpersist work.
plansToUncache.foreach { cd =>
cd.cachedRepresentation.cacheBuilder.clearCache(blocking)
}
// Re-compile dependent cached queries after removing the cached query.
if (!cascade) {
recacheByCondition(spark, _.find(_.sameResult(plan)).isDefined, clearCache = false)
}
}
/**
 * Tries to re-cache all the cache entries that refer to the given plan.
 */
def recacheByPlan(spark: SparkSession, plan: LogicalPlan): Unit = {
recacheByCondition(spark, _.find(_.sameResult(plan)).isDefined)
}
private def recacheByCondition(
spark: SparkSession,
condition: LogicalPlan => Boolean,
clearCache: Boolean = true): Unit = {
val needToRecache = scala.collection.mutable.ArrayBuffer.empty[CachedData]
writeLock {
val it = cachedData.iterator()
while (it.hasNext) {
val cd = it.next()
// If `clearCache` is false (which means the recache request comes from a non-cascading
// cache invalidation) and the cache buffer has already been loaded, we do not need to
// re-compile a physical plan because the old plan will not be used any more by the
// CacheManager although it still lives in compiled `Dataset`s and it could still work.
// Otherwise, it means either `clearCache` is true, then we have to clear the cache buffer
// and re-compile the physical plan; or it is a non-cascading cache invalidation and cache
// buffer is still empty, then we could have a more efficient new plan by removing
// dependency on the previously removed cache entries.
// Note that the `CachedRDDBuilder`.`isCachedColumnBuffersLoaded` call is a non-locking
// status test and may not return the most accurate cache buffer state. So the worse case
// scenario can be:
// 1) The buffer has been loaded, but `isCachedColumnBuffersLoaded` returns false, then we
//    will clear the buffer and build a new plan. It is inefficient but doesn't affect
//    correctness.
// 2) The buffer has been cleared, but `isCachedColumnBuffersLoaded` returns true, then we
//    will keep it as it is. It means the physical plan has been re-compiled already in the
//    other thread.
val buildNewPlan =
clearCache || !cd.cachedRepresentation.cacheBuilder.isCachedColumnBuffersLoaded
if (condition(cd.plan) && buildNewPlan) {
needToRecache += cd
// Remove the cache entry before we create a new one, so that we can have a different
// physical plan.
it.remove()
}
}
}
// NOTE(review): `map` is used purely for its side effects here; `foreach` would
// express the intent better.
needToRecache.map { cd =>
cd.cachedRepresentation.cacheBuilder.clearCache()
val plan = spark.sessionState.executePlan(cd.plan).executedPlan
val newCache = InMemoryRelation(
cacheBuilder = cd.cachedRepresentation
.cacheBuilder.copy(cachedPlan = plan)(_cachedColumnBuffers = null),
logicalPlan = cd.plan)
val recomputedPlan = cd.copy(cachedRepresentation = newCache)
writeLock {
if (lookupCachedData(recomputedPlan.plan).nonEmpty) {
logWarning("While recaching, data was already added to cache.")
} else {
cachedData.add(recomputedPlan)
}
}
}
}
/** Optionally returns cached data for the given [[Dataset]] */
def lookupCachedData(query: Dataset[_]): Option[CachedData] = readLock {
lookupCachedData(query.logicalPlan)
}
/** Optionally returns cached data for the given [[LogicalPlan]]. */
def lookupCachedData(plan: LogicalPlan): Option[CachedData] = readLock {
cachedData.asScala.find(cd => plan.sameResult(cd.plan))
}
/** Replaces segments of the given logical plan with cached versions where possible. */
def useCachedData(plan: LogicalPlan): LogicalPlan = {
val newPlan = plan transformDown {
// Do not lookup the cache by hint node. Hint node is special, we should ignore it when
// canonicalizing plans, so that plans which are same except hint can hit the same cache.
// However, we also want to keep the hint info after cache lookup. Here we skip the hint
// node, so that the returned caching plan won't replace the hint node and drop the hint info
// from the original plan.
case hint: ResolvedHint => hint
case currentFragment =>
lookupCachedData(currentFragment)
.map(_.cachedRepresentation.withOutput(currentFragment.output))
.getOrElse(currentFragment)
}
// Also substitute cached data inside subquery expressions, which transformDown
// above does not descend into.
newPlan transformAllExpressions {
case s: SubqueryExpression => s.withNewPlan(useCachedData(s.plan))
}
}
/**
 * Tries to re-cache all the cache entries that contain `resourcePath` in one or more
 * `HadoopFsRelation` node(s) as part of its logical plan.
 */
def recacheByPath(spark: SparkSession, resourcePath: String): Unit = {
val (fs, qualifiedPath) = {
val path = new Path(resourcePath)
val fs = path.getFileSystem(spark.sessionState.newHadoopConf())
(fs, fs.makeQualified(path))
}
recacheByCondition(spark, _.find(lookupAndRefresh(_, fs, qualifiedPath)).isDefined)
}
/**
 * Traverses a given `plan` and searches for the occurrences of `qualifiedPath` in the
 * [[org.apache.spark.sql.execution.datasources.FileIndex]] of any [[HadoopFsRelation]] nodes
 * in the plan. If found, we refresh the metadata and return true. Otherwise, this method returns
 * false.
 */
private def lookupAndRefresh(plan: LogicalPlan, fs: FileSystem, qualifiedPath: Path): Boolean = {
plan match {
case lr: LogicalRelation => lr.relation match {
case hr: HadoopFsRelation =>
val prefixToInvalidate = qualifiedPath.toString
val invalidate = hr.location.rootPaths
.map(_.makeQualified(fs.getUri, fs.getWorkingDirectory).toString)
.exists(_.startsWith(prefixToInvalidate))
if (invalidate) hr.location.refresh()
invalidate
case _ => false
}
case _ => false
}
}
}
| hhbyyh/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/CacheManager.scala | Scala | apache-2.0 | 11,679 |
/*
* Copyright (C) 2012 e-Research Laboratory, School of ITEE,
* The University of Queensland
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package net.metadata.qldarch.snippet
import scala.xml.{NodeSeq, Text}
import net.liftweb.util.Helpers._
import net.liftweb.common._
import net.liftweb.http._
import net.metadata.qldarch.model.Person
import net.metadata.qldarch.model.Resource
class Resources extends Logger {
// Lift CSS-selector snippet: binds every archive resource to a ".row" element in the
// template, rendering its metadata fields plus per-resource view/edit links.
def list =
".row *" #> Resource.findAll().map(r =>
".title *" #> Text(r.title) &
// Linked records may be absent; fall back to "Unknown ..." display labels.
".recorder *" #> Text(r.creator.obj.map(c => c.forDisplay).openOr("Unknown recorder")) &
".collection *" #> Text(r.collection.obj.map(c => c.forDisplay).openOr("Unknown collection")) &
".format *" #> Text(r.format.obj.map(f => f.forDisplay).openOr("Unknown mimetype")) &
".date_recorded *" #> Text(r.createdDate.toString) &
// Basename of the stored file: everything after the last '/' in the path.
".file_name *" #> Text(r.fileName.toString.reverse.takeWhile(_ != '/').reverse) &
".view" #> (".link [href]" #> ("/resources/view/" + r.id)) &
".edit" #> (".link [href]" #> ("/resources/edit/" + r.id)))
}
| recurse/qldarch | src/main/scala/net/metadata/qldarch/snippet/Resources.scala | Scala | agpl-3.0 | 1,742 |
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package sky.localnode
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.util.Timeout
import scala.concurrent.Await
import akka.pattern.ask
import scala.concurrent.duration._
class LocalActor extends Actor with ActorLogging {
// Get a reference to the remote actor via its fixed remoting address.
val remoteActor = context.actorSelection("akka.tcp://RemoteNodeApp@hadoop1:2552/user/remoteActor")
implicit val timeout = Timeout(5 seconds)
def receive: Receive = {
case message: String =>
// Forward the message with the ask pattern and wait for the String reply.
// NOTE(review): Await.result blocks this actor's dispatcher thread for up to the full
// timeout — a known Akka anti-pattern; consider `pipeTo` or mapping the future instead.
// A timeout here throws, failing the actor and triggering supervision.
val future = (remoteActor ? message).mapTo[String]
val result = Await.result(future, timeout.duration)
log.info("Message received from Server -> {}", result)
}
}
| szekai/akka-example | AkkaRemotingExample/LocalNodeApp/src/main/scala/sky/localnode/LocalActor.scala | Scala | apache-2.0 | 853 |
package org.scalacoin.marshallers.rpc.bitcoincore.wallet
import org.scalacoin.protocol.rpc.bitcoincore.wallet.{WalletInfo, WalletInfoImpl}
import spray.json._
/**
* Created by Tom on 1/6/2016.
*/
object WalletMarshaller extends DefaultJsonProtocol {
// JSON field names as they appear in bitcoind's `getwalletinfo` RPC response.
val walletVersionKey = "walletversion"
val balanceKey = "balance"
val unconfirmedBalanceKey = "unconfirmed_balance"
val immatureBalanceKey = "immature_balance"
val txCountKey = "txcount"
val keyPoolOldestKey = "keypoololdest"
val keyPoolSizeKey = "keypoolsize"
/**
 * Round-trip spray-json (de)serializer for bitcoind wallet-info payloads.
 */
implicit object WalletFormatter extends RootJsonFormat[WalletInfo] {
  /** Parse a wallet-info JSON object into a [[WalletInfo]]. */
  override def read(value: JsValue): WalletInfo = {
    val fields = value.asJsObject.fields
    WalletInfoImpl(
      fields(walletVersionKey).convertTo[Int],
      fields(balanceKey).convertTo[Double],
      fields(unconfirmedBalanceKey).convertTo[Double],
      fields(immatureBalanceKey).convertTo[Double],
      fields(txCountKey).convertTo[Int],
      fields(keyPoolOldestKey).convertTo[Long],
      fields(keyPoolSizeKey).convertTo[Int]
    )
  }

  /** Serialize a [[WalletInfo]] back into its JSON object representation. */
  override def write(wallet: WalletInfo): JsValue = JsObject(
    walletVersionKey -> JsNumber(wallet.walletVersion),
    balanceKey -> JsNumber(wallet.balance),
    unconfirmedBalanceKey -> JsNumber(wallet.unconfirmedBalance),
    immatureBalanceKey -> JsNumber(wallet.immatureBalance),
    txCountKey -> JsNumber(wallet.txCount),
    keyPoolOldestKey -> JsNumber(wallet.keyPoolOldest),
    keyPoolSizeKey -> JsNumber(wallet.keyPoolSize)
  )
}
} | TomMcCabe/scalacoin | src/main/scala/org/scalacoin/marshallers/rpc/bitcoincore/wallet/WalletMarshaller.scala | Scala | mit | 1,844 |
package ch.wsl.box.model.shared
case class EntityKind(kind: String) {
  /** True when this kind maps onto a raw database entity (a table or a view). */
  def isEntity: Boolean = entityOrForm == "entity"

  /** Collapses "table" and "view" into the umbrella kind "entity"; other kinds pass through. */
  def entityOrForm: String = kind match {
    case "table" | "view" => "entity"
    case other            => other
  }

  /** English plural of the kind (e.g. "entity" -> "entities", "form" -> "forms"). */
  def plural: String = kind match {
    case "entity"    => "entities"
    case "boxentity" => "boxentities"
    case other       => other + "s" // for tables, views and forms
  }
}
object EntityKind {
// Well-known kinds; "entity" is the umbrella for raw tables and views.
final val ENTITY = EntityKind("entity") //table or view
final val TABLE = EntityKind("table")
final val VIEW = EntityKind("view")
final val FORM = EntityKind("form")
// Box-internal (application-metadata) variants.
final val BOX_TABLE = EntityKind("box-table")
final val BOX_FORM = EntityKind("box-form")
final val FUNCTION = EntityKind("function")
final val EXPORT = EntityKind("export")
}
| Insubric/box | shared/src/main/scala/ch/wsl/box/model/shared/EntityKind.scala | Scala | apache-2.0 | 781 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of Scala code snippets matching specific criteria, giving a quick overview of the dataset's contents without deeper analysis.