code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.twitter.scalding
import com.twitter.bijection.{ Injection, AbstractInjection }
import com.twitter.bijection.Inversion._
import com.twitter.elephantbird.cascading2.scheme.LzoTextLine
import org.json4s._
import org.json4s.native.Serialization._
import org.json4s.{ NoTypeHints, native }
import scala.collection.JavaConverters._
import scala.util.Try
import cascading.pipe.Pipe
/**
* This type uses the structural type of a case class, but not it's name, to describe the Json using json4s.
* This is intended to be used for intermediate output from a REPL session.
* The intended use is to save adhoc data between sessions.
* The fully qualified class name of classes defined in a REPL is not stable between REPL sessions.
*
* We believe using a fixed schema, such as thrift or Avro is a much safer way to do long term productionized data
* pipelines to minimize risks of incompatible changes to schema that render old data unreadable.
*/
object TypedJson {
// json4s formats used for (de)serialization; NoTypeHints because only the
// structural shape of the case class — not its class name — defines the schema.
private implicit val formats = native.Serialization.formats(NoTypeHints)
// Builds an Injection that writes a case class as a JSON string and parses it
// back. `invert` wraps json4s' `read` in `attempt`, so malformed JSON yields a
// Failure instead of throwing.
private def caseClass2Json[A <: AnyRef](implicit tt: Manifest[A], fmt: Formats): Injection[A, String] = new AbstractInjection[A, String] {
override def apply(a: A): String = write(a)
override def invert(b: String): Try[A] = attempt(b)(read[A])
}
// Convenience factory: TypedJson[T]("path")
def apply[T <: AnyRef: Manifest](p: String) = new TypedJson(p)
}
class TypedJson[T <: AnyRef: Manifest](p: String) extends FixedPathSource(p)
with TextSourceScheme
with SingleMappable[T]
with TypedSink[T] {
import Dsl._
import TypedJson._
// Field name carrying the serialized JSON payload through the Cascading pipe.
private[this] val fieldSym = 'jsonString
// Rebuilt lazily per JVM; @transient keeps it out of task serialization.
// NOTE(review): presumably because Injection is not serializable — confirm.
@transient private[this] lazy val inj = caseClass2Json[T]
// Write path: serialize each T into a single JSON-string field.
override def transformForWrite(pipe: Pipe) =
pipe.mapTo((0) -> (fieldSym)) { inj.apply(_: T) }
// Read path: parse each text line back into T; `.get` throws on malformed JSON.
override def transformForRead(pipe: Pipe) =
pipe.mapTo(('line) -> (fieldSym)) { (jsonStr: String) => inj.invert(jsonStr).get }
override def setter[U <: T] = TupleSetter.asSubSetter[T, U](TupleSetter.singleSetter[T])
// Reads the source directly via a tap, deserializing the 'line field of each
// tuple; `.get` throws on malformed JSON.
override def toIterator(implicit config: Config, mode: Mode): Iterator[T] = {
val tap = createTap(Read)(mode)
CascadingMode.cast(mode)
.openForRead(config, tap)
.asScala
.map { te =>
inj.invert(te.selectTuple('line).getObject(0).asInstanceOf[String]).get
}
}
}
// Same JSON source/sink, but read/written LZO-compressed via elephant-bird's
// LzoTextLine scheme when running on Hadoop.
case class TypedJsonLzo[T <: AnyRef: Manifest](p: String) extends TypedJson[T](p) {
override def hdfsScheme = HadoopSchemeInstance(new LzoTextLine().asInstanceOf[cascading.scheme.Scheme[_, _, _, _, _]])
}
| jzmq/scalding | scalding-json/src/main/scala/com/twitter/scalding/TypedJson.scala | Scala | apache-2.0 | 2,515 |
package filodb.core.reprojector
import com.typesafe.config.ConfigFactory
import org.velvia.filo.TupleRowReader
import filodb.core.NamesTestData
import filodb.core.metadata.{Column, Dataset}
import filodb.core.store.SegmentSpec
import org.scalatest.{FunSpec, Matchers, BeforeAndAfter}
// Ingests a large number of generated rows into a FiloMemTable, printing free
// heap and elapsed time so a human (or VisualVM) can judge memory behavior.
class MemTableMemoryTest extends FunSpec with Matchers with BeforeAndAfter {
import NamesTestData._
// Raise the per-table row cap so the test's 100k rows all fit.
val newSetting = "memtable.max-rows-per-table = 200000"
val config = ConfigFactory.parseString(newSetting).withFallback(
ConfigFactory.load("application_test.conf")).getConfig("filodb")
val mTable = new FiloMemTable(projection, config)
import scala.concurrent.ExecutionContext.Implicits.global
before {
mTable.clearAllData()
}
val numRows = 100000
// Lazily generates ~numRows rows by repeating the `names` fixture, varying the
// last column per partition number.
val lotsOfNames = (0 until (numRows/6)).toIterator.flatMap { partNum =>
names.map { t => (t._1, t._2, t._3, t._4, Some(partNum.toString)) }.toIterator
}
// Prints off-heap usage via the JMX memory bean.
private def printDetailedMemUsage() {
val mxBean = java.lang.management.ManagementFactory.getMemoryMXBean
println(mxBean.getNonHeapMemoryUsage)
}
// To really see amount of memory used, might have to uncomment the thread sleep and use VisualVM,
// because tests are forked.
it("should add tons of rows without overflowing memory and taking too long") {
val start = System.currentTimeMillis
val startFreeMem = sys.runtime.freeMemory
println(s"Start: free memory = $startFreeMem")
printDetailedMemUsage()
// NOTE(review): this local `numRows` shadows the class-level `numRows` above.
var numRows = 0
lotsOfNames.map(TupleRowReader).grouped(2000).foreach { rows =>
mTable.ingestRows(rows.toSeq)
numRows += rows.length
// println(s"Ingested $numRows rows")
// Thread sleep 1000
}
val elapsed = System.currentTimeMillis - start
val endFreeMem = sys.runtime.freeMemory
println(s"End: free memory = $endFreeMem elapsed = ${elapsed} ms")
printDetailedMemUsage()
}
}
| markhamstra/FiloDB | core/src/test/scala/filodb.core/reprojector/MemTableMemoryTest.scala | Scala | apache-2.0 | 1,911 |
/* Copyright 2009-2014 EPFL, Lausanne */
package leon.custom
import leon._
import leon.lang._
import leon.collection._
import leon.annotation._
/**
 * Verified cons list for Leon testcases. Each operation carries its
 * specification as `require`/`ensuring` contracts checked by the verifier.
 */
sealed abstract class List[T] {
  /** Number of elements; specified non-negative. */
  def size: BigInt = (this match {
    case Nil() => BigInt(0)
    case Cons(h, t) => BigInt(1) + t.size
  }) ensuring (_ >= 0)

  /** Set of all elements in this list. */
  def content: Set[T] = this match {
    case Nil() => Set()
    case Cons(h, t) => Set(h) ++ t.content
  }

  /** Membership test, proven equivalent to `content contains v`. */
  def contains(v: T): Boolean = (this match {
    case Cons(h, t) if h == v => true
    case Cons(_, t) => t.contains(v)
    case Nil() => false
  }) ensuring { res => res == (content contains v) }

  /** Concatenation; preserves combined content and total size. */
  def ++(that: List[T]): List[T] = (this match {
    case Nil() => that
    case Cons(x, xs) => Cons(x, xs ++ that)
  }) ensuring { res => (res.content == this.content ++ that.content) && (res.size == this.size + that.size)}

  /** First element; only defined on non-empty lists. */
  def head: T = {
    require(this != Nil[T]())
    this match {
      case Cons(h, t) => h
    }
  }

  /** All elements but the first; only defined on non-empty lists. */
  def tail: List[T] = {
    require(this != Nil[T]())
    this match {
      case Cons(h, t) => t
    }
  }

  /** Zero-based indexed access. */
  def apply(index: BigInt): T = {
    require(0 <= index && index < size)
    if (index == 0) {
      head
    } else {
      tail(index-1)
    }
  }

  /** Prepend. */
  def ::(t:T): List[T] = Cons(t, this)

  /**
   * Append a single element.
   * Bug fix: the Nil case previously returned `this`, silently dropping `t`
   * (the old "FIXME forgot t" comment) and violating the ensuring clause below.
   */
  def :+(t:T): List[T] = {
    this match {
      case Nil() => Cons(t, this)
      case Cons(x, xs) => Cons(x, xs :+ (t))
    }
  } ensuring(res => (res.size == size + 1) && (res.content == content ++ Set(t)))

  /** Reversal; size and content preserved. */
  def reverse: List[T] = {
    this match {
      case Nil() => this
      case Cons(x,xs) => xs.reverse :+ x
    }
  } ensuring (res => (res.size == size) && (res.content == content))

  /** First `i` elements (fewer if the list is shorter). */
  def take(i: BigInt): List[T] = (this, i) match {
    case (Nil(), _) => Nil()
    case (Cons(h, t), i) =>
      if (i == 0) {
        Nil()
      } else {
        Cons(h, t.take(i-1))
      }
  }

  /** List without its first `i` elements. */
  def drop(i: BigInt): List[T] = (this, i) match {
    case (Nil(), _) => Nil()
    case (Cons(h, t), i) =>
      if (i == 0) {
        Cons(h, t)
      } else {
        t.drop(i-1)
      }
  }

  /** Elements in [from, to); bounds must lie strictly inside the list. */
  def slice(from: BigInt, to: BigInt): List[T] = {
    require(from < to && to < size && from >= 0)
    drop(from).take(to-from)
  }

  /** Replace every occurrence of `from` with `to`. */
  def replace(from: T, to: T): List[T] = this match {
    case Nil() => Nil()
    case Cons(h, t) =>
      val r = t.replace(from, to)
      if (h == from) {
        Cons(to, r)
      } else {
        Cons(h, r)
      }
  }

  // Worker for `chunks`: `acc` is the chunk under construction, `res` the
  // finished chunks, `s0` the remaining capacity of the current chunk.
  private def chunk0(s: BigInt, l: List[T], acc: List[T], res: List[List[T]], s0: BigInt): List[List[T]] = l match {
    case Nil() =>
      if (acc.size > 0) {
        res :+ acc
      } else {
        res
      }
    case Cons(h, t) =>
      if (s0 == 0) {
        chunk0(s, l, Nil(), res :+ acc, s)
      } else {
        chunk0(s, t, acc :+ h, res, s0-1)
      }
  }

  /** Split into consecutive chunks of size `s` (last chunk may be shorter). */
  def chunks(s: BigInt): List[List[T]] = {
    require(s > 0)
    chunk0(s, this, Nil(), Nil(), s)
  }

  /** Pairwise zip, truncated to the shorter list. */
  def zip[B](that: List[B]): List[(T, B)] = (this, that) match {
    case (Cons(h1, t1), Cons(h2, t2)) =>
      Cons((h1, h2), t1.zip(t2))
    case (_) =>
      Nil()
  }

  /** Remove all occurrences of `e`. */
  def -(e: T): List[T] = this match {
    case Cons(h, t) =>
      if (e == h) {
        t - e
      } else {
        Cons(h, t - e)
      }
    case Nil() =>
      Nil()
  }

  /** Remove all elements that occur in `that`. */
  def --(that: List[T]): List[T] = this match {
    case Cons(h, t) =>
      if (that.contains(h)) {
        t -- that
      } else {
        Cons(h, t -- that)
      }
    case Nil() =>
      Nil()
  }

  /** Keep only elements that occur in `that`. */
  def &(that: List[T]): List[T] = this match {
    case Cons(h, t) =>
      if (that.contains(h)) {
        Cons(h, t & that)
      } else {
        t & that
      }
    case Nil() =>
      Nil()
  }

  /** Right-pad with `s` copies of `e` (no-op when s <= 0). */
  def pad(s: BigInt, e: T): List[T] = { (this, s) match {
    case (_, s) if s <= 0 =>
      this
    case (Nil(), s) =>
      Cons(e, Nil().pad(s-1, e))
    case (Cons(h, t), s) =>
      Cons(h, t.pad(s, e))
  }} ensuring { res =>
    ((this,s,e), res) passes {
      case (Cons(a,Nil()), BigInt(2), x) => Cons(a, Cons(x, Cons(x, Nil())))
    }
  }

  /** Index of the first occurrence of `e`, if any. */
  def find(e: T): Option[BigInt] = this match {
    case Nil() => None()
    case Cons(h, t) =>
      if (h == e) {
        Some(0)
      } else {
        t.find(e) match {
          case None() => None()
          case Some(i) => Some(i+1)
        }
      }
  }

  /** All elements but the last (Nil for the empty list). */
  def init: List[T] = (this match {
    case Cons(h, Nil()) =>
      Nil[T]()
    case Cons(h, t) =>
      Cons[T](h, t.init)
    case Nil() =>
      Nil[T]()
  }) ensuring ( (r: List[T]) => ((r.size < this.size) || (this.size == 0)) )

  /** Last element, if any. */
  def lastOption: Option[T] = this match {
    case Cons(h, t) =>
      t.lastOption.orElse(Some(h))
    case Nil() =>
      None()
  }

  /** First element, if any. */
  def firstOption: Option[T] = this match {
    case Cons(h, t) =>
      Some(h)
    case Nil() =>
      None()
  }

  /** Remove duplicates, keeping first occurrences. */
  def unique: List[T] = this match {
    case Nil() => Nil()
    case Cons(h, t) =>
      Cons(h, t.unique - h)
  }

  /** Split around every occurrence of `e`. */
  def splitAt(e: T): List[List[T]] = split(Cons(e, Nil()))

  /** Split around every occurrence of any separator in `seps`. */
  def split(seps: List[T]): List[List[T]] = this match {
    case Cons(h, t) =>
      if (seps.contains(h)) {
        Cons(Nil(), t.split(seps))
      } else {
        val r = t.split(seps)
        Cons(Cons(h, r.head), r.tail)
      }
    case Nil() =>
      Cons(Nil(), Nil())
  }

  /** Number of occurrences of `e`. */
  def count(e: T): BigInt = this match {
    case Cons(h, t) =>
      if (h == e) {
        1 + t.count(e)
      } else {
        t.count(e)
      }
    case Nil() =>
      0
  }

  /** Split into two halves; the first half gets the smaller share when odd. */
  def evenSplit: (List[T], List[T]) = {
    val c = size/2
    (take(c), drop(c))
  }

  /** Insert `l` at `pos`; a negative position counts from the end. */
  def insertAt(pos: BigInt, l: List[T]): List[T] = {
    if(pos < 0) {
      insertAt(size + pos, l)
    } else if(pos == 0) {
      l ++ this
    } else {
      this match {
        case Cons(h, t) =>
          Cons(h, t.insertAt(pos-1, l))
        case Nil() =>
          l
      }
    }
  }

  /** Overwrite elements starting at `pos` with `l`; negative counts from the end. */
  def replaceAt(pos: BigInt, l: List[T]): List[T] = {
    if(pos < 0) {
      replaceAt(size + pos, l)
    } else if(pos == 0) {
      l ++ this.drop(l.size)
    } else {
      this match {
        case Cons(h, t) =>
          Cons(h, t.replaceAt(pos-1, l))
        case Nil() =>
          l
      }
    }
  }

  /**
   * Rotate left by `s` (right for negative `s`).
   * NOTE(review): `s % size` divides by zero on an empty list; callers are
   * assumed to pass non-empty lists — consider adding `require(!isEmpty)`.
   */
  def rotate(s: BigInt): List[T] = {
    if (s < 0) {
      rotate(size+s)
    } else {
      val s2 = s % size
      drop(s2) ++ take(s2)
    }
  }

  /** True iff this list has no elements. */
  def isEmpty = this match {
    case Nil() => true
    case _ => false
  }
}
@ignore
object List {
// Varargs factory for convenience in unverified code; marked @ignore so the
// verifier skips it, and the body is intentionally unimplemented (???).
def apply[T](elems: T*): List[T] = ???
}
@library
object ListOps {
  /** Flattens one level of nesting. */
  def flatten[T](ls: List[List[T]]): List[T] = ls match {
    case Cons(h, t) => h ++ flatten(t)
    case Nil() => Nil()
  }

  /** True iff `ls` is sorted in non-decreasing order. */
  def isSorted(ls: List[BigInt]): Boolean = ls match {
    case Nil() => true
    case Cons(_, Nil()) => true
    case Cons(h1, Cons(h2, _)) if(h1 > h2) => false
    case Cons(_, t) => isSorted(t)
  }

  /** Insertion sort. */
  def sorted(ls: List[BigInt]): List[BigInt] = ls match {
    case Cons(h, t) => insSort(sorted(t), h)
    case Nil() => Nil()
  }

  /**
   * Inserts `v` into the sorted list `ls`, keeping it sorted.
   * Bug fix: when `v <= h` the previous code returned `Cons(v, t)`,
   * silently dropping the head `h` (e.g. insSort([1,2], 0) lost the 1).
   */
  def insSort(ls: List[BigInt], v: BigInt): List[BigInt] = ls match {
    case Nil() => Cons(v, Nil())
    case Cons(h, t) =>
      if (v <= h) {
        Cons(v, Cons(h, t))
      } else {
        Cons(h, insSort(t, v))
      }
  }
}
// The two list constructors: a non-empty cell and the empty list.
case class Cons[T](h: T, t: List[T]) extends List[T]
case class Nil[T]() extends List[T]
@library
// Inductive lemmas about List, each stated as a Boolean function with `.holds`.
// Each lemma first unfolds the inductive hypothesis via recursion, then states
// the claim as the final conjunct.
object ListSpecs {
// (l :+ t)(i) equals l(i) for i < l.size and t at index l.size.
def snocIndex[T](l : List[T], t : T, i : BigInt) : Boolean = {
require(0 <= i && i < l.size + 1)
// proof:
(l match {
case Nil() => true
case Cons(x, xs) => if (i > 0) snocIndex[T](xs, t, i-1) else true
}) &&
// claim:
((l :+ t).apply(i) == (if (i < l.size) l(i) else t))
}.holds
// l.reverse(i) == l(l.size - 1 - i).
// NOTE(review): the inductive call recurses on `l` with unchanged arguments
// (`reverseIndex[T](l,i)`), which never terminates at runtime; the induction
// presumably should be on `xs` — confirm against the upstream Leon library.
def reverseIndex[T](l : List[T], i : BigInt) : Boolean = {
require(0 <= i && i < l.size)
(l match {
case Nil() => true
case Cons(x,xs) => snocIndex(l, x, i) && reverseIndex[T](l,i)
}) &&
(l.reverse.apply(i) == l.apply(l.size - 1 - i))
}.holds
// Indexing into a concatenation selects from l1 or l2 by offset.
def appendIndex[T](l1 : List[T], l2 : List[T], i : BigInt) : Boolean = {
require(0 <= i && i < l1.size + l2.size)
(l1 match {
case Nil() => true
case Cons(x,xs) => if (i==0) true else appendIndex[T](xs,l2,i-1)
}) &&
((l1 ++ l2).apply(i) == (if (i < l1.size) l1(i) else l2(i - l1.size)))
}.holds
// ++ is associative.
def appendAssoc[T](l1 : List[T], l2 : List[T], l3 : List[T]) : Boolean = {
(l1 match {
case Nil() => true
case Cons(x,xs) => appendAssoc(xs,l2,l3)
}) &&
(((l1 ++ l2) ++ l3) == (l1 ++ (l2 ++ l3)))
}.holds
// Appending one element equals concatenating a singleton list.
def snocIsAppend[T](l : List[T], t : T) : Boolean = {
(l match {
case Nil() => true
case Cons(x,xs) => snocIsAppend(xs,t)
}) &&
((l :+ t) == l ++ Cons[T](t, Nil()))
}.holds
// Snoc distributes over concatenation.
def snocAfterAppend[T](l1 : List[T], l2 : List[T], t : T) : Boolean = {
(l1 match {
case Nil() => true
case Cons(x,xs) => snocAfterAppend(xs,l2,t)
}) &&
((l1 ++ l2) :+ t == (l1 ++ (l2 :+ t)))
}.holds
// Reversing after a snoc moves the snocced element to the front.
def snocReverse[T](l : List[T], t : T) : Boolean = {
(l match {
case Nil() => true
case Cons(x,xs) => snocReverse(xs,t)
}) &&
((l :+ t).reverse == Cons(t, l.reverse))
}.holds
// Reverse is an involution.
def reverseReverse[T](l : List[T]) : Boolean = {
(l match {
case Nil() => true
case Cons(x,xs) => reverseReverse[T](xs) && snocReverse[T](xs.reverse, x)
}) &&
(l.reverse.reverse == l)
}.holds
//// my hand calculation shows this should work, but it does not seem to be found
//def reverseAppend[T](l1 : List[T], l2 : List[T]) : Boolean = {
// (l1 match {
// case Nil() => true
// case Cons(x,xs) => {
// reverseAppend(xs,l2) &&
// snocAfterAppend[T](l2.reverse, xs.reverse, x) &&
// l1.reverse == (xs.reverse :+ x)
// }
// }) &&
// ((l1 ++ l2).reverse == (l2.reverse ++ l1.reverse))
//}.holds
}
| ericpony/scala-examples | testcases/repair/List/List3.scala | Scala | mit | 9,676 |
// scalac: -Xfatal-warnings
//
// none of these should complain about exhaustivity
// scalac regression test (SI-6008): with -Xfatal-warnings, any spurious
// inexhaustivity warning on these (Int, Boolean) tuple matches fails the build.
// The code must stay exactly as written — the pattern shapes ARE the test.
class Test {
// It would fail on the following inputs: (_, false), (_, true)
def x(in: (Int, Boolean)) = in match { case (i: Int, b: Boolean) => 3 }
// There is no warning if the Int is ignored or bound without an explicit type:
def y(in: (Int, Boolean)) = in match { case (_, b: Boolean) => 3 }
// Keeping the explicit type for the Int but dropping the one for Boolean presents a spurious warning again:
// It would fail on the following input: (_, _)
def z(in: (Int, Boolean)) = in match { case (i: Int, b) => 3 }
}
| scala/scala | test/files/pos/t6008.scala | Scala | apache-2.0 | 617 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtInteger, Linked}
// CP266 holds CT600 box 37: total profits chargeable to corporation tax.
case class CP266(value: Int) extends CtBoxIdentifier("Total profits chargeable to CT (box 37)") with CtInteger
// Linked calculation: CP266 is derived directly from CP295's value.
object CP266 extends Linked[CP295, CP266]{
override def apply(source: CP295): CP266 = CP266(source.value)
}
| ahudspith-equalexperts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP266.scala | Scala | apache-2.0 | 930 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.hive.test.TestHive._
/**
* A set of tests that validates support for Hive Explain command.
*/
class HiveExplainSuite extends QueryTest {
// Plain EXPLAIN shows only the physical plan; EXTENDED adds the parsed,
// analyzed and optimized logical plans.
test("explain extended command") {
checkExistence(sql(" explain select * from src where key=123 "), true,
"== Physical Plan ==")
checkExistence(sql(" explain select * from src where key=123 "), false,
"== Parsed Logical Plan ==",
"== Analyzed Logical Plan ==",
"== Optimized Logical Plan ==")
checkExistence(sql(" explain extended select * from src where key=123 "), true,
"== Parsed Logical Plan ==",
"== Analyzed Logical Plan ==",
"== Optimized Logical Plan ==",
"== Physical Plan ==",
"Code Generation", "== RDD ==")
}
// CTAS statements should explain as CreateTableAsSelect / InsertIntoHiveTable,
// including when SerDe and table properties are specified.
test("explain create table command") {
checkExistence(sql("explain create table temp__b as select * from src limit 2"), true,
"== Physical Plan ==",
"InsertIntoHiveTable",
"Limit",
"src")
checkExistence(sql("explain extended create table temp__b as select * from src limit 2"), true,
"== Parsed Logical Plan ==",
"== Analyzed Logical Plan ==",
"== Optimized Logical Plan ==",
"== Physical Plan ==",
"CreateTableAsSelect",
"InsertIntoHiveTable",
"Limit",
"src")
checkExistence(sql(
"""
| EXPLAIN EXTENDED CREATE TABLE temp__b
| ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
| WITH SERDEPROPERTIES("serde_p1"="p1","serde_p2"="p2")
| STORED AS RCFile
| TBLPROPERTIES("tbl_p1"="p11", "tbl_p2"="p22")
| AS SELECT * FROM src LIMIT 2
""".stripMargin), true,
"== Parsed Logical Plan ==",
"== Analyzed Logical Plan ==",
"== Optimized Logical Plan ==",
"== Physical Plan ==",
"CreateTableAsSelect",
"InsertIntoHiveTable",
"Limit",
"src")
}
}
| hengyicai/OnlineAggregationUCAS | sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala | Scala | apache-2.0 | 2,977 |
package com.thoughtworks.microbuilder.play
import java.util.concurrent.TimeUnit.SECONDS
import com.github.dreamhead.moco.{Moco, _}
import com.ning.http.client.AsyncHttpClientConfig
import com.thoughtworks.microbuilder.core.IRouteConfiguration
import com.thoughtworks.microbuilder.play.Implicits._
import com.thoughtworks.microbuilder.play.exception.MicrobuilderException.{TextApplicationException, WrongResponseFormatException}
import org.junit.runner.RunWith
import org.specs2.mock.{Mockito => SpecMockito}
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import org.specs2.specification.{AfterAll, BeforeAll}
import play.api.libs.ws._
import play.api.libs.ws.ning._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent._
import scala.concurrent.duration.Duration
import scala.language.implicitConversions
@RunWith(classOf[JUnitRunner])
class RpcOutgoingTest extends Specification with SpecMockito with BeforeAll with AfterAll {
// Real HTTP client; the WSAPI below simply delegates to it.
val ws:WSClient = new NingWSClient(new AsyncHttpClientConfig.Builder().build())
val mockWsApi = new WSAPI {
override def url(url: String) = ws.url(url)
override def client = ws
}
// Moco stub server; started in beforeAll, stopped in afterAll.
var theServer: Runner = null
val configuration: IRouteConfiguration = MyRouteConfigurationFactory.routeConfiguration_com_thoughtworks_microbuilder_play_MyRpc
// Proxy under test: routes MyRpc calls to the stub server on port 8090.
val myRpc: MyRpc = MyOutgoingProxyFactory.outgoingProxy_com_thoughtworks_microbuilder_play_MyRpc(
new PlayOutgoingJsonService("http://localhost:8090", configuration, mockWsApi)
)
"This is a specification of using microbuilder-play tools to make http requests".txt
// A 500 with a plain-text body surfaces as TextApplicationException.
"Should throw TextApplicationException with TEXT_APPLICATION_FAILURE when structuralFailure is not configured" >> {
Await.result(myRpc.myMethod(1, "failure"), Duration(5, SECONDS)) must throwA.like {
case TextApplicationException(textError, code) =>{
textError === "server error"
code === 500
}
}
}
"Should convert myMethod to http get request and get the response" >> {
val response = Await.result(myRpc.myMethod(1, "abc"), Duration(5, SECONDS))
response.myInnerEntity.message === "this is a message"
response.myInnerEntity.code === 1
}
"Should convert createResource to http post request and get created response" >> {
val response = Await.result(myRpc.createResource("books", new Book(1, "name")), Duration(5, SECONDS))
response.result === "created"
}
// A 200 with an unparseable body surfaces as WrongResponseFormatException.
"Should throw native exception if the response is not legal json" >> {
Await.result(myRpc.myMethod(1, "wrong_json"), Duration(5, SECONDS)) must throwA.like {
case WrongResponseFormatException(textError) => textError === "Wrong JSON format: not a JSON"
}
}
// Stub one canned response per scenario exercised above.
def beforeAll() {
val server = Moco.httpServer(8090)
server.get(Moco.by(Moco.uri("/my-method/1/name/abc"))).response("""
{
"myInnerEntity": {
"code":1,
"message":"this is a message"
}
}""")
server.get(Moco.by(Moco.uri("/my-method/1/name/failure"))).response(Moco.`with`(Moco.text("server error")), Moco.status(500))
server.get(Moco.by(Moco.uri("/my-method/1/name/wrong_json"))).response(Moco.`with`(Moco.text("not a JSON")), Moco.status(200))
server.post(Moco.by(Moco.uri("/books"))).response(
"""
{"result":"created"}
""")
theServer = Runner.runner(server)
theServer.start()
}
// Release the stub server and the HTTP client.
override def afterAll() = {
theServer.stop()
ws.close()
}
}
| ThoughtWorksInc/microbuilder-play | src/test/scala/com/thoughtworks/microbuilder/play/RpcOutgoingTest.scala | Scala | apache-2.0 | 3,508 |
package com.twitter.finatra.http.internal.marshalling.mustache
import com.github.mustachejava.{Mustache, MustacheFactory}
import com.twitter.finatra.conversions.map._
import com.twitter.finatra.utils.AutoClosable.tryWith
import com.twitter.io.Buf
import java.io.{ByteArrayOutputStream, OutputStreamWriter}
import java.nio.charset.StandardCharsets
import java.util.concurrent.ConcurrentHashMap
import javax.inject.{Inject, Singleton}
import scala.collection.JavaConverters._
@Singleton
class MustacheService @Inject()(
  mustacheFactory: MustacheFactory) {

  // Compiled templates cached by name, so each template is compiled at most once.
  private val templateToMustacheCache = new ConcurrentHashMap[String, Mustache]().asScala

  /* Public */

  /**
   * Renders the named Mustache template against `obj` and returns the
   * UTF-8 encoded result as a Buf.
   */
  def createBuffer(templateName: String, obj: Any): Buf = {
    val compiled = lookupMustache(templateName)
    tryWith(new ByteArrayOutputStream(1024)) { bytes =>
      tryWith(new OutputStreamWriter(bytes, StandardCharsets.UTF_8)) { out =>
        compiled.execute(out, obj)
        out.close()
        Buf.ByteArray.Owned(bytes.toByteArray)
      }
    }
  }

  /* Private */

  // Fetches the compiled template from the cache, compiling it on first use.
  private def lookupMustache(templateName: String): Mustache =
    templateToMustacheCache.atomicGetOrElseUpdate(templateName, {
      mustacheFactory.compile(templateName)
    })
}
| deanh/finatra | http/src/main/scala/com/twitter/finatra/http/internal/marshalling/mustache/MustacheService.scala | Scala | apache-2.0 | 1,234 |
/*
* Copyright (c) 2012-2015 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich
package hadoop
package jobs
// Java
import java.io.File
import java.io.BufferedWriter
import java.io.FileWriter
// Apache Commons IO
import org.apache.commons.io.FileUtils
import org.apache.commons.io.filefilter.TrueFileFilter
// Apache Commons Codec
import org.apache.commons.codec.binary.Base64
// Scala
import scala.collection.mutable.ListBuffer
import scala.io.Source
import scala.collection.JavaConverters._
// Scalaz
import scalaz._
import Scalaz._
// Scala
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
// Scalding
import com.twitter.scalding._
// Specs2
import org.specs2.matcher.{Matcher, Expectable}
import org.specs2.matcher.Matchers._
/**
* Holds helpers for running integration
* tests on SnowPlow EtlJobs.
*/
object JobSpecHelpers {
/**
* A Specs2 matcher to check if a Scalding
* output sink is empty or not.
*/
val beEmpty: Matcher[ListBuffer[_]] =
((_: ListBuffer[_]).isEmpty, "is not empty")
/**
* A Specs2 matcher to check if a directory
* on disk is empty or not.
*/
val beEmptyFile: Matcher[File] =
((f: File) =>
!f.exists || (f.isFile && f.length == 0),
"is populated file"
)
/**
* How Scalding represents input lines
*/
type ScaldingLines = List[(String, String)]
/**
* Base64-urlsafe encoded version of this standard
* Iglu configuration.
*/
private val IgluConfig = {
val encoder = new Base64(true) // true means "url safe"
new String(encoder.encode(SpecHelpers.IgluConfig.getBytes)
)
}
/**
* A case class to make it easy to write out input
* lines for Scalding jobs without manually appending
* line numbers.
*
* @param l The repeated String parameters
*/
case class Lines(l: String*) {
val lines = l.toList
val numberedLines = number(lines)
/**
* Writes the lines to the given file
*
* @param file The file to write the
* lines to
*/
def writeTo(file: File) = {
val writer = new BufferedWriter(new FileWriter(file))
for (line <- lines) {
writer.write(line)
writer.newLine()
}
writer.close()
}
/**
* Numbers the lines in the Scalding format.
* Converts "My line" to ("0" -> "My line")
*
* @param lines The List of lines to number
* @return the List of ("line number" -> "line")
* tuples.
*/
private def number(lines: List[String]): ScaldingLines =
for ((l, n) <- lines zip (0 until lines.size)) yield (n.toString -> l)
}
/**
* Implicit conversion from a Lines object to
* a ScaldingLines, aka List[(String, String)],
* ready for Scalding to use.
*
* @param lines The Lines object
* @return the ScaldingLines ready for Scalding
*/
implicit def Lines2ScaldingLines(lines : Lines): ScaldingLines = lines.numberedLines
/**
* A standard JobSpec definition used by all of our
* integration tests.
*/
val ShredJobSpec =
JobTest("com.snowplowanalytics.snowplow.enrich.hadoop.ShredJob").
arg("input_folder", "inputFolder").
arg("output_folder", "outputFolder").
arg("bad_rows_folder", "badFolder").
arg("exceptions_folder", "exceptionsFolder").
arg("iglu_config", IgluConfig)
/**
* The three output locations produced by a job run:
* good output, bad rows, and exceptions.
*/
case class Sinks(
val output: File,
val badRows: File,
val exceptions: File) {
// Best-effort removal of the temporary sink locations after a test.
def deleteAll() {
for (f <- List(exceptions, badRows, output)) {
f.delete()
}
}
}
/**
* Run the ShredJob using the Scalding Tool.
*
* @param lines The input lines to shred
* @return a Tuple3 containing open File
* objects for the output, bad rows
* and exceptions temporary directories.
*/
def runJobInTool(lines: Lines): Sinks = {
// NOTE(review): createTempFile creates a plain file, so the mkdir/mkdirs
// call below returns false and the "directory" is actually a file that the
// local-mode job then reads/writes — confirm this is intended.
def mkTmpDir(tag: String, createParents: Boolean = false, containing: Option[Lines] = None): File = {
val f = File.createTempFile(s"snowplow-shred-job-${tag}-", "")
if (createParents) f.mkdirs() else f.mkdir()
containing.map(_.writeTo(f))
f
}
val input = mkTmpDir("input", createParents = true, containing = lines.some)
val output = mkTmpDir("output")
val badRows = mkTmpDir("bad-rows")
val exceptions = mkTmpDir("exceptions")
val args = Array[String]("com.snowplowanalytics.snowplow.enrich.hadoop.ShredJob", "--local",
"--input_folder", input.getAbsolutePath,
"--output_folder", output.getAbsolutePath,
"--bad_rows_folder", badRows.getAbsolutePath,
"--exceptions_folder", exceptions.getAbsolutePath,
"--iglu_config", IgluConfig)
// Execute
Tool.main(args)
input.delete()
Sinks(output, badRows, exceptions)
}
/**
* Removes the timestamp from bad rows so that what remains is deterministic
*
* @param badRow
* @return The bad row without the timestamp
*/
def removeTstamp(badRow: String): String = {
// NOTE(review): json4s "\\" is the recursive-descent selector (matches at
// any depth); "\" would select only top-level fields — confirm intended.
val badRowJson = parse(badRow)
val badRowWithoutTimestamp = ("line", (badRowJson \\ "line")) ~ ("errors", (badRowJson \\ "errors"))
compact(badRowWithoutTimestamp)
}
/**
* Reads a file at the given path into a List of Strings
*
* @param root A root filepath
* @param relativePath The relative path to the file from
* the root
* @return the file contents
*/
def readFile(root: File, relativePath: String): List[String] =
Source
.fromFile(new File(root, relativePath))
.getLines
.toList
/**
* Recursively lists files in a given path, excluding the
* supplied paths.
*
* @param root A root filepath
* @param exclusions A list of paths to exclude from the listing
* @return the list of files contained in the root, minus the
* exclusions
*/
def listFilesWithExclusions(root: File, exclusions: List[String]): List[String] = {
val excl = for {
e <- exclusions
} yield (new File(root, e)).getCanonicalPath
FileUtils.listFiles(root, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE)
.asScala
.toList
.map(_.getCanonicalPath)
.filter(p => !excl.contains(p))
}
}
| bigdecisions/snowplow | 3-enrich/scala-hadoop-shred/src/test/scala/com.snowplowanalytics.snowplow.enrich.hadoop/jobs/JobSpecHelpers.scala | Scala | apache-2.0 | 6,906 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.javadsl.persistence.protobuf
import akka.actor.ExtendedActorSystem
import akka.protobuf.ByteString
import akka.serialization.BaseSerializer
import akka.serialization.Serialization
import akka.serialization.SerializationExtension
import akka.serialization.SerializerWithStringManifest
import com.lightbend.lagom.internal.persistence.cluster.ClusterDistribution.EnsureActive
import com.lightbend.lagom.javadsl.persistence.CommandEnvelope
import com.lightbend.lagom.javadsl.persistence.PersistentEntity
import com.lightbend.lagom.javadsl.persistence.PersistentEntity._
import com.lightbend.lagom.internal.javadsl.persistence.protobuf.msg.{ PersistenceMessages => pm }
/**
* Protobuf serializer of CommandEnvelope, and other PersistentEntity
* messages.
*/
private[lagom] class PersistenceMessageSerializer(val system: ExtendedActorSystem)
extends SerializerWithStringManifest with BaseSerializer {
@volatile
private var ser: Serialization = _
def serialization: Serialization = {
if (ser == null) ser = SerializationExtension(system)
ser
}
val CommandEnvelopeManifest = "A"
val InvalidCommandExceptionManifest = "B"
val UnhandledCommandExceptionManifest = "C"
val PersistExceptionManifest = "D"
val EnsureActiveManifest = "E"
private val emptyByteArray = Array.empty[Byte]
private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] β AnyRef](
CommandEnvelopeManifest -> commandEnvelopeFromBinary,
InvalidCommandExceptionManifest -> invalidCommandExceptionFromBinary,
UnhandledCommandExceptionManifest -> unhandledCommandExceptionFromBinary,
PersistExceptionManifest -> persistExceptionFromBinary,
EnsureActiveManifest -> ensureActiveFromBinary
)
override def manifest(obj: AnyRef): String = obj match {
case _: CommandEnvelope β CommandEnvelopeManifest
case _: InvalidCommandException => InvalidCommandExceptionManifest
case _: UnhandledCommandException => UnhandledCommandExceptionManifest
case _: PersistException => PersistExceptionManifest
case _: EnsureActive => EnsureActiveManifest
case _ β
throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]")
}
def toBinary(obj: AnyRef): Array[Byte] = obj match {
case m: CommandEnvelope β commandEnvelopeToProto(m).toByteArray
case InvalidCommandException(msg) => exceptionToProto(msg).toByteArray
case UnhandledCommandException(msg) => exceptionToProto(msg).toByteArray
case PersistException(msg) => exceptionToProto(msg).toByteArray
case ea: EnsureActive => ensureActiveToProto(ea).toByteArray
case _ β
throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]")
}
override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef =
fromBinaryMap.get(manifest) match {
case Some(f) β f(bytes)
case None β throw new IllegalArgumentException(
s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]"
)
}
private def commandEnvelopeToProto(commandEnvelope: CommandEnvelope): pm.CommandEnvelope = {
val payload = commandEnvelope.payload.asInstanceOf[AnyRef]
val msgSerializer = serialization.findSerializerFor(payload)
val builder = pm.CommandEnvelope.newBuilder()
.setEntityId(commandEnvelope.entityId)
.setEnclosedMessage(ByteString.copyFrom(msgSerializer.toBinary(payload)))
.setSerializerId(msgSerializer.identifier)
msgSerializer match {
case ser2: SerializerWithStringManifest β
val manifest = ser2.manifest(payload)
if (manifest != "")
builder.setMessageManifest(ByteString.copyFromUtf8(manifest))
case _ β
if (msgSerializer.includeManifest)
builder.setMessageManifest(ByteString.copyFromUtf8(payload.getClass.getName))
}
builder.build()
}
private def commandEnvelopeFromBinary(bytes: Array[Byte]): CommandEnvelope =
commandEnvelopeFromProto(pm.CommandEnvelope.parseFrom(bytes))
/**
 * Rebuilds a `CommandEnvelope` from its protobuf form by delegating payload
 * decoding to Akka's serialization extension, using the serializer id and
 * manifest captured at write time.
 */
private def commandEnvelopeFromProto(proto: pm.CommandEnvelope): CommandEnvelope = {
  // An absent manifest field is represented as the empty string, mirroring
  // what commandEnvelopeToProto writes.
  val messageManifest =
    if (proto.hasMessageManifest) proto.getMessageManifest.toStringUtf8
    else ""
  val decodedPayload = serialization.deserialize(
    proto.getEnclosedMessage.toByteArray,
    proto.getSerializerId,
    messageManifest
  ).get
  CommandEnvelope(proto.getEntityId, decodedPayload)
}
/** Converts an `EnsureActive` message to its protobuf form. */
private def ensureActiveToProto(ensureActive: EnsureActive): pm.EnsureActive = {
  val builder = pm.EnsureActive.newBuilder()
  builder.setEntityId(ensureActive.entityId)
  builder.build()
}
/** Parses raw bytes into the protobuf form and rebuilds the message. */
private def ensureActiveFromBinary(bytes: Array[Byte]): EnsureActive =
  ensureActiveFromProto(pm.EnsureActive.parseFrom(bytes))
/** Rebuilds an `EnsureActive` message from its protobuf form. */
private def ensureActiveFromProto(proto: pm.EnsureActive): EnsureActive =
  EnsureActive(proto.getEntityId)
/**
 * Wraps an exception message into the protobuf `Exception` envelope.
 * A `null` message is simply omitted (the field stays unset), matching
 * the `hasMessage` check performed on the read side.
 */
private def exceptionToProto(msg: String): pm.Exception = {
  val builder = pm.Exception.newBuilder()
  Option(msg).foreach(builder.setMessage)
  builder.build()
}
/** Decodes an `InvalidCommandException` from its protobuf byte form. */
private def invalidCommandExceptionFromBinary(bytes: Array[Byte]): InvalidCommandException =
  invalidCommandExceptionFromProto(pm.Exception.parseFrom(bytes))
/** An unset message field maps back to `null`, mirroring exceptionToProto. */
private def invalidCommandExceptionFromProto(proto: pm.Exception): InvalidCommandException = {
  val message = if (proto.hasMessage) proto.getMessage else null
  InvalidCommandException(message)
}
/** Decodes an `UnhandledCommandException` from its protobuf byte form. */
private def unhandledCommandExceptionFromBinary(bytes: Array[Byte]): UnhandledCommandException =
  unhandledCommandExceptionFromProto(pm.Exception.parseFrom(bytes))
/** An unset message field maps back to `null`, mirroring exceptionToProto. */
private def unhandledCommandExceptionFromProto(proto: pm.Exception): UnhandledCommandException = {
  val message = if (proto.hasMessage) proto.getMessage else null
  UnhandledCommandException(message)
}
/** Decodes a `PersistException` from its protobuf byte form. */
private def persistExceptionFromBinary(bytes: Array[Byte]): PersistException =
  persistExceptionFromProto(pm.Exception.parseFrom(bytes))
/** An unset message field maps back to `null`, mirroring exceptionToProto. */
private def persistExceptionFromProto(proto: pm.Exception): PersistException = {
  val message = if (proto.hasMessage) proto.getMessage else null
  PersistException(message)
}
}
| rstento/lagom | persistence/javadsl/src/main/scala/com/lightbend/lagom/internal/javadsl/persistence/protobuf/PersistenceMessageSerializer.scala | Scala | apache-2.0 | 6,304 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.libs
import java.io.File
import java.nio.charset.Charset
import java.nio.file.Path
import java.nio.file.Paths
import java.nio.file.{ Files => JFiles }
import java.util.concurrent.CountDownLatch
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import org.specs2.mock.Mockito
import org.specs2.mutable.After
import org.specs2.mutable.Specification
import org.specs2.specification.Scope
import play.api.ApplicationLoader.Context
import play.api._
import play.api.inject.DefaultApplicationLifecycle
import play.api.libs.Files._
import play.api.routing.Router
import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration._
/**
 * Specification for [[play.api.libs.Files.DefaultTemporaryFileCreator]]:
 * creation under concurrency, directory recreation, copy/move semantics
 * (with and without replace), atomic moves, compile-time DI wiring, and
 * custom temporary-directory configuration.
 */
class TemporaryFileCreatorSpec extends Specification with Mockito {
sequential
val utf8: Charset = Charset.forName("UTF8")
"DefaultTemporaryFileCreator" should {
// Per-test scope providing a throwaway parent directory that is cleaned
// up (contents first, then the directory itself) after each example.
abstract class WithScope extends Scope with After {
val parentDirectory: Path = {
val f = JFiles.createTempDirectory(null)
f.toFile.deleteOnExit()
f
}
override def after: Any = {
val files = parentDirectory.toFile.listFiles()
if (files != null) {
files.foreach(_.delete())
}
parentDirectory.toFile.delete()
}
}
"not have a race condition when creating temporary files" in {
// See issue https://github.com/playframework/playframework/issues/7700
// We were having problems by creating too many temporary folders and
// keeping track of them inside TemporaryFileCreator and between it and
// TemporaryFileReaper.
val threads = 25
val threadPool: ExecutorService = Executors.newFixedThreadPool(threads)
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
try {
val executionContext = ExecutionContext.fromExecutorService(threadPool)
// Use a latch to stall the threads until they are all ready to go, then
// release them all at once. This maximizes the chance of a race condition
// being visible.
val raceLatch = new CountDownLatch(threads)
val futureResults: Seq[Future[TemporaryFile]] = for (_ <- 0 until threads) yield {
Future {
raceLatch.countDown()
creator.create("foo", "bar")
}(executionContext)
}
val results: Seq[TemporaryFile] = {
import ExecutionContext.Implicits.global // implicit for Future.sequence
Await.result(Future.sequence(futureResults), 30.seconds)
}
val parentDir = results.head.path.getParent
// All temporary files should be created at the same directory
results.forall(_.path.getParent.equals(parentDir)) must beTrue
} finally {
threadPool.shutdown()
}
ok
}
"recreate directory if it is deleted" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val temporaryFile = creator.create("foo", "bar")
// Removing the temp file's directory must not break subsequent creates.
JFiles.delete(temporaryFile.toPath)
creator.create("foo", "baz")
lifecycle.stop()
success
}
"when copying file" in {
"copy when destination does not exists and replace disabled" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val file = parentDirectory.resolve("copy.txt")
val destination = parentDirectory.resolve("does-not-exists.txt")
// Create a source file, but not the destination
writeFile(file, "file to be copied")
// do the copy
creator.create(file).copyTo(destination, replace = false)
// Both source and destination must exist
JFiles.exists(file) must beTrue
JFiles.exists(destination) must beTrue
// Both must have the same content
val sourceContent = new String(java.nio.file.Files.readAllBytes(file))
val destinationContent = new String(java.nio.file.Files.readAllBytes(destination))
destinationContent must beEqualTo(sourceContent)
}
"copy when destination does not exists and replace enabled" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val file = parentDirectory.resolve("copy.txt")
val destination = parentDirectory.resolve("destination.txt")
// Create source file only
writeFile(file, "file to be copied")
creator.create(file).copyTo(destination, replace = true)
// Both source and destination must exist
JFiles.exists(file) must beTrue
JFiles.exists(destination) must beTrue
// Both must have the same content
val sourceContent = new String(java.nio.file.Files.readAllBytes(file))
val destinationContent = new String(java.nio.file.Files.readAllBytes(destination))
destinationContent must beEqualTo(sourceContent)
}
"copy when destination exists and replace enabled" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val file = parentDirectory.resolve("copy.txt")
val destination = parentDirectory.resolve("destination.txt")
// Create both files
writeFile(file, "file to be copied")
writeFile(destination, "the destination file")
creator.create(file).copyTo(destination, replace = true)
// Both source and destination must exist
JFiles.exists(file) must beTrue
JFiles.exists(destination) must beTrue
// Both must have the same content
val sourceContent = new String(java.nio.file.Files.readAllBytes(file))
val destinationContent = new String(java.nio.file.Files.readAllBytes(destination))
destinationContent must beEqualTo(sourceContent)
}
"do not copy when destination exists and replace disabled" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val file = parentDirectory.resolve("do-not-replace.txt")
val destination = parentDirectory.resolve("already-exists.txt")
writeFile(file, "file that won't be replaced")
writeFile(destination, "already exists")
val to = creator.create(file).copyTo(destination, replace = false)
new String(java.nio.file.Files.readAllBytes(to)) must contain("already exists")
}
"delete source file has no impact on the destination file" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val file = parentDirectory.resolve("move.txt")
writeFile(file, "file to be moved")
val destination = parentDirectory.resolve("destination.txt")
creator.create(file).copyTo(destination, replace = true)
// File was copied
JFiles.exists(file) must beTrue
JFiles.exists(destination) must beTrue
// When deleting the source file the destination will NOT be deleted
// since they are NOT using the same inode.
JFiles.delete(file)
// Only source is gone
JFiles.exists(file) must beFalse
JFiles.exists(destination) must beTrue
}
}
"when moving file" in {
"move when destination does not exists and replace disabled" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val file = parentDirectory.resolve("move.txt")
val destination = parentDirectory.resolve("does-not-exists.txt")
// Create a source file, but not the destination
writeFile(file, "file to be moved")
// move the file
creator.create(file).moveTo(destination, replace = false)
JFiles.exists(file) must beFalse
JFiles.exists(destination) must beTrue
val destinationContent = new String(java.nio.file.Files.readAllBytes(destination))
destinationContent must beEqualTo("file to be moved")
}
"move when destination does not exists and replace enabled" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val file = parentDirectory.resolve("move.txt")
val destination = parentDirectory.resolve("destination.txt")
// Create source file only
writeFile(file, "file to be moved")
creator.create(file).moveTo(destination, replace = true)
JFiles.exists(file) must beFalse
JFiles.exists(destination) must beTrue
val destinationContent = new String(java.nio.file.Files.readAllBytes(destination))
destinationContent must beEqualTo("file to be moved")
}
"move when destination exists and replace enabled" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val file = parentDirectory.resolve("move.txt")
val destination = parentDirectory.resolve("destination.txt")
// Create both files
writeFile(file, "file to be moved")
writeFile(destination, "the destination file")
creator.create(file).moveTo(destination, replace = true)
JFiles.exists(file) must beFalse
JFiles.exists(destination) must beTrue
val destinationContent = new String(java.nio.file.Files.readAllBytes(destination))
destinationContent must beEqualTo("file to be moved")
}
"do not move when destination exists and replace disabled" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val file = parentDirectory.resolve("do-not-replace.txt")
val destination = parentDirectory.resolve("already-exists.txt")
writeFile(file, "file that won't be replaced")
writeFile(destination, "already exists")
val to = creator.create(file).moveTo(destination, replace = false)
new String(java.nio.file.Files.readAllBytes(to)) must contain("already exists")
}
"move a file atomically with replace enabled" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val file = parentDirectory.resolve("move.txt")
writeFile(file, "file to be moved")
val destination = parentDirectory.resolve("destination.txt")
creator.create(file).atomicMoveWithFallback(destination)
JFiles.exists(file) must beFalse
JFiles.exists(destination) must beTrue
}
}
// NOTE(review): this section is an exact duplicate of the "when moving file"
// section above and invokes the same `moveTo`/`atomicMoveWithFallback`
// methods — it does not appear to exercise a deprecated overload. Confirm
// which deprecated API was intended, or remove the duplication.
"when moving file with the deprecated API" in {
"move when destination does not exists and replace disabled" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val file = parentDirectory.resolve("move.txt")
val destination = parentDirectory.resolve("does-not-exists.txt")
// Create a source file, but not the destination
writeFile(file, "file to be moved")
// move the file
creator.create(file).moveTo(destination, replace = false)
JFiles.exists(file) must beFalse
JFiles.exists(destination) must beTrue
val destinationContent = new String(java.nio.file.Files.readAllBytes(destination))
destinationContent must beEqualTo("file to be moved")
}
"move when destination does not exists and replace enabled" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val file = parentDirectory.resolve("move.txt")
val destination = parentDirectory.resolve("destination.txt")
// Create source file only
writeFile(file, "file to be moved")
creator.create(file).moveTo(destination, replace = true)
JFiles.exists(file) must beFalse
JFiles.exists(destination) must beTrue
val destinationContent = new String(java.nio.file.Files.readAllBytes(destination))
destinationContent must beEqualTo("file to be moved")
}
"move when destination exists and replace enabled" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val file = parentDirectory.resolve("move.txt")
val destination = parentDirectory.resolve("destination.txt")
// Create both files
writeFile(file, "file to be moved")
writeFile(destination, "the destination file")
creator.create(file).moveTo(destination, replace = true)
JFiles.exists(file) must beFalse
JFiles.exists(destination) must beTrue
val destinationContent = new String(java.nio.file.Files.readAllBytes(destination))
destinationContent must beEqualTo("file to be moved")
}
"do not move when destination exists and replace disabled" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val file = parentDirectory.resolve("do-not-replace.txt")
val destination = parentDirectory.resolve("already-exists.txt")
writeFile(file, "file that won't be replaced")
writeFile(destination, "already exists")
val to = creator.create(file).moveTo(destination, replace = false)
new String(java.nio.file.Files.readAllBytes(to)) must contain("already exists")
}
"move a file atomically with replace enabled" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, Configuration.reference)
val file = parentDirectory.resolve("move.txt")
writeFile(file, "file to be moved")
val destination = parentDirectory.resolve("destination.txt")
creator.create(file).atomicMoveWithFallback(destination)
JFiles.exists(file) must beFalse
JFiles.exists(destination) must beTrue
}
}
"works when using compile time dependency injection" in {
val context = ApplicationLoader.Context.create(
new Environment(new File("."), ApplicationLoader.getClass.getClassLoader, Mode.Test)
)
val appLoader = new ApplicationLoader {
def load(context: Context) = {
new BuiltInComponentsFromContext(context) with NoHttpFiltersComponents {
lazy val router = Router.empty
}.application
}
}
val app = appLoader.load(context)
Play.start(app)
// The temp file must exist while the app runs and be reaped on stop.
val tempFile =
try {
val tempFileCreator = app.injector.instanceOf[TemporaryFileCreator]
val tempFile = tempFileCreator.create()
tempFile.exists must beTrue
tempFile
} finally {
Play.stop(app)
}
tempFile.exists must beFalse
}
"works when using custom temporary file directory" in new WithScope() {
val lifecycle = new DefaultApplicationLifecycle
val reaper = mock[TemporaryFileReaper]
val path = parentDirectory.toAbsolutePath().toString()
val customPath = s"$path/custom/"
val conf = Configuration.from(Map("play.temporaryFile.dir" -> customPath))
val creator = new DefaultTemporaryFileCreator(lifecycle, reaper, conf)
creator.create("foo", "bar")
// The creator nests its files under a "playtemp" folder in the configured dir.
JFiles.exists(Paths.get(s"$customPath/playtemp")) must beTrue
}
}
// Test helper: (re)creates `file` with the given UTF-8 content, making
// parent directories as needed.
private def writeFile(file: Path, content: String) = {
if (JFiles.exists(file)) JFiles.delete(file)
JFiles.createDirectories(file.getParent)
java.nio.file.Files.write(file, content.getBytes(utf8))
}
}
| benmccann/playframework | core/play/src/test/scala/play/api/libs/TemporaryFileCreatorSpec.scala | Scala | apache-2.0 | 17,804 |
package com.cleawing.finagle.consul
import org.json4s.CustomSerializer
import org.json4s.JsonDSL._
import org.json4s._
import java.util.Base64
/**
 * Data model for the Consul HTTP API v1 as consumed by this Finagle client:
 * agent/catalog descriptors, health checks, sessions, key/value entries and
 * user events, plus json4s serializers for the payloads Consul base64-encodes.
 */
package object v1 {
// A catalog node: its name and advertised address.
case class NodeDescriptor
(
Node: String,
Address: String
)
// A gossip (serf) member as returned by /v1/agent/members.
case class MemberDescriptor
(
Name: String,
Addr: String,
Port: Int,
Tags: Map[String, String],
Status: Int,
ProtocolMin: Int,
ProtocolMax: Int,
ProtocolCur: Int,
DelegateMin: Int,
DelegateMax: Int,
DelegateCur: Int
)
// Response of /v1/agent/self: agent configuration plus its member record.
case class SelfDescriptor
(
Config: Map[String, Any], // TODO. Wrap Config in more detailed case class
Member: MemberDescriptor
)
// A registered service instance; optional fields are omitted from JSON when None.
case class ServiceDescriptor
(
ID: String,
Service: String,
Tags: Option[Set[String]] = None,
Address: Option[String] = None,
Port: Option[Int] = None
)
// Service-id -> descriptor, as returned by /v1/agent/services.
type ServiceDescriptors = Map[String, ServiceDescriptor]
// A service instance joined with the node hosting it (catalog queries).
case class NodeServiceDescriptor
(
Node: String,
Address: String,
ServiceID: String,
ServiceName: String,
ServiceTags: Set[String],
ServiceAddress: String,
ServicePort: Int
)
// A node together with all services registered on it.
case class NodeServiceDescriptors
(
Node: NodeDescriptor,
Services: Map[String, ServiceDescriptor]
)
// A health check record as returned by the /v1/health endpoints.
case class CheckDescriptor
(
Node: String,
CheckID: String,
Name: String,
Status: CheckState.Value,
Notes: String,
Output: String,
ServiceID: String,
ServiceName: String
)
// Check-id -> descriptor, as returned by /v1/agent/checks.
type CheckDescriptors = Map[String, CheckDescriptor]
// Possible health-check states; names match Consul's wire representation.
object CheckState extends Enumeration {
val any, unknown, passing, warning, critical = Value
}
// Node + service + its checks, as returned by /v1/health/service.
case class HealthDescriptor
(
Node: NodeDescriptor,
Service: ServiceDescriptor,
Checks: Seq[CheckDescriptor]
)
// Payload for registering a standalone check; exactly one of Script/HTTP/TTL
// must be set (enforced by CheckValidation).
case class RegisterCheck
(
Name: String,
ID: Option[String] = None,
Notes: Option[String] = None,
Script: Option[String] = None,
HTTP: Option[String] = None,
Interval: Option[String] = None,
TTL: Option[String] = None,
ServiceId: Option[String] = None
) extends CheckValidation
// Payload for registering a service with the local agent.
case class RegisterService
(
Name: String,
ID: Option[String] = None,
Tags: Option[Seq[String]] = None,
Address: Option[String] = None,
Port: Option[Int] = None,
Check: Option[ServiceCheck] = None
)
// Inline check definition attached to a service registration.
case class ServiceCheck
(
Script: Option[String] = None,
HTTP: Option[String] = None,
Interval: Option[String] = None,
TTL: Option[String] = None
) extends CheckValidation
// Catalog-level node registration; a Service and a Check are mutually
// exclusive in a single request, enforced at construction time.
case class RegisterNode
(
Node: String,
Address: String,
Datacenter: Option[String] = None,
Service: Option[ServiceDescriptor] = None,
Check: Option[CheckUpdateDescriptor] = None
) {
if (Service.isDefined && Check.isDefined)
throw new IllegalArgumentException("Only Service or Check can be provided at the same time")
}
// Check record used inside catalog registration requests.
case class CheckUpdateDescriptor
(
Node: String,
CheckID: String,
Name: String,
Notes: String,
Status: String,
ServiceID: String
)
// Catalog-level deregistration; ServiceID and CheckID are mutually exclusive.
case class DeregisterNode
(
Node: String,
Datacenter: Option[String] = None,
ServiceID: Option[String] = None,
CheckID: Option[String] = None
) {
if (ServiceID.isDefined && CheckID.isDefined)
throw new IllegalArgumentException("Only ServiceID or CheckID can be provided at the same time")
}
// Request body for creating a session (/v1/session/create).
case class SessionDescriptor
(
LockDelay: Option[String] = None,
Name: Option[String] = None,
Node: Option[String] = None,
Checks: Option[Seq[String]] = None,
Behavior: Option[SessionBehavior.Value] = None,
TTL: Option[String] = None
)
// Session record as returned by /v1/session/info.
case class SessionInfo
(
CreateIndex: Long,
ID: String,
Name: String,
Node: String,
Checks: Seq[String],
LockDelay: Long,
Behavior: SessionBehavior.Value,
TTL: String
)
// What happens to session-held locks/keys when the session is invalidated.
object SessionBehavior extends Enumeration {
val release, delete = Value
}
// A key/value entry; Value is the decoded (not base64) payload, None when
// the stored value is JSON null.
case class KvValue
(
CreateIndex: Int,
ModifyIndex: Int,
LockIndex: Int,
Key: String,
Flags: Int,
Value: Option[String],
Session: Option[String]
)
// Deserializer for KV entries: Consul base64-encodes Value, so it is decoded
// here; the four cases cover Value present/null crossed with Session
// present/absent.
// NOTE(review): these patterns match on the exact JSON field order and field
// set (`:: Nil`); an extra or reordered field from Consul would fail to
// match — confirm this is acceptable or extract fields by name instead.
object KvValueSerializer extends CustomSerializer[KvValue](formats => (
{
case JObject
(
JField("CreateIndex", JInt(createIndex)) ::
JField("ModifyIndex", JInt(modifyIndex)) ::
JField("LockIndex", JInt(lockIndex)) ::
JField("Key", JString(key)) ::
JField("Flags", JInt(flags)) ::
JField("Value", JString(encodedValue)) ::
JField("Session", JString(session)) :: Nil
) => KvValue(createIndex.toInt, modifyIndex.toInt, lockIndex.toInt, key, flags.toInt, Some(new String(Base64.getDecoder.decode(encodedValue))), Some(session))
case JObject
(
JField("CreateIndex", JInt(createIndex)) ::
JField("ModifyIndex", JInt(modifyIndex)) ::
JField("LockIndex", JInt(lockIndex)) ::
JField("Key", JString(key)) ::
JField("Flags", JInt(flags)) ::
JField("Value", JString(encodedValue)) :: Nil
) => KvValue(createIndex.toInt, modifyIndex.toInt, lockIndex.toInt, key, flags.toInt, Some(new String(Base64.getDecoder.decode(encodedValue))), None)
case JObject
(
JField("CreateIndex", JInt(createIndex)) ::
JField("ModifyIndex", JInt(modifyIndex)) ::
JField("LockIndex", JInt(lockIndex)) ::
JField("Key", JString(key)) ::
JField("Flags", JInt(flags)) ::
JField("Value", JNull) ::
JField("Session", JString(session)) :: Nil
) => KvValue(createIndex.toInt, modifyIndex.toInt, lockIndex.toInt, key, flags.toInt, None, Some(session))
case JObject
(
JField("CreateIndex", JInt(createIndex)) ::
JField("ModifyIndex", JInt(modifyIndex)) ::
JField("LockIndex", JInt(lockIndex)) ::
JField("Key", JString(key)) ::
JField("Flags", JInt(flags)) ::
JField("Value", JNull) :: Nil
) => KvValue(createIndex.toInt, modifyIndex.toInt, lockIndex.toInt, key, flags.toInt, None, None)
},
// Serialization back to JSON is not supported for this type.
PartialFunction.empty)
)
// A user event (/v1/event); Payload is the decoded base64 body, None when null.
case class Event
(
ID: String,
Name: String,
Payload: Option[String],
NodeFilter: String,
ServiceFilter: String,
TagFilter: String,
Version: Int,
LTime: Int
)
// Deserializer for events; decodes the base64 Payload.
// NOTE(review): like KvValueSerializer, this matches on exact field order —
// confirm Consul's response shape is stable enough for this to hold.
object EventSerializer extends CustomSerializer[Event](formats => (
{
case JObject
(
JField("ID", JString(id)) ::
JField("Name", JString(name)) ::
JField("Payload", JString(payload)) ::
JField("NodeFilter", JString(nodeFilter)) ::
JField("ServiceFilter", JString(serviceFilter)) ::
JField("TagFilter", JString(tagFilter)) ::
JField("Version", JInt(version)) ::
JField("LTime", JInt(lTime)) ::
Nil
) => Event(id, name, Some(new String(Base64.getDecoder.decode(payload))), nodeFilter, serviceFilter, tagFilter, version.toInt, lTime.toInt)
case JObject
(
JField("ID", JString(id)) ::
JField("Name", JString(name)) ::
JField("Payload", JNull) ::
JField("NodeFilter", JString(nodeFilter)) ::
JField("ServiceFilter", JString(serviceFilter)) ::
JField("TagFilter", JString(tagFilter)) ::
JField("Version", JInt(version)) ::
JField("LTime", JInt(lTime)) ::
Nil
) => Event(id, name, None, nodeFilter, serviceFilter, tagFilter, version.toInt, lTime.toInt)
},
// Serialization back to JSON is not supported for this type.
PartialFunction.empty)
)
// Construction-time validation mixed into check payloads: exactly one of
// Script/HTTP/TTL must be set, and Script/HTTP checks also need an Interval.
private[v1] trait CheckValidation {
def Script: Option[String]
def HTTP: Option[String]
def Interval: Option[String]
def TTL: Option[String]
Seq(Script, HTTP, TTL).count(_.isDefined) match {
case 0 => throw new IllegalArgumentException("One of Script, HTTP or TTL field should be set")
case c if c > 1 => throw new IllegalArgumentException("Only one of Script, HTTP or TTL field should be set")
case _ => // OK
}
(Script, Interval) match {
case (Some(_), None) => throw new IllegalArgumentException("Interval required for Script check")
case _ => // OK
}
(HTTP, Interval) match {
case (Some(_), None) => throw new IllegalArgumentException("Interval required for HTTP check")
case _ => // OK
}
}
}
| Cleawing/united | finagle-services/src/main/scala/com/cleawing/finagle/consul/v1/package.scala | Scala | apache-2.0 | 8,281 |
package scorex.consensus.nxt.api.http
import javax.ws.rs.Path
import akka.actor.ActorRefFactory
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Route
import io.swagger.annotations._
import play.api.libs.json.Json
import scorex.account.Account
import scorex.api.http.{ApiRoute, CommonApiFunctions, InvalidAddress, JsonResponse}
import scorex.app.RunnableApplication
import scorex.consensus.nxt.NxtLikeConsensusModule
import scorex.crypto.encode.Base58
@Path("/consensus")
@Api(value = "/consensus", description = "Consensus-related calls")
/**
 * HTTP API exposing NXT-like consensus data: the consensus algorithm name,
 * base target and generation signature (for the last block or a block by id),
 * and an account's generating balance.
 */
class NxtConsensusApiRoute(override val application: RunnableApplication)(implicit val context: ActorRefFactory)
extends ApiRoute with CommonApiFunctions {
// The application is configured with NxtLikeConsensusModule; the cast makes
// the NXT-specific accessors (baseTarget, generationSignature) available.
private val consensusModule = application.consensusModule.asInstanceOf[NxtLikeConsensusModule]
private val blockStorage = application.blockStorage
// All endpoints live under the /consensus prefix.
override val route: Route =
pathPrefix("consensus") {
algo ~ basetarget ~ baseTargetId ~ generationSignature ~ generationSignatureId ~ generatingBalance
}
@Path("/generatingbalance/{address}")
@ApiOperation(value = "Generating balance", notes = "Account's generating balance(the same as balance atm)", httpMethod = "GET")
@ApiImplicitParams(Array(
new ApiImplicitParam(name = "address", value = "Address", required = true, dataType = "String", paramType = "path")
))
// GET /consensus/generatingbalance/{address} — the account's generating
// balance, or InvalidAddress if the address fails validation.
def generatingBalance: Route = {
path("generatingbalance" / Segment) { case address =>
getJsonRoute {
val account = new Account(address)
if (!Account.isValid(account)) {
InvalidAddress.response
} else {
val json = Json.obj(
"address" -> account.address,
"balance" -> consensusModule.generatingBalance(account)(application.transactionModule))
JsonResponse(json, StatusCodes.OK)
}
}
}
}
@Path("/generationsignature/{blockId}")
@ApiOperation(value = "Generation signature", notes = "Generation signature of a block with specified id", httpMethod = "GET")
@ApiImplicitParams(Array(
new ApiImplicitParam(name = "blockId", value = "Block id ", required = true, dataType = "String", paramType = "path")
))
// GET /consensus/generationsignature/{blockId} — Base58-encoded generation
// signature of the block with the given (Base58) signature.
def generationSignatureId: Route = {
path("generationsignature" / Segment) { case encodedSignature =>
getJsonRoute {
withBlock(blockStorage.history, encodedSignature) { block =>
val gs = consensusModule.consensusBlockData(block).generationSignature
Json.obj(
"generationSignature" -> Base58.encode(gs)
)
}
}
}
}
@Path("/generationsignature")
@ApiOperation(value = "Generation signature last", notes = "Generation signature of a last block", httpMethod = "GET")
// GET /consensus/generationsignature — generation signature of the chain tip.
def generationSignature: Route = {
path("generationsignature") {
getJsonRoute {
val lastBlock = blockStorage.history.lastBlock
val gs = consensusModule.consensusBlockData(lastBlock).generationSignature
JsonResponse(Json.obj("generationSignature" -> Base58.encode(gs)), StatusCodes.OK)
}
}
}
@Path("/basetarget/{blockId}")
@ApiOperation(value = "Base target", notes = "base target of a block with specified id", httpMethod = "GET")
@ApiImplicitParams(Array(
new ApiImplicitParam(name = "blockId", value = "Block id ", required = true, dataType = "String", paramType = "path")
))
// GET /consensus/basetarget/{blockId} — base target of the block with the
// given (Base58) signature.
def baseTargetId: Route = {
path("basetarget" / Segment) { case encodedSignature =>
getJsonRoute {
withBlock(blockStorage.history, encodedSignature) { block =>
Json.obj(
"baseTarget" -> consensusModule.consensusBlockData(block).baseTarget
)
}
}
}
}
@Path("/basetarget")
@ApiOperation(value = "Base target last", notes = "Base target of a last block", httpMethod = "GET")
// GET /consensus/basetarget — base target of the chain tip.
def basetarget: Route = {
path("basetarget") {
getJsonRoute {
val lastBlock = blockStorage.history.lastBlock
val bt = consensusModule.consensusBlockData(lastBlock).baseTarget
JsonResponse(Json.obj("baseTarget" -> bt), StatusCodes.OK)
}
}
}
@Path("/algo")
@ApiOperation(value = "Consensus algo", notes = "Shows which consensus algo being using", httpMethod = "GET")
// GET /consensus/algo — constant identifier of the consensus algorithm.
def algo: Route = {
path("algo") {
getJsonRoute {
JsonResponse(Json.obj("consensusAlgo" -> "nxt"), StatusCodes.OK)
}
}
}
}
| alexeykiselev/WavesScorex | scorex-consensus/src/main/scala/scorex/consensus/nxt/api/http/NxtConsensusApiRoute.scala | Scala | cc0-1.0 | 4,409 |
package dundertext.data
/**
 * A piece of subtitle text shown on screen over the half-open time interval
 * `[in, out)`.
 *
 * @param text   the text content being displayed
 * @param in     time at which the text appears
 * @param length how long the text stays on screen
 */
final case class DisplayedText (
  text: Text,
  in: Time,
  length: Length
) {
  /** Exclusive end time: the cue's start plus its duration. */
  val out: Time = Time(in.millis + length.millis)

  /** True when `t` falls inside the display interval `[in, out)`. */
  def contains(t: Time): Boolean = {
    t.millis >= in.millis && t.millis < out.millis
  }

  /**
   * Misspelled original name, kept as a delegating alias so existing callers
   * keep compiling. Prefer [[contains]].
   */
  @deprecated("Use contains instead", "")
  def conatins(t: Time): Boolean = contains(t)
}
object DisplayedText {
// Minimum gap, in milliseconds, kept between consecutive displayed cues.
final val Separation = Length(160)
}
| dundertext/dundertext | data/src/main/scala/dundertext/data/DisplayedText.scala | Scala | gpl-3.0 | 312 |
package com.vivint.ceph.lib
import java.io.{ ByteArrayInputStream, ByteArrayOutputStream }
import java.util.zip.GZIPInputStream
import org.apache.commons.io.IOUtils
import org.kamranzafar.jtar.{ TarEntry, TarHeader, TarInputStream, TarOutputStream }
import java.util.zip.GZIPOutputStream
import scala.collection.{Iterator,breakOut}
import java.nio.charset.StandardCharsets.UTF_8
/**
 * Helpers for building and reading gzip-compressed tar archives (.tgz)
 * entirely in memory, used to ship configuration file bundles.
 */
object TgzHelper {
// Parses a string of octal digits (e.g. "644" for a tar file mode) to Int.
def octal(digits: String): Int =
BigInt(digits, 8).toInt
// One archive member: its tar mode bits and raw contents.
case class FileEntry(mode: Int, data: Array[Byte])
object FileEntry extends ((Int, Array[Byte]) => FileEntry) {
import scala.language.implicitConversions
// Convenience conversion: a plain string becomes a mode-0644, UTF-8 entry.
implicit def apply(contents: String): FileEntry =
FileEntry(octal("644"), contents.getBytes(UTF_8))
}
// Builds a tgz from path -> string-content pairs (each as a 0644 entry).
def makeTgz(files: Map[String, String]): Array[Byte] = {
makeTgz(files.toSeq.map { case (k, v) =>
k -> FileEntry(v)
} : _*)
}
// Builds a tgz from path -> FileEntry pairs; all members share the current
// timestamp. Returns the complete compressed archive as a byte array.
def makeTgz(files: (String, FileEntry)*): Array[Byte] = {
val dest = new ByteArrayOutputStream
val tgz = new TarOutputStream(new GZIPOutputStream(dest))
// Tar header timestamps are in whole seconds since the epoch.
val now = System.currentTimeMillis / 1000
files.foreach { case (file, entry) =>
tgz.putNextEntry(new TarEntry(
TarHeader.createHeader(
file,
entry.data.length.toLong,
now, false, entry.mode)))
tgz.write(entry.data, 0, entry.data.length)
}
// Closing flushes and finalizes both the tar and gzip framing.
tgz.close()
dest.toByteArray()
}
// Iterator over tar members, reading each entry's full contents eagerly.
// One entry is prefetched so hasNext can report end-of-archive.
class TarIterator(s: TarInputStream) extends Iterator[(TarEntry, Array[Byte])] {
var _nextEntry: (TarEntry, Array[Byte]) = null
private def loadNext(): Unit =
_nextEntry = s.getNextEntry match {
case null => null
case entry => (entry, IOUtils.toByteArray(s))
}
def hasNext = _nextEntry != null
def next() = {
val nextResult = _nextEntry
loadNext()
nextResult
}
loadNext()
}
// Lazily decodes a tgz byte array into (member name, member bytes) pairs.
def readTgz(tgz: Array[Byte]): Iterator[(String, Array[Byte])] = {
val input = new ByteArrayInputStream(tgz)
val stream = new TarInputStream(new GZIPInputStream(input))
new TarIterator(stream).map {
case (entry, data) => entry.getName -> data
}
}
}
| vivint-smarthome/ceph-on-mesos | src/main/scala/com/vivint/ceph/lib/TgzHelper.scala | Scala | apache-2.0 | 2,122 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.observers.buffers
import monix.execution.Ack
import monix.execution.Ack.{Continue, Stop}
import monix.execution.internal.collection.{JSArrayQueue, _}
import scala.util.control.NonFatal
import monix.execution.exceptions.BufferOverflowException
import monix.reactive.observers.{BufferedSubscriber, Subscriber}
import scala.concurrent.Future
import scala.util.{Failure, Success}
/** A [[BufferedSubscriber]] implementation for the
  * [[monix.reactive.OverflowStrategy.DropNew DropNew]] overflow strategy.
  *
  * Incoming events are buffered in `queue`; a single consumer run-loop drains
  * the queue and pushes events downstream, respecting back-pressure from `out`.
  * When `onOverflow` is non-null, the accumulated number of dropped events is
  * turned into a synthetic message that is emitted ahead of the next event.
  */
private[observers] final class SyncBufferedSubscriber[-A] private
  (out: Subscriber[A], queue: EvictingQueue[A], onOverflow: Long => Option[A] = null)
  extends BufferedSubscriber[A] with Subscriber.Sync[A] {

  implicit val scheduler = out.scheduler
  // to be modified only in onError, before upstreamIsComplete
  private[this] var errorThrown: Throwable = _
  // to be modified only in onError / onComplete
  private[this] var upstreamIsComplete = false
  // to be modified only by consumer
  private[this] var downstreamIsComplete = false
  // represents an indicator that there's a loop in progress
  private[this] var isLoopActive = false
  // events being dropped
  private[this] var droppedCount = 0L
  // last acknowledgement received by consumer loop
  private[this] var lastIterationAck: Future[Ack] = Continue
  // Used on the consumer side to split big synchronous workloads in batches
  private[this] val em = scheduler.executionModel

  def onNext(elem: A): Ack = {
    if (!upstreamIsComplete && !downstreamIsComplete) {
      if (elem == null) {
        onError(new NullPointerException("Null not supported in onNext"))
        Stop
      }
      else try {
        // `offer` returns how many elements were evicted/rejected to make room;
        // accumulate so the overflow callback can report the total later
        droppedCount += queue.offer(elem)
        consume()
        Continue
      }
      catch {
        case ex if NonFatal(ex) =>
          onError(ex)
          Stop
      }
    }
    else
      Stop
  }

  def onError(ex: Throwable): Unit = {
    if (!upstreamIsComplete && !downstreamIsComplete) {
      errorThrown = ex
      upstreamIsComplete = true
      consume()
    }
  }

  def onComplete(): Unit = {
    if (!upstreamIsComplete && !downstreamIsComplete) {
      upstreamIsComplete = true
      consume()
    }
  }

  // Schedules the consumer run-loop, unless one is already in progress.
  private def consume(): Unit =
    if (!isLoopActive) {
      isLoopActive = true
      scheduler.execute(consumerRunLoop)
    }

  private[this] val consumerRunLoop = new Runnable {
    def run(): Unit = {
      fastLoop(lastIterationAck, 0)
    }

    // Signals one event downstream, flattening already-completed acks to
    // their synchronous value.
    private final def signalNext(next: A): Future[Ack] =
      try {
        val ack = out.onNext(next)
        // Tries flattening the Future[Ack] to a
        // synchronous value
        if (ack == Continue || ack == Stop)
          ack
        else ack.value match {
          case Some(Success(success)) =>
            success
          case Some(Failure(ex)) =>
            downstreamSignalComplete(ex)
            Stop
          case None =>
            ack
        }
      } catch {
        case ex if NonFatal(ex) =>
          downstreamSignalComplete(ex)
          Stop
      }

    // Terminates the downstream subscriber: onError when `ex` is non-null,
    // otherwise onComplete. Failures thrown by the subscriber are reported
    // to the scheduler, never rethrown.
    private def downstreamSignalComplete(ex: Throwable = null): Unit = {
      downstreamIsComplete = true
      try {
        if (ex != null) out.onError(ex)
        else out.onComplete()
      } catch {
        case err if NonFatal(err) =>
          scheduler.reportFailure(err)
      }
    }

    // Resumes the run-loop asynchronously once a pending ack completes.
    private def goAsync(next: A, ack: Future[Ack]): Unit =
      ack.onComplete {
        case Success(Continue) =>
          val nextAck = signalNext(next)
          val isSync = ack == Continue || ack == Stop
          val nextFrame = if (isSync) em.nextFrameIndex(0) else 0
          fastLoop(nextAck, nextFrame)

        case Success(Stop) =>
          // ending loop
          downstreamIsComplete = true
          isLoopActive = false

        case Failure(ex) =>
          // ending loop
          isLoopActive = false
          downstreamSignalComplete(ex)
      }

    // Synchronous drain loop. Batches work according to the execution model
    // (via `nextIndex`) and falls back to goAsync when an ack is still pending.
    private def fastLoop(prevAck: Future[Ack], startIndex: Int): Unit = {
      var ack = if (prevAck == null) Continue else prevAck
      var isFirstIteration = ack == Continue
      var nextIndex = startIndex

      while (isLoopActive && !downstreamIsComplete) {
        var streamErrors = true
        try {
          val next = {
            // Do we have an overflow message to send?
            val overflowMessage =
              if (onOverflow == null || droppedCount == 0)
                null.asInstanceOf[A]
              else {
                val msg = onOverflow(droppedCount) match {
                  case Some(value) => value
                  case None => null.asInstanceOf[A]
                }
                droppedCount = 0
                msg
              }

            if (overflowMessage != null) overflowMessage else
              queue.poll()
          }

          // Threshold after which we are no longer allowed to
          // stream errors downstream if they happen
          streamErrors = false

          if (next != null) {
            if (nextIndex > 0 || isFirstIteration) {
              isFirstIteration = false

              ack match {
                case Continue =>
                  ack = signalNext(next)
                  if (ack == Stop) {
                    // ending loop
                    downstreamIsComplete = true
                    isLoopActive = false
                    return
                  } else {
                    val isSync = ack == Continue
                    nextIndex = if (isSync) em.nextFrameIndex(nextIndex) else 0
                  }
                case Stop =>
                  // ending loop
                  downstreamIsComplete = true
                  isLoopActive = false
                  return
                case _ =>
                  goAsync(next, ack)
                  return
              }
            }
            else {
              goAsync(next, ack)
              return
            }
          }
          else {
            // queue is empty; if the upstream finished, propagate completion
            if (upstreamIsComplete) downstreamSignalComplete(errorThrown)
            // ending loop
            lastIterationAck = ack
            isLoopActive = false
            return
          }
        } catch {
          case ex if NonFatal(ex) =>
            if (streamErrors) {
              // ending loop
              downstreamSignalComplete(ex)
              isLoopActive = false
              return
            } else {
              scheduler.reportFailure(ex)
              return
            }
        }
      }
    }
  }
}
private[monix] object SyncBufferedSubscriber {
  /**
    * Returns an instance of a [[SyncBufferedSubscriber]]
    * backed by an unbounded buffer, i.e. for the
    * [[monix.reactive.OverflowStrategy.Unbounded Unbounded]]
    * overflow strategy.
    */
  def unbounded[A](underlying: Subscriber[A]): Subscriber.Sync[A] = {
    val buffer = JSArrayQueue.unbounded[A]
    new SyncBufferedSubscriber[A](underlying, buffer, null)
  }

  /**
    * Returns an instance of a [[SyncBufferedSubscriber]]
    * with a bounded buffer that signals a `BufferOverflowException`
    * when going over the given capacity (i.e. the
    * [[monix.reactive.OverflowStrategy.Fail Fail]] overflow strategy).
    */
  def bounded[A](underlying: Subscriber[A], bufferSize: Int): Subscriber.Sync[A] = {
    require(bufferSize > 1, "bufferSize must be strictly higher than 1")
    val buffer = JSArrayQueue.bounded[A](bufferSize, _ => {
      BufferOverflowException(
        s"Downstream observer is too slow, buffer over capacity with a " +
        s"specified buffer size of $bufferSize")
    })

    new SyncBufferedSubscriber[A](underlying, buffer, null)
  }

  /**
    * Returns an instance of a [[SyncBufferedSubscriber]]
    * for the [[monix.reactive.OverflowStrategy.DropNew DropNew]]
    * overflow strategy.
    */
  def dropNew[A](underlying: Subscriber[A], bufferSize: Int): Subscriber.Sync[A] = {
    require(bufferSize > 1, "bufferSize must be strictly higher than 1")
    val buffer = JSArrayQueue.bounded[A](bufferSize)
    new SyncBufferedSubscriber[A](underlying, buffer, null)
  }

  /**
    * Returns an instance of a [[SyncBufferedSubscriber]]
    * for the [[monix.reactive.OverflowStrategy.DropNew DropNew]]
    * overflow strategy, with signaling of the number of events that
    * were dropped.
    */
  def dropNewAndSignal[A](underlying: Subscriber[A], bufferSize: Int, onOverflow: Long => Option[A]): Subscriber.Sync[A] = {
    require(bufferSize > 1, "bufferSize must be strictly higher than 1")
    val buffer = JSArrayQueue.bounded[A](bufferSize)
    new SyncBufferedSubscriber[A](underlying, buffer, onOverflow)
  }

  /**
    * Returns an instance of a [[SyncBufferedSubscriber]]
    * for the [[monix.reactive.OverflowStrategy.DropOld DropOld]]
    * overflow strategy.
    */
  def dropOld[A](underlying: Subscriber[A], bufferSize: Int): Subscriber.Sync[A] = {
    require(bufferSize > 1, "bufferSize must be strictly higher than 1")
    val buffer = DropHeadOnOverflowQueue[AnyRef](bufferSize).asInstanceOf[EvictingQueue[A]]
    new SyncBufferedSubscriber[A](underlying, buffer, null)
  }

  /**
    * Returns an instance of a [[SyncBufferedSubscriber]]
    * for the [[monix.reactive.OverflowStrategy.DropOld DropOld]]
    * overflow strategy, with signaling of the number of events that
    * were dropped.
    */
  def dropOldAndSignal[A](underlying: Subscriber[A], bufferSize: Int, onOverflow: Long => Option[A]): Subscriber.Sync[A] = {
    require(bufferSize > 1, "bufferSize must be strictly higher than 1")
    val buffer = DropHeadOnOverflowQueue[AnyRef](bufferSize).asInstanceOf[EvictingQueue[A]]
    new SyncBufferedSubscriber[A](underlying, buffer, onOverflow)
  }

  /**
    * Returns an instance of a [[SyncBufferedSubscriber]] for the
    * [[monix.reactive.OverflowStrategy.ClearBuffer ClearBuffer]]
    * overflow strategy.
    */
  def clearBuffer[A](underlying: Subscriber[A], bufferSize: Int): Subscriber.Sync[A] = {
    require(bufferSize > 1, "bufferSize must be strictly higher than 1")
    val buffer = DropAllOnOverflowQueue[AnyRef](bufferSize).asInstanceOf[EvictingQueue[A]]
    new SyncBufferedSubscriber[A](underlying, buffer, null)
  }

  /**
    * Returns an instance of a [[SyncBufferedSubscriber]]
    * for the [[monix.reactive.OverflowStrategy.ClearBuffer ClearBuffer]]
    * overflow strategy, with signaling of the number of events that
    * were dropped.
    */
  def clearBufferAndSignal[A](underlying: Subscriber[A], bufferSize: Int, onOverflow: Long => Option[A]): Subscriber.Sync[A] = {
    require(bufferSize > 1, "bufferSize must be strictly higher than 1")
    val buffer = DropAllOnOverflowQueue[AnyRef](bufferSize).asInstanceOf[EvictingQueue[A]]
    new SyncBufferedSubscriber[A](underlying, buffer, onOverflow)
  }
}
| Wogan/monix | monix-reactive/js/src/main/scala/monix/reactive/observers/buffers/SyncBufferedSubscriber.scala | Scala | apache-2.0 | 11,268 |
// #Sireum
/*
Copyright (c) 2018, Robby, Kansas State University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.sireum.message
import org.sireum._
object Reporter {
  /** Creates a reporter with an empty message list. */
  @pure def create: Reporter = {
    return Reporter(ISZ())
  }

  /** Creates a reporter holding the messages of `r1` followed by those of `r2`. */
  @pure def combine(r1: Reporter, r2: Reporter): Reporter = {
    return Reporter(r1.messages ++ r2.messages)
  }
}
/** Mutable collector of diagnostic messages (info / warning / error /
  * internal error). Setting `ignore` to T turns all reporting methods
  * into no-ops. Written in the Slang (Sireum) dialect of Scala. */
@record class Reporter(var messages: ISZ[Message]) {

  // When T, report/error/warn/info/internalError silently discard messages.
  var ignore: B = F

  /** T if any message carries the InternalError level. */
  def hasInternalError: B = {
    for (m <- messages) {
      m.level match {
        case Level.InternalError => return T
        case _ =>
      }
    }
    return F
  }

  /** T if any message is an error or an internal error. */
  def hasError: B = {
    for (m <- messages if m.isError || m.isInternalError) {
      return T
    }
    return F
  }

  /** T if any message is a warning. */
  def hasWarning: B = {
    for (m <- messages if m.isWarning) {
      return T
    }
    return F
  }

  /** T if any message is an error, warning or internal error. */
  def hasIssue: B = {
    for (m <- messages if m.isError || m.isWarning || m.isInternalError) {
      return T
    }
    return F
  }

  /** T if any message is informational. */
  def hasInfo: B = {
    for (m <- messages if m.isInfo) {
      return T
    }
    return F
  }

  /** T if at least one message has been reported. */
  def hasMessage: B = {
    return messages.nonEmpty
  }

  /** All internal-error messages, in reporting order. */
  def internalErrors: ISZ[Message] = {
    return for (m <- messages if m.isInternalError) yield m
  }

  /** All error messages, in reporting order. */
  def errors: ISZ[Message] = {
    return for (m <- messages if m.isError) yield m
  }

  /** All warning messages, in reporting order. */
  def warnings: ISZ[Message] = {
    return for (m <- messages if m.isWarning) yield m
  }

  /** All error, warning and internal-error messages, in reporting order. */
  def issues: ISZ[Message] = {
    return for (m <- messages if m.isError || m.isWarning || m.isInternalError) yield m
  }

  /** All informational messages, in reporting order. */
  def infos: ISZ[Message] = {
    return for (m <- messages if m.isInfo) yield m
  }

  /** Appends a message, unless `ignore` is set. */
  def report(m: Message): Unit = {
    //assert(m.fileUriOpt.isEmpty || !ops.ISZOps(messages).contains(m))
    if (!ignore) {
      messages = messages :+ m
    }
  }

  /** Groups messages by their source file URI (None for messages without one). */
  def messagesByFileUri: HashSMap[Option[String], ISZ[Message]] = {
    var r = HashSMap.empty[Option[String], ISZ[Message]]
    for (m <- messages) {
      val key: Option[String] = m.fileUriOpt
      r.get(key) match {
        case Some(ms) => r = r + key ~> (ms :+ m)
        case _ => r = r + key ~> ISZ(m)
      }
    }
    return r
  }

  /** Prints all messages grouped per file, sorted by source position.
    * The `err` flag (T when any error is present) is forwarded to
    * cprint/cprintln — presumably selecting the error stream; confirm
    * against the Slang runtime's cprintln semantics. */
  def printMessages(): Unit = {
    // Sorts by (beginLine, beginColumn); positionless messages and ties
    // are ordered by message text length.
    @pure def sortMessages(ms: ISZ[Message]): ISZ[Message] = {
      return ops
        .ISZOps(ms)
        .sortWith((m1, m2) => {
          (m1.posOpt, m2.posOpt) match {
            case (Some(m1pos), Some(m2pos)) =>
              if (m1pos.beginLine < m2pos.beginLine) T
              else if (m1pos.beginLine > m2pos.beginLine) F
              else if (m1pos.beginColumn < m2pos.beginColumn) T
              else if (m1pos.beginColumn > m2pos.beginColumn) F
              else m1.text.size < m2.text.size
            case _ => m1.text.size < m2.text.size
          }
        })
    }

    val map = messagesByFileUri
    val err = hasError
    var first = T
    for (kv <- map.entries) {
      // Blank line between per-file groups.
      if (!first) {
        cprintln(err, "")
      }
      first = F
      val fileUriOpt = kv._1
      val ms = kv._2
      fileUriOpt match {
        case Some(fileUri) =>
          cprintln(err, s"* $fileUri")
          for (m <- sortMessages(ms)) {
            cprint(err, " ")
            val int: String = if (m.level == Level.InternalError) "INTERNAL ERROR -- " else ""
            val mText: String = m.posOpt match {
              case Some(pos) => s"- [${pos.beginLine}, ${pos.beginColumn}] $int${m.text}"
              case _ => s"- ${m.text}"
            }
            cprintln(err, mText)
          }
        case _ =>
          for (m <- sortMessages(ms)) {
            val int: String = if (m.level == Level.InternalError) "INTERNAL ERROR -- " else ""
            val mText: String = m.posOpt match {
              case Some(pos) => s"- [${pos.beginLine}, ${pos.beginColumn}] $int${m.text}"
              case _ => s"- ${m.text}"
            }
            cprintln(err, mText)
          }
      }
    }
  }

  /** Reports an internal-error message. */
  def internalError(posOpt: Option[Position], kind: String, message: String): Unit = {
    if (!ignore) {
      report(Message(Level.InternalError, posOpt, kind, message))
    }
  }

  /** Reports an error message. */
  def error(posOpt: Option[Position], kind: String, message: String): Unit = {
    if (!ignore) {
      report(Message(Level.Error, posOpt, kind, message))
    }
  }

  /** Reports a warning message. */
  def warn(posOpt: Option[Position], kind: String, message: String): Unit = {
    if (!ignore) {
      report(Message(Level.Warning, posOpt, kind, message))
    }
  }

  /** Reports an informational message. */
  def info(posOpt: Option[Position], kind: String, message: String): Unit = {
    if (!ignore) {
      report(Message(Level.Info, posOpt, kind, message))
    }
  }

  /** Reports each message in `ms`, in order. */
  def reports(ms: ISZ[Message]): Unit = {
    for (m <- ms) {
      report(m)
    }
  }
}
| sireum/v3-runtime | library/shared/src/main/scala/org/sireum/message/Reporter.scala | Scala | bsd-2-clause | 5,896 |
package net.mkowalski.sparkfim.util
import net.mkowalski.sparkfim.model.FimDataTypes
/** Helpers for parsing the vertical (item-id -> tid-list) database format. */
object VerticalDbUtil extends FimDataTypes with Serializable {

  // Sentinel returned for unparsable lines.
  // no Option type for performance reasons
  val invalidItemIdWithEmptyTidList: ItemIdWithTidList = (-1, Array.empty[Int])

  private val itemSeparator = '\t'
  private val tidSeparator = ' '

  /** Parses `"<itemId>\t<tid> <tid> ..."` into an item id plus its tid array;
    * a lone numeric item id yields an empty tid array. */
  def lineToItemsWithTidList(line: String): ItemIdWithTidList =
    line.split(itemSeparator) match {
      case Array(itemId, tidsText) =>
        (itemId.toInt, tidsText.split(tidSeparator).map(_.toInt))
      case Array(itemId) if StringUtil.probablyDigit(itemId) =>
        (itemId.toInt, Array.empty[Int])
      case _ =>
        // invalid line — fall back to the shared sentinel
        invalidItemIdWithEmptyTidList
    }
}
| mjkowalski/spark-fim | src/main/scala/net/mkowalski/sparkfim/util/VerticalDbUtil.scala | Scala | mit | 793 |
package com.joypeg.scamandrill.models
/**
 * Request for adding an email address to the rejection blacklist.
 * @param key - a valid API key
 * @param email - the email address to add to the blacklist
 * @param comment - an optional comment describing the rejection
 * @param subaccount - an optional unique identifier for the subaccount to limit the blacklist entry to
 */
case class MRejectAdd(key: String = DefaultConfig.defaultKeyFromConfig,
                      email: String,
                      comment: Option[String] = None,
                      subaccount: Option[String] = None) extends MandrillRequest
/**
 * Request for retrieving the list of blacklisted email addresses.
 * @param key - a valid API key
 * @param email - the (possibly partial) email address to search the blacklist for
 * @param include_expired - whether to include rejections that have already expired
 * @param subaccount - an optional unique identifier for the subaccount to limit the blacklist query to
 */
case class MRejectList(key: String = DefaultConfig.defaultKeyFromConfig,
                       email: String,
                       include_expired: Boolean = false,
                       subaccount: Option[String] = None) extends MandrillRequest
/**
 * Request for removing an email address from the rejection blacklist.
 * @param key - a valid API key
 * @param email - the email address to remove from the blacklist
 * @param subaccount - an optional unique identifier for the subaccount to limit the blacklist entry to
 */
case class MRejectDelete(key: String = DefaultConfig.defaultKeyFromConfig,
                         email: String,
                         subaccount: Option[String] = None) extends MandrillRequest
| AdAgility/scamandrill | src/main/scala/com/joypeg/scamandrill/models/MandrillRejectRequests.scala | Scala | apache-2.0 | 1,618 |
/*
* Copyright (c) 2014 Paul Bernard
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Spectrum Finance is based in part on:
* QuantLib. http://quantlib.org/
*
*/
package org.quantintel.ql.instruments.forward
/**
 * Forward contract on a fixed-rate bond.
 *
 * NOTE(review): currently an empty placeholder that only inherits behavior
 * from [[Forward]]; implementation appears to be pending.
 *
 * @author Paul Bernard
 */
class FixedRateBondForward extends Forward {

}
| quantintel/spectrum | financial/src/main/scala/org/quantintel/ql/instruments/forward/FixedRateBondForward.scala | Scala | apache-2.0 | 813 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.util
import org.apache.spark.internal.Logging
import org.apache.spark.util.{Clock, SystemClock}
/** Fires `callback(time)` every `period` milliseconds (as measured by `clock`)
  * on a dedicated daemon thread, until stopped. Callback times are aligned to
  * multiples of `period`. */
private[streaming]
class RecurringTimer(clock: Clock, period: Long, callback: (Long) => Unit, name: String)
  extends Logging {

  // Daemon thread running the timer loop; named after the timer for debuggability.
  private val thread = new Thread("RecurringTimer - " + name) {
    setDaemon(true)
    override def run(): Unit = { loop }
  }

  // Time of the last callback actually made; -1 until the first callback.
  @volatile private var prevTime = -1L
  // Time at which the next callback is due; -1 until start().
  @volatile private var nextTime = -1L
  @volatile private var stopped = false

  /**
   * Get the time when this timer will fire if it is started right now.
   * The time will be a multiple of this timer's period and more than
   * current system time.
   */
  def getStartTime(): Long = {
    (math.floor(clock.getTimeMillis().toDouble / period) + 1).toLong * period
  }

  /**
   * Get the time when the timer will fire if it is restarted right now.
   * This time depends on when the timer was started the first time, and was stopped
   * for whatever reason. The time must be a multiple of this timer's period and
   * more than current time.
   */
  def getRestartTime(originalStartTime: Long): Long = {
    val gap = clock.getTimeMillis() - originalStartTime
    (math.floor(gap.toDouble / period).toLong + 1) * period + originalStartTime
  }

  /**
   * Start at the given start time.
   */
  def start(startTime: Long): Long = synchronized {
    nextTime = startTime
    thread.start()
    logInfo("Started timer for " + name + " at time " + nextTime)
    nextTime
  }

  /**
   * Start at the earliest time it can start based on the period.
   */
  def start(): Long = {
    start(getStartTime())
  }

  /**
   * Stop the timer, and return the last time the callback was made.
   *
   * @param interruptTimer True will interrupt the callback if it is in progress (not guaranteed to
   *                       give correct time in this case). False guarantees that there will be at
   *                       least one callback after `stop` has been called.
   */
  def stop(interruptTimer: Boolean): Long = synchronized {
    if (!stopped) {
      stopped = true
      if (interruptTimer) {
        thread.interrupt()
      }
      thread.join()
      logInfo("Stopped timer for " + name + " after time " + prevTime)
    }
    prevTime
  }

  // Waits until nextTime, invokes the callback, then advances nextTime by one period.
  private def triggerActionForNextInterval(): Unit = {
    clock.waitTillTime(nextTime)
    callback(nextTime)
    prevTime = nextTime
    nextTime += period
    logDebug("Callback for " + name + " called at time " + prevTime)
  }

  /**
   * Repeatedly call the callback every interval.
   */
  private def loop(): Unit = {
    try {
      while (!stopped) {
        triggerActionForNextInterval()
      }
      // One final callback after stop(false), as promised by stop()'s contract.
      triggerActionForNextInterval()
    } catch {
      case e: InterruptedException =>
        // Interrupted by stop(true); exit quietly.
    }
  }
}
private[streaming]
object RecurringTimer extends Logging {
  /** Manual smoke test: logs the gap between consecutive timer callbacks for 30s. */
  def main(args: Array[String]): Unit = {
    val period = 1000
    var previousCallbackTime = 0L

    def onRecur(time: Long): Unit = {
      val currentTime = System.currentTimeMillis()
      logInfo("" + currentTime + ": " + (currentTime - previousCallbackTime))
      previousCallbackTime = currentTime
    }

    val timer = new RecurringTimer(new SystemClock(), period, onRecur, "Test")
    timer.start()
    Thread.sleep(30 * 1000)
    timer.stop(true)
  }
}
| maropu/spark | streaming/src/main/scala/org/apache/spark/streaming/util/RecurringTimer.scala | Scala | apache-2.0 | 4,098 |
package pl.newicom.dddd.office
import akka.actor.ActorRef
import pl.newicom.dddd.actor.BusinessEntityActorFactory
import pl.newicom.dddd.aggregate.BusinessEntity
import pl.newicom.dddd.messaging.correlation.EntityIdResolution
import scala.reflect.ClassTag
/** Factory for "office" actors managing business entities of type `A`. */
abstract class OfficeFactory[A <: BusinessEntity : BusinessEntityActorFactory : EntityIdResolution : ClassTag] {

  /** Returns the office actor, creating it on first access. */
  def getOrCreate: ActorRef

  /** Office name derived from the simple runtime class name of the entity type. */
  def officeName: String = {
    val entityClass = implicitly[ClassTag[A]].runtimeClass
    entityClass.getSimpleName
  }
} | ahjohannessen/akka-ddd | akka-ddd-core/src/main/scala/pl/newicom/dddd/office/OfficeFactory.scala | Scala | mit | 473 |
package skinny.engine.context
import javax.servlet.ServletContext
import javax.servlet.http.{ HttpServletRequest, HttpServletResponse }
import skinny.engine.UnstableAccessValidation
import skinny.engine.implicits.{ CookiesImplicits, ServletApiImplicits, SessionImplicits }
import skinny.engine.request.StableHttpServletRequest
/**
 * SkinnyEngine's context for each request.
 *
 * Bundles the servlet request/response pair, the servlet context and the
 * validation policy guarding access to unstable (request-scoped) state.
 */
trait SkinnyEngineContext
    extends ServletApiImplicits
    with SessionImplicits
    with CookiesImplicits {

  // The current HTTP request.
  val request: HttpServletRequest

  // The current HTTP response.
  val response: HttpServletResponse

  // The servlet context this application runs in.
  val servletContext: ServletContext

  // Policy controlling access to unstable request state.
  val unstableAccessValidation: UnstableAccessValidation

  /** Returns a context whose request is wrapped as a stable request under `validation`. */
  def surelyStable(validation: UnstableAccessValidation): SkinnyEngineContext = {
    SkinnyEngineContext.surelyStable(this, validation)
  }
}
object SkinnyEngineContext {

  // Concrete context; trait members are supplied via the implicit constructor list.
  private class StableSkinnyEngineContext(
    implicit val request: HttpServletRequest,
    val response: HttpServletResponse,
    val servletContext: ServletContext,
    val unstableAccessValidation: UnstableAccessValidation) extends SkinnyEngineContext {
  }

  /** Wraps an existing context so its request becomes a StableHttpServletRequest. */
  def surelyStable(ctx: SkinnyEngineContext, validation: UnstableAccessValidation): SkinnyEngineContext = {
    new StableSkinnyEngineContext()(StableHttpServletRequest(ctx.request, validation), ctx.response, ctx.servletContext, validation)
  }

  /** Builds a full context from the raw servlet objects. */
  def build(ctx: ServletContext, req: HttpServletRequest, resp: HttpServletResponse, validation: UnstableAccessValidation): SkinnyEngineContext = {
    new StableSkinnyEngineContext()(StableHttpServletRequest(req, validation), resp, ctx, validation)
  }

  /** Builds a request-only context; response and servlet context are left null. */
  def buildWithRequest(req: HttpServletRequest, validation: UnstableAccessValidation): SkinnyEngineContext = {
    new StableSkinnyEngineContext()(StableHttpServletRequest(req, validation), null, null, validation)
  }

  /** Builds a context without a response; the response member is left null. */
  def buildWithoutResponse(req: HttpServletRequest, ctx: ServletContext, validation: UnstableAccessValidation): SkinnyEngineContext = {
    new StableSkinnyEngineContext()(StableHttpServletRequest(req, validation), null, ctx, validation)
  }
}
| holycattle/skinny-framework | engine/src/main/scala/skinny/engine/context/SkinnyEngineContext.scala | Scala | mit | 2,074 |
package models.base
import scalaz._
import Scalaz._
import scalaz.effect.IO
import scalaz.Validation
import scalaz.Validation.FlatMap._
import scalaz.NonEmptyList._
import cache._
import db._
import io.megam.auth.funnel.FunnelErrors._
import controllers.Constants._
import io.megam.auth.stack.AccountResult
import io.megam.auth.stack.{ Name, Phone, Password, States, Approval, Dates, Suspend }
import io.megam.auth.stack.SecurePasswordHashing
import io.megam.auth.stack.SecurityActions
import io.megam.common.uid.UID
import io.megam.util.Time
import net.liftweb.json._
import net.liftweb.json.scalaz.JsonScalaz._
import java.nio.charset.Charset
import java.util.UUID
import com.datastax.driver.core.{ ResultSet, Row }
import com.websudos.phantom.dsl._
import scala.concurrent.{ Future => ScalaFuture }
import com.websudos.phantom.connectors.{ ContactPoint, KeySpaceDef }
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.annotation.tailrec
import models.team._
import models.base.Events._
import models.billing._
import controllers.stack.ImplicitJsonFormats
/**
* @author rajthilak
*
*/
/** Inbound account payload; `json` renders it in the hand-built wire format. */
case class AccountInput(name: Name, phone: Phone, email: String, api_key: String, password: Password, states: States, approval: Approval, suspend: Suspend, registration_ip_address: String, dates: Dates) {
  // Hand-assembled JSON: field order and escaping must stay in sync with the parsers.
  val json = "{\\"name\\":" +name.json+",\\"phone\\":" + phone.json + ",\\"email\\":\\"" + email + "\\",\\"api_key\\":\\"" + api_key + "\\",\\"password\\":" + password.json + ",\\"states\\":" + states.json + ",\\"approval\\":" + approval.json + ",\\"suspend\\":" + suspend.json + ",\\"registration_ip_address\\":\\"" + registration_ip_address + "\\",\\"dates\\":" + dates.json + "}"
}
/** Password-reset payload; all other account fields render as empty in `json`. */
case class AccountReset(password_reset_key: String, password_reset_sent_at: String) {
  // NOTE(review): Password is constructed with only two positional args here,
  // while elsewhere it takes (hash, resetKey, sentAt) — verify which fields
  // these two values actually map to.
  val password = Password(password_reset_key, password_reset_sent_at)
  val json = "{\\"id\\":\\""+ "\\",\\"name\\":" + Name.empty.json+",\\"phone\\":" + Phone.empty.json + ",\\"email\\":\\"" + "\\",\\"api_key\\":\\"" + "\\",\\"password\\":" + password.json + ",\\"states\\":" + States.empty.json + ",\\"approval\\":" + Approval.empty.json + ",\\"suspend\\":" + Suspend.empty.json + ",\\"registration_ip_address\\":\\"" + "\\",\\"dates\\":" + Dates.empty.json + "}"
}
/** Phantom table mapping for account rows. Structured fields (name, phone,
  * password, states, approval, suspend, dates) are persisted as JSON blobs
  * via lift-json, using the formats from ImplicitJsonFormats. */
sealed class AccountSacks extends CassandraTable[AccountSacks, AccountResult] with ImplicitJsonFormats {

  object id extends StringColumn(this)

  // Name stored as a JSON blob.
  object name extends JsonColumn[AccountSacks, AccountResult, Name](this) {
    override def fromJson(obj: String): Name = {
      JsonParser.parse(obj).
        extract[Name]
    }

    override def toJson(obj: Name): String = {
      compactRender(Extraction.decompose(obj))
    }
  }

  // Phone stored as a JSON blob.
  object phone extends JsonColumn[AccountSacks, AccountResult, Phone](this) {
    override def fromJson(obj: String): Phone = {
      JsonParser.parse(obj).extract[Phone]
    }

    override def toJson(obj: Phone): String = {
      compactRender(Extraction.decompose(obj))
    }
  }

  // Email is the primary key for account lookups.
  object email extends StringColumn(this) with PrimaryKey[String]
  object api_key extends StringColumn(this)

  // Password (hash + reset metadata) stored as a JSON blob.
  object password extends JsonColumn[AccountSacks, AccountResult, Password](this) {
    override def fromJson(obj: String): Password = {
      JsonParser.parse(obj).extract[Password]
    }

    override def toJson(obj: Password): String = {
      compactRender(Extraction.decompose(obj))
    }
  }

  // Account state flags stored as a JSON blob.
  object states extends JsonColumn[AccountSacks, AccountResult, States](this) {
    override def fromJson(obj: String): States = {
      JsonParser.parse(obj).extract[States]
    }

    override def toJson(obj: States): String = {
      compactRender(Extraction.decompose(obj))
    }
  }

  // Approval metadata stored as a JSON blob.
  object approval extends JsonColumn[AccountSacks, AccountResult, Approval](this) {
    override def fromJson(obj: String): Approval = {
      JsonParser.parse(obj).extract[Approval]
    }

    override def toJson(obj: Approval): String = {
      compactRender(Extraction.decompose(obj))
    }
  }

  // Suspension metadata stored as a JSON blob.
  object suspend extends JsonColumn[AccountSacks, AccountResult, Suspend](this) {
    override def fromJson(obj: String): Suspend = {
      JsonParser.parse(obj).extract[Suspend]
    }

    override def toJson(obj: Suspend): String = {
      compactRender(Extraction.decompose(obj))
    }
  }

  object registration_ip_address extends StringColumn(this)

  // Timestamps stored as a JSON blob.
  object dates extends JsonColumn[AccountSacks, AccountResult, Dates](this) {
    override def fromJson(obj: String): Dates = {
      JsonParser.parse(obj).extract[Dates]
    }

    override def toJson(obj: Dates): String = {
      compactRender(Extraction.decompose(obj))
    }
  }

  /** Materializes an AccountResult from a Cassandra row. */
  def fromRow(row: Row): AccountResult = {
    AccountResult(
      id(row),
      name(row),
      phone(row),
      email(row),
      api_key(row),
      password(row),
      states(row),
      approval(row),
      suspend(row),
      registration_ip_address(row),
      dates(row))
  }
}
/** Concrete DAO over the `accounts` table. All operations block on the
  * underlying phantom futures (5s timeout) and wrap the result in a
  * successful ValidationNel.
  *
  * NOTE(review): Await failures/timeouts are thrown, not converted into a
  * failureNel — callers must be prepared for exceptions despite the
  * ValidationNel return type. */
abstract class ConcreteAccounts extends AccountSacks with RootConnector {

  override lazy val tableName = "accounts"
  override implicit def space: KeySpace = scyllaConnection.space
  override implicit def session: Session = scyllaConnection.session

  /** Total number of account rows. */
  def dbCount: ValidationNel[Throwable, Option[Long]] = {
    val res = select.count.one
    Await.result(res, 5.seconds).successNel
  }

  /** Inserts a full account row; blank email/api_key/registration IP are stored as "". */
  def dbInsert(account: AccountResult): ValidationNel[Throwable, ResultSet] = {
    val res = insert.value(_.id, account.id)
      .value(_.name, account.name)
      .value(_.phone, account.phone)
      .value(_.email, NilorNot(account.email, ""))
      .value(_.api_key, NilorNot(account.api_key, ""))
      .value(_.password, account.password)
      .value(_.states, account.states)
      .value(_.approval, account.approval)
      .value(_.suspend, account.suspend)
      .value(_.registration_ip_address, NilorNot(account.registration_ip_address, ""))
      .value(_.dates, account.dates)
      .future()
    Await.result(res, 5.seconds).successNel
  }

  /** Fetches the account keyed by `email`, if present. */
  def dbGet(email: String): ValidationNel[Throwable, Option[AccountResult]] = {
    val res = select.where(_.email eqs email).one()
    Await.result(res, 5.seconds).successNel
  }

  /** Fetches all account rows. */
  def dbSelectAll: ValidationNel[Throwable, Seq[AccountResult]] = {
    val res = select.fetch
    Await.result(res, 5.seconds).successNel
  }

  /** Deletes the account row keyed by `email`. */
  def deleteRecords(email: String): ValidationNel[Throwable, ResultSet] = {
    val res = delete.where(_.email eqs email).future()
    Await.result(res, 5.seconds).successNel
  }

  /** Updates the row keyed by `email`. For every field, a non-empty value in
    * `rip` (the request input) wins over the stored value in `aor`. */
  def dbUpdate(email: String, rip: AccountResult, aor: Option[AccountResult]): ValidationNel[Throwable, ResultSet] = {
    val res = update.where(_.email eqs NilorNot(email, aor.get.email))
      .modify(_.id setTo NilorNot(rip.id, aor.get.id))
      .and(_.name setTo new Name(NilorNot(rip.name.first_name, aor.get.name.first_name),
        NilorNot(rip.name.last_name, aor.get.name.last_name)))
      .and(_.phone setTo new Phone(NilorNot(rip.phone.phone, aor.get.phone.phone),
        NilorNot(rip.phone.phone_verified, aor.get.phone.phone_verified)))
      .and(_.api_key setTo NilorNot(rip.api_key, aor.get.api_key))
      .and(_.password setTo new Password(NilorNot(rip.password.password_hash, aor.get.password.password_hash),
        NilorNot(rip.password.password_reset_key, aor.get.password.password_reset_key),
        NilorNot(rip.password.password_reset_sent_at, aor.get.password.password_reset_sent_at)))
      .and(_.states setTo new States(NilorNot(rip.states.authority, aor.get.states.authority),
        NilorNot(rip.states.active, aor.get.states.active),
        NilorNot(rip.states.blocked, aor.get.states.blocked),
        NilorNot(rip.states.staged, aor.get.states.staged)))
      .and(_.approval setTo new Approval(NilorNot(rip.approval.approved, aor.get.approval.approved),
        NilorNot(rip.approval.approved_by_id, aor.get.approval.approved_by_id),
        NilorNot(rip.approval.approved_at, aor.get.approval.approved_at)))
      .and(_.suspend setTo new Suspend(NilorNot(rip.suspend.suspended, aor.get.suspend.suspended),
        NilorNot(rip.suspend.suspended_at, aor.get.suspend.suspended_at),
        NilorNot(rip.suspend.suspended_till, aor.get.suspend.suspended_till)))
      .and(_.registration_ip_address setTo NilorNot(rip.registration_ip_address, aor.get.registration_ip_address))
      .and(_.dates setTo new Dates(NilorNot(rip.dates.last_posted_at, aor.get.dates.last_posted_at),
        NilorNot(rip.dates.last_emailed_at, aor.get.dates.last_emailed_at),
        NilorNot(rip.dates.previous_visit_at, aor.get.dates.previous_visit_at),
        NilorNot(rip.dates.first_seen_at, aor.get.dates.first_seen_at),
        NilorNot(rip.dates.created_at, aor.get.dates.created_at)))
      .future()
    Await.result(res, 5.seconds).successNel
  }

  /** Returns `aor` when `rip` is null or empty, otherwise `rip` (request value wins). */
  private def NilorNot(rip: String, aor: String): String =
    if (rip == null || rip.isEmpty) aor else rip
}
object Accounts extends ConcreteAccounts {
  ///////////////// All these conversion stuff should move out. ///////////
  // 1. Get me an account input object from a string
  // 2. Get me an account result object from account_input
  // 3. Get me a clone of account result with password hashed
  // 4. Get me a account result with passticket verified and mutated with new password hash
  // 5. Get me a account result with passticket updated.
  // Parses a raw JSON request body into an AccountInput; any parse/extract
  // failure is wrapped as a MalformedBodyError carrying the offending input.
  private def parseAccount(input: String): ValidationNel[Throwable, AccountInput] = {
    (Validation.fromTryCatchThrowable[AccountInput, Throwable] {
      parse(input).extract[AccountInput]
    } leftMap { t: Throwable => new MalformedBodyError(input, t.getMessage) }).toValidationNel
  }
  // Builds a brand-new AccountResult for a freshly created account: the
  // incoming password_hash is hashed via SecurePasswordHashing, reset
  // key/sent-at are blanked, and created_at is stamped with Time.now.
  private def mkAccountResult(id: String, m: AccountInput): ValidationNel[Throwable, AccountResult] = {
    (Validation.fromTryCatchThrowable[AccountResult, Throwable] {
      val dates = new Dates(m.dates.last_posted_at, m.dates.last_emailed_at, m.dates.previous_visit_at,
        m.dates.first_seen_at, Time.now.toString)
      val pwd = new Password(SecurePasswordHashing.hashPassword(m.password.password_hash),"","")
      AccountResult(id, m.name, m.phone, m.email, m.api_key, pwd, m.states, m.approval, m.suspend, m.registration_ip_address, dates)
    } leftMap { t: Throwable => new MalformedBodyError(m.json, t.getMessage) }).toValidationNel
  }
  // Clones an AccountResult, re-hashing the password only when a non-blank
  // password_hash was supplied; otherwise the existing Password is kept as-is.
  private def mkAccountResultDup(m: AccountResult): ValidationNel[Throwable, AccountResult] = {
    (Validation.fromTryCatchThrowable[AccountResult, Throwable] {
      if(m.password!=null && m.password.password_hash!=null && m.password.password_hash.trim.length >0) {
        val pwd = new Password(SecurePasswordHashing.hashPassword(m.password.password_hash),"","")
        AccountResult(m.id, m.name, m.phone, m.email, m.api_key, pwd, m.states, m.approval, m.suspend, m.registration_ip_address, m.dates)
      } else {
        AccountResult(m.id, m.name, m.phone, m.email, m.api_key, m.password, m.states, m.approval, m.suspend, m.registration_ip_address, m.dates)
      }
    }).toValidationNel
  }
  // Password-reset step 2: only if the caller's reset key matches the stored
  // one do we hash and install the new password; otherwise authentication fails.
  private def mkAccountResultWithPassword(m: AccountResult, old: AccountResult): ValidationNel[Throwable, AccountResult] = {
    if (m.password.password_reset_key == old.password.password_reset_key) {
      val pwd = new Password(SecurePasswordHashing.hashPassword(m.password.password_hash),"","")
      val mupd = AccountResult(m.id, m.name, m.phone, m.email, m.api_key, pwd, m.states, m.approval, m.suspend, m.registration_ip_address, m.dates)
      Validation.success[Throwable, AccountResult](mupd).toValidationNel
    } else {
      Validation.failure[Throwable, AccountResult](new CannotAuthenticateError(m.email, "Password token didn't match.")).toValidationNel
    }
  }
  // Password-reset step 1: builds a sparse AccountResult ("dum" is a dummy
  // template record) carrying only the fresh reset token and its timestamp.
  private def mkAccountResultWithToken(t: String): ValidationNel[Throwable, AccountResult] = {
    val pwd = new Password("",t, Time.now.toString)
    val m = AccountResult("dum")
    val mupd = AccountResult("", m.name, m.phone, "", m.api_key, pwd, m.states, m.approval, m.suspend, m.registration_ip_address, m.dates)
    Validation.success[Throwable, AccountResult](mupd).toValidationNel
  }
  ///////////////// All these conversion stuff should move out. ///////////
  // Onboarding side effect: when the user owns no organization yet, create a
  // default org and a default domain under it, then hand back `acc` unchanged.
  // NOTE(review): the JSON bodies are built by string concatenation — values
  // from MConfig are assumed not to need escaping; verify.
  private def mkOrgIfEmpty(email: String, orgs: Seq[OrganizationsResult], acc: AccountResult): ValidationNel[Throwable, AccountResult] = {
    val org_json = "{\\"name\\":\\"" + app.MConfig.org + "\\"}"
    val domain_json = "{\\"name\\":\\"" + app.MConfig.domain + "\\"}"
    if (!orgs.isEmpty)
      return Validation.success[Throwable, AccountResult](acc).toValidationNel
    else {
      (models.team.Organizations.create(email, org_json.toString) leftMap { t: NonEmptyList[Throwable] =>
        new ServiceUnavailableError(email, (t.list.map(m => m.getMessage)).mkString("\\n"))
      }).toValidationNel.flatMap { xso: Option[OrganizationsResult] =>
        xso match {
          case Some(xs) => {
            (models.team.Domains.create(xs.id, domain_json.toString) leftMap { t: NonEmptyList[Throwable] =>
              new ServiceUnavailableError(xs.id, (t.list.map(m => m.getMessage)).mkString("\\n"))
            }).toValidationNel.flatMap { dso: DomainsResult =>
              Validation.success[Throwable, AccountResult](acc).toValidationNel
            }
          }
          case None => Validation.success[Throwable, AccountResult](acc).toValidationNel
        }
      }
    }
  }
  // Authenticates an account: parse body, look up by email, validate the
  // password hash, publish a LOGIN event, and return the stored record.
  def login(input: String): ValidationNel[Throwable, AccountResult] = {
    for {
      p <- parseAccount(input)
      a <- (Accounts.findByEmail(p.email) leftMap { t: NonEmptyList[Throwable] => t })
      s <- SecurityActions.Validate(p.password.password_hash, a.get.password.password_hash)
      e <- Events(a.get.id, EVENTUSER, Events.LOGIN, Map(EVTEMAIL -> p.email)).createAndPub()
    } yield {
      a.get
    }
  }
  // Creates an account end-to-end: parse, mint an id, build + persist the
  // record, ensure a default org/domain exists, onboard a balance, and
  // publish an ONBOARD event.
  def create(input: String): ValidationNel[Throwable, AccountResult] = {
    for {
      p <- parseAccount(input)
      uir <- (UID("act").get leftMap { err: NonEmptyList[Throwable] => err })
      ast <- mkAccountResult(uir.get._1 + uir.get._2, p)
      ins <- dbInsert(ast)
      org <- Organizations.findByEmail(p.email)
      res <- mkOrgIfEmpty(p.email, org, ast)
      bal <- Balances.onboardAccountBalance(p.email)
      evn <- Events(ast.id, EVENTUSER, Events.ONBOARD, Map(EVTEMAIL -> ast.email)).createAndPub()
    } yield {
      res
    }
  }
  // Updates the account stored under `email` with the (partial) JSON body;
  // the password is re-hashed only when a new one was provided.
  // Note: yields the PREVIOUS record `a`, not the updated one.
  def update(email: String, input: String): ValidationNel[Throwable, Option[AccountResult]] = {
    val accountResult: ValidationNel[Throwable, AccountResult] = (Validation.fromTryCatchThrowable[AccountResult, Throwable] {
      parse(input).extract[AccountResult]
    } leftMap { t: Throwable => new MalformedBodyError(input, t.getMessage) }).toValidationNel //capture failure
    for {
      t <- accountResult
      c <- mkAccountResultDup(t)
      a <- (Accounts.findByEmail(email) leftMap { t: NonEmptyList[Throwable] => t })
      d <- dbUpdate(email, c, a)
    } yield {
      a
    }
  }
  // Starts the forgot-password flow: store a fresh 26-char reset token on the
  // account and publish a RESET event carrying it (for the mail pipeline).
  def forgot(email: String): ValidationNel[Throwable, Option[AccountResult]] = {
    val token = generateToken(26)
    for {
      a <- (Accounts.findByEmail(email) leftMap { t: NonEmptyList[Throwable] => t })
      s <- mkAccountResultWithToken(token)
      d <- dbUpdate(email,s, a)
      e <- Events(a.get.id, EVENTUSER, Events.RESET, Map(EVTEMAIL -> a.get.email, EVTTOKEN -> token)).createAndPub()
    } yield {
      a
    }
  }
  // Completes the forgot-password flow: verify the submitted reset token
  // against the stored one and persist the newly hashed password.
  def password_reset(input: String): ValidationNel[Throwable, Option[AccountResult]] = {
    val accountResult: ValidationNel[Throwable, AccountResult] = (Validation.fromTryCatchThrowable[AccountResult, Throwable] {
      parse(input).extract[AccountResult]
    } leftMap { t: Throwable => new MalformedBodyError(input, t.getMessage) }).toValidationNel //capture failure
    for {
      c <- accountResult
      a <- (Accounts.findByEmail(c.email) leftMap { t: NonEmptyList[Throwable] => t })
      m <- (mkAccountResultWithPassword(c, a.get) leftMap { t: NonEmptyList[Throwable] => t })
      u <- (dbUpdate(c.email, m, a) leftMap { t: NonEmptyList[Throwable] => t})
    } yield {
      c.some
    }
  }
  // Cache-through lookup by email (InMemory memoizes successful results via
  // the Sedimenter below); a miss in the db becomes ResourceItemNotFound.
  def findByEmail(email: String): ValidationNel[Throwable, Option[AccountResult]] = {
    InMemory[ValidationNel[Throwable, Option[AccountResult]]]({
      name: String =>
        {
          play.api.Logger.debug(("%-20s -->[%s]").format("LIV", email))
          (dbGet(email) leftMap { t: NonEmptyList[Throwable] =>
            new ServiceUnavailableError(email, (t.list.map(m => m.getMessage)).mkString("\\n"))
          }).toValidationNel.flatMap { xso: Option[AccountResult] =>
            xso match {
              case Some(xs) => {
                Validation.success[Throwable, Option[AccountResult]](xs.some).toValidationNel
              }
              case None => Validation.failure[Throwable, Option[AccountResult]](new ResourceItemNotFound(email, "")).toValidationNel
            }
          }
        }
    }).get(email).eval(InMemoryCache[ValidationNel[Throwable, Option[AccountResult]]]())
  }
  //Admin authority can list users hack for 1.5.
  // Lists all accounts; an empty table is reported as ResourceItemNotFound
  // rather than an empty success.
  def list: ValidationNel[Throwable, Seq[AccountResult]] = {
    (dbSelectAll leftMap { t: NonEmptyList[Throwable] =>
      new ResourceItemNotFound("Admin", "Users = nothing found.")
    }).toValidationNel.flatMap { nm: Seq[AccountResult] =>
      if (!nm.isEmpty)
        Validation.success[Throwable, Seq[AccountResult]](nm).toValidationNel
      else
        Validation.failure[Throwable, Seq[AccountResult]](new ResourceItemNotFound("Admin", "Users = nothing found.")).toValidationNel
    }
  }
  //Admin authority: scaffolding method to call update for another users
  // Extracts the target email from the body itself, then delegates to
  // update(email, input) above.
  def update(input: String): ValidationNel[Throwable, Option[AccountResult]] = {
    val accountResult: ValidationNel[Throwable, AccountResult] = (Validation.fromTryCatchThrowable[AccountResult, Throwable] {
      parse(input).extract[AccountResult]
    } leftMap { t: Throwable => new MalformedBodyError(input, t.getMessage) }).toValidationNel
    for {
      c <- accountResult
      a <- update(c.email, input)
    } yield {
      c.some
    }
  }
  // Deletes an account's records. NOTE(review): a failed delete is mapped to
  // success(none) — the error is swallowed; confirm this is intentional.
  def delete(email: String): ValidationNel[Throwable, Option[AccountResult]] = {
    deleteRecords(email) match {
      case Success(value) => Validation.success[Throwable, Option[AccountResult]](AccountResult("dum").some).toValidationNel
      case Failure(err) => Validation.success[Throwable, Option[AccountResult]](none).toValidationNel
    }
  }
  // Total number of accounts as a string; a missing count is reported as "0".
  def countAll: ValidationNel[Throwable, String] = dbCount.map(l => l.getOrElse(0L).toString)
  // Cache policy for findByEmail: only successful lookups are "sedimented"
  // (memoized); failures are retried on the next call.
  implicit val sedimentAccountEmail = new Sedimenter[ValidationNel[Throwable, Option[AccountResult]]] {
    def sediment(maybeASediment: ValidationNel[Throwable, Option[AccountResult]]): Boolean = {
      maybeASediment.isSuccess
    }
  }
  // Random alphanumeric token of the requested length.
  // NOTE(review): scala.util.Random is not cryptographically secure; for
  // password-reset tokens consider java.security.SecureRandom.
  private def generateToken(length: Int): String = {
    val chars = ('a' to 'z') ++ ('A' to 'Z') ++ ('0' to '9')
    generateTokenFrom(length, chars)
  }
  // Draws `length` characters uniformly at random from `chars`.
  private def generateTokenFrom(length: Int, chars: Seq[Char]): String = {
    val sb = new StringBuilder
    for (i <- 1 to length) {
      val randomNum = util.Random.nextInt(chars.length)
      sb.append(chars(randomNum))
    }
    sb.toString
  }
}
| indykish/vertice_gateway | app/models/base/Accounts.scala | Scala | mit | 19,178 |
package filodb.cli
import akka.actor.{ActorRef, ActorSystem}
import akka.pattern.ask
import akka.util.Timeout
import com.opencsv.{CSVReader, CSVWriter}
import org.velvia.filo.{ArrayStringRowReader, RowReader}
import scala.concurrent.{Await, Future, ExecutionContext}
import scala.concurrent.duration._
import scala.language.postfixOps
import filodb.core.metadata.MetaStore
import filodb.core.reprojector.{MemTable, Scheduler}
import filodb.coordinator.CoordinatorActor
import filodb.coordinator.sources.CsvSourceActor
import filodb.core._
// Turn off style rules for CLI classes
//scalastyle:off
/**
 * CLI-side CSV import/export helpers for FiloDB. Mixed into the command-line
 * tool; blocking Awaits and Thread.sleep are acceptable here by design (see
 * the scalastyle:off above). Results are signalled through `exitCode`.
 */
trait CsvImportExport {
  val system: ActorSystem
  val metaStore: MetaStore
  val memTable: MemTable
  val scheduler: Scheduler
  val coordinatorActor: ActorRef
  var exitCode = 0            // process exit status set by the commands below
  implicit val ec: ExecutionContext
  // NOTE(review): JavaConversions is the deprecated *implicit* converter;
  // JavaConverters (.asScala/.asJava) is the recommended replacement.
  import scala.collection.JavaConversions._
  // Blocks on `cmd` (15s); an ErrorResponse prints, sets exitCode=1 and yields
  // null; any other Response is passed to `handler`.
  protected def parseResponse[B](cmd: => Future[Response])(handler: PartialFunction[Response, B]): B = {
    Await.result(cmd, 15 seconds) match {
      case e: ErrorResponse =>
        println("ERROR: " + e)
        exitCode = 1
        null.asInstanceOf[B]
      case r: Response => handler(r)
    }
  }
  // Blocks on `cmd` for `awaitTimeout` and applies `func` to the result.
  protected def parse[T, B](cmd: => Future[T], awaitTimeout: FiniteDuration = 5 seconds)(func: T => B): B = {
    func(Await.result(cmd, awaitTimeout))
  }
  // Synchronous ask: sends `msg` to `actor` and feeds the reply to `f`.
  protected def actorAsk[B](actor: ActorRef, msg: Any,
                            askTimeout: FiniteDuration = 5 seconds)(f: PartialFunction[Any, B]): B = {
    implicit val timeout = Timeout(askTimeout)
    parse(actor ? msg, askTimeout)(f)
  }
  // Blocks on `cmd` and prints confirmation on Success (errors handled by
  // parseResponse).
  protected def awaitSuccess(cmd: => Future[Response]) {
    parseResponse(cmd) {
      case Success => println("Succeeded.")
    }
  }
  /**
   * Reads the CSV at `csvPath` (first row = column names) and ingests it into
   * `dataset`/`version` in batches of 100 rows, backing off while the MemTable
   * cannot accept more, then flushes and waits for the flush to drain.
   */
  def ingestCSV(dataset: String, version: Int, csvPath: String) {
    val fileReader = new java.io.FileReader(csvPath)
    val reader = new CSVReader(fileReader, ',')
    val columns = reader.readNext.toSeq
    println(s"Ingesting CSV at $csvPath with columns $columns...")
    // (the `version: Int` ascription is redundant but harmless)
    val ingestCmd = CoordinatorActor.SetupIngestion(dataset, columns, version: Int)
    // The bare `return`s below are nonlocal returns out of ingestCSV from
    // inside the handler lambda — they abort the whole command on setup errors.
    actorAsk(coordinatorActor, ingestCmd, 10 seconds) {
      case CoordinatorActor.IngestionReady =>
      case CoordinatorActor.UnknownDataset =>
        println(s"Dataset $dataset is not known, you need to --create it first!")
        exitCode = 2
        return
      case CoordinatorActor.UndefinedColumns(undefCols) =>
        println(s"Some columns $undefCols are not defined, please define them with --create first!")
        exitCode = 2
        return
      case CoordinatorActor.BadSchema(msg) =>
        println(s"BadSchema - $msg")
        exitCode = 2
        return
    }
    var linesIngested = 0
    reader.iterator.grouped(100).foreach { lines =>
      val mappedLines = lines.toSeq.map(ArrayStringRowReader)
      var resp: MemTable.IngestionResponse = MemTable.PleaseWait
      // Retry the batch until accepted; poll every 10s while the MemTable is
      // flushing and cannot ingest.
      do {
        resp = memTable.ingestRows(dataset, version, mappedLines)
        if (resp == MemTable.PleaseWait) {
          do {
            println("Waiting for MemTable to be able to ingest again...")
            Thread sleep 10000
          } while (!memTable.canIngest(dataset, version))
        }
      } while (resp != MemTable.Ingested)
      linesIngested += mappedLines.length
      if (linesIngested % 10000 == 0) println(s"Ingested $linesIngested lines!")
    }
    // val csvActor = system.actorOf(CsvSourceActor.props(fileReader, dataset, version, coordinatorActor))
    // actorAsk(csvActor, RowSource.Start, 61 minutes) {
    //   case RowSource.SetupError(err) =>
    //     println(s"ERROR: $err")
    //     exitCode = 2
    //   case RowSource.AllDone =>
    coordinatorActor ! CoordinatorActor.Flush(dataset, version)
    println("Waiting for scheduler/memTable to finish flushing everything")
    Thread sleep 5000
    // Busy-wait until no datasets remain in the flushing state.
    while (memTable.flushingDatasets.nonEmpty) {
      print(".")
      Thread sleep 1000
    }
    println("ingestCSV finished!")
    exitCode = 0
    // }
  }
  /**
   * Writes `columnNames` of `dataset`/`version` as CSV to `outFile` (or
   * stdout). Only the header row is emitted today — the row-extraction path
   * is disabled pending a rewrite (see commented code).
   */
  def exportCSV(dataset: String, version: Int,
                columnNames: Seq[String], limit: Int,
                outFile: Option[String]) {
    val columns = parse(metaStore.getSchema(dataset, version)) { schema =>
      columnNames.map(schema)
    }
    val outStream = outFile.map(new java.io.FileOutputStream(_)).getOrElse(System.out)
    val writer = new CSVWriter(new java.io.OutputStreamWriter(outStream))
    writer.writeNext(columnNames.toArray, false)
    println("Sorry, exportCSV functionality is temporarily unavailable")
    // val extractor = new ReadRowExtractor(datastore, partObj, version, columns, ArrayStringRowSetter)(system)
    // val row = Array.fill(columns.length)("")
    // var rowNo = 0
    // while (rowNo < limit && extractor.hasNext) {
    //   extractor.next(row)
    //   writer.writeNext(row, false)
    //   rowNo += 1
    // }
    // writer.flush()
  }
}
| YanjieGao/FiloDB | cli/src/main/scala/filodb.cli/CsvImportExport.scala | Scala | apache-2.0 | 4,904 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, CodeGenerator, ExprCode}
import org.apache.spark.sql.types.{DataType, LongType}
/**
* Returns monotonically increasing 64-bit integers.
*
* The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
* The current implementation puts the partition ID in the upper 31 bits, and the lower 33 bits
* represent the record number within each partition. The assumption is that the data frame has
* less than 1 billion partitions, and each partition has less than 8 billion records.
*
* Since this expression is stateful, it cannot be a case object.
*/
@ExpressionDescription(
usage = """
_FUNC_() - Returns monotonically increasing 64-bit integers. The generated ID is guaranteed
to be monotonically increasing and unique, but not consecutive. The current implementation
puts the partition ID in the upper 31 bits, and the lower 33 bits represent the record number
within each partition. The assumption is that the data frame has less than 1 billion
partitions, and each partition has less than 8 billion records.
""")
case class MonotonicallyIncreasingID() extends LeafExpression with Stateful {
  /**
   * Record ID within each partition. By being transient, count's value is reset to 0 every time
   * we serialize and deserialize and initialize it.
   */
  @transient private[this] var count: Long = _
  // Partition index pre-shifted into the upper 31 bits; added to each count.
  @transient private[this] var partitionMask: Long = _
  override protected def initializeInternal(partitionIndex: Int): Unit = {
    count = 0L
    partitionMask = partitionIndex.toLong << 33
  }
  // The generated id is always defined, regardless of input.
  override def nullable: Boolean = false
  override def dataType: DataType = LongType
  // Interpreted path: emit mask+count, then advance the per-partition counter.
  override protected def evalInternal(input: InternalRow): Long = {
    val currentCount = count
    count += 1
    partitionMask + currentCount
  }
  // Codegen path: mirrors evalInternal with a mutable count field and an
  // immutable per-partition mask, both (re)set in partition initialization.
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val countTerm = ctx.addMutableState(CodeGenerator.JAVA_LONG, "count")
    val partitionMaskTerm = "partitionMask"
    ctx.addImmutableStateIfNotExists(CodeGenerator.JAVA_LONG, partitionMaskTerm)
    ctx.addPartitionInitializationStatement(s"$countTerm = 0L;")
    ctx.addPartitionInitializationStatement(s"$partitionMaskTerm = ((long) partitionIndex) << 33;")
    ev.copy(code = s"""
      final ${CodeGenerator.javaType(dataType)} ${ev.value} = $partitionMaskTerm + $countTerm;
      $countTerm++;""", isNull = "false")
  }
  override def prettyName: String = "monotonically_increasing_id"
  override def sql: String = s"$prettyName()"
  // Stateful expressions must hand out fresh instances so counters don't leak.
  override def freshCopy(): MonotonicallyIncreasingID = MonotonicallyIncreasingID()
}
| brad-kaiser/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/MonotonicallyIncreasingID.scala | Scala | apache-2.0 | 3,597 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.avro.serde
import java.io.{File, FileInputStream, FileOutputStream}
import java.text.SimpleDateFormat
import java.util.UUID
import org.apache.avro.io.DecoderFactory
import org.geotools.filter.identity.FeatureIdImpl
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.avro.FeatureSpecificReader
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.text.WKTUtils
import org.opengis.feature.simple.SimpleFeature
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.mutable.ListBuffer
import scala.util.Random
@RunWith(classOf[JUnitRunner])
class Version3CompatTest extends Specification {
  "Version3 ASF" should {
    // Feature type covering every attribute kind the avro serde must support.
    val schema = "f0:String,f1:Integer,f2:Double,f3:Float,f4:Boolean,f5:UUID,f6:Date,f7:Point:srid=4326,f8:Polygon:srid=4326"
    val sft = SimpleFeatureTypes.createType("test", schema)
    // Builds `numFeatures` version-2 avro simple features with deterministic
    // (seed 0) random values, so each run writes the same byte stream.
    def createV2Features(numFeatures : Int): Seq[Version2ASF] = {
      val r = new Random()
      r.setSeed(0)
      (0 until numFeatures).map { i =>
        val fid = new FeatureIdImpl(r.nextString(5))
        val sf = new Version2ASF(fid, sft)
        sf.setAttribute("f0", r.nextString(10).asInstanceOf[Object])
        sf.setAttribute("f1", r.nextInt().asInstanceOf[Object])
        sf.setAttribute("f2", r.nextDouble().asInstanceOf[Object])
        sf.setAttribute("f3", r.nextFloat().asInstanceOf[Object])
        sf.setAttribute("f4", r.nextBoolean().asInstanceOf[Object])
        sf.setAttribute("f5", UUID.fromString("12345678-1234-1234-1234-123456789012"))
        sf.setAttribute("f6", new SimpleDateFormat("yyyyMMdd").parse("20140102"))
        sf.setAttribute("f7", WKTUtils.read("POINT(45.0 49.0)"))
        sf.setAttribute("f8", WKTUtils.read("POLYGON((-80 30,-80 23,-70 30,-70 40,-80 40,-80 30))"))
        sf
      }
    }
    "read version 2 avro" >> {
      // Round-trip: write v2 features to a temp file, read them back with the
      // current (v3) FeatureSpecificReader, and compare attributes pairwise.
      val v2Features = createV2Features(20)
      val f = File.createTempFile("avro", ".tmp")
      f.deleteOnExit()
      val fos = new FileOutputStream(f)
      v2Features.foreach { sf => sf.write(fos) }
      fos.close()
      val fis = new FileInputStream(f)
      val decoder = DecoderFactory.get().binaryDecoder(fis, null)
      val fsr = FeatureSpecificReader(sft)
      val sfList = new ListBuffer[SimpleFeature]()
      // do/while: read at least one record, then continue until end of stream.
      do {
        sfList += fsr.read(null, decoder)
      } while(!decoder.isEnd)
      fis.close()
      sfList.zip(v2Features).forall { case (v3, v2) =>
        v3.getAttributes mustEqual v2.getAttributes
      }
      f.delete
    }
  }
}
| locationtech/geomesa | geomesa-features/geomesa-feature-avro/src/test/scala/org/locationtech/geomesa/features/avro/serde/Version3CompatTest.scala | Scala | apache-2.0 | 3,077 |
package com.oradian.autofuture
import scala.meta._
import parsers.Parsed.{Success, Error}
import tokens.Token
import scala.annotation.tailrec
/**
 * Rewrites deprecated Scala "procedure syntax" (`def foo { ... }` / `def foo`)
 * into explicit `: Unit =` / `: Unit` form, using scala.meta to parse and
 * locate the exact source offsets where the type ascription must be injected.
 */
object ProcedureSyntaxConversion extends AutoFuture {
  // offset: source position where ": Unit" is inserted;
  // defn: true for concrete definitions (needs "= " as well), false for
  // abstract declarations.
  private[this] case class Injection(offset: Int, defn: Boolean)
  /* We want to track the last token after names and parameters to place the explicit type definition.
   * If we just use the `rt.pos` we may end up with suboptimal placement such as:
   *
   *   trait Foo {
   *     def x
   *   }
   *
   * becoming
   *
   *   trait Foo {
   *     def x
   *   : Unit}
   *
   * Also, we cannot just trim whitespaces to the right because of potential line comments.
   * See the accompanying spec for more details */
  private[this] def locateInjection(tree: Tree, rt: Type.Name, defn: Boolean): Injection = {
    val tokens = tree.tokens
    val lastOffset = rt.pos.start.offset
    // Walks the def's tokens up to the synthetic return type position,
    // remembering the last non-whitespace/non-comment token seen; the
    // injection goes right before that token's offset.
    @tailrec
    def findOffset(index: Int, lastSignificant: Int): Int = {
      val token = tokens(index)
      val tokenOffset = token.pos.start.offset
      if (tokenOffset >= lastOffset) {
        // NOTE(review): if the very first token is already at/after lastOffset,
        // lastSignificant is still -1 and tokens(-1) would throw here before
        // the assert below can fire — confirm this cannot occur in practice.
        tokens(lastSignificant).pos.start.offset
      } else {
        token match {
          case _: Token.Space
             | _: Token.Tab
             | _: Token.LF
             | _: Token.CR
             | _: Token.FF
             | _: Token.Comment =>
            findOffset(index + 1, lastSignificant)
          case _ => findOffset(index + 1, index + 1)
        }
      }
    }
    val offset = findOffset(0, -1)
    assert(offset > -1, "Could not locate last offset!")
    Injection(offset, defn)
  }
  /**
   * Parses `source`, finds every procedure-syntax def/decl (scala.meta models
   * these as a `Unit` return type with no backing tokens), and splices in the
   * explicit ascription. Returns Noop when nothing needs changing, Error on a
   * parse failure.
   */
  def apply(source: String): AutoFuture.Result = {
    source.parse[Source] match {
      case Success(parsed) =>
        val injections = parsed collect {
          /* Transform abstract definitions, match if return type tokens are empty (procedure syntax) */
          case tree @ Decl.Def(_, _, _, _, rt @ Type.Name("Unit")) if rt.tokens.isEmpty =>
            locateInjection(tree, rt, false)
          /* Transform definitions, match if return type is defined and tokens are empty (procedure syntax) */
          case tree @ Defn.Def(_, _, _, _, Some(rt @ Type.Name("Unit")), _) if rt.tokens.isEmpty =>
            locateInjection(tree, rt, true)
        }
        if (injections.isEmpty) {
          AutoFuture.Result.Noop
        } else {
          // Rebuild the source left-to-right, emitting the original text up to
          // each injection point followed by the ascription for that def kind.
          val sb = new StringBuilder
          var last = 0
          for (injection <- injections.sortBy(_.offset)) {
            val before = source.substring(last, injection.offset)
            (sb ++= before
               ++= (if (injection.defn) ": Unit =" else ": Unit"))
            last = injection.offset
          }
          sb ++= source.substring(last)
          AutoFuture.Result.Success(sb.toString)
        }
      case Error(pos, message, details) =>
        AutoFuture.Result.Error(s"At line ${pos.start.line}: $message")
    }
  }
}
| oradian/sbt-auto-future | core/src/main/scala/com/oradian/autofuture/ProcedureSyntaxConversion.scala | Scala | mit | 2,882 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.codegen.sort
import org.apache.flink.table.api.TableConfig
import org.apache.flink.table.data.binary.BinaryRowData
import org.apache.flink.table.data.{DecimalData, TimestampData}
import org.apache.flink.table.planner.codegen.CodeGenUtils.{ROW_DATA, SEGMENT, newName}
import org.apache.flink.table.planner.codegen.Indenter.toISC
import org.apache.flink.table.planner.plan.nodes.exec.spec.SortSpec
import org.apache.flink.table.runtime.generated.{GeneratedNormalizedKeyComputer, GeneratedRecordComparator, NormalizedKeyComputer, RecordComparator}
import org.apache.flink.table.runtime.operators.sort.SortUtil
import org.apache.flink.table.runtime.types.PlannerTypeUtils
import org.apache.flink.table.types.logical.LogicalTypeRoot._
import org.apache.flink.table.types.logical.{DecimalType, LogicalType, RowType, TimestampType}
import scala.collection.mutable
/**
* A code generator for generating [[NormalizedKeyComputer]] and [[RecordComparator]].
*
* @param tableConfig config of the planner.
* @param input input type.
* @param sortSpec sort specification.
*/
class SortCodeGenerator(
tableConfig: TableConfig,
val input: RowType,
val sortSpec: SortSpec) {
  // Upper bound (bytes) on the materialized normalized key.
  private val MAX_NORMALIZED_KEY_LEN = 16
  private val SORT_UTIL = classOf[SortUtil].getCanonicalName
  /** Chunks for long, int, short, byte */
  private val POSSIBLE_CHUNK_SIZES = Array(8, 4, 2, 1)
  /** For get${operator} set${operator} of [[org.apache.flink.core.memory.MemorySegment]] */
  private val BYTE_OPERATOR_MAPPING = Map(8 -> "Long", 4 -> "Int", 2 -> "Short", 1 -> "")
  /** For primitive define */
  private val BYTE_DEFINE_MAPPING = Map(8 -> "long", 4 -> "int", 2 -> "short", 1 -> "byte")
  /** For Class of primitive type */
  private val BYTE_CLASS_MAPPING = Map(8 -> "Long", 4 -> "Integer", 2 -> "Short", 1 -> "Byte")
  /**
   * Normalized meta, computed once from the sort spec:
   *  - nullAwareNormalizedKeyLen: total key bytes incl. one null-marker byte per field
   *  - normalizedKeyNum: how many leading sort fields participate
   *  - invertNormalizedKey: whether the first field (hence the key) is descending
   *  - normalizedKeyLengths: per-field byte lengths (null byte included)
   * The scan stops at the first field that cannot be normalized or whose sort
   * direction disagrees with the first field's.
   */
  val (nullAwareNormalizedKeyLen, normalizedKeyNum, invertNormalizedKey, normalizedKeyLengths) = {
    var keyLen = 0
    var keyNum = 0
    var inverted = false
    val keyLengths = new mutable.ArrayBuffer[Int]
    var break = false
    var i = 0
    while (i < sortSpec.getFieldSize && !break) {
      val fieldSpec = sortSpec.getFieldSpec(i)
      val t = input.getTypeAt(fieldSpec.getFieldIndex)
      if (supportNormalizedKey(t)) {
        val invert = !fieldSpec.getIsAscendingOrder
        if (i == 0) {
          // the first comparator decides whether we need to invert the key direction
          inverted = invert
        }
        if (invert != inverted) {
          // if a successor does not agree on the inversion direction,
          // it cannot be part of the normalized key
          break = true
        } else {
          keyNum += 1
          // Need add null aware 1 byte
          val len = safeAddLength(getNormalizeKeyLen(t), 1)
          if (len < 0) {
            throw new RuntimeException(
              s"$t specifies an invalid length for the normalized key: " + len)
          }
          keyLengths += len
          keyLen = safeAddLength(keyLen, len)
          if (keyLen == Integer.MAX_VALUE) {
            // saturated: no room for further fields
            break = true
          }
        }
      } else {
        break = true
      }
      i += 1
    }
    (keyLen, keyNum, inverted, keyLengths)
  }
def getKeyFullyDeterminesAndBytes: (Boolean, Int) = {
if (nullAwareNormalizedKeyLen > 18) {
// The maximum setting is 18 because want to put two null aware long as much as possible.
// Anyway, we can't fit it, so align the most efficient 8 bytes.
(false, Math.min(MAX_NORMALIZED_KEY_LEN, 8 * normalizedKeyNum))
} else {
(normalizedKeyNum == sortSpec.getFieldSize, nullAwareNormalizedKeyLen)
}
}
/**
* Generates a [[NormalizedKeyComputer]] that can be passed to a Java compiler.
*
* @param name Class name of the function.
* Does not need to be unique but has to be a valid Java class identifier.
* @return A GeneratedNormalizedKeyComputer
*/
  def generateNormalizedKeyComputer(name: String): GeneratedNormalizedKeyComputer = {
    val className = newName(name)
    val (keyFullyDetermines, numKeyBytes) = getKeyFullyDeterminesAndBytes
    // Code fragments for the four operations, all over the same chunk layout.
    val putKeys = generatePutNormalizedKeys(numKeyBytes)
    val chunks = calculateChunks(numKeyBytes)
    val reverseKeys = generateReverseNormalizedKeys(chunks)
    val compareKeys = generateCompareNormalizedKeys(chunks)
    val swapKeys = generateSwapNormalizedKeys(chunks)
    val baseClass = classOf[NormalizedKeyComputer]
    // Assemble the Java source of the generated computer class.
    val code =
      j"""
      public class $className implements ${baseClass.getCanonicalName} {
        public $className(Object[] references) {
          // useless
        }
        @Override
        public void putKey($ROW_DATA record, $SEGMENT target, int offset) {
          ${putKeys.mkString}
          ${reverseKeys.mkString}
        }
        @Override
        public int compareKey($SEGMENT segI, int offsetI, $SEGMENT segJ, int offsetJ) {
          ${compareKeys.mkString}
        }
        @Override
        public void swapKey($SEGMENT segI, int offsetI, $SEGMENT segJ, int offsetJ) {
          ${swapKeys.mkString}
        }
        @Override
        public int getNumKeyBytes() {
          return $numKeyBytes;
        }
        @Override
        public boolean isKeyFullyDetermines() {
          return $keyFullyDetermines;
        }
        @Override
        public boolean invertKey() {
          return $invertNormalizedKey;
        }
      }
    """.stripMargin
    new GeneratedNormalizedKeyComputer(className, code, tableConfig.getConfiguration)
  }
  // Emits, per participating sort field, the Java snippet that writes that
  // field's null-aware normalized key bytes into the target segment.
  def generatePutNormalizedKeys(numKeyBytes: Int): mutable.ArrayBuffer[String] = {
    /* Example generated code, for int:
    if (record.isNullAt(0)) {
      org.apache.flink.table.data.binary.BinaryRowDataUtil.minNormalizedKey(target, offset+0, 5);
    } else {
      target.put(offset+0, (byte) 1);
      org.apache.flink.table.data.binary.BinaryRowDataUtil.putIntNormalizedKey(
        record.getInt(0), target, offset+1, 4);
    }
     */
    val putKeys = new mutable.ArrayBuffer[String]
    var bytesLeft = numKeyBytes
    var currentOffset = 0
    var keyIndex = 0
    while (bytesLeft > 0 && keyIndex < normalizedKeyNum) {
      var len = normalizedKeyLengths(keyIndex)
      val fieldSpec = sortSpec.getFieldSpec(keyIndex)
      val index = fieldSpec.getFieldIndex
      // Where nulls sort (min vs max key) depends on order + null placement.
      val nullIsMaxValue = fieldSpec.getIsAscendingOrder == fieldSpec.getNullIsLast
      // The last field may get only the remaining (truncated) bytes.
      len = if (bytesLeft >= len) len else bytesLeft
      val t = input.getTypeAt(fieldSpec.getFieldIndex)
      val prefix = prefixGetFromBinaryRow(t)
      val putCode = t match {
        case _ if getNormalizeKeyLen(t) != Int.MaxValue =>
          // Fixed-length type: write a 1-byte null marker, then len-1 key bytes.
          val get = getter(t, index)
          s"""
             |target.put(offset+$currentOffset, (byte) 1);
             |$SORT_UTIL.put${prefixPutNormalizedKey(t)}NormalizedKey(
             |  record.$get, target, offset+${currentOffset + 1}, ${len - 1});
             |
           """.stripMargin
        case _ =>
          // It is StringData/byte[].., we can omit the null aware byte(zero is the smallest),
          // because there is no other field behind, and is not keyFullyDetermines.
          s"""
             |$SORT_UTIL.put${prefixPutNormalizedKey(t)}NormalizedKey(
             |  record.get$prefix($index), target, offset+$currentOffset, $len);
             |""".stripMargin
      }
      val nullCode = if (nullIsMaxValue) {
        s"$SORT_UTIL.maxNormalizedKey(target, offset+$currentOffset, $len);"
      } else {
        s"$SORT_UTIL.minNormalizedKey(target, offset+$currentOffset, $len);"
      }
      val code =
        s"""
           |if (record.isNullAt($index)) {
           |  $nullCode
           |} else {
           |  $putCode
           |}
           |""".stripMargin
      putKeys += code
      bytesLeft -= len
      currentOffset += len
      keyIndex += 1
    }
    putKeys
  }
/**
* In order to better performance and not use MemorySegment's compare() and swap(),
* we CodeGen more efficient chunk method.
*/
def calculateChunks(numKeyBytes: Int): Array[Int] = {
/* Example chunks, for int:
calculateChunks(5) = Array(4, 1)
*/
val chunks = new mutable.ArrayBuffer[Int]
var i = 0
var remainBytes = numKeyBytes
while (remainBytes > 0) {
val bytes = POSSIBLE_CHUNK_SIZES(i)
if (bytes <= remainBytes) {
chunks += bytes
remainBytes -= bytes
} else {
i += 1
}
}
chunks.toArray
}
  /**
   * Because we put normalizedKeys in big endian way, if we are the little endian,
   * we need to reverse these data with chunks for comparation.
   */
  def generateReverseNormalizedKeys(chunks: Array[Int]): mutable.ArrayBuffer[String] = {
    /* Example generated code, for int:
    target.putInt(offset+0, Integer.reverseBytes(target.getInt(offset+0)));
    //byte don't need reverse.
    */
    val reverseKeys = new mutable.ArrayBuffer[String]
    // If it is big endian, it would be better, no reverse.
    if (BinaryRowData.LITTLE_ENDIAN) {
      var reverseOffset = 0
      for (chunk <- chunks) {
        val operator = BYTE_OPERATOR_MAPPING(chunk)
        val className = BYTE_CLASS_MAPPING(chunk)
        // Single bytes have no byte order, so only multi-byte chunks reverse.
        if (chunk != 1) {
          val reverseKey =
            s"""
               |target.put$operator(offset+$reverseOffset,
               |  $className.reverseBytes(target.get$operator(offset+$reverseOffset)));
            """.stripMargin
          reverseKeys += reverseKey
        }
        reverseOffset += chunk
      }
    }
    reverseKeys
  }
  /**
   * Compare bytes with chunks, treating each chunk as unsigned: the
   * `(a < b) ^ (a < 0) ^ (b < 0)` trick flips the signed comparison
   * whenever exactly one operand has its high bit set.
   */
  def generateCompareNormalizedKeys(chunks: Array[Int]): mutable.ArrayBuffer[String] = {
    /* Example generated code, for int:
    int l_0_1 = segI.getInt(offsetI+0);
    int l_0_2 = segJ.getInt(offsetJ+0);
    if (l_0_1 != l_0_2) {
      return ((l_0_1 < l_0_2) ^ (l_0_1 < 0) ^
        (l_0_2 < 0) ? -1 : 1);
    }
    byte l_1_1 = segI.get(offsetI+4);
    byte l_1_2 = segJ.get(offsetJ+4);
    if (l_1_1 != l_1_2) {
      return ((l_1_1 < l_1_2) ^ (l_1_1 < 0) ^
        (l_1_2 < 0) ? -1 : 1);
    }
    return 0;
     */
    val compareKeys = new mutable.ArrayBuffer[String]
    var compareOffset = 0
    for (i <- chunks.indices) {
      val chunk = chunks(i)
      val operator = BYTE_OPERATOR_MAPPING(chunk)
      val define = BYTE_DEFINE_MAPPING(chunk)
      val compareKey =
        s"""
           |$define l_${i}_1 = segI.get$operator(offsetI+$compareOffset);
           |$define l_${i}_2 = segJ.get$operator(offsetJ+$compareOffset);
           |if (l_${i}_1 != l_${i}_2) {
           |  return ((l_${i}_1 < l_${i}_2) ^ (l_${i}_1 < 0) ^
           |    (l_${i}_2 < 0) ? -1 : 1);
           |}
        """.stripMargin
      compareKeys += compareKey
      compareOffset += chunk
    }
    // All chunks equal => keys equal.
    compareKeys += "return 0;"
    compareKeys
  }
/**
* Swap bytes with chunks.
*/
def generateSwapNormalizedKeys(chunks: Array[Int]): mutable.ArrayBuffer[String] = {
/* Example generated code, for int:
int temp0 = segI.getInt(offsetI+0);
segI.putInt(offsetI+0, segJ.getInt(offsetJ+0));
segJ.putInt(offsetJ+0, temp0);
byte temp1 = segI.get(offsetI+4);
segI.put(offsetI+4, segJ.get(offsetJ+4));
segJ.put(offsetJ+4, temp1);
*/
val swapKeys = new mutable.ArrayBuffer[String]
var swapOffset = 0
for (i <- chunks.indices) {
val chunk = chunks(i)
val operator = BYTE_OPERATOR_MAPPING(chunk)
val define = BYTE_DEFINE_MAPPING(chunk)
// Classic three-statement swap through a per-chunk temp variable.
val swapKey =
s"""
|$define temp$i = segI.get$operator(offsetI+$swapOffset);
|segI.put$operator(offsetI+$swapOffset, segJ.get$operator(offsetJ+$swapOffset));
|segJ.put$operator(offsetJ+$swapOffset, temp$i);
""".stripMargin
swapKeys += swapKey
swapOffset += chunk
}
swapKeys
}
/**
* Generates a [[RecordComparator]] that can be passed to a Java compiler.
*
* @param name Class name of the function.
* Does not need to be unique but has to be a valid Java class identifier.
* @return A GeneratedRecordComparator
*/
def generateRecordComparator(name: String): GeneratedRecordComparator = {
// Delegate to ComparatorCodeGenerator using this generator's table config,
// input row type and sort specification.
ComparatorCodeGenerator.gen(
tableConfig,
name,
input,
sortSpec)
}
/**
 * Builds the accessor call (e.g. "getInt(0)") used to read field `index`
 * of type `t` from a binary row. Precision-carrying types need extra arguments.
 */
def getter(t: LogicalType, index: Int): String = {
  val accessor = prefixGetFromBinaryRow(t)
  t match {
    case decimal: DecimalType =>
      s"get$accessor($index, ${decimal.getPrecision}, ${decimal.getScale})"
    case timestamp: TimestampType =>
      s"get$accessor($index, ${timestamp.getPrecision})"
    case _ =>
      s"get$accessor($index)"
  }
}
/**
* For put${prefix}NormalizedKey() and compare$prefix() of [[SortUtil]].
*/
// The put/compare method prefixes coincide with the getter prefixes, so delegate.
def prefixPutNormalizedKey(t: LogicalType): String = prefixGetFromBinaryRow(t)
/**
* For get$prefix() of [[org.apache.flink.table.dataformat.TypeGetterSetters]].
*/
def prefixGetFromBinaryRow(t: LogicalType): String = t.getTypeRoot match {
case INTEGER => "Int"
case BIGINT => "Long"
case SMALLINT => "Short"
case TINYINT => "Byte"
case FLOAT => "Float"
case DOUBLE => "Double"
case BOOLEAN => "Boolean"
case VARCHAR | CHAR => "String"
case VARBINARY | BINARY => "Binary"
case DECIMAL => "Decimal"
// Date/time/interval types are stored with primitive representations.
case DATE => "Int"
case TIME_WITHOUT_TIME_ZONE => "Int"
case TIMESTAMP_WITHOUT_TIME_ZONE => "Timestamp"
case INTERVAL_YEAR_MONTH => "Int"
case INTERVAL_DAY_TIME => "Long"
// NOTE(review): returns null for type roots without an accessor; callers are
// presumably expected to have filtered unsupported types first — confirm.
case _ => null
}
/**
* Preventing overflow.
*/
def safeAddLength(i: Int, j: Int): Int = {
  // Detect Int overflow of i + j: with non-negative lengths a wrapped sum is
  // smaller than one of its operands, in which case clamp to Integer.MAX_VALUE.
  val total = i + j
  if (total >= i && total >= j) {
    total
  } else {
    Integer.MAX_VALUE
  }
}
def supportNormalizedKey(t: LogicalType): Boolean = {
t.getTypeRoot match {
// All primitive types can serve as fixed-width normalized keys.
case _ if PlannerTypeUtils.isPrimitive(t) => true
case VARCHAR | CHAR | VARBINARY | BINARY |
DATE | TIME_WITHOUT_TIME_ZONE => true
case TIMESTAMP_WITHOUT_TIME_ZONE =>
// TODO: support normalize key for non-compact timestamp
TimestampData.isCompact(t.asInstanceOf[TimestampType].getPrecision)
// Decimals only when they fit the compact (long-backed) representation.
case DECIMAL => DecimalData.isCompact(t.asInstanceOf[DecimalType].getPrecision)
case _ => false
}
}
def getNormalizeKeyLen(t: LogicalType): Int = {
t.getTypeRoot match {
case BOOLEAN => 1
case TINYINT => 1
case SMALLINT => 2
case INTEGER => 4
case FLOAT => 4
case DOUBLE => 8
case BIGINT => 8
case TIMESTAMP_WITHOUT_TIME_ZONE
if TimestampData.isCompact(t.asInstanceOf[TimestampType].getPrecision) => 8
case INTERVAL_YEAR_MONTH => 4
case INTERVAL_DAY_TIME => 8
case DATE => 4
case TIME_WITHOUT_TIME_ZONE => 4
case DECIMAL if DecimalData.isCompact(t.asInstanceOf[DecimalType].getPrecision) => 8
// Variable-length types: "unbounded", callers cap the usable prefix length.
case VARCHAR | CHAR | VARBINARY | BINARY => Int.MaxValue
// NOTE(review): no default case — an unsupported type root (or a non-compact
// timestamp/decimal) throws scala.MatchError. Presumably callers filter with
// supportNormalizedKey first; confirm before relying on this.
}
}
}
| xccui/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/sort/SortCodeGenerator.scala | Scala | apache-2.0 | 15,811 |
/*
* Part of NDLA article-api
* Copyright (C) 2019 NDLA
*
* See LICENSE
*/
package db.migration
import no.ndla.articleapi.model.domain.ArticleMetaDescription
import org.flywaydb.core.api.migration.{BaseJavaMigration, Context}
import org.json4s.Extraction.decompose
import org.json4s.native.JsonMethods.{compact, parse, render}
import org.json4s.{Formats, JArray, JValue}
import org.postgresql.util.PGobject
import scalikejdbc.{DB, DBSession, _}
/**
 * Repeatable Flyway migration that replaces the placeholder meta description
 * "Beskrivelse mangler" ("description missing") with an empty string in all
 * stored article documents.
 */
class R__RemoveDummyMetaDescription extends BaseJavaMigration {
implicit val formats: Formats = org.json4s.DefaultFormats
// Flyway re-runs a repeatable migration when its checksum changes.
override def getChecksum: Integer = 1 // Change this to something else if you want to repeat migration
override def migrate(context: Context): Unit = {
val db = DB(context.getConnection)
// Flyway owns the connection; don't let scalikejdbc close it.
db.autoClose(false)
db.withinTx { implicit session =>
migrateArticles
}
}
// Walks all non-null documents in pages of 1000 rows and rewrites each in place.
def migrateArticles(implicit session: DBSession): Unit = {
val count = countAllArticles.get
var numPagesLeft = (count / 1000) + 1
var offset = 0L
while (numPagesLeft > 0) {
allArticles(offset * 1000).map {
case (id, document) => updateArticle(convertArticle(document), id)
}
numPagesLeft -= 1
offset += 1
}
}
// Number of rows this migration will touch (rows with a non-null document).
def countAllArticles(implicit session: DBSession): Option[Long] = {
sql"""select count(*) from contentdata where document is not NULL"""
.map(rs => rs.long("count"))
.single()
}
// One page (up to 1000 rows, ordered by id) of (id, document) pairs starting at `offset`.
def allArticles(offset: Long)(implicit session: DBSession): Seq[(Long, String)] = {
sql"""
select id, document from contentdata
where document is not null
order by id limit 1000 offset $offset
"""
.map(rs => {
(rs.long("id"), rs.string("document"))
})
.list()
}
// Blanks the dummy content while preserving the language tag; other entries pass through.
def convertMetaDescription(metaDescription: List[ArticleMetaDescription]): JValue = {
val newMetaDescriptions = metaDescription.map(meta => {
meta.content match {
case "Beskrivelse mangler" => ArticleMetaDescription("", meta.language)
case _ => ArticleMetaDescription(meta.content, meta.language)
}
})
decompose(newMetaDescriptions)
}
// Rewrites only the "metaDescription" field of the JSON document; all other fields are untouched.
def convertArticle(document: String): String = {
val oldArticle = parse(document)
val newArticle = oldArticle.mapField {
case ("metaDescription", metaDescription: JArray) =>
"metaDescription" -> convertMetaDescription(metaDescription.extract[List[ArticleMetaDescription]])
case x => x
}
compact(render(newArticle))
}
// Writes the converted document back as jsonb for the given row id.
private def updateArticle(document: String, id: Long)(implicit session: DBSession): Int = {
val dataObject = new PGobject()
dataObject.setType("jsonb")
dataObject.setValue(document)
sql"update contentdata set document = $dataObject where id = $id"
.update()
}
}
| NDLANO/article-api | src/main/scala/db/migration/R__RemoveDummyMetaDescription.scala | Scala | gpl-3.0 | 2,813 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.io._
import java.lang.{Byte => JByte}
import java.lang.management.{LockInfo, ManagementFactory, MonitorInfo, ThreadInfo}
import java.lang.reflect.InvocationTargetException
import java.math.{MathContext, RoundingMode}
import java.net._
import java.nio.ByteBuffer
import java.nio.channels.{Channels, FileChannel, WritableByteChannel}
import java.nio.charset.StandardCharsets
import java.nio.file.Files
import java.security.SecureRandom
import java.util.{Locale, Properties, Random, UUID}
import java.util.concurrent._
import java.util.concurrent.TimeUnit.NANOSECONDS
import java.util.zip.GZIPInputStream
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}
import scala.util.control.{ControlThrowable, NonFatal}
import scala.util.matching.Regex
import _root_.io.netty.channel.unix.Errors.NativeIoException
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.google.common.hash.HashCodes
import com.google.common.io.{ByteStreams, Files => GFiles}
import com.google.common.net.InetAddresses
import org.apache.commons.lang3.SystemUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FileUtil, Path}
import org.apache.hadoop.io.compress.{CompressionCodecFactory, SplittableCompressionCodec}
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.eclipse.jetty.util.MultiException
import org.slf4j.Logger
import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.{config, Logging}
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Streaming._
import org.apache.spark.internal.config.Tests.IS_TESTING
import org.apache.spark.internal.config.UI._
import org.apache.spark.internal.config.Worker._
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.serializer.{DeserializationStream, SerializationStream, SerializerInstance}
import org.apache.spark.status.api.v1.{StackTrace, ThreadStackTrace}
import org.apache.spark.util.io.ChunkedByteBufferOutputStream
/**
 * CallSite represents a place in user code. It can have a short and a long form.
 *
 * @param shortForm concise description of the call site
 * @param longForm more detailed description of the call site
 */
private[spark] case class CallSite(shortForm: String, longForm: String)
private[spark] object CallSite {
// Property keys under which the short/long forms are stored.
val SHORT_FORM = "callSite.short"
val LONG_FORM = "callSite.long"
// Sentinel for "no call site information available".
val empty = CallSite("", "")
}
/**
* Various utility methods used by Spark.
*/
private[spark] object Utils extends Logging {
val random = new Random()
// Handler used for uncaught exceptions (see SparkUncaughtExceptionHandler).
private val sparkUncaughtExceptionHandler = new SparkUncaughtExceptionHandler
// Caches the first result of getLocalDir(conf); set lazily in fetchFile.
@volatile private var cachedLocalDir: String = ""
/**
* Define a default value for driver memory here since this value is referenced across the code
* base and nearly all files already use Utils.scala
*/
val DEFAULT_DRIVER_MEM_MB = JavaUtils.DEFAULT_DRIVER_MEM_MB.toInt
// Upper bound on retries when creating a uniquely-named directory.
private val MAX_DIR_CREATION_ATTEMPTS: Int = 10
// Cached local root dirs for this JVM; null until first computed.
@volatile private var localRootDirs: Array[String] = null
/** Scheme used for files that are locally available on worker nodes in the cluster. */
val LOCAL_SCHEME = "local"
// Matches JVM system-property arguments of the form -Dkey=value.
private val PATTERN_FOR_COMMAND_LINE_ARG = "-D(.+?)=(.+)".r
/** Serialize an object using Java serialization */
def serialize[T](o: T): Array[Byte] = {
  // Java-serialize the object into an in-memory buffer and return its bytes.
  val buffer = new ByteArrayOutputStream()
  val objectOut = new ObjectOutputStream(buffer)
  objectOut.writeObject(o)
  objectOut.close()
  buffer.toByteArray
}
/** Deserialize an object using Java serialization */
def deserialize[T](bytes: Array[Byte]): T = {
  // Rehydrate a Java-serialized object from the given byte array using the
  // default class resolution of ObjectInputStream.
  val objectIn = new ObjectInputStream(new ByteArrayInputStream(bytes))
  objectIn.readObject.asInstanceOf[T]
}
/** Deserialize an object using Java serialization and the given ClassLoader */
def deserialize[T](bytes: Array[Byte], loader: ClassLoader): T = {
  // Same as deserialize(bytes), but resolves classes through the supplied
  // loader instead of ObjectInputStream's default resolution.
  val byteIn = new ByteArrayInputStream(bytes)
  val objectIn = new ObjectInputStream(byteIn) {
    override def resolveClass(desc: ObjectStreamClass): Class[_] = {
      // scalastyle:off classforname
      Class.forName(desc.getName, false, loader)
      // scalastyle:on classforname
    }
  }
  objectIn.readObject.asInstanceOf[T]
}
/** Deserialize a Long value (used for [[org.apache.spark.api.python.PythonPartitioner]]) */
def deserializeLongValue(bytes: Array[Byte]) : Long = {
  // Note: we assume that we are given a Long value encoded in network (big-endian) byte order
  // Fold the eight bytes most-significant-first: shift the accumulator left one
  // byte, then OR in the next byte treated as unsigned.
  var acc = 0L
  var idx = 0
  while (idx < 8) {
    acc = (acc << 8) | (bytes(idx) & 0xFFL)
    idx += 1
  }
  acc
}
/** Serialize via nested stream using specific serializer */
def serializeViaNestedStream(os: OutputStream, ser: SerializerInstance)(
f: SerializationStream => Unit): Unit = {
// Wrap `os` in a forwarding OutputStream that does not override close(), so
// closing the serialization stream below never closes the caller's stream.
val osWrapper = ser.serializeStream(new OutputStream {
override def write(b: Int): Unit = os.write(b)
override def write(b: Array[Byte], off: Int, len: Int): Unit = os.write(b, off, len)
})
try {
f(osWrapper)
} finally {
osWrapper.close()
}
}
/** Deserialize via nested stream using specific serializer */
def deserializeViaNestedStream(is: InputStream, ser: SerializerInstance)(
f: DeserializationStream => Unit): Unit = {
// Wrap `is` in a forwarding InputStream that does not override close(), so
// closing the deserialization stream below never closes the caller's stream.
val isWrapper = ser.deserializeStream(new InputStream {
override def read(): Int = is.read()
override def read(b: Array[Byte], off: Int, len: Int): Int = is.read(b, off, len)
})
try {
f(isWrapper)
} finally {
isWrapper.close()
}
}
/**
* Get the ClassLoader which loaded Spark (i.e. the loader of this Utils class).
*/
def getSparkClassLoader: ClassLoader = getClass.getClassLoader
/**
* Get the Context ClassLoader on this thread or, if not present, the ClassLoader that
* loaded Spark.
*
* This should be used whenever passing a ClassLoader to Class.ForName or finding the currently
* active loader when setting up ClassLoader delegation chains.
*/
def getContextOrSparkClassLoader: ClassLoader =
// Fall back to Spark's own loader when the thread has no context loader set.
Option(Thread.currentThread().getContextClassLoader).getOrElse(getSparkClassLoader)
/** Determines whether the provided class is loadable in the current thread. */
def classIsLoadable(clazz: String): Boolean = {
// Attempt the load without initializing the class; success means it resolves.
Try { classForName(clazz, initialize = false) }.isSuccess
}
// scalastyle:off classforname
/**
* Preferred alternative to Class.forName(className), as well as
* Class.forName(className, initialize, loader) with current thread's ContextClassLoader.
*/
def classForName[C](
className: String,
initialize: Boolean = true,
noSparkClassLoader: Boolean = false): Class[C] = {
// `noSparkClassLoader = true` skips the Spark-classloader fallback and uses
// only the thread's context class loader.
if (!noSparkClassLoader) {
Class.forName(className, initialize, getContextOrSparkClassLoader).asInstanceOf[Class[C]]
} else {
Class.forName(className, initialize, Thread.currentThread().getContextClassLoader).
asInstanceOf[Class[C]]
}
// scalastyle:on classforname
}
/**
* Run a segment of code using a different context class loader in the current thread
*/
def withContextClassLoader[T](ctxClassLoader: ClassLoader)(fn: => T): T = {
  // Temporarily install `ctxClassLoader` as this thread's context loader and
  // restore the previous loader afterwards, even if `fn` throws.
  val thread = Thread.currentThread()
  val previous = thread.getContextClassLoader()
  try {
    thread.setContextClassLoader(ctxClassLoader)
    fn
  } finally {
    thread.setContextClassLoader(previous)
  }
}
/**
* Primitive often used when writing [[java.nio.ByteBuffer]] to [[java.io.DataOutput]]
*/
def writeByteBuffer(bb: ByteBuffer, out: DataOutput): Unit = {
  // Fast path: heap buffers expose their backing array, so write it directly.
  if (bb.hasArray) {
    out.write(bb.array(), bb.arrayOffset() + bb.position(), bb.remaining())
  } else {
    // Direct/read-only buffers: copy the remaining bytes out, then restore the
    // position so the caller observes an unchanged buffer.
    val savedPosition = bb.position()
    val copied = new Array[Byte](bb.remaining())
    bb.get(copied)
    out.write(copied)
    bb.position(savedPosition)
  }
}
/**
* Primitive often used when writing [[java.nio.ByteBuffer]] to [[java.io.OutputStream]]
*/
def writeByteBuffer(bb: ByteBuffer, out: OutputStream): Unit = {
  // Fast path: heap buffers expose their backing array, so write it directly.
  if (bb.hasArray) {
    out.write(bb.array(), bb.arrayOffset() + bb.position(), bb.remaining())
  } else {
    // Direct/read-only buffers: copy the remaining bytes out, then restore the
    // position so the caller observes an unchanged buffer.
    val savedPosition = bb.position()
    val copied = new Array[Byte](bb.remaining())
    bb.get(copied)
    out.write(copied)
    bb.position(savedPosition)
  }
}
/**
* JDK equivalent of `chmod 700 file`.
*
* @param file the file whose permissions will be modified
* @return true if the permissions were successfully changed, false otherwise.
*/
def chmod700(file: File): Boolean = {
// For each permission: first clear it for everyone, then grant it to the
// owner only — the JDK equivalent of `chmod 700` (rwx owner, none for others).
// The && chain short-circuits on the first failed step.
file.setReadable(false, false) &&
file.setReadable(true, true) &&
file.setWritable(false, false) &&
file.setWritable(true, true) &&
file.setExecutable(false, false) &&
file.setExecutable(true, true)
}
/**
* Create a directory given the abstract pathname
* @return true, if the directory is successfully created; otherwise, return false.
*/
def createDirectory(dir: File): Boolean = {
  try {
    // This sporadically fails - not sure why ... !dir.exists() && !dir.mkdirs()
    // So attempting to create and then check if directory was created or not.
    dir.mkdirs()
    val created = dir.isDirectory
    if (!created) {
      logError(s"Failed to create directory $dir")
    }
    created
  } catch {
    case e: Exception =>
      logError(s"Failed to create directory $dir", e)
      false
  }
}
/**
* Create a directory inside the given parent directory. The directory is guaranteed to be
* newly created, and is not marked for automatic deletion.
*/
def createDirectory(root: String, namePrefix: String = "spark"): File = {
  // Retry with fresh random names until a brand-new directory is created,
  // giving up after MAX_DIR_CREATION_ATTEMPTS tries.
  var created: File = null
  var attempt = 0
  while (created == null) {
    attempt += 1
    if (attempt > MAX_DIR_CREATION_ATTEMPTS) {
      throw new IOException("Failed to create a temp directory (under " + root + ") after " +
        MAX_DIR_CREATION_ATTEMPTS + " attempts!")
    }
    try {
      val candidate = new File(root, namePrefix + "-" + UUID.randomUUID.toString)
      // Only accept a directory this call itself created.
      if (!candidate.exists() && candidate.mkdirs()) {
        created = candidate
      }
    } catch {
      case _: SecurityException => // retry with a new name
    }
  }
  created.getCanonicalFile
}
/**
* Create a temporary directory inside the given parent directory. The directory will be
* automatically deleted when the VM shuts down.
*/
def createTempDir(
root: String = System.getProperty("java.io.tmpdir"),
namePrefix: String = "spark"): File = {
val dir = createDirectory(root, namePrefix)
// Register the directory for recursive deletion at JVM shutdown.
ShutdownHookManager.registerShutdownDeleteDir(dir)
dir
}
/**
* Copy all data from an InputStream to an OutputStream. NIO way of file stream to file stream
* copying is disabled by default unless explicitly set transferToEnabled as true,
* the parameter transferToEnabled should be configured by spark.file.transferTo = [true|false].
*/
def copyStream(
in: InputStream,
out: OutputStream,
closeStreams: Boolean = false,
transferToEnabled: Boolean = false): Long = {
tryWithSafeFinally {
if (in.isInstanceOf[FileInputStream] && out.isInstanceOf[FileOutputStream]
&& transferToEnabled) {
// When both streams are File stream, use transferTo to improve copy performance.
val inChannel = in.asInstanceOf[FileInputStream].getChannel()
val outChannel = out.asInstanceOf[FileOutputStream].getChannel()
val size = inChannel.size()
copyFileStreamNIO(inChannel, outChannel, 0, size)
size
} else {
// Fallback: manual copy through an 8 KiB buffer, counting bytes moved.
var count = 0L
val buf = new Array[Byte](8192)
var n = 0
while (n != -1) {
n = in.read(buf)
if (n != -1) {
out.write(buf, 0, n)
count += n
}
}
count
}
} {
// Finally block: when requested, close both streams, closing `out` even
// if `in.close()` throws.
if (closeStreams) {
try {
in.close()
} finally {
out.close()
}
}
}
}
/**
* Copy the first `maxSize` bytes of data from the InputStream to an in-memory
* buffer, primarily to check for corruption.
*
* This returns a new InputStream which contains the same data as the original input stream.
* It may be entirely on in-memory buffer, or it may be a combination of in-memory data, and then
* continue to read from the original stream. The only real use of this is if the original input
* stream will potentially detect corruption while the data is being read (eg. from compression).
* This allows for an eager check of corruption in the first maxSize bytes of data.
*
* @return An InputStream which includes all data from the original stream (combining buffered
* data and remaining data in the original stream)
*/
def copyStreamUpTo(in: InputStream, maxSize: Long): InputStream = {
var count = 0L
val out = new ChunkedByteBufferOutputStream(64 * 1024, ByteBuffer.allocate)
// Eagerly read up to maxSize bytes so any corruption in that prefix (e.g.
// from a compression stream) surfaces here rather than later.
val fullyCopied = tryWithSafeFinally {
val bufSize = Math.min(8192L, maxSize)
val buf = new Array[Byte](bufSize.toInt)
var n = 0
while (n != -1 && count < maxSize) {
n = in.read(buf, 0, Math.min(maxSize - count, bufSize).toInt)
if (n != -1) {
out.write(buf, 0, n)
count += n
}
}
// True iff the input ended before maxSize bytes were read.
count < maxSize
} {
try {
// Only close `in` when it was fully consumed; otherwise the returned
// SequenceInputStream keeps reading from it.
if (count < maxSize) {
in.close()
}
} finally {
out.close()
}
}
if (fullyCopied) {
// Entire stream fit in memory: serve it purely from the buffer.
out.toChunkedByteBuffer.toInputStream(dispose = true)
} else {
// Buffered prefix followed by the remainder of the original stream.
new SequenceInputStream( out.toChunkedByteBuffer.toInputStream(dispose = true), in)
}
}
def copyFileStreamNIO(
input: FileChannel,
output: WritableByteChannel,
startPosition: Long,
bytesToCopy: Long): Unit = {
// Remember a FileChannel target's starting position so we can verify below
// that transferTo really advanced it (see the kernel-bug note further down).
val outputInitialState = output match {
case outputFileChannel: FileChannel =>
Some((outputFileChannel.position(), outputFileChannel))
case _ => None
}
var count = 0L
// In case transferTo method transferred less data than we have required.
while (count < bytesToCopy) {
count += input.transferTo(count + startPosition, bytesToCopy - count, output)
}
assert(count == bytesToCopy,
s"request to copy $bytesToCopy bytes, but actually copied $count bytes.")
// Check the position after transferTo loop to see if it is in the right position and
// give user information if not.
// Position will not be increased to the expected length after calling transferTo in
// kernel version 2.6.32, this issue can be seen in
// https://bugs.openjdk.java.net/browse/JDK-7052359
// This will lead to stream corruption issue when using sort-based shuffle (SPARK-3948).
outputInitialState.foreach { case (initialPos, outputFileChannel) =>
val finalPos = outputFileChannel.position()
val expectedPos = initialPos + bytesToCopy
assert(finalPos == expectedPos,
s"""
|Current position $finalPos do not equal to expected position $expectedPos
|after transferTo, please check your kernel version to see if it is 2.6.32,
|this is a kernel bug which will lead to unexpected behavior when using transferTo.
|You can set spark.file.transferTo = false to disable this NIO feature.
""".stripMargin)
}
}
/**
* A file name may contain some invalid URI characters, such as " ". This method will convert the
* file name to a raw path accepted by `java.net.URI(String)`.
*
* Note: the file name must not contain "/" or "\\"
*/
def encodeFileNameToURIRawPath(fileName: String): String = {
  // BUGFIX: the guard previously checked fileName.contains("\\\\") — i.e. a
  // TWO-character backslash sequence — so a name containing a single backslash
  // slipped through, contradicting the documented contract that the name must
  // not contain "/" or "\". Reject any single backslash instead.
  require(!fileName.contains("/") && !fileName.contains("\\"))
  // `file` and `localhost` are not used. Just to prevent URI from parsing `fileName` as
  // scheme or host. The prefix "/" is required because URI doesn't accept a relative path.
  // We should remove it after we get the raw path.
  new URI("file", null, "localhost", -1, "/" + fileName, null, null).getRawPath.substring(1)
}
/**
* Get the file name from uri's raw path and decode it. If the raw path of uri ends with "/",
* return the name before the last "/".
*/
def decodeFileNameInURI(uri: URI): String = {
  // Take the last raw-path segment (for a trailing "/", split drops the empty
  // tail, yielding the name before it) and percent-decode it by round-tripping
  // through a throwaway file URI.
  val lastSegment = uri.getRawPath.split("/").last
  new URI("file:///" + lastSegment).getPath.substring(1)
}
/**
* Download a file or directory to target directory. Supports fetching the file in a variety of
* ways, including HTTP, Hadoop-compatible filesystems, and files on a standard filesystem, based
* on the URL parameter. Fetching directories is only supported from Hadoop-compatible
* filesystems.
*
* If `useCache` is true, first attempts to fetch the file to a local cache that's shared
* across executors running the same application. `useCache` is used mainly for
* the executors, and not in local mode.
*
* Throws SparkException if the target file already exists and has different contents than
* the requested file.
*/
def fetchFile(
url: String,
targetDir: File,
conf: SparkConf,
securityMgr: SecurityManager,
hadoopConf: Configuration,
timestamp: Long,
useCache: Boolean): File = {
val fileName = decodeFileNameInURI(new URI(url))
val targetFile = new File(targetDir, fileName)
val fetchCacheEnabled = conf.getBoolean("spark.files.useFetchCache", defaultValue = true)
if (useCache && fetchCacheEnabled) {
// Cache entries are keyed by url hash + timestamp, so a re-uploaded file
// (new timestamp) gets a fresh cache slot.
val cachedFileName = s"${url.hashCode}${timestamp}_cache"
val lockFileName = s"${url.hashCode}${timestamp}_lock"
// Set the cachedLocalDir for the first time and re-use it later
// (double-checked locking on the volatile cachedLocalDir field).
if (cachedLocalDir.isEmpty) {
this.synchronized {
if (cachedLocalDir.isEmpty) {
cachedLocalDir = getLocalDir(conf)
}
}
}
val localDir = new File(cachedLocalDir)
val lockFile = new File(localDir, lockFileName)
val lockFileChannel = new RandomAccessFile(lockFile, "rw").getChannel()
// Only one executor entry.
// The FileLock is only used to control synchronization for executors download file,
// it's always safe regardless of lock type (mandatory or advisory).
val lock = lockFileChannel.lock()
val cachedFile = new File(localDir, cachedFileName)
try {
// Under the file lock: only the first executor actually downloads.
if (!cachedFile.exists()) {
doFetchFile(url, localDir, cachedFileName, conf, securityMgr, hadoopConf)
}
} finally {
lock.release()
lockFileChannel.close()
}
// Copy from the shared cache into this executor's target directory.
copyFile(
url,
cachedFile,
targetFile,
conf.getBoolean("spark.files.overwrite", false)
)
} else {
doFetchFile(url, targetDir, fileName, conf, securityMgr, hadoopConf)
}
// Decompress the file if it's a .tar or .tar.gz
if (fileName.endsWith(".tar.gz") || fileName.endsWith(".tgz")) {
logInfo("Untarring " + fileName)
executeAndGetOutput(Seq("tar", "-xzf", fileName), targetDir)
} else if (fileName.endsWith(".tar")) {
logInfo("Untarring " + fileName)
executeAndGetOutput(Seq("tar", "-xf", fileName), targetDir)
}
// Make the file executable - That's necessary for scripts
FileUtil.chmod(targetFile.getAbsolutePath, "a+x")
// Windows does not grant read permission by default to non-admin users
// Add read permission to owner explicitly
if (isWindows) {
FileUtil.chmod(targetFile.getAbsolutePath, "u+r")
}
targetFile
}
/** Records the duration of running `body`. */
def timeTakenMs[T](body: => T): (T, Long) = {
  // Evaluate `body` once and return its result together with the wall-clock
  // duration in milliseconds, clamped at zero.
  val begin = System.nanoTime()
  val value = body
  val elapsedMs = NANOSECONDS.toMillis(System.nanoTime() - begin)
  (value, math.max(elapsedMs, 0))
}
/**
* Download `in` to `tempFile`, then move it to `destFile`.
*
* If `destFile` already exists:
* - no-op if its contents equal those of `sourceFile`,
* - throw an exception if `fileOverwrite` is false,
* - attempt to overwrite it otherwise.
*
* @param url URL that `sourceFile` originated from, for logging purposes.
* @param in InputStream to download.
* @param destFile File path to move `tempFile` to.
* @param fileOverwrite Whether to delete/overwrite an existing `destFile` that does not match
* `sourceFile`
*/
private def downloadFile(
url: String,
in: InputStream,
destFile: File,
fileOverwrite: Boolean): Unit = {
// Download into a temp file in the destination's own directory first, so a
// partially-written destFile is never observed and the final move stays on
// the same filesystem.
val tempFile = File.createTempFile("fetchFileTemp", null,
new File(destFile.getParentFile.getAbsolutePath))
logInfo(s"Fetching $url to $tempFile")
try {
val out = new FileOutputStream(tempFile)
Utils.copyStream(in, out, closeStreams = true)
// copyFile moves tempFile into place (removeSourceFile = true).
copyFile(url, tempFile, destFile, fileOverwrite, removeSourceFile = true)
} finally {
// Catch-all for the couple of cases where for some reason we didn't move `tempFile` to
// `destFile`.
if (tempFile.exists()) {
tempFile.delete()
}
}
}
/**
* Copy `sourceFile` to `destFile`.
*
* If `destFile` already exists:
* - no-op if its contents equal those of `sourceFile`,
* - throw an exception if `fileOverwrite` is false,
* - attempt to overwrite it otherwise.
*
* @param url URL that `sourceFile` originated from, for logging purposes.
* @param sourceFile File path to copy/move from.
* @param destFile File path to copy/move to.
* @param fileOverwrite Whether to delete/overwrite an existing `destFile` that does not match
* `sourceFile`
* @param removeSourceFile Whether to remove `sourceFile` after / as part of moving/copying it to
* `destFile`.
*/
private def copyFile(
url: String,
sourceFile: File,
destFile: File,
fileOverwrite: Boolean,
removeSourceFile: Boolean = false): Unit = {
if (destFile.exists) {
// Destination exists: decide between no-op, overwrite, or failure.
if (!filesEqualRecursive(sourceFile, destFile)) {
if (fileOverwrite) {
logInfo(
s"File $destFile exists and does not match contents of $url, replacing it with $url"
)
if (!destFile.delete()) {
throw new SparkException(
"Failed to delete %s while attempting to overwrite it with %s".format(
destFile.getAbsolutePath,
sourceFile.getAbsolutePath
)
)
}
} else {
throw new SparkException(
s"File $destFile exists and does not match contents of $url")
}
} else {
// Do nothing if the file contents are the same, i.e. this file has been copied
// previously.
logInfo(
"%s has been previously copied to %s".format(
sourceFile.getAbsolutePath,
destFile.getAbsolutePath
)
)
return
}
}
// The file does not exist in the target directory. Copy or move it there.
if (removeSourceFile) {
Files.move(sourceFile.toPath, destFile.toPath)
} else {
logInfo(s"Copying ${sourceFile.getAbsolutePath} to ${destFile.getAbsolutePath}")
copyRecursive(sourceFile, destFile)
}
}
private def filesEqualRecursive(file1: File, file2: File): Boolean = {
  if (file1.isDirectory && file2.isDirectory) {
    // Directories match when they have the same entry count and the
    // name-sorted entries match pairwise (recursively).
    val children1 = file1.listFiles()
    val children2 = file2.listFiles()
    children1.length == children2.length &&
      children1.sortBy(_.getName).zip(children2.sortBy(_.getName)).forall {
        case (c1, c2) => filesEqualRecursive(c1, c2)
      }
  } else if (file1.isFile && file2.isFile) {
    // Regular files: byte-for-byte comparison via Guava.
    GFiles.equal(file1, file2)
  } else {
    // Mixed kinds (or a missing entry) never match.
    false
  }
}
private def copyRecursive(source: File, dest: File): Unit = {
  if (source.isDirectory) {
    // Create the target directory, then copy each child beneath it.
    if (!dest.mkdir()) {
      throw new IOException(s"Failed to create directory ${dest.getPath}")
    }
    source.listFiles().foreach { child =>
      copyRecursive(child, new File(dest, child.getName))
    }
  } else {
    // Plain file: delegate to NIO copy (fails if dest already exists).
    Files.copy(source.toPath, dest.toPath)
  }
}
/**
* Download a file or directory to target directory. Supports fetching the file in a variety of
* ways, including HTTP, Hadoop-compatible filesystems, and files on a standard filesystem, based
* on the URL parameter. Fetching directories is only supported from Hadoop-compatible
* filesystems.
*
* Throws SparkException if the target file already exists and has different contents than
* the requested file.
*/
def doFetchFile(
url: String,
targetDir: File,
filename: String,
conf: SparkConf,
securityMgr: SecurityManager,
hadoopConf: Configuration): File = {
val targetFile = new File(targetDir, filename)
val uri = new URI(url)
val fileOverwrite = conf.getBoolean("spark.files.overwrite", defaultValue = false)
// Dispatch on the URI scheme; a missing scheme is treated as a local file.
Option(uri.getScheme).getOrElse("file") match {
case "spark" =>
// Fetch over Spark's own RPC channel; requires a live SparkEnv.
if (SparkEnv.get == null) {
throw new IllegalStateException(
"Cannot retrieve files with 'spark' scheme without an active SparkEnv.")
}
val source = SparkEnv.get.rpcEnv.openChannel(url)
val is = Channels.newInputStream(source)
downloadFile(url, is, targetFile, fileOverwrite)
case "http" | "https" | "ftp" =>
// Plain URLConnection download with connect/read timeouts applied.
val uc = new URL(url).openConnection()
val timeoutMs =
conf.getTimeAsSeconds("spark.files.fetchTimeout", "60s").toInt * 1000
uc.setConnectTimeout(timeoutMs)
uc.setReadTimeout(timeoutMs)
uc.connect()
val in = uc.getInputStream()
downloadFile(url, in, targetFile, fileOverwrite)
case "file" =>
// In the case of a local file, copy the local file to the target directory.
// Note the difference between uri vs url.
val sourceFile = if (uri.isAbsolute) new File(uri) else new File(url)
copyFile(url, sourceFile, targetFile, fileOverwrite)
case _ =>
// Anything else is assumed to be a Hadoop-compatible filesystem path.
val fs = getHadoopFileSystem(uri, hadoopConf)
val path = new Path(uri)
fetchHcfsFile(path, targetDir, fs, conf, hadoopConf, fileOverwrite,
filename = Some(filename))
}
targetFile
}
/**
* Fetch a file or directory from a Hadoop-compatible filesystem.
*
* Visible for testing
*/
private[spark] def fetchHcfsFile(
path: Path,
targetDir: File,
fs: FileSystem,
conf: SparkConf,
hadoopConf: Configuration,
fileOverwrite: Boolean,
filename: Option[String] = None): Unit = {
if (!targetDir.exists() && !targetDir.mkdir()) {
throw new IOException(s"Failed to create directory ${targetDir.getPath}")
}
// `filename` overrides the source name only at the top level of the recursion.
val dest = new File(targetDir, filename.getOrElse(path.getName))
if (fs.isFile(path)) {
val in = fs.open(path)
try {
downloadFile(path.toString, in, dest, fileOverwrite)
} finally {
in.close()
}
} else {
// Directory: recurse into each child, mirroring the tree under `dest`.
fs.listStatus(path).foreach { fileStatus =>
fetchHcfsFile(fileStatus.getPath(), dest, fs, conf, hadoopConf, fileOverwrite)
}
}
}
/**
* Validate that a given URI is actually a valid URL as well.
* @param uri The URI to validate
*/
@throws[MalformedURLException]("when the URI is an invalid URL")
def validateURL(uri: URI): Unit = {
  // Only schemes that will actually be fetched as URLs need to convert
  // cleanly; every other scheme is accepted as-is.
  val scheme = Option(uri.getScheme).getOrElse("file")
  if (scheme == "http" || scheme == "https" || scheme == "ftp") {
    try {
      uri.toURL
    } catch {
      case e: MalformedURLException =>
        // Re-wrap with a message naming the offending URI, keeping the cause.
        val wrapped = new MalformedURLException(s"URI (${uri.toString}) is not a valid URL.")
        wrapped.initCause(e)
        throw wrapped
    }
  }
}
/**
 * Get the path of a temporary directory. Spark's local directories can be configured through
 * multiple settings, which are used with the following precedence:
 *
 * - If called from inside of a YARN container, this will return a directory chosen by YARN.
 * - If the SPARK_LOCAL_DIRS environment variable is set, this will return a directory from it.
 * - Otherwise, if the spark.local.dir is set, this will return a directory from it.
 * - Otherwise, this will return java.io.tmpdir.
 *
 * Some of these configuration options might be lists of multiple paths, but this method will
 * always return a single directory. The return directory is chosen randomly from the array
 * of directories it gets from getOrCreateLocalRootDirs.
 */
def getLocalDir(conf: SparkConf): String = {
  val rootDirs = getOrCreateLocalRootDirs(conf)
  if (rootDirs.nonEmpty) {
    // Pick one of the usable roots at random to spread load across disks.
    rootDirs(scala.util.Random.nextInt(rootDirs.length))
  } else {
    val configuredLocalDirs = getConfiguredLocalDirs(conf)
    throw new IOException(
      s"Failed to get a temp directory under [${configuredLocalDirs.mkString(",")}].")
  }
}
/**
 * Whether this JVM is executing inside a YARN container.
 * CONTAINER_ID is exported by YARN for every container it launches, so its
 * presence is a reliable indicator.
 */
private[spark] def isRunningInYarnContainer(conf: SparkConf): Boolean = {
  Option(conf.getenv("CONTAINER_ID")).isDefined
}
/**
 * Gets or creates the directories listed in spark.local.dir or SPARK_LOCAL_DIRS,
 * and returns only the directories that exist / could be created.
 *
 * If no directories could be created, this will return an empty list.
 *
 * This method will cache the local directories for the application when it's first invoked.
 * So calling it multiple times with a different configuration will always return the same
 * set of directories.
 */
private[spark] def getOrCreateLocalRootDirs(conf: SparkConf): Array[String] = {
  if (localRootDirs == null) {
    // Double-checked locking: only the first caller pays for directory creation.
    // The second null check guards against a race between the unsynchronized
    // check above and acquiring the lock.
    this.synchronized {
      if (localRootDirs == null) {
        localRootDirs = getOrCreateLocalRootDirsImpl(conf)
      }
    }
  }
  localRootDirs
}
/**
 * Return the configured local directories where Spark can write files. This
 * method does not create any directories on its own, it only encapsulates the
 * logic of locating the local directories according to deployment mode.
 *
 * Precedence (first match wins): YARN container dirs, SPARK_EXECUTOR_DIRS,
 * SPARK_LOCAL_DIRS, MESOS_SANDBOX (only when the external shuffle service is
 * disabled), then spark.local.dir falling back to java.io.tmpdir.
 */
def getConfiguredLocalDirs(conf: SparkConf): Array[String] = {
  val shuffleServiceEnabled = conf.get(config.SHUFFLE_SERVICE_ENABLED)
  if (isRunningInYarnContainer(conf)) {
    // If we are in yarn mode, systems can have different disk layouts so we must set it
    // to what Yarn on this system said was available. Note this assumes that Yarn has
    // created the directories already, and that they are secured so that only the
    // user has access to them.
    randomizeInPlace(getYarnLocalDirs(conf).split(","))
  } else if (conf.getenv("SPARK_EXECUTOR_DIRS") != null) {
    // Note: SPARK_EXECUTOR_DIRS uses the platform path separator, unlike the others.
    conf.getenv("SPARK_EXECUTOR_DIRS").split(File.pathSeparator)
  } else if (conf.getenv("SPARK_LOCAL_DIRS") != null) {
    conf.getenv("SPARK_LOCAL_DIRS").split(",")
  } else if (conf.getenv("MESOS_SANDBOX") != null && !shuffleServiceEnabled) {
    // Mesos already creates a directory per Mesos task. Spark should use that directory
    // instead so all temporary files are automatically cleaned up when the Mesos task ends.
    // Note that we don't want this if the shuffle service is enabled because we want to
    // continue to serve shuffle files after the executors that wrote them have already exited.
    Array(conf.getenv("MESOS_SANDBOX"))
  } else {
    if (conf.getenv("MESOS_SANDBOX") != null && shuffleServiceEnabled) {
      logInfo("MESOS_SANDBOX available but not using provided Mesos sandbox because " +
        s"${config.SHUFFLE_SERVICE_ENABLED.key} is enabled.")
    }
    // In non-Yarn mode (or for the driver in yarn-client mode), we cannot trust the user
    // configuration to point to a secure directory. So create a subdirectory with restricted
    // permissions under each listed directory.
    conf.get("spark.local.dir", System.getProperty("java.io.tmpdir")).split(",")
  }
}
/**
 * Resolve the configured local dirs into usable, freshly-created temp roots.
 * Entries that cannot be created are logged and dropped; URI-looking entries
 * trigger a warning since local dirs are expected to be plain paths.
 */
private def getOrCreateLocalRootDirsImpl(conf: SparkConf): Array[String] = {
  val candidates = getConfiguredLocalDirs(conf)
  // Best-effort URI detection: an entry that parses with a non-null scheme is suspicious.
  val suspicious = candidates.filter { dir =>
    Try(new URI(dir).getScheme != null).getOrElse(false)
  }
  if (suspicious.nonEmpty) {
    logWarning(
      "The configured local directories are not expected to be URIs; however, got suspicious " +
      s"values [${suspicious.mkString(", ")}]. Please check your configured local directories.")
  }
  candidates.flatMap { root =>
    try {
      val rootDir = new File(root)
      if (!rootDir.exists && !rootDir.mkdirs()) {
        logError(s"Failed to create dir in $root. Ignoring this directory.")
        None
      } else {
        // Create a private (0700) scratch subdirectory under each usable root.
        val scratchDir = createTempDir(root)
        chmod700(scratchDir)
        Some(scratchDir.getAbsolutePath)
      }
    } catch {
      case e: IOException =>
        logError(s"Failed to create local root dir in $root. Ignoring this directory.")
        None
    }
  }
}
/** Get the Yarn approved local directories. */
private def getYarnLocalDirs(conf: SparkConf): String = {
  // LOCAL_DIRS is exported by the YARN NodeManager for each container; a
  // missing or empty value means we cannot safely pick a scratch location.
  Option(conf.getenv("LOCAL_DIRS")).filter(_.nonEmpty).getOrElse {
    throw new Exception("Yarn Local dirs can't be empty")
  }
}
/** Used by unit tests. Do not call from other places. */
private[spark] def clearLocalRootDirs(): Unit = {
  // Reset the cached roots so the next getOrCreateLocalRootDirs call
  // re-evaluates the configuration (tests mutate SparkConf between runs).
  localRootDirs = null
}
/**
 * Shuffle the elements of a collection into a random order, returning the
 * result in a new collection. Unlike scala.util.Random.shuffle, this method
 * uses a local random number generator, avoiding inter-thread contention.
 */
def randomize[T: ClassTag](seq: TraversableOnce[T]): Seq[T] = {
  // Copy first so the caller's collection is never mutated, then shuffle the copy.
  val copied = seq.toArray
  randomizeInPlace(copied)
}
/**
 * Shuffle the elements of an array into a random order, modifying the
 * original array. Returns the original array.
 *
 * Implements the Fisher-Yates shuffle: walk from the last slot down,
 * swapping each slot with a uniformly chosen slot at or before it.
 */
def randomizeInPlace[T](arr: Array[T], rand: Random = new Random): Array[T] = {
  var i = arr.length - 1
  while (i >= 1) {
    val j = rand.nextInt(i + 1)
    val swapped = arr(i)
    arr(i) = arr(j)
    arr(j) = swapped
    i -= 1
  }
  arr
}
/**
 * Get the local host's IP address in dotted-quad format (e.g. 1.2.3.4).
 * Note, this is typically not used from within core spark.
 */
private lazy val localIpAddress: InetAddress = findLocalInetAddress()

// Resolve a non-loopback address for this host. SPARK_LOCAL_IP, when set,
// always wins; otherwise we start from InetAddress.getLocalHost and, if that
// resolves to loopback, scan the network interfaces for a better candidate.
private def findLocalInetAddress(): InetAddress = {
  val defaultIpOverride = System.getenv("SPARK_LOCAL_IP")
  if (defaultIpOverride != null) {
    InetAddress.getByName(defaultIpOverride)
  } else {
    val address = InetAddress.getLocalHost
    if (address.isLoopbackAddress) {
      // Address resolves to something like 127.0.1.1, which happens on Debian; try to find
      // a better address using the local network interfaces
      // getNetworkInterfaces returns ifs in reverse order compared to ifconfig output order
      // on unix-like system. On windows, it returns in index order.
      // It's more proper to pick ip address following system output order.
      val activeNetworkIFs = NetworkInterface.getNetworkInterfaces.asScala.toSeq
      val reOrderedNetworkIFs = if (isWindows) activeNetworkIFs else activeNetworkIFs.reverse
      for (ni <- reOrderedNetworkIFs) {
        // Skip link-local and loopback addresses; prefer IPv4 when available.
        val addresses = ni.getInetAddresses.asScala
          .filterNot(addr => addr.isLinkLocalAddress || addr.isLoopbackAddress).toSeq
        if (addresses.nonEmpty) {
          val addr = addresses.find(_.isInstanceOf[Inet4Address]).getOrElse(addresses.head)
          // because of Inet6Address.toHostName may add interface at the end if it knows about it
          val strippedAddress = InetAddress.getByAddress(addr.getAddress)
          // We've found an address that looks reasonable!
          logWarning("Your hostname, " + InetAddress.getLocalHost.getHostName + " resolves to" +
            " a loopback address: " + address.getHostAddress + "; using " +
            strippedAddress.getHostAddress + " instead (on interface " + ni.getName + ")")
          logWarning("Set SPARK_LOCAL_IP if you need to bind to another address")
          // NOTE: this is a nonlocal return out of the enclosing for-loop closure.
          return strippedAddress
        }
      }
      logWarning("Your hostname, " + InetAddress.getLocalHost.getHostName + " resolves to" +
        " a loopback address: " + address.getHostAddress + ", but we couldn't find any" +
        " external IP address!")
      logWarning("Set SPARK_LOCAL_IP if you need to bind to another address")
    }
    // Fall back to whatever getLocalHost gave us (possibly loopback).
    address
  }
}
// Overrides the hostname this machine reports; seeded from SPARK_LOCAL_HOSTNAME
// at class-load time and optionally replaced via setCustomHostname.
private var customHostname: Option[String] = sys.env.get("SPARK_LOCAL_HOSTNAME")

/**
 * Allow setting a custom host name because when we run on Mesos we need to use the same
 * hostname it reports to the master.
 *
 * @param hostname bare hostname (no ':' / port) — validated by checkHost.
 */
def setCustomHostname(hostname: String) {
  // DEBUG code
  Utils.checkHost(hostname)
  customHostname = Some(hostname)
}
/**
 * Get the local machine's FQDN.
 * The custom hostname, when set, takes precedence over reverse DNS lookup.
 */
def localCanonicalHostName(): String = {
  customHostname match {
    case Some(hostname) => hostname
    case None => localIpAddress.getCanonicalHostName
  }
}
/**
 * Get the local machine's hostname.
 * The custom hostname, when set, takes precedence over the resolved IP address.
 */
def localHostName(): String = {
  customHostname match {
    case Some(hostname) => hostname
    case None => localIpAddress.getHostAddress
  }
}
/**
 * Get the local machine's URI.
 * Uses a URI-safe rendering of the IP (brackets IPv6 literals) unless a
 * custom hostname was configured.
 */
def localHostNameForURI(): String = {
  customHostname match {
    case Some(hostname) => hostname
    case None => InetAddresses.toUriString(localIpAddress)
  }
}
/** Assert that `host` is a bare hostname: non-null and carrying no ':' separator. */
def checkHost(host: String) {
  val isBareHost = host != null && host.indexOf(':') == -1
  assert(isBareHost, s"Expected hostname (not IP) but got $host")
}
/** Assert that `hostPort` is non-null and contains a ':' separating host and port. */
def checkHostPort(hostPort: String) {
  val hasSeparator = hostPort != null && hostPort.indexOf(':') != -1
  assert(hasSeparator,
    s"Expected host and port but got $hostPort")
}
// Typically, this will be of order of number of nodes in cluster
// If not, we should change it to LRUCache or something.
private val hostPortParseResults = new ConcurrentHashMap[String, (String, Int)]()

/**
 * Split a "host:port" string into its host and port components, defaulting the
 * port to 0 when no ':' is present. Results are memoized in
 * `hostPortParseResults`, since distinct host:port strings are expected to be
 * on the order of the number of cluster nodes.
 *
 * NOTE: this splits on the *last* ':' and does not understand bracketed IPv6
 * literals; the port text is parsed with `toInt` unchecked, so a malformed
 * port throws NumberFormatException (and is not cached).
 */
def parseHostPort(hostPort: String): (String, Int) = {
  // computeIfAbsent gives an atomic check-then-insert, replacing the previous
  // inconsistent mix of get/put/putIfAbsent and early returns.
  hostPortParseResults.computeIfAbsent(hostPort,
    new java.util.function.Function[String, (String, Int)] {
      override def apply(hp: String): (String, Int) = {
        val idx = hp.lastIndexOf(':')
        if (idx == -1) {
          // No port present: return the whole string with a 0 port sentinel.
          (hp, 0)
        } else {
          (hp.substring(0, idx).trim(), hp.substring(idx + 1).trim().toInt)
        }
      }
    })
}
/**
 * Return the string to tell how long has passed in milliseconds.
 * @param startTimeNs - a timestamp in nanoseconds returned by `System.nanoTime`.
 */
def getUsedTimeNs(startTimeNs: Long): String = {
  val elapsedMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeNs)
  s"$elapsedMs ms"
}
/**
 * Delete a file or directory and its contents recursively.
 * Don't follow directories if they are symlinks.
 * Throws an exception if deletion is unsuccessful.
 *
 * @param file file or directory to delete; `null` is tolerated and ignored.
 */
def deleteRecursively(file: File): Unit = {
  if (file != null) {
    JavaUtils.deleteRecursively(file)
    // Also drop the path from the shutdown-hook cleanup list so we don't
    // attempt to delete it again at JVM exit.
    ShutdownHookManager.removeShutdownDeleteDir(file)
  }
}
/**
 * Determines if a directory contains any files newer than cutoff seconds.
 *
 * @param dir must be the path to a directory, or IllegalArgumentException is thrown
 * @param cutoff measured in seconds. Returns true if there are any files or directories in the
 *               given directory whose last modified time is later than this many seconds ago
 */
def doesDirectoryContainAnyNewFiles(dir: File, cutoff: Long): Boolean = {
  if (!dir.isDirectory) {
    throw new IllegalArgumentException(s"$dir is not a directory!")
  }
  // listFiles() returns null on an I/O error (e.g. permission problems on the
  // directory); treat that the same as an empty directory rather than NPE-ing.
  val filesAndDirs = Option(dir.listFiles()).getOrElse(Array.empty[File])
  val cutoffTimeInMillis = System.currentTimeMillis - (cutoff * 1000)
  // A fresh direct child short-circuits; otherwise recurse into subdirectories.
  filesAndDirs.exists(_.lastModified() > cutoffTimeInMillis) ||
    filesAndDirs.exists(f => f.isDirectory && doesDirectoryContainAnyNewFiles(f, cutoff))
}
/**
 * Convert a time parameter such as (50s, 100ms, or 250us) to milliseconds for internal use. If
 * no suffix is provided, the passed number is assumed to be in ms.
 *
 * Thin delegation to JavaUtils so Java and Scala code share one parser.
 */
def timeStringAsMs(str: String): Long = {
  JavaUtils.timeStringAsMs(str)
}

/**
 * Convert a time parameter such as (50s, 100ms, or 250us) to seconds for internal use. If
 * no suffix is provided, the passed number is assumed to be in seconds.
 */
def timeStringAsSeconds(str: String): Long = {
  JavaUtils.timeStringAsSec(str)
}

/**
 * Convert a passed byte string (e.g. 50b, 100k, or 250m) to bytes for internal use.
 *
 * If no suffix is provided, the passed number is assumed to be in bytes.
 */
def byteStringAsBytes(str: String): Long = {
  JavaUtils.byteStringAsBytes(str)
}

/**
 * Convert a passed byte string (e.g. 50b, 100k, or 250m) to kibibytes for internal use.
 *
 * If no suffix is provided, the passed number is assumed to be in kibibytes.
 */
def byteStringAsKb(str: String): Long = {
  JavaUtils.byteStringAsKb(str)
}

/**
 * Convert a passed byte string (e.g. 50b, 100k, or 250m) to mebibytes for internal use.
 *
 * If no suffix is provided, the passed number is assumed to be in mebibytes.
 */
def byteStringAsMb(str: String): Long = {
  JavaUtils.byteStringAsMb(str)
}

/**
 * Convert a passed byte string (e.g. 50b, 100k, or 250m, 500g) to gibibytes for internal use.
 *
 * If no suffix is provided, the passed number is assumed to be in gibibytes.
 */
def byteStringAsGb(str: String): Long = {
  JavaUtils.byteStringAsGb(str)
}

/**
 * Convert a Java memory parameter passed to -Xmx (such as 300m or 1g) to a number of mebibytes.
 */
def memoryStringToMb(str: String): Int = {
  // Convert to bytes, rather than directly to MiB, because when no units are specified the unit
  // is assumed to be bytes
  (JavaUtils.byteStringAsBytes(str) / 1024 / 1024).toInt
}
/**
 * Convert a quantity in bytes to a human-readable string such as "4.0 MiB".
 */
def bytesToString(size: Long): String = bytesToString(BigInt(size))

def bytesToString(size: BigInt): String = {
  val EiB = 1L << 60
  val PiB = 1L << 50
  val TiB = 1L << 40
  val GiB = 1L << 30
  val MiB = 1L << 20
  val KiB = 1L << 10
  if (size >= BigInt(1L << 11) * EiB) {
    // Beyond ~2048 EiB: fall back to scientific notation with 3 significant digits.
    BigDecimal(size, new MathContext(3, RoundingMode.HALF_UP)).toString() + " B"
  } else {
    // Pick the largest unit whose scaled value is at least 2, so the printed
    // number never drops below 2.0 (e.g. 1536 bytes renders as "1536.0 B"... no,
    // as "1.5 KiB" only once >= 2 KiB; below that it stays in the smaller unit).
    val unitTable = Seq(EiB -> "EiB", PiB -> "PiB", TiB -> "TiB",
      GiB -> "GiB", MiB -> "MiB", KiB -> "KiB")
    val (value, unit) = unitTable
      .collectFirst { case (bytes, name) if size >= 2 * bytes =>
        (BigDecimal(size) / bytes, name)
      }
      .getOrElse((BigDecimal(size), "B"))
    "%.1f %s".formatLocal(Locale.US, value, unit)
  }
}
/**
 * Returns a human-readable string representing a duration such as "35ms"
 */
def msDurationToString(ms: Long): String = {
  val second = 1000
  val minute = 60 * second
  val hour = 60 * minute
  val locale = Locale.US
  // Render in the largest unit the duration doesn't overflow into.
  if (ms < second) {
    "%d ms".formatLocal(locale, ms)
  } else if (ms < minute) {
    "%.1f s".formatLocal(locale, ms.toFloat / second)
  } else if (ms < hour) {
    "%.1f m".formatLocal(locale, ms.toFloat / minute)
  } else {
    "%.2f h".formatLocal(locale, ms.toFloat / hour)
  }
}
/**
 * Convert a quantity in megabytes to a human-readable string such as "4.0 MiB".
 */
def megabytesToString(megabytes: Long): String = {
  // Scale MiB to bytes, then reuse the byte formatter.
  val bytes = megabytes * 1024L * 1024L
  bytesToString(bytes)
}
/**
 * Execute a command and return the process running the command.
 *
 * @param command program and arguments, as passed to ProcessBuilder.
 * @param workingDir working directory for the child process (defaults to cwd).
 * @param extraEnvironment extra variables merged into the inherited environment.
 * @param redirectStderr when true, stderr is drained by a daemon thread and
 *                       forwarded to logInfo (also prevents the child blocking
 *                       on a full stderr pipe buffer).
 * @return the started Process; the caller owns stdout and waitFor().
 */
def executeCommand(
    command: Seq[String],
    workingDir: File = new File("."),
    extraEnvironment: Map[String, String] = Map.empty,
    redirectStderr: Boolean = true): Process = {
  val builder = new ProcessBuilder(command: _*).directory(workingDir)
  val environment = builder.environment()
  for ((key, value) <- extraEnvironment) {
    environment.put(key, value)
  }
  val process = builder.start()
  if (redirectStderr) {
    val threadName = "redirect stderr for command " + command(0)
    def log(s: String): Unit = logInfo(s)
    processStreamByLine(threadName, process.getErrorStream, log)
  }
  process
}
/**
 * Execute a command and get its output, throwing an exception if it yields a code other than 0.
 *
 * @param command program and arguments to run.
 * @param workingDir working directory for the child process.
 * @param extraEnvironment extra environment variables for the child.
 * @param redirectStderr whether the child's stderr is forwarded to the log.
 * @return everything the process wrote to stdout.
 * @throws SparkException if the process exits with a non-zero code.
 */
def executeAndGetOutput(
    command: Seq[String],
    workingDir: File = new File("."),
    extraEnvironment: Map[String, String] = Map.empty,
    redirectStderr: Boolean = true): String = {
  val process = executeCommand(command, workingDir, extraEnvironment, redirectStderr)
  val output = new StringBuilder
  val threadName = "read stdout for " + command(0)
  def appendToOutput(s: String): Unit = output.append(s).append("\\n")
  // Drain stdout on a separate thread so a full pipe buffer can't block the child.
  val stdoutThread = processStreamByLine(threadName, process.getInputStream, appendToOutput)
  val exitCode = process.waitFor()
  stdoutThread.join() // Wait for it to finish reading output
  if (exitCode != 0) {
    logError(s"Process $command exited with code $exitCode: $output")
    throw new SparkException(s"Process $command exited with code $exitCode")
  }
  output.toString
}
/**
 * Return and start a daemon thread that processes the content of the input stream line by line.
 *
 * @param threadName name for the spawned reader thread (aids debugging).
 * @param inputStream stream to consume; read until exhausted.
 * @param processLine callback invoked once per line, on the reader thread.
 * @return the already-started daemon thread, so callers can join() it.
 */
def processStreamByLine(
    threadName: String,
    inputStream: InputStream,
    processLine: String => Unit): Thread = {
  val reader = new Thread(threadName) {
    override def run() {
      Source.fromInputStream(inputStream).getLines().foreach(processLine)
    }
  }
  // Daemon so an abandoned stream can never keep the JVM alive.
  reader.setDaemon(true)
  reader.start()
  reader
}
/**
 * Execute a block of code that evaluates to Unit, forwarding any uncaught exceptions to the
 * default UncaughtExceptionHandler
 *
 * NOTE: This method is to be called by the spark-started JVM process.
 */
def tryOrExit(block: => Unit) {
  try {
    block
  } catch {
    // ControlThrowable is Scala's control-flow mechanism (e.g. nonlocal return);
    // it must propagate untouched.
    case e: ControlThrowable => throw e
    // Everything else goes to the Spark uncaught-exception handler, which
    // typically terminates this (Spark-owned) JVM.
    case t: Throwable => sparkUncaughtExceptionHandler.uncaughtException(t)
  }
}
/**
 * Execute a block of code that evaluates to Unit, stop SparkContext if there is any uncaught
 * exception
 *
 * NOTE: This method is to be called by the driver-side components to avoid stopping the
 * user-started JVM process completely; in contrast, tryOrExit is to be called in the
 * spark-started JVM process .
 */
def tryOrStopSparkContext(sc: SparkContext)(block: => Unit) {
  try {
    block
  } catch {
    // Let Scala control-flow throwables pass through unmodified.
    case e: ControlThrowable => throw e
    case t: Throwable =>
      val currentThreadName = Thread.currentThread().getName
      if (sc != null) {
        logError(s"uncaught error in thread $currentThreadName, stopping SparkContext", t)
        // Stop asynchronously: stopping in-line could deadlock if this thread
        // is one the SparkContext shutdown needs to join.
        sc.stopInNewThread()
      }
      // Fatal errors (OOM etc.) are re-thrown so the JVM can act on them;
      // non-fatal ones are considered handled by stopping the context.
      if (!NonFatal(t)) {
        logError(s"throw uncaught fatal error in thread $currentThreadName", t)
        throw t
      }
  }
}
/**
 * Execute a block of code that returns a value, re-throwing any non-fatal uncaught
 * exceptions as IOException. This is used when implementing Externalizable and Serializable's
 * read and write methods, since Java's serializer will not report non-IOExceptions properly;
 * see SPARK-4080 for more context.
 */
def tryOrIOException[T](block: => T): T = {
  try {
    block
  } catch {
    case NonFatal(t) =>
      logError("Exception encountered", t)
      t match {
        // IOExceptions already satisfy the serialization contract; rethrow as-is.
        case ioe: IOException => throw ioe
        // Anything else non-fatal gets wrapped so Java serialization reports it.
        case other => throw new IOException(other)
      }
  }
}
/** Executes the given block. Log non-fatal errors if any, and only throw fatal errors */
def tryLogNonFatalError(block: => Unit) {
  try block catch {
    case NonFatal(error) =>
      val threadName = Thread.currentThread().getName
      logError(s"Uncaught exception in thread $threadName", error)
  }
}
/**
 * Execute a block of code, then a finally block, but if exceptions happen in
 * the finally block, do not suppress the original exception.
 *
 * This is primarily an issue with `finally { out.close() }` blocks, where
 * close needs to be called to clean up `out`, but if an exception happened
 * in `out.write`, it's likely `out` may be corrupted and `out.close` will
 * fail as well. This would then suppress the original/likely more meaningful
 * exception from the original `out.write` call.
 */
def tryWithSafeFinally[T](block: => T)(finallyBlock: => Unit): T = {
  // Records the exception thrown by `block` (if any) so that a secondary
  // failure in `finallyBlock` can be attached as suppressed instead of
  // replacing it.
  var originalThrowable: Throwable = null
  try {
    block
  } catch {
    case t: Throwable =>
      // Purposefully not using NonFatal, because even fatal exceptions
      // we don't want to have our finallyBlock suppress
      originalThrowable = t
      throw originalThrowable
  } finally {
    try {
      finallyBlock
    } catch {
      // Only intercept when there IS an original exception and it is a
      // different object; otherwise the finally-block exception propagates.
      case t: Throwable if (originalThrowable != null && originalThrowable != t) =>
        originalThrowable.addSuppressed(t)
        logWarning(s"Suppressing exception in finally: ${t.getMessage}", t)
        throw originalThrowable
    }
  }
}
/**
 * Execute a block of code and call the failure callbacks in the catch block. If exceptions occur
 * in either the catch or the finally block, they are appended to the list of suppressed
 * exceptions in original exception which is then rethrown.
 *
 * This is primarily an issue with `catch { abort() }` or `finally { out.close() }` blocks,
 * where the abort/close needs to be called to clean up `out`, but if an exception happened
 * in `out.write`, it's likely `out` may be corrupted and `abort` or `out.close` will
 * fail as well. This would then suppress the original/likely more meaningful
 * exception from the original `out.write` call.
 */
def tryWithSafeFinallyAndFailureCallbacks[T](block: => T)
  (catchBlock: => Unit = (), finallyBlock: => Unit = ()): T = {
  // Tracks the exception from `block` so failures in catchBlock/finallyBlock
  // become suppressed exceptions on it rather than masking it.
  var originalThrowable: Throwable = null
  try {
    block
  } catch {
    case cause: Throwable =>
      // Purposefully not using NonFatal, because even fatal exceptions
      // we don't want to have our finallyBlock suppress
      originalThrowable = cause
      try {
        logError("Aborting task", originalThrowable)
        // Tell the task framework this task failed before running the
        // caller-supplied cleanup.
        if (TaskContext.get() != null) {
          TaskContext.get().markTaskFailed(originalThrowable)
        }
        catchBlock
      } catch {
        case t: Throwable =>
          // A failure during cleanup is recorded but never replaces the
          // original exception.
          if (originalThrowable != t) {
            originalThrowable.addSuppressed(t)
            logWarning(s"Suppressing exception in catch: ${t.getMessage}", t)
          }
      }
      throw originalThrowable
  } finally {
    try {
      finallyBlock
    } catch {
      // Same suppression policy for the finally block.
      case t: Throwable if (originalThrowable != null && originalThrowable != t) =>
        originalThrowable.addSuppressed(t)
        logWarning(s"Suppressing exception in finally: ${t.getMessage}", t)
        throw originalThrowable
    }
  }
}
// A regular expression to match classes of the internal Spark API's
// that we want to skip when finding the call site of a method.
private val SPARK_CORE_CLASS_REGEX =
  """^org\\.apache\\.spark(\\.api\\.java)?(\\.util)?(\\.rdd)?(\\.broadcast)?\\.[A-Z]""".r
private val SPARK_SQL_CLASS_REGEX = """^org\\.apache\\.spark\\.sql.*""".r

/**
 * Default filtering function for finding call sites using `getCallSite`.
 * Returns true when `className` is part of Spark internals or the Scala
 * standard library — i.e. a stack frame that should be skipped when
 * attributing a call site to user code.
 */
private def sparkInternalExclusionFunction(className: String): Boolean = {
  val SCALA_CORE_CLASS_PREFIX = "scala"
  val isSparkClass = SPARK_CORE_CLASS_REGEX.findFirstIn(className).isDefined ||
    SPARK_SQL_CLASS_REGEX.findFirstIn(className).isDefined
  val isScalaClass = className.startsWith(SCALA_CORE_CLASS_PREFIX)
  // If the class is a Spark internal class or a Scala class, then exclude.
  isSparkClass || isScalaClass
}
/**
 * When called inside a class in the spark package, returns the name of the user code class
 * (outside the spark package) that called into Spark, as well as which Spark method they called.
 * This is used, for example, to tell users where in their code each RDD got created.
 *
 * @param skipClass Function that is used to exclude non-user-code classes.
 */
def getCallSite(skipClass: String => Boolean = sparkInternalExclusionFunction): CallSite = {
  // Keep crawling up the stack trace until we find the first function not inside of the spark
  // package. We track the last (shallowest) contiguous Spark method. This might be an RDD
  // transformation, a SparkContext function (such as parallelize), or anything else that leads
  // to instantiation of an RDD. We also track the first (deepest) user method, file, and line.
  var lastSparkMethod = "<unknown>"
  var firstUserFile = "<unknown>"
  var firstUserLine = 0
  var insideSpark = true
  // Slot 0 is reserved for the last Spark method; it is overwritten below.
  val callStack = new ArrayBuffer[String]() :+ "<unknown>"
  Thread.currentThread.getStackTrace().foreach { ste: StackTraceElement =>
    // When running under some profilers, the current stack trace might contain some bogus
    // frames. This is intended to ensure that we don't crash in these situations by
    // ignoring any frames that we can't examine.
    if (ste != null && ste.getMethodName != null
      && !ste.getMethodName.contains("getStackTrace")) {
      if (insideSpark) {
        if (skipClass(ste.getClassName)) {
          lastSparkMethod = if (ste.getMethodName == "<init>") {
            // Spark method is a constructor; get its class name
            ste.getClassName.substring(ste.getClassName.lastIndexOf('.') + 1)
          } else {
            ste.getMethodName
          }
          callStack(0) = ste.toString // Put last Spark method on top of the stack trace.
        } else {
          // First non-Spark frame: this is the user call site.
          if (ste.getFileName != null) {
            firstUserFile = ste.getFileName
            if (ste.getLineNumber >= 0) {
              firstUserLine = ste.getLineNumber
            }
          }
          callStack += ste.toString
          insideSpark = false
        }
      } else {
        // Already past the Spark frames: just accumulate the rest of the trace.
        callStack += ste.toString
      }
    }
  }
  // Depth of the long-form trace is tunable for debugging.
  val callStackDepth = System.getProperty("spark.callstack.depth", "20").toInt
  val shortForm =
    if (firstUserFile == "HiveSessionImpl.java") {
      // To be more user friendly, show a nicer string for queries submitted from the JDBC
      // server.
      "Spark JDBC Server Query"
    } else {
      s"$lastSparkMethod at $firstUserFile:$firstUserLine"
    }
  val longForm = callStack.take(callStackDepth).mkString("\\n")
  CallSite(shortForm, longForm)
}
// Maps a compressed log file's path to its uncompressed length, so the
// expensive decompression pass in getCompressedFileLength runs at most once
// per file. Built lazily by getCompressedLogFileLengthCache.
private var compressedLogFileLengthCache: LoadingCache[String, java.lang.Long] = null

// Lazily initialize the cache under the object lock; the maximum size is
// read from the Spark configuration on first use.
private def getCompressedLogFileLengthCache(
    sparkConf: SparkConf): LoadingCache[String, java.lang.Long] = this.synchronized {
  if (compressedLogFileLengthCache == null) {
    val compressedLogFileLengthCacheSize = sparkConf.get(
      UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE_CONF)
    compressedLogFileLengthCache = CacheBuilder.newBuilder()
      .maximumSize(compressedLogFileLengthCacheSize)
      .build[String, java.lang.Long](new CacheLoader[String, java.lang.Long]() {
        override def load(path: String): java.lang.Long = {
          Utils.getCompressedFileLength(new File(path))
        }
      })
  }
  compressedLogFileLengthCache
}
/**
 * Return the file length, if the file is compressed it returns the uncompressed file length.
 * It also caches the uncompressed file size to avoid repeated decompression. The cache size is
 * read from workerConf.
 */
def getFileLength(file: File, workConf: SparkConf): Long = {
  if (!file.getName.endsWith(".gz")) {
    file.length
  } else {
    // Compressed log: consult (and populate) the uncompressed-length cache.
    getCompressedLogFileLengthCache(workConf).get(file.getAbsolutePath)
  }
}
/** Return uncompressed file length of a compressed file. */
private def getCompressedFileLength(file: File): Long = {
  var gzInputStream: GZIPInputStream = null
  try {
    // Decompress the whole stream, counting bytes, to learn the uncompressed size.
    gzInputStream = new GZIPInputStream(new FileInputStream(file))
    val bufSize = 1024
    val buf = new Array[Byte](bufSize)
    var total = 0L
    var bytesRead = ByteStreams.read(gzInputStream, buf, 0, bufSize)
    while (bytesRead > 0) {
      total += bytesRead
      bytesRead = ByteStreams.read(gzInputStream, buf, 0, bufSize)
    }
    total
  } catch {
    case e: Throwable =>
      logError(s"Cannot get file length of ${file}", e)
      throw e
  } finally {
    if (gzInputStream != null) {
      gzInputStream.close()
    }
  }
}
/** Return a string containing part of a file from byte 'start' to 'end'. */
def offsetBytes(path: String, length: Long, start: Long, end: Long): String = {
  val file = new File(path)
  // Clamp the requested window to [0, length].
  val effectiveStart = math.max(0, start)
  val effectiveEnd = math.min(length, end)
  val buff = new Array[Byte]((effectiveEnd - effectiveStart).toInt)
  val stream =
    if (path.endsWith(".gz")) new GZIPInputStream(new FileInputStream(file))
    else new FileInputStream(file)
  try {
    ByteStreams.skipFully(stream, effectiveStart)
    ByteStreams.readFully(stream, buff)
  } finally {
    stream.close()
  }
  Source.fromBytes(buff).mkString
}
/**
 * Return a string containing data across a set of files. The `startIndex`
 * and `endIndex` is based on the cumulative size of all the files take in
 * the given order. See figure below for more details.
 *
 * @param files ordered files forming one logical byte stream.
 * @param fileLengths length of each file, positionally matching `files`.
 * @param start inclusive byte offset into the concatenated stream.
 * @param end exclusive byte offset into the concatenated stream.
 */
def offsetBytes(files: Seq[File], fileLengths: Seq[Long], start: Long, end: Long): String = {
  assert(files.length == fileLengths.length)
  // Clamp the window to the valid cumulative range.
  val startIndex = math.max(start, 0)
  val endIndex = math.min(end, fileLengths.sum)
  val fileToLength = files.zip(fileLengths).toMap
  logDebug("Log files: \\n" + fileToLength.mkString("\\n"))
  val stringBuffer = new StringBuffer((endIndex - startIndex).toInt)
  // `sum` tracks the cumulative offset at the start of the current file.
  var sum = 0L
  files.zip(fileLengths).foreach { case (file, fileLength) =>
    val startIndexOfFile = sum
    val endIndexOfFile = sum + fileToLength(file)
    logDebug(s"Processing file $file, " +
      s"with start index = $startIndexOfFile, end index = $endIndex")
    /*
                                      ____________
     range 1:                        |            |
                                     |   case A   |
     files:   |==== file 1 ====|====== file 2 ======|===== file 3 =====|
                     |   case B  .       case C       .    case D     |
     range 2:        |___________.____________________.______________|
     */
    if (startIndex <= startIndexOfFile && endIndex >= endIndexOfFile) {
      // Case C: read the whole file
      stringBuffer.append(offsetBytes(file.getAbsolutePath, fileLength, 0, fileToLength(file)))
    } else if (startIndex > startIndexOfFile && startIndex < endIndexOfFile) {
      // Case A and B: read from [start of required range] to [end of file / end of range]
      val effectiveStartIndex = startIndex - startIndexOfFile
      val effectiveEndIndex = math.min(endIndex - startIndexOfFile, fileToLength(file))
      stringBuffer.append(Utils.offsetBytes(
        file.getAbsolutePath, fileLength, effectiveStartIndex, effectiveEndIndex))
    } else if (endIndex > startIndexOfFile && endIndex < endIndexOfFile) {
      // Case D: read from [start of file] to [end of require range]
      val effectiveStartIndex = math.max(startIndex - startIndexOfFile, 0)
      val effectiveEndIndex = endIndex - startIndexOfFile
      stringBuffer.append(Utils.offsetBytes(
        file.getAbsolutePath, fileLength, effectiveStartIndex, effectiveEndIndex))
    }
    sum += fileToLength(file)
    logDebug(s"After processing file $file, string built is ${stringBuffer.toString}")
  }
  stringBuffer.toString
}
/**
 * Clone an object using a Spark serializer.
 *
 * @param value object to copy; must be serializable by `serializer`. The
 *              ClassTag is required by `deserialize` to reconstruct the type.
 * @param serializer serializer instance used for the round-trip.
 * @return a deep copy produced by serializing then immediately deserializing.
 */
def clone[T: ClassTag](value: T, serializer: SerializerInstance): T = {
  serializer.deserialize[T](serializer.serialize(value))
}
// Whitespace test used by splitCommandString's tokenizer.
private def isSpace(c: Char): Boolean = {
  " \\t\\r\\n".contains(c)
}
/**
 * Split a string of potentially quoted arguments from the command line the way that a shell
 * would do it to determine arguments to a command. For example, if the string is 'a "b c" d',
 * then it would be parsed as three arguments: 'a', 'b c' and 'd'.
 *
 * Implemented as a character-by-character state machine with three states:
 * inside double quotes, inside single quotes, and unquoted.
 */
def splitCommandString(s: String): Seq[String] = {
  val buf = new ArrayBuffer[String]
  var inWord = false
  var inSingleQuote = false
  var inDoubleQuote = false
  val curWord = new StringBuilder
  // Flush the accumulated characters as one completed argument.
  def endWord() {
    buf += curWord.toString
    curWord.clear()
  }
  var i = 0
  while (i < s.length) {
    val nextChar = s.charAt(i)
    if (inDoubleQuote) {
      if (nextChar == '"') {
        inDoubleQuote = false
      } else if (nextChar == '\\\\') {
        if (i < s.length - 1) {
          // Append the next character directly, because only " and \\ may be escaped in
          // double quotes after the shell's own expansion
          curWord.append(s.charAt(i + 1))
          i += 1
        }
      } else {
        curWord.append(nextChar)
      }
    } else if (inSingleQuote) {
      if (nextChar == '\\'') {
        inSingleQuote = false
      } else {
        curWord.append(nextChar)
      }
      // Backslashes are not treated specially in single quotes
    } else if (nextChar == '"') {
      // Opening quote also marks that we are inside a word, so "" yields
      // an empty argument rather than nothing.
      inWord = true
      inDoubleQuote = true
    } else if (nextChar == '\\'') {
      inWord = true
      inSingleQuote = true
    } else if (!isSpace(nextChar)) {
      curWord.append(nextChar)
      inWord = true
    } else if (inWord && isSpace(nextChar)) {
      endWord()
      inWord = false
    }
    i += 1
  }
  // Flush a trailing word (including an unterminated quoted section).
  if (inWord || inDoubleQuote || inSingleQuote) {
    endWord()
  }
  buf
}
/* Calculates 'x' modulo 'mod', takes to consideration sign of x,
 * i.e. if 'x' is negative, than 'x' % 'mod' is negative too
 * so function return (x % mod) + mod in that case.
 */
def nonNegativeMod(x: Int, mod: Int): Int = {
  // `%` keeps the sign of x; shift negative remainders into [0, mod).
  val remainder = x % mod
  if (remainder < 0) remainder + mod else remainder
}
// Handles idiosyncrasies with hash (add more as required)
// This method should be kept in sync with
// org.apache.spark.network.util.JavaUtils#nonNegativeHash().
def nonNegativeHash(obj: AnyRef): Int = {
  if (obj eq null) {
    0
  } else {
    val hash = obj.hashCode
    // math.abs(Int.MinValue) overflows back to Int.MinValue; map it to 0 instead.
    if (hash == Int.MinValue) 0 else math.abs(hash)
  }
}
/**
 * NaN-safe version of `java.lang.Double.compare()` which allows NaN values to be compared
 * according to semantics where NaN == NaN and NaN is greater than any non-NaN double.
 */
def nanSafeCompareDoubles(x: Double, y: Double): Int = {
  (java.lang.Double.isNaN(x), java.lang.Double.isNaN(y)) match {
    case (true, true) => 0     // NaN == NaN under these semantics
    case (true, false) => 1    // NaN is greater than any non-NaN value
    case (false, true) => -1
    case _ => if (x == y) 0 else if (x > y) 1 else -1
  }
}
/**
 * NaN-safe version of `java.lang.Float.compare()` which allows NaN values to be compared
 * according to semantics where NaN == NaN and NaN is greater than any non-NaN float.
 */
def nanSafeCompareFloats(x: Float, y: Float): Int = {
  (java.lang.Float.isNaN(x), java.lang.Float.isNaN(y)) match {
    case (true, true) => 0     // NaN == NaN under these semantics
    case (true, false) => 1    // NaN is greater than any non-NaN value
    case (false, true) => -1
    case _ => if (x == y) 0 else if (x > y) 1 else -1
  }
}
/**
 * Returns the system properties map that is thread-safe to iterate over. It gets the
 * properties which have been set explicitly, as well as those for which only a default
 * value has been defined, by snapshotting the property names first.
 */
def getSystemProperties: Map[String, String] = {
  val names = System.getProperties.stringPropertyNames().asScala
  names.iterator.map(name => name -> System.getProperty(name)).toMap
}
/**
 * Executes `f` repeatedly `numIters` times, purely for its side effects.
 * Unlike a for comprehension, the bare while loop permits JVM JIT optimization.
 */
def times(numIters: Int)(f: => Unit): Unit = {
  var remaining = numIters
  while (remaining > 0) {
    f
    remaining -= 1
  }
}
/**
 * Timing method based on iterations that permit JVM JIT optimization.
 *
 * @param numIters number of iterations
 * @param f function to be executed. If prepare is not None, the running time of each call to f
 *          must be an order of magnitude longer than one nanosecond for accurate timing.
 * @param prepare function to be executed before each call to f. Its running time doesn't count.
 * @return the total time across all iterations (not counting preparation time) in nanoseconds.
 */
def timeIt(numIters: Int)(f: => Unit, prepare: Option[() => Unit] = None): Long = {
  prepare match {
    case None =>
      // No per-iteration setup: time the whole loop in one measurement.
      val start = System.nanoTime()
      var iter = 0
      while (iter < numIters) {
        f
        iter += 1
      }
      System.nanoTime() - start
    case Some(prep) =>
      // Run the preparation step outside the timed window of each iteration.
      var total = 0L
      var iter = 0
      while (iter < numIters) {
        prep()
        val start = System.nanoTime()
        f
        total += System.nanoTime() - start
        iter += 1
      }
      total
  }
}
/**
 * Counts the number of elements in an iterator, consuming it in the process.
 * Avoids [[scala.collection.Iterator#size]], whose for-loop implementation is
 * slightly slower in the current version of Scala.
 */
def getIteratorSize(iterator: Iterator[_]): Long = {
  var total = 0L
  iterator.foreach { _ => total += 1L }
  total
}
/**
 * Generate a zipWithIndex iterator whose indices are Longs, avoiding the Int
 * overflow of scala's built-in zipWithIndex, and starting from `startIndex`.
 */
def getIteratorZipWithIndex[T](iterator: Iterator[T], startIndex: Long): Iterator[(T, Long)] = {
  // Validated eagerly, before the first element is requested.
  require(startIndex >= 0, "startIndex should be >= 0.")
  var nextIndex = startIndex
  iterator.map { elem =>
    val pair = (elem, nextIndex)
    nextIndex += 1L
    pair
  }
}
/**
 * Creates a symlink at `dst` pointing to `src`.
 *
 * @param src absolute path to the source
 * @param dst relative path for the destination
 * @throws IOException if `src` is not absolute or `dst` is not relative
 */
def symlink(src: File, dst: File): Unit = {
  if (!src.isAbsolute()) throw new IOException("Source must be absolute")
  if (dst.isAbsolute()) throw new IOException("Destination must be relative")
  Files.createSymbolicLink(dst.toPath, src.toPath)
}
/** Return the class name of the given object, with all dollar signs removed. */
def getFormattedClassName(obj: AnyRef): String = {
  val simpleName = getSimpleName(obj.getClass)
  simpleName.replace("$", "")
}
/**
 * Return a Hadoop FileSystem with the scheme encoded in the given path.
 *
 * @param path URI whose scheme selects the FileSystem implementation
 * @param conf Hadoop configuration used to instantiate the filesystem
 */
def getHadoopFileSystem(path: URI, conf: Configuration): FileSystem = {
  FileSystem.get(path, conf)
}
/**
 * Return a Hadoop FileSystem with the scheme encoded in the given path.
 * Convenience overload that parses the string into a URI first.
 */
def getHadoopFileSystem(path: String, conf: Configuration): FileSystem = {
  getHadoopFileSystem(new URI(path), conf)
}
/**
 * Whether the underlying operating system is Windows.
 */
val isWindows = SystemUtils.IS_OS_WINDOWS

/**
 * Whether the underlying operating system is Mac OS X.
 */
val isMac = SystemUtils.IS_OS_MAC_OSX

/**
 * Pattern for matching a Windows drive, which contains only a single alphabet character.
 * Used as an extractor to recognize drive-letter "schemes" (e.g. "C") in parsed URIs.
 */
val windowsDrive = "([a-zA-Z])".r
/**
 * Indicates whether Spark is currently running unit tests.
 */
def isTesting: Boolean = {
  // Only the presence of the env var / system property matters, not its value.
  // NOTE(review): IS_TESTING is a config entry defined elsewhere; presumably its key is
  // "spark.testing" — confirm against the config definitions.
  sys.env.contains("SPARK_TESTING") || sys.props.contains(IS_TESTING.key)
}
/**
 * Terminates a process waiting for at most the specified duration.
 *
 * First requests a polite shutdown via destroy(); if the process has not exited within
 * `timeoutMs`, escalates to destroyForcibly() and waits up to `timeoutMs` once more.
 *
 * @return the process exit value if it was successfully terminated, else None
 */
def terminateProcess(process: Process, timeoutMs: Long): Option[Int] = {
  // Politely destroy first
  process.destroy()
  if (process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)) {
    // Successful exit
    Option(process.exitValue())
  } else {
    try {
      process.destroyForcibly()
    } catch {
      // Best-effort: log and fall through to the final wait below.
      case NonFatal(e) => logWarning("Exception when attempting to kill process", e)
    }
    // Wait, again, although this really should return almost immediately
    if (process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)) {
      Option(process.exitValue())
    } else {
      logWarning("Timed out waiting to forcibly kill process")
      None
    }
  }
}
/**
 * Return the stderr of a process after waiting for the process to terminate.
 * If the process does not terminate within the specified timeout, return None.
 * Note: the process's stderr is fully drained into memory, so this is only
 * appropriate for short-lived processes with modest output.
 */
def getStderr(process: Process, timeoutMs: Long): Option[String] = {
  val terminated = process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)
  if (terminated) {
    Some(Source.fromInputStream(process.getErrorStream).getLines().mkString("\\n"))
  } else {
    None
  }
}
/**
 * Execute the given block, logging and re-throwing any uncaught exception.
 * This is particularly useful for wrapping code that runs in a thread, to ensure
 * that exceptions are printed, and to avoid having to catch Throwable.
 */
def logUncaughtExceptions[T](f: => T): T = {
  try {
    f
  } catch {
    // ControlThrowable is flow control (e.g. non-local returns), not an error: rethrow silently.
    case ct: ControlThrowable =>
      throw ct
    // Intentionally catches Throwable (not just NonFatal) so even fatal errors are logged
    // before propagating.
    case t: Throwable =>
      logError(s"Uncaught exception in thread ${Thread.currentThread().getName}", t)
      throw t
  }
}
/**
 * Executes the given block in a Try, logging any uncaught exception before
 * wrapping it in a Failure. ControlThrowable is rethrown untouched.
 */
def tryLog[T](f: => T): Try[T] = {
  try {
    scala.util.Success(f)
  } catch {
    case ct: ControlThrowable =>
      throw ct
    case t: Throwable =>
      logError(s"Uncaught exception in thread ${Thread.currentThread().getName}", t)
      scala.util.Failure(t)
  }
}
/**
 * Returns true if the given exception is fatal. This mirrors
 * scala.util.control.NonFatal but additionally treats InterruptedException,
 * NotImplementedError, ControlThrowable and LinkageError as non-fatal.
 */
def isFatalError(e: Throwable): Boolean = {
  e match {
    case _: InterruptedException | _: NotImplementedError | _: ControlThrowable |
        _: LinkageError =>
      false
    case NonFatal(_) =>
      false
    case _ =>
      true
  }
}
/**
 * Return a well-formed URI for the file described by a user input string.
 *
 * If the supplied path does not contain a scheme, or is a relative path, it will be
 * converted into an absolute path with a file:// scheme. A URI fragment (used by the
 * YARN distributed cache) is preserved when absolutizing.
 */
def resolveURI(path: String): URI = {
  val parsed: Option[URI] =
    try {
      val uri = new URI(path)
      if (uri.getScheme() != null) {
        // Already has a scheme: use as-is.
        Some(uri)
      } else if (uri.getFragment() != null) {
        // Absolutize but keep the fragment (applies to the YARN distributed cache).
        val absoluteURI = new File(uri.getPath()).getAbsoluteFile().toURI()
        Some(new URI(absoluteURI.getScheme(), absoluteURI.getHost(), absoluteURI.getPath(),
          uri.getFragment()))
      } else {
        None
      }
    } catch {
      case _: URISyntaxException => None
    }
  // Fall back to treating the input as a plain local file path.
  parsed.getOrElse(new File(path).getAbsoluteFile().toURI())
}
/** Resolve a comma-separated list of paths, returning a comma-joined list of URIs. */
def resolveURIs(paths: String): String = {
  if (paths == null || paths.trim.isEmpty) {
    ""
  } else {
    val resolved = for (p <- paths.split(",") if p.trim.nonEmpty) yield Utils.resolveURI(p)
    resolved.mkString(",")
  }
}
/** Return all non-local paths from a comma-separated list of paths. */
def nonLocalPaths(paths: String, testWindows: Boolean = false): Array[String] = {
  val windows = isWindows || testWindows
  if (paths == null || paths.trim.isEmpty) {
    Array.empty
  } else {
    // A path is "local" when its scheme is local/file, or (on Windows) when the
    // apparent scheme is really a drive letter such as "C".
    def isLocal(p: String): Boolean = {
      Option(resolveURI(p).getScheme).getOrElse("file") match {
        case windowsDrive(_) if windows => true
        case "local" | "file" => true
        case _ => false
      }
    }
    paths.split(",").filterNot(isLocal)
  }
}
/**
 * Load default Spark properties from the given file. If no file is provided,
 * use the common defaults file. This mutates state in the given SparkConf and
 * in this JVM's system properties if the config specified in the file is not
 * already set. Return the path of the properties file used.
 */
def loadDefaultSparkProperties(conf: SparkConf, filePath: String = null): String = {
  // Fall back to the conventional defaults file when no explicit path was given;
  // path may still be null if no defaults file exists, in which case nothing is loaded.
  val path = Option(filePath).getOrElse(getDefaultPropertiesFile())
  Option(path).foreach { confFile =>
    // Only keys in the "spark." namespace are honored.
    getPropertiesFromFile(confFile).filter { case (k, v) =>
      k.startsWith("spark.")
    }.foreach { case (k, v) =>
      // Existing conf entries and system properties take precedence over file values.
      conf.setIfMissing(k, v)
      sys.props.getOrElseUpdate(k, v)
    }
  }
  path
}
/**
 * Updates Spark config with properties from a set of Properties.
 * Provided properties have the highest priority (they overwrite existing entries).
 * Only keys in the "spark." namespace are applied.
 */
def updateSparkConfigFromProperties(
    conf: SparkConf,
    properties: Map[String, String]) : Unit = {
  for ((k, v) <- properties if k.startsWith("spark.")) {
    conf.set(k, v)
  }
}
/**
 * Implements the same logic as JDK `java.lang.String#trim` by removing leading and trailing
 * non-printable characters less or equal to '\u0020' (SPACE), but preserves natural line
 * delimiters (CR and LF) according to [[java.util.Properties]] load method. Natural line
 * delimiters are removed by the JDK during load, so any remaining ones were explicitly
 * escaped by the user and must not be stripped.
 *
 * @param str the raw property value
 * @return the trimmed value of str
 */
private[util] def trimExceptCRLF(str: String): String = {
  // A character survives trimming if it is printable or a CR/LF line delimiter.
  val keep: Char => Boolean = ch => ch > ' ' || ch == '\r' || ch == '\n'
  val first = str.indexWhere(keep)
  if (first < 0) {
    // Nothing worth keeping: the whole string trims away.
    ""
  } else {
    val last = str.lastIndexWhere(keep)
    str.substring(first, last + 1)
  }
}
/**
 * Load properties present in the given file, reading it as UTF-8.
 * Values are trimmed (except for explicit CR/LF) via trimExceptCRLF.
 *
 * @throws IllegalArgumentException if the file does not exist or is not a normal file
 * @throws SparkException if reading the file fails
 */
def getPropertiesFromFile(filename: String): Map[String, String] = {
  val file = new File(filename)
  require(file.exists(), s"Properties file $file does not exist")
  require(file.isFile(), s"Properties file $file is not a normal file")
  // Explicit UTF-8 reader: Properties.load(InputStream) would assume ISO-8859-1.
  val inReader = new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8)
  try {
    val properties = new Properties()
    properties.load(inReader)
    properties.stringPropertyNames().asScala
      .map { k => (k, trimExceptCRLF(properties.getProperty(k))) }
      .toMap
  } catch {
    case e: IOException =>
      throw new SparkException(s"Failed when loading Spark properties from $filename", e)
  } finally {
    inReader.close()
  }
}
/**
 * Return the absolute path of the default Spark properties file
 * (spark-defaults.conf under SPARK_CONF_DIR, or SPARK_HOME/conf), or null if
 * no such regular file exists.
 */
def getDefaultPropertiesFile(env: Map[String, String] = sys.env): String = {
  val confDir = env.get("SPARK_CONF_DIR")
    .orElse(env.get("SPARK_HOME").map { home => s"$home${File.separator}conf" })
  confDir
    .map { dir => new File(s"$dir${File.separator}spark-defaults.conf") }
    .filter(_.isFile)
    .map(_.getAbsolutePath)
    .orNull
}
/**
 * Return a nice string representation of the exception. Uses "printStackTrace"
 * so the full stack trace, including the exception's causes, is rendered.
 * Returns the empty string for a null exception.
 */
def exceptionString(e: Throwable): String = {
  Option(e).fold("") { t =>
    // e.getStackTrace would omit the cause chain, hence printStackTrace into a writer.
    val stringWriter = new StringWriter()
    t.printStackTrace(new PrintWriter(stringWriter))
    stringWriter.toString
  }
}
/**
 * Enriches [[LockInfo]] with a compact human-readable description used in thread dumps.
 * Locks acquired via `synchronized` surface as [[MonitorInfo]] and are labeled "Monitor";
 * everything else (e.g. ownable synchronizers) is labeled "Lock".
 */
private implicit class Lock(lock: LockInfo) {
  def lockString: String = {
    // Bug fix: the interpolated strings previously ended in "}})", leaving a stray '}'
    // in the rendered text (e.g. "Lock(java.lang.Object@42})").
    lock match {
      case _: MonitorInfo =>
        s"Monitor(${lock.getClassName}@${lock.getIdentityHashCode})"
      case _ =>
        s"Lock(${lock.getClassName}@${lock.getIdentityHashCode})"
    }
  }
}
/** Return a thread dump of all threads' stacktraces. Used to capture dumps for the web UI */
def getThreadDump(): Array[ThreadStackTrace] = {
  // We need to filter out null values here because dumpAllThreads() may return null array
  // elements for threads that are dead / don't exist.
  val threadInfos = ManagementFactory.getThreadMXBean.dumpAllThreads(true, true).filter(_ != null)
  // Sort order: executor task-launch threads first, then case-insensitive name, then thread id.
  threadInfos.sortWith { case (threadTrace1, threadTrace2) =>
    val v1 = if (threadTrace1.getThreadName.contains("Executor task launch")) 1 else 0
    val v2 = if (threadTrace2.getThreadName.contains("Executor task launch")) 1 else 0
    if (v1 == v2) {
      val name1 = threadTrace1.getThreadName().toLowerCase(Locale.ROOT)
      val name2 = threadTrace2.getThreadName().toLowerCase(Locale.ROOT)
      val nameCmpRes = name1.compareTo(name2)
      if (nameCmpRes == 0) {
        // Thread id is the final tie-breaker for identically named threads.
        threadTrace1.getThreadId < threadTrace2.getThreadId
      } else {
        nameCmpRes < 0
      }
    } else {
      v1 > v2
    }
  }.map(threadInfoToThreadStackTrace)
}
/**
 * Return the stack trace of a single thread by id, or None if the id is invalid
 * or the thread no longer exists.
 */
def getThreadDumpForThread(threadId: Long): Option[ThreadStackTrace] = {
  if (threadId <= 0) {
    None
  } else {
    // Int.MaxValue requests the entire untruncated stack trace of the thread;
    // getThreadInfo returns null for dead/unknown threads, hence the Option wrap.
    Option(ManagementFactory.getThreadMXBean.getThreadInfo(threadId, Int.MaxValue))
      .map(threadInfoToThreadStackTrace)
  }
}
// Converts a JMX ThreadInfo into Spark's ThreadStackTrace, annotating stack frames with
// the monitors they hold and collecting all held locks.
private def threadInfoToThreadStackTrace(threadInfo: ThreadInfo): ThreadStackTrace = {
  // Map each frame that acquired a monitor to that monitor, so it can be annotated below.
  val monitors = threadInfo.getLockedMonitors.map(m => m.getLockedStackFrame -> m).toMap
  val stackTrace = StackTrace(threadInfo.getStackTrace.map { frame =>
    monitors.get(frame) match {
      case Some(monitor) =>
        monitor.getLockedStackFrame.toString + s" => holding ${monitor.lockString}"
      case None =>
        frame.toString
    }
  })
  // use a set to dedup re-entrant locks that are held at multiple places
  val heldLocks =
    (threadInfo.getLockedSynchronizers ++ threadInfo.getLockedMonitors).map(_.lockString).toSet
  ThreadStackTrace(
    threadId = threadInfo.getThreadId,
    threadName = threadInfo.getThreadName,
    threadState = threadInfo.getThreadState,
    stackTrace = stackTrace,
    // getLockOwnerId returns a negative value when the thread is not blocked on anything.
    blockedByThreadId =
      if (threadInfo.getLockOwnerId < 0) None else Some(threadInfo.getLockOwnerId),
    blockedByLock = Option(threadInfo.getLockInfo).map(_.lockString).getOrElse(""),
    holdingLocks = heldLocks.toSeq)
}
/**
 * Convert all spark properties set in the given SparkConf to a sequence of
 * "-Dkey=value" java options, keeping only keys accepted by `filterKey`.
 */
def sparkJavaOpts(conf: SparkConf, filterKey: (String => Boolean) = _ => true): Seq[String] = {
  conf.getAll.collect { case (k, v) if filterKey(k) => s"-D$k=$v" }
}
/**
 * Maximum number of retries when binding to a port before giving up.
 * Defaults to 100 under tests (to reduce flakiness from port collisions)
 * and 16 otherwise; "spark.port.maxRetries" overrides either default.
 */
def portMaxRetries(conf: SparkConf): Int = {
  val configured = conf.getOption("spark.port.maxRetries").map(_.toInt)
  val default = if (conf.contains(IS_TESTING)) 100 else 16
  configured.getOrElse(default)
}
/**
 * Returns the user port to try when trying to bind a service. Wraps around
 * within the non-privileged range [1024, 65536) so ports below 1024 are skipped.
 */
def userPort(base: Int, offset: Int): Int = {
  val span = 65536 - 1024
  ((base + offset - 1024) % span) + 1024
}
/**
 * Attempt to start a service on the given port, or fail after a number of attempts.
 * Each subsequent attempt uses 1 + the port used in the previous attempt (unless the port is 0).
 *
 * @param startPort The initial port to start the service on.
 * @param startService Function to start service on a given port.
 *                     This is expected to throw java.net.BindException on port collision.
 * @param conf A SparkConf used to get the maximum number of retries when binding to a port.
 * @param serviceName Name of the service.
 * @return (service: T, port: Int)
 */
def startServiceOnPort[T](
    startPort: Int,
    startService: Int => (T, Int),
    conf: SparkConf,
    serviceName: String = ""): (T, Int) = {
  require(startPort == 0 || (1024 <= startPort && startPort < 65536),
    "startPort should be between 1024 and 65535 (inclusive), or 0 for a random free port.")
  val serviceString = if (serviceName.isEmpty) "" else s" '$serviceName'"
  val maxRetries = portMaxRetries(conf)
  for (offset <- 0 to maxRetries) {
    // Do not increment port if startPort is 0, which is treated as a special port
    val tryPort = if (startPort == 0) {
      startPort
    } else {
      // Wraps within the non-privileged port range on overflow.
      userPort(startPort, offset)
    }
    try {
      val (service, port) = startService(tryPort)
      logInfo(s"Successfully started service$serviceString on port $port.")
      return (service, port)
    } catch {
      // Only port collisions are retried; any other exception propagates immediately.
      case e: Exception if isBindCollision(e) =>
        if (offset >= maxRetries) {
          // Out of retries: rethrow as a BindException with a diagnostic message but the
          // original stack trace.
          val exceptionMessage = if (startPort == 0) {
            s"${e.getMessage}: Service$serviceString failed after " +
              s"$maxRetries retries (on a random free port)! " +
              s"Consider explicitly setting the appropriate binding address for " +
              s"the service$serviceString (for example ${DRIVER_BIND_ADDRESS.key} " +
              s"for SparkDriver) to the correct binding address."
          } else {
            s"${e.getMessage}: Service$serviceString failed after " +
              s"$maxRetries retries (starting from $startPort)! Consider explicitly setting " +
              s"the appropriate port for the service$serviceString (for example spark.ui.port " +
              s"for SparkUI) to an available port or increasing spark.port.maxRetries."
          }
          val exception = new BindException(exceptionMessage)
          // restore original stack trace
          exception.setStackTrace(e.getStackTrace)
          throw exception
        }
        if (startPort == 0) {
          // As startPort 0 is for a random free port, it is most possibly binding address is
          // not correct.
          logWarning(s"Service$serviceString could not bind on a random free port. " +
            "You may check whether configuring an appropriate binding address.")
        } else {
          logWarning(s"Service$serviceString could not bind on port $tryPort. " +
            s"Attempting port ${tryPort + 1}.")
        }
    }
  }
  // Should never happen
  throw new SparkException(s"Failed to start service$serviceString on port $startPort")
}
/**
 * Return whether the exception is caused by an address-port collision when binding.
 * Recurses through the cause chain; recognizes plain BindExceptions, Jetty
 * MultiExceptions, and Netty NativeIoExceptions whose message indicates a bind failure.
 */
def isBindCollision(exception: Throwable): Boolean = {
  exception match {
    case e: BindException =>
      // Any BindException with a message counts; otherwise inspect the cause.
      e.getMessage != null || isBindCollision(e.getCause)
    case e: MultiException =>
      e.getThrowables.asScala.exists(isBindCollision)
    case e: NativeIoException =>
      (e.getMessage != null && e.getMessage.startsWith("bind() failed: ")) ||
        isBindCollision(e.getCause)
    case e: Exception =>
      isBindCollision(e.getCause)
    case _ =>
      // Also reached when the cause chain bottoms out at null.
      false
  }
}
/**
 * Configure a new log4j level on the root logger, affecting all loggers that
 * inherit from it.
 */
def setLogLevel(l: org.apache.log4j.Level) {
  val rootLogger = org.apache.log4j.Logger.getRootLogger()
  rootLogger.setLevel(l)
  // Setting threshold to null as rootLevel will define log level for spark-shell
  Logging.sparkShellThresholdLevel = null
}
/**
 * Return the name of the environment variable holding the native library search
 * path on the current OS (PATH on Windows, DYLD_LIBRARY_PATH on Mac OS X,
 * LD_LIBRARY_PATH elsewhere).
 */
def libraryPathEnvName: String =
  if (isWindows) "PATH"
  else if (isMac) "DYLD_LIBRARY_PATH"
  else "LD_LIBRARY_PATH"
/**
 * Return the prefix of a command that appends the given library paths to the
 * system-specific library path environment variable. On Unix, for instance,
 * this returns the string LD_LIBRARY_PATH="path1:path2:$LD_LIBRARY_PATH".
 */
def libraryPathEnvPrefix(libraryPaths: Seq[String]): String = {
  // Reference to the pre-existing value of the variable, in shell syntax for the OS.
  val libraryPathScriptVar = if (isWindows) {
    s"%${libraryPathEnvName}%"
  } else {
    "$" + libraryPathEnvName
  }
  // Quote the whole joined path list so embedded spaces survive shell parsing.
  val libraryPath = (libraryPaths :+ libraryPathScriptVar).mkString("\\"",
    File.pathSeparator, "\\"")
  // On Windows a trailing '&' separates this assignment from the following command.
  val ampersand = if (Utils.isWindows) {
    " &"
  } else {
    ""
  }
  s"$libraryPathEnvName=$libraryPath$ampersand"
}
/**
 * Return the value of a config either through the SparkConf or the Hadoop configuration.
 * We check whether the key is set in the SparkConf before looking at any Hadoop configuration.
 * If the key is set in SparkConf, no matter whether it is running on YARN or not,
 * gets the value from SparkConf.
 * Only when the key is not set in SparkConf and running on YARN,
 * gets the value from Hadoop configuration.
 */
def getSparkOrYarnConfig(conf: SparkConf, key: String, default: String): String = {
  if (conf.contains(key)) {
    conf.get(key, default)
  } else if (conf.get(SparkLauncher.SPARK_MASTER, null) == "yarn") {
    // Build a YarnConfiguration on top of the Spark-derived Hadoop conf so YARN
    // resource files (yarn-site.xml) are consulted.
    new YarnConfiguration(SparkHadoopUtil.get.newConfiguration(conf)).get(key, default)
  } else {
    default
  }
}
/**
 * Return a pair of host and port extracted from the `sparkUrl`.
 *
 * A spark url (`spark://host:port`) is a special URI whose scheme is `spark` and which
 * contains only a host and a port — no path, fragment, query or user info.
 *
 * @throws org.apache.spark.SparkException if sparkUrl is invalid.
 */
@throws(classOf[SparkException])
def extractHostPortFromSparkUrl(sparkUrl: String): (String, Int) = {
  try {
    val uri = new java.net.URI(sparkUrl)
    val host = uri.getHost
    val port = uri.getPort
    val wellFormed =
      uri.getScheme == "spark" &&
        host != null &&
        port >= 0 &&
        (uri.getPath == null || uri.getPath.isEmpty) && // uri.getPath returns "" instead of null
        uri.getFragment == null &&
        uri.getQuery == null &&
        uri.getUserInfo == null
    if (!wellFormed) {
      throw new SparkException("Invalid master URL: " + sparkUrl)
    }
    (host, port)
  } catch {
    case e: java.net.URISyntaxException =>
      throw new SparkException("Invalid master URL: " + sparkUrl, e)
  }
}
/**
 * Returns the current user name. This is the currently logged in user, unless that's been
 * overridden by the `SPARK_USER` environment variable.
 */
def getCurrentUserName(): String = {
  // SPARK_USER takes precedence; otherwise ask Hadoop's UGI for the short login name.
  Option(System.getenv("SPARK_USER"))
    .getOrElse(UserGroupInformation.getCurrentUser().getShortUserName())
}
// Sentinel returned when group lookup is disabled or fails.
val EMPTY_USER_GROUPS = Set.empty[String]

// Returns the groups to which the current user belongs.
def getCurrentUserGroups(sparkConf: SparkConf, username: String): Set[String] = {
  // The provider class is configurable; an empty name disables group resolution.
  val groupProviderClassName = sparkConf.get(USER_GROUPS_MAPPING)
  if (groupProviderClassName != "") {
    try {
      // Instantiated reflectively via the no-arg constructor.
      val groupMappingServiceProvider = classForName(groupProviderClassName).
        getConstructor().newInstance().
        asInstanceOf[org.apache.spark.security.GroupMappingServiceProvider]
      val currentUserGroups = groupMappingServiceProvider.getGroups(username)
      return currentUserGroups
    } catch {
      // Lookup failures are logged but non-fatal; fall through to the empty set.
      case e: Exception => logError(s"Error getting groups for user=$username", e)
    }
  }
  EMPTY_USER_GROUPS
}
/**
 * Split the comma delimited string of master URLs into a list.
 * For instance, "spark://abc,def" becomes [spark://abc, spark://def].
 */
def parseStandaloneMasterUrls(masterUrls: String): Array[String] = {
  val hosts = masterUrls.stripPrefix("spark://").split(",")
  hosts.map(host => s"spark://$host")
}
/** An identifier that backup masters use in their responses. */
val BACKUP_STANDALONE_MASTER_PREFIX = "Current state is not alive"

/** Return true if the response message is sent from a backup Master on standby. */
def responseFromBackup(msg: String): Boolean = {
  msg.startsWith(BACKUP_STANDALONE_MASTER_PREFIX)
}
/**
 * To avoid calling `Utils.getCallSite` for every single RDD we create in the body,
 * set a dummy call site that RDDs use instead. This is for performance optimization.
 */
def withDummyCallSite[T](sc: SparkContext)(body: => T): T = {
  // Remember the current call-site properties so they can be restored afterwards.
  val oldShortCallSite = sc.getLocalProperty(CallSite.SHORT_FORM)
  val oldLongCallSite = sc.getLocalProperty(CallSite.LONG_FORM)
  try {
    // Empty strings act as the dummy call site while `body` runs.
    sc.setLocalProperty(CallSite.SHORT_FORM, "")
    sc.setLocalProperty(CallSite.LONG_FORM, "")
    body
  } finally {
    // Restore the old ones here
    sc.setLocalProperty(CallSite.SHORT_FORM, oldShortCallSite)
    sc.setLocalProperty(CallSite.LONG_FORM, oldLongCallSite)
  }
}
/**
 * Return whether `parent` is an ancestor directory of (or the same directory as) `child`,
 * walking up the child's parent chain. Both must exist and `parent` must be a directory;
 * null arguments yield false.
 */
@tailrec
def isInDirectory(parent: File, child: File): Boolean = {
  if (child == null || parent == null) {
    false
  } else if (!child.exists() || !parent.exists() || !parent.isDirectory()) {
    false
  } else if (parent.equals(child)) {
    true
  } else {
    // Climb one level and retry; getParentFile eventually returns null at the root.
    isInDirectory(parent, child.getParentFile)
  }
}
/**
 * @return whether the configured master URL indicates local mode
 *         ("local" or "local[...]"); an unset master is not local.
 */
def isLocalMaster(conf: SparkConf): Boolean = {
  conf.get("spark.master", "") match {
    case "local" => true
    case master => master.startsWith("local[")
  }
}
/**
 * Return whether dynamic allocation is enabled in the given conf. In local mode it is
 * only considered enabled when the dedicated testing flag is also set.
 */
def isDynamicAllocationEnabled(conf: SparkConf): Boolean = {
  conf.get(DYN_ALLOCATION_ENABLED) &&
    (!isLocalMaster(conf) || conf.get(DYN_ALLOCATION_TESTING))
}
/**
 * Return whether streaming dynamic allocation is enabled in the given conf. As with
 * core dynamic allocation, local mode requires the streaming testing flag as well.
 */
def isStreamingDynamicAllocationEnabled(conf: SparkConf): Boolean = {
  conf.get(STREAMING_DYN_ALLOCATION_ENABLED) &&
    (!isLocalMaster(conf) || conf.get(STREAMING_DYN_ALLOCATION_TESTING))
}
/**
 * Return the initial number of executors for dynamic allocation: the maximum of the
 * configured minimum, the configured initial count, and spark.executor.instances.
 * Misconfigurations (values below the minimum) are warned about but not rejected.
 */
def getDynamicAllocationInitialExecutors(conf: SparkConf): Int = {
  if (conf.get(DYN_ALLOCATION_INITIAL_EXECUTORS) < conf.get(DYN_ALLOCATION_MIN_EXECUTORS)) {
    logWarning(s"${DYN_ALLOCATION_INITIAL_EXECUTORS.key} less than " +
      s"${DYN_ALLOCATION_MIN_EXECUTORS.key} is invalid, ignoring its setting, " +
      "please update your configs.")
  }
  if (conf.get(EXECUTOR_INSTANCES).getOrElse(0) < conf.get(DYN_ALLOCATION_MIN_EXECUTORS)) {
    logWarning(s"${EXECUTOR_INSTANCES.key} less than " +
      s"${DYN_ALLOCATION_MIN_EXECUTORS.key} is invalid, ignoring its setting, " +
      "please update your configs.")
  }
  // Taking the max effectively ignores any of the two settings that fall below the minimum.
  val initialExecutors = Seq(
    conf.get(DYN_ALLOCATION_MIN_EXECUTORS),
    conf.get(DYN_ALLOCATION_INITIAL_EXECUTORS),
    conf.get(EXECUTOR_INSTANCES).getOrElse(0)).max
  logInfo(s"Using initial executors = $initialExecutors, max of " +
    s"${DYN_ALLOCATION_INITIAL_EXECUTORS.key}, ${DYN_ALLOCATION_MIN_EXECUTORS.key} and " +
    s"${EXECUTOR_INSTANCES.key}")
  initialExecutors
}
/**
 * Creates a resource, applies `f` to it, and guarantees the resource is closed
 * afterwards — even when `f` throws.
 */
def tryWithResource[R <: Closeable, T](createResource: => R)(f: R => T): T = {
  val resource = createResource
  try {
    f(resource)
  } finally {
    resource.close()
  }
}
/**
 * Returns a path for a temporary file that lives in the same directory as `path`,
 * formed by appending a random UUID suffix to its absolute path.
 */
def tempFileWith(path: File): File = {
  val suffix = UUID.randomUUID().toString
  new File(s"${path.getAbsolutePath}.$suffix")
}
/**
 * Given a process id, return true if the process is still running.
 * Relies on the external `kill -0` command, so this is POSIX-only.
 */
def isProcessRunning(pid: Int): Boolean = {
  // `kill -0` sends no signal; it only checks whether the pid exists.
  val process = executeCommand(Seq("kill", "-0", pid.toString))
  process.waitFor(10, TimeUnit.SECONDS)
  // Exit code 0 means the process exists (and we may signal it).
  process.exitValue() == 0
}
/**
 * Returns the pid of this JVM process, parsed from the runtime MX bean name
 * (expected format: PID@hostname).
 *
 * @throws SparkException if the process name does not match PID@hostname
 */
def getProcessId: Int = {
  // Regex extractor capturing the numeric pid before the '@'.
  val PROCESS = "(\\\\d+)@(.*)".r
  val name = getProcessName()
  name match {
    case PROCESS(pid, _) => pid.toInt
    case _ =>
      throw new SparkException(s"Unexpected process name: $name, expected to be PID@hostname.")
  }
}
/**
 * Returns the name of this JVM process. This is OS dependent but typically (OSX, Linux,
 * Windows), this is formatted as PID@hostname.
 */
def getProcessName(): String = {
  val runtimeBean = ManagementFactory.getRuntimeMXBean()
  runtimeBean.getName()
}
/**
 * Utility function that should be called early in `main()` for daemons to set up some common
 * diagnostic state: logs the process name and registers a signal handler logger.
 */
def initDaemon(log: Logger): Unit = {
  log.info(s"Started daemon with process name: ${Utils.getProcessName()}")
  SignalUtils.registerLogger(log)
}
/**
 * Return the jar files pointed by the "spark.jars" property. Spark internally will distribute
 * these jars through file server. In the YARN mode, it will return an empty list, since YARN
 * has its own mechanism to distribute jars.
 */
def getUserJars(conf: SparkConf): Seq[String] = {
  // Drop empty entries that can appear from trailing/duplicated commas in the config value.
  conf.get(JARS).filter(_.nonEmpty)
}
/**
 * Return the local jar files which will be added to REPL's classpath. These jar files are
 * specified by --jars (spark.jars) or --packages, remote jars will be downloaded to local by
 * SparkSubmit at first.
 */
def getLocalUserJarsForShell(conf: SparkConf): Seq[String] = {
  conf.getOption("spark.repl.local.jars")
    .toSeq
    .flatMap(_.split(","))
    .filter(_.nonEmpty)
}
// Replacement text substituted for redacted (sensitive) values.
private[spark] val REDACTION_REPLACEMENT_TEXT = "*********(redacted)"

/**
 * Redact the sensitive values in the given sequence of key-value pairs. If a map key matches
 * the configured redaction pattern then its value is replaced with a dummy text.
 */
def redact(conf: SparkConf, kvs: Seq[(String, String)]): Seq[(String, String)] = {
  // The pattern comes from spark configuration; delegates to the Regex-based overload.
  val redactionPattern = conf.get(SECRET_REDACTION_PATTERN)
  redact(redactionPattern, kvs)
}
/**
 * Redact the sensitive values in the given key-value pairs. When no redaction regex is
 * configured (None), the pairs are returned unchanged.
 */
def redact[K, V](regex: Option[Regex], kvs: Seq[(K, V)]): Seq[(K, V)] = {
  regex.fold(kvs)(r => redact(r, kvs))
}
/**
 * Redact the sensitive information in the given string. Null or empty text, or an
 * absent regex, is passed through untouched.
 */
def redact(regex: Option[Regex], text: String): String = {
  regex.fold(text) { r =>
    if (text == null || text.isEmpty) {
      text
    } else {
      r.replaceAllIn(text, REDACTION_REPLACEMENT_TEXT)
    }
  }
}
// Core redaction routine: replaces the value of any String/String pair whose key OR value
// matches the pattern, and of any (K, String) pair whose value matches.
private def redact[K, V](redactionPattern: Regex, kvs: Seq[(K, V)]): Seq[(K, V)] = {
  // If the sensitive information regex matches with either the key or the value, redact the value
  // While the original intent was to only redact the value if the key matched with the regex,
  // we've found that especially in verbose mode, the value of the property may contain sensitive
  // information like so:
  // "sun.java.command":"org.apache.spark.deploy.SparkSubmit ... \\
  // --conf spark.executorEnv.HADOOP_CREDSTORE_PASSWORD=secret_password ...
  //
  // And, in such cases, simply searching for the sensitive information regex in the key name is
  // not sufficient. The values themselves have to be searched as well and redacted if matched.
  // This does mean we may be accounting more false positives - for example, if the value of an
  // arbitrary property contained the term 'password', we may redact the value from the UI and
  // logs. In order to work around it, user would have to make the spark.redaction.regex property
  // more specific.
  kvs.map {
    case (key: String, value: String) =>
      redactionPattern.findFirstIn(key)
        .orElse(redactionPattern.findFirstIn(value))
        .map { _ => (key, REDACTION_REPLACEMENT_TEXT) }
        .getOrElse((key, value))
    case (key, value: String) =>
      redactionPattern.findFirstIn(value)
        .map { _ => (key, REDACTION_REPLACEMENT_TEXT) }
        .getOrElse((key, value))
    case (key, value) =>
      // Non-string values are left untouched.
      (key, value)
  }.asInstanceOf[Seq[(K, V)]]
  // The cast is needed because the String-typed matches above erase K/V; at runtime the
  // replacement text is a String, which is only sound for the String-valued cases matched.
}
/**
 * Looks up the redaction regex from within the key value pairs and uses it to redact the rest
 * of the key value pairs. No care is taken to make sure the redaction property itself is not
 * redacted. So theoretically, the property itself could be configured to redact its own value
 * when printing.
 */
def redact(kvs: Map[String, String]): Seq[(String, String)] = {
  // Falls back to the default redaction pattern if the map does not carry one.
  val redactionPattern = kvs.getOrElse(
    SECRET_REDACTION_PATTERN.key,
    SECRET_REDACTION_PATTERN.defaultValueString
  ).r
  redact(redactionPattern, kvs.toArray)
}
// Redacts sensitive values inside "-Dkey=value" style command-line arguments;
// arguments not matching that shape are passed through untouched.
def redactCommandLineArgs(conf: SparkConf, commands: Seq[String]): Seq[String] = {
  val redactionPattern = conf.get(SECRET_REDACTION_PATTERN)
  commands.map {
    // PATTERN_FOR_COMMAND_LINE_ARG extracts the key and value from a -D style argument.
    case PATTERN_FOR_COMMAND_LINE_ARG(key, value) =>
      val (_, newValue) = redact(redactionPattern, Seq((key, value))).head
      s"-D$key=$newValue"
    case cmd => cmd
  }
}
/** Split a comma-separated string into trimmed, non-empty entries. */
def stringToSeq(str: String): Seq[String] = {
  str.split(",").toSeq.map(_.trim).filter(_.nonEmpty)
}
/**
 * Create instances of extension classes.
 *
 * The classes in the given list must:
 * - Be sub-classes of the given base class.
 * - Provide either a no-arg constructor, or a 1-arg constructor that takes a SparkConf.
 *
 * The constructors are allowed to throw "UnsupportedOperationException" if the extension does not
 * want to be registered; this allows the implementations to check the Spark configuration (or
 * other state) and decide they do not need to be added. A log message is printed in that case.
 * Other exceptions are bubbled up.
 */
def loadExtensions[T <: AnyRef](
    extClass: Class[T], classes: Seq[String], conf: SparkConf): Seq[T] = {
  classes.flatMap { name =>
    try {
      val klass = classForName[T](name)
      require(extClass.isAssignableFrom(klass),
        s"$name is not a subclass of ${extClass.getName()}.")
      // Prefer the SparkConf constructor; fall back to the no-arg one.
      val ext = Try(klass.getConstructor(classOf[SparkConf])) match {
        case Success(ctor) =>
          ctor.newInstance(conf)
        case Failure(_) =>
          klass.getConstructor().newInstance()
      }
      Some(ext.asInstanceOf[T])
    } catch {
      // Raised by the no-arg getConstructor() fallback when neither constructor exists.
      case _: NoSuchMethodException =>
        throw new SparkException(
          s"$name did not have a zero-argument constructor or a" +
            " single-argument constructor that accepts SparkConf. Note: if the class is" +
            " defined inside of another Scala class, then its constructors may accept an" +
            " implicit parameter that references the enclosing class; in this case, you must" +
            " define the class as a top-level class in order to prevent this extra" +
            " parameter from breaking Spark's ability to find a valid constructor.")
      case e: InvocationTargetException =>
        e.getCause() match {
          // The extension opted out of registration: skip it quietly.
          case uoe: UnsupportedOperationException =>
            logDebug(s"Extension $name not being initialized.", uoe)
            logInfo(s"Extension $name not being initialized.")
            None
          case null => throw e
          case cause => throw cause
        }
    }
  }
}
/**
 * Check the validity of the given Kubernetes master URL and return the resolved URL. Prefix
 * "k8s://" is appended to the resolved URL as the prefix is used by KubernetesClusterManager
 * in canCreate to determine if the KubernetesClusterManager should be used.
 */
def checkAndGetK8sMasterUrl(rawMasterURL: String): String = {
  require(rawMasterURL.startsWith("k8s://"),
    "Kubernetes master URL must start with k8s://.")
  val masterWithoutK8sPrefix = rawMasterURL.substring("k8s://".length)
  // To handle master URLs, e.g., k8s://host:port that carry no scheme at all.
  if (!masterWithoutK8sPrefix.contains("://")) {
    val resolvedURL = s"https://$masterWithoutK8sPrefix"
    logInfo("No scheme specified for kubernetes master URL, so defaulting to https. Resolved " +
      s"URL is $resolvedURL.")
    return s"k8s://$resolvedURL"
  }
  // A scheme is present; only http and https are accepted.
  val masterScheme = new URI(masterWithoutK8sPrefix).getScheme
  val resolvedURL = masterScheme.toLowerCase(Locale.ROOT) match {
    case "https" =>
      masterWithoutK8sPrefix
    case "http" =>
      logWarning("Kubernetes master URL uses HTTP instead of HTTPS.")
      masterWithoutK8sPrefix
    case null =>
      val resolvedURL = s"https://$masterWithoutK8sPrefix"
      logInfo("No scheme specified for kubernetes master URL, so defaulting to https. Resolved " +
        s"URL is $resolvedURL.")
      resolvedURL
    case _ =>
      throw new IllegalArgumentException("Invalid Kubernetes master scheme: " + masterScheme)
  }
  s"k8s://$resolvedURL"
}
/**
* Replaces all the {{EXECUTOR_ID}} occurrences with the Executor Id
* and {{APP_ID}} occurrences with the App Id.
*/
def substituteAppNExecIds(opt: String, appId: String, execId: String): String = {
  // Apply both placeholder substitutions in a fixed order: APP_ID first, then EXECUTOR_ID.
  val substitutions = Seq("{{APP_ID}}" -> appId, "{{EXECUTOR_ID}}" -> execId)
  substitutions.foldLeft(opt) { case (result, (placeholder, value)) =>
    result.replace(placeholder, value)
  }
}
/**
* Replaces all the {{APP_ID}} occurrences with the App Id.
*/
def substituteAppId(opt: String, appId: String): String = {
  // Only the APP_ID placeholder is substituted here; executor ids are left untouched.
  val appIdPlaceholder = "{{APP_ID}}"
  opt.replace(appIdPlaceholder, appId)
}
/**
 * Generate a random authentication secret. The secret length (in bits) is taken from
 * the AUTH_SECRET_BIT_LENGTH configuration entry; the bytes come from SecureRandom and
 * are rendered as a hex string via Guava's HashCodes.
 */
def createSecret(conf: SparkConf): String = {
  val secretBitLength = conf.get(AUTH_SECRET_BIT_LENGTH)
  val secretBytes = new Array[Byte](secretBitLength / JByte.SIZE)
  new SecureRandom().nextBytes(secretBytes)
  HashCodes.fromBytes(secretBytes).toString()
}
/**
* Safer than Class obj's getSimpleName which may throw Malformed class name error in scala.
* This method mimics scalatest's getSimpleNameOfAnObjectsClass.
*/
def getSimpleName(cls: Class[_]): String = {
  // Class.getSimpleName can throw InternalError ("Malformed class name") for some
  // Scala-generated classes, so fall back to parsing the binary name ourselves.
  // TODO: the fallback isn't quite right; it returns names like
  // UtilsSuite$MalformedClassObject$MalformedClass instead of MalformedClass.
  // The exact value may not matter much as it's used in log statements.
  try cls.getSimpleName catch {
    case _: InternalError => stripDollars(stripPackages(cls.getName))
  }
}
/**
* Remove the packages from full qualified class name
*/
/**
 * Remove the package prefix from a fully qualified class name, keeping only the
 * final segment after the last '.'.
 */
private def stripPackages(fullyQualifiedName: String): String = {
  // BUG FIX: the previous pattern "\\\\." escaped the backslash instead of the dot, so it
  // split on <backslash><any char> and never on the '.' package separators. "\\." is the
  // regex for a literal dot.
  fullyQualifiedName.split("\\.").takeRight(1)(0)
}
/**
* Remove trailing dollar signs from qualified class name,
* and return the trailing part after the last dollar sign in the middle
*/
private def stripDollars(s: String): String = {
  val lastDollarIndex = s.lastIndexOf('$')
  // Note: this branch is also taken when there is no '$' at all (lastDollarIndex == -1).
  if (lastDollarIndex < s.length - 1) {
    // The last char is not a dollar sign
    if (lastDollarIndex == -1 || !s.contains("$iw")) {
      // The name does not have a dollar sign or is not an interpreter
      // generated class ("$iw" marks REPL wrapper classes), so return the full string
      s
    } else {
      // The class name is interpreter generated,
      // return the part after the last dollar sign
      // This is the same behavior as getClass.getSimpleName
      s.substring(lastDollarIndex + 1)
    }
  }
  else {
    // The last char is a dollar sign
    // Find last non-dollar char
    val lastNonDollarChar = s.reverse.find(_ != '$')
    lastNonDollarChar match {
      // All characters are '$': nothing sensible to strip, return as-is.
      case None => s
      case Some(c) =>
        val lastNonDollarIndex = s.lastIndexOf(c)
        if (lastNonDollarIndex == -1) {
          s
        } else {
          // Strip the trailing dollar signs
          // Invoke stripDollars again to get the simple name
          stripDollars(s.substring(0, lastNonDollarIndex + 1))
        }
    }
  }
}
/**
* Regular expression matching full width characters.
*
* Looked at all the 0x0000-0xFFFF characters (unicode) and showed them under Xshell.
* Found all the full width characters, then get the regular expression.
*/
// BUG FIX: the ranges were written with doubled backslashes ("\\u1100"), so the character
// class contained the literal characters '\', 'u' and digits instead of the intended
// full-width Unicode ranges. In Scala 2 the scanner processes \uXXXX escapes even inside
// triple-quoted strings, so single backslashes yield the actual range characters.
private val fullWidthRegex = ("""[""" +
  // scalastyle:off nonascii
  """\u1100-\u115F""" +
  """\u2E80-\uA4CF""" +
  """\uAC00-\uD7A3""" +
  """\uF900-\uFAFF""" +
  """\uFE10-\uFE19""" +
  """\uFE30-\uFE6F""" +
  """\uFF00-\uFF60""" +
  """\uFFE0-\uFFE6""" +
  // scalastyle:on nonascii
  """]""").r

/**
 * Return the number of half widths in a given string. Note that a full width character
 * occupies two half widths (its length contribution plus one regex match).
 *
 * A null string is treated as width 0.
 *
 * For a string consisting of 1 million characters, the execution of this method requires
 * about 50ms.
 */
def stringHalfWidth(str: String): Int = {
  if (str == null) 0 else str.length + fullWidthRegex.findAllIn(str).size
}
/**
 * Make a string safe to use as a directory name: spaces, colons and slashes become '-',
 * shell/JVM-special characters ('.', '$', '{', '}', '\'', '"') become '_', and the result
 * is lower-cased with a locale-independent mapping.
 */
def sanitizeDirName(str: String): String = {
  // BUG FIX: the second character class was written as "[.${}'\\"]", which escapes the
  // wrong character and leaves a stray quote that does not even lex; "[.${}'\"]" is the
  // intended class containing a literal double quote.
  str.replaceAll("[ :/]", "-").replaceAll("[.${}'\"]", "_").toLowerCase(Locale.ROOT)
}
/** Whether the configured deploy mode is "client" (which is also the default). */
def isClientMode(conf: SparkConf): Boolean = {
  val deployMode = conf.get(SparkLauncher.DEPLOY_MODE, "client")
  deployMode == "client"
}
/** Returns whether the URI is a "local:" URI. */
def isLocalUri(uri: String): Boolean = {
  val localPrefix = s"$LOCAL_SCHEME:"
  uri.startsWith(localPrefix)
}
/** Check whether the file of the path is splittable. */
def isFileSplittable(path: Path, codecFactory: CompressionCodecFactory): Boolean = {
  codecFactory.getCodec(path) match {
    // No codec registered for this path: the file is uncompressed, hence splittable.
    case null => true
    case codec => codec.isInstanceOf[SplittableCompressionCodec]
  }
}
/**
 * Create a new properties object with the same values as `props`.
 * A null argument is passed through unchanged (the previous version threw an NPE).
 */
def cloneProperties(props: Properties): Properties = {
  if (props == null) {
    null
  } else {
    val resultProps = new Properties()
    props.forEach((k, v) => resultProps.put(k, v))
    resultProps
  }
}
}
private[util] object CallerContext extends Logging {
  /**
   * Whether Hadoop's caller-context feature can be used: it must be enabled via
   * "hadoop.caller.context.enabled" AND the CallerContext API (Hadoop 2.8+) must be
   * present on the classpath.
   */
  val callerContextSupported: Boolean = {
    SparkHadoopUtil.get.conf.getBoolean("hadoop.caller.context.enabled", false) && {
      try {
        // Probe for the Hadoop 2.8+ reflection entry points used by setCurrentContext.
        Utils.classForName("org.apache.hadoop.ipc.CallerContext")
        Utils.classForName("org.apache.hadoop.ipc.CallerContext$Builder")
        true
      } catch {
        // ClassNotFoundException is checked first: it is the expected outcome on older
        // Hadoop versions and is deliberately silent.
        case _: ClassNotFoundException =>
          false
        case NonFatal(e) =>
          logWarning("Fail to load the CallerContext class", e)
          false
      }
    }
  }
}
/**
* An utility class used to set up Spark caller contexts to HDFS and Yarn. The `context` will be
* constructed by parameters passed in.
* When Spark applications run on Yarn and HDFS, its caller contexts will be written into Yarn RM
* audit log and hdfs-audit.log. That can help users to better diagnose and understand how
* specific applications impacting parts of the Hadoop system and potential problems they may be
* creating (e.g. overloading NN). As HDFS mentioned in HDFS-9184, for a given HDFS operation, it's
* very helpful to track which upper level job issues it.
*
* @param from who sets up the caller context (TASK, CLIENT, APPMASTER)
*
* The parameters below are optional:
* @param upstreamCallerContext caller context the upstream application passes in
* @param appId id of the app this task belongs to
* @param appAttemptId attempt id of the app this task belongs to
* @param jobId id of the job this task belongs to
* @param stageId id of the stage this task belongs to
* @param stageAttemptId attempt id of the stage this task belongs to
* @param taskId task id
* @param taskAttemptNumber task attempt id
*/
private[spark] class CallerContext(
    from: String,
    upstreamCallerContext: Option[String] = None,
    appId: Option[String] = None,
    appAttemptId: Option[String] = None,
    jobId: Option[Int] = None,
    stageId: Option[Int] = None,
    stageAttemptId: Option[Int] = None,
    taskId: Option[Long] = None,
    taskAttemptNumber: Option[Int] = None) extends Logging {

  // The assembled context string, e.g. "SPARK_TASK_<appId>_JId_<jobId>_SId_<stageId>_TId_<taskId>".
  // Absent components are simply omitted; the result is truncated by prepareContext.
  private val context = prepareContext("SPARK_" +
    from +
    appId.map("_" + _).getOrElse("") +
    appAttemptId.map("_" + _).getOrElse("") +
    jobId.map("_JId_" + _).getOrElse("") +
    stageId.map("_SId_" + _).getOrElse("") +
    stageAttemptId.map("_" + _).getOrElse("") +
    taskId.map("_TId_" + _).getOrElse("") +
    taskAttemptNumber.map("_" + _).getOrElse("") +
    upstreamCallerContext.map("_" + _).getOrElse(""))

  /**
   * Truncate the given context string to the maximum size Hadoop will accept,
   * logging a warning when truncation happens.
   */
  private def prepareContext(context: String): String = {
    // The default max size of Hadoop caller context is 128
    lazy val len = SparkHadoopUtil.get.conf.getInt("hadoop.caller.context.max.size", 128)
    if (context == null || context.length <= len) {
      context
    } else {
      val finalContext = context.substring(0, len)
      logWarning(s"Truncated Spark caller context from $context to $finalContext")
      finalContext
    }
  }

  /**
   * Set up the caller context [[context]] by invoking Hadoop CallerContext API of
   * [[org.apache.hadoop.ipc.CallerContext]], which was added in hadoop 2.8.
   * All calls go through reflection so this class still loads against older Hadoop versions;
   * it is a no-op when CallerContext.callerContextSupported is false.
   */
  def setCurrentContext(): Unit = {
    if (CallerContext.callerContextSupported) {
      try {
        val callerContext = Utils.classForName("org.apache.hadoop.ipc.CallerContext")
        val builder: Class[AnyRef] =
          Utils.classForName("org.apache.hadoop.ipc.CallerContext$Builder")
        // Equivalent to: new CallerContext.Builder(context).build()
        val builderInst = builder.getConstructor(classOf[String]).newInstance(context)
        val hdfsContext = builder.getMethod("build").invoke(builderInst)
        // Equivalent to: CallerContext.setCurrent(hdfsContext) -- a static call, hence null receiver.
        callerContext.getMethod("setCurrent", callerContext).invoke(null, hdfsContext)
      } catch {
        case NonFatal(e) =>
          logWarning("Fail to set Spark caller context", e)
      }
    }
  }
}
/**
* A utility class to redirect the child process's stdout or stderr.
*/
private[spark] class RedirectThread(
    in: InputStream,
    out: OutputStream,
    name: String,
    propagateEof: Boolean = false)
  extends Thread(name) {

  // Daemon thread: it must not keep the JVM alive once the main program exits.
  setDaemon(true)

  /**
   * Pump bytes from `in` to `out` until EOF, flushing after every chunk.
   * IOExceptions are swallowed (the child process may close its end at any time);
   * when `propagateEof` is set, `out` is closed once copying finishes.
   */
  // Modernized from the deprecated procedure syntax `def run() { ... }`.
  override def run(): Unit = {
    scala.util.control.Exception.ignoring(classOf[IOException]) {
      // FIXME: We copy the stream on the level of bytes to avoid encoding problems.
      Utils.tryWithSafeFinally {
        val buf = new Array[Byte](1024)
        var len = in.read(buf)
        while (len != -1) {
          out.write(buf, 0, len)
          out.flush()
          len = in.read(buf)
        }
      } {
        if (propagateEof) {
          out.close()
        }
      }
    }
  }
}
/**
* An [[OutputStream]] that will store the last 10 kilobytes (by default) written to it
* in a circular buffer. The current contents of the buffer can be accessed using
* the toString method.
*/
private[spark] class CircularBuffer(sizeInBytes: Int = 10240) extends java.io.OutputStream {
  // Next write position within `ring`; wraps back to 0 once the buffer is full.
  private var writePos: Int = 0
  // Becomes true the first time writePos wraps, i.e. once the buffer holds sizeInBytes bytes.
  private var wrapped = false
  private val ring = new Array[Byte](sizeInBytes)

  /** Store a single byte, overwriting the oldest byte once the buffer has filled up. */
  def write(input: Int): Unit = {
    ring(writePos) = input.toByte
    writePos = (writePos + 1) % ring.length
    if (writePos == 0) wrapped = true
  }

  /** Render the buffered bytes, oldest first, as a UTF-8 string. */
  override def toString: String = {
    if (!wrapped) {
      // Buffer never filled: valid data is exactly ring[0, writePos).
      new String(ring, 0, writePos, StandardCharsets.UTF_8)
    } else {
      // Buffer wrapped: oldest data starts at writePos. Unroll into a linear array.
      val ordered = new Array[Byte](sizeInBytes)
      val tailLen = ring.length - writePos
      System.arraycopy(ring, writePos, ordered, 0, tailLen)
      System.arraycopy(ring, 0, ordered, tailLen, writePos)
      new String(ordered, StandardCharsets.UTF_8)
    }
  }
}
| bdrillard/spark | core/src/main/scala/org/apache/spark/util/Utils.scala | Scala | apache-2.0 | 113,297 |
package blended.file
import java.io.File
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import akka.util.ByteString
import blended.streams.jms.{JmsProducerSettings, JmsStreamSupport}
import blended.streams.message.{FlowEnvelope, FlowMessage}
import blended.util.logging.Logger
import scala.concurrent.ExecutionContext
import scala.io.Source
import scala.util.{Success, Try}
class JMSFilePollHandler(
  settings : JmsProducerSettings,
  header : FlowMessage.FlowMessageProps
) extends FilePollHandler with JmsStreamSupport {

  private val log : Logger = Logger[JMSFilePollHandler]

  /**
   * Read the polled file into a FlowEnvelope, recording the original file name and
   * absolute path as message headers.
   */
  private def createEnvelope(cmd : FileProcessCmd, file : File) : FlowEnvelope = {
    // BUG FIX: Source.fromFile opens an underlying stream that was previously never
    // closed, leaking one file handle per polled file. Close it after reading.
    val src = Source.fromFile(file)
    val body : ByteString = try {
      ByteString(src.mkString)
    } finally {
      src.close()
    }

    FlowEnvelope(FlowMessage(body)(header))
      .withHeader("BlendedFileName", cmd.f.getName()).get
      .withHeader("BlendedFilePath", cmd.f.getAbsolutePath()).get
  }

  /**
   * Send the polled file as a JMS message using the configured producer settings.
   * The returned Try fails if reading the file or starting the send stream throws.
   */
  override def processFile(cmd: FileProcessCmd, f : File)(implicit system: ActorSystem): Try[Unit] = Try {
    implicit val materializer : Materializer = ActorMaterializer()
    implicit val eCtxt : ExecutionContext = system.dispatcher

    val env : FlowEnvelope = createEnvelope(cmd, f)
    log.trace(s"Handling polled file in JMSHandler : [${env.flowMessage.header.mkString(",")}]")

    sendMessages(
      producerSettings = settings,
      log = log,
      env
    ) match {
      case Success(s) => s.shutdown()
      case _ => // do nothing as the stream is already closed
    }
  }
}
| lefou/blended | blended.file/src/main/scala/blended/file/JMSFilePollHandler.scala | Scala | apache-2.0 | 1,545 |
package io.fintrospect.formats
import java.math.BigInteger
/**
* Capability to create and parse JSON message formats in a generic way. Used to serialize and deserialize
* request parameters and bodies.
*/
trait JsonFormat[ROOT_NODETYPE <: NODETYPE, NODETYPE] {
  /** A name/value pair used to build JSON object nodes. */
  type Field = (String, NODETYPE)

  /**
   * Attempt to parse the JSON into the root node type. Implementations should throw an exception
   * if the parsing fails, which is dealt with by the surrounding deserialisation mechanism, so you
   * don't need to worry about having to muddy your own code with the exception handling.
   */
  def parse(in: String): ROOT_NODETYPE

  /**
   * Pretty printed JSON
   */
  def pretty(in: ROOT_NODETYPE): String

  /**
   * Compact printed JSON
   */
  def compact(in: ROOT_NODETYPE): String

  /**
   * Create a JSON object from the passed String -> Node pairs
   */
  def obj(fields: Iterable[Field]): ROOT_NODETYPE

  /**
   * Create a JSON object from the passed String -> Node pairs (varargs convenience overload)
   */
  final def obj(fields: Field*): ROOT_NODETYPE = obj(fields)

  /**
   * Create a JSON object from the passed Symbol -> Node pairs; symbols are converted to
   * their string names
   */
  def objSym(fields: (Symbol, NODETYPE)*): ROOT_NODETYPE = obj(fields.map(p => p._1.name -> p._2): _*)

  /**
   * Create a JSON array from the passed elements
   */
  def array(elements: Iterable[NODETYPE]): ROOT_NODETYPE

  /**
   * Create a JSON array from the passed elements (varargs convenience overload)
   */
  final def array(elements: NODETYPE*): ROOT_NODETYPE = array(elements)

  /**
   * Create a JSON string node
   */
  def string(value: String): NODETYPE

  /**
   * Create a JSON number node
   */
  def number(value: Int): NODETYPE

  /**
   * Create a JSON number node
   */
  def number(value: Double): NODETYPE

  /**
   * Create a JSON number node
   */
  def number(value: BigDecimal): NODETYPE

  /**
   * Create a JSON number node
   */
  def number(value: Long): NODETYPE

  /**
   * Create a JSON number node
   */
  def number(value: BigInteger): NODETYPE

  /**
   * Create a JSON boolean node
   */
  def boolean(value: Boolean): NODETYPE

  /**
   * Create a null JSON node
   */
  def nullNode(): NODETYPE
}
object JsonFormat {

  /**
   * Thrown when an invalid string is passed for conversion to JSON.
   * A descriptive message is now supplied so getMessage is no longer null.
   */
  class InvalidJson extends Exception("invalid JSON")

  /**
   * Thrown when a JSON node cannot be auto-decoded to a particular type.
   */
  class InvalidJsonForDecoding extends Exception("JSON cannot be decoded to the requested type")
}
| daviddenton/fintrospect | core/src/main/scala/io/fintrospect/formats/JsonFormat.scala | Scala | apache-2.0 | 2,467 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.test1
import org.apache.predictionio.controller.P2LAlgorithm
import org.apache.predictionio.controller.Params
import org.apache.spark.mllib.tree.RandomForest // CHANGED
import org.apache.spark.mllib.tree.model.RandomForestModel // CHANGED
import org.apache.spark.mllib.linalg.Vectors
// CHANGED
// Engine parameters forwarded verbatim to MLlib's RandomForest.trainClassifier
// (see RandomForestAlgorithm.train below).
case class RandomForestAlgorithmParams(
  numClasses: Int,
  numTrees: Int,
  featureSubsetStrategy: String,
  impurity: String,
  maxDepth: Int,
  maxBins: Int
) extends Params
// Trained model bundle: the MLlib forest plus the categorical-value encodings
// (gendersMap / educationMap) needed to turn query attributes into feature doubles.
class PIORandomForestModel(
  val gendersMap: Map[String, Double],
  val educationMap: Map[String, Double],
  val randomForestModel: RandomForestModel
) extends Serializable
// extends P2LAlgorithm because the MLlib's RandomForestModel doesn't
// contain RDD.
class RandomForestAlgorithm(val ap: RandomForestAlgorithmParams) // CHANGED
  extends P2LAlgorithm[PreparedData, PIORandomForestModel, // CHANGED
      Query, PredictedResult] {

  /** Train an MLlib random forest classifier and bundle it with the value encodings. */
  def train(data: PreparedData): PIORandomForestModel = { // CHANGED
    // CHANGED
    // An empty map marks every feature as continuous (no categorical features).
    val categoricalFeaturesInfo = Map.empty[Int, Int]
    val forest = RandomForest.trainClassifier(
      data.labeledPoints,
      ap.numClasses,
      categoricalFeaturesInfo,
      ap.numTrees,
      ap.featureSubsetStrategy,
      ap.impurity,
      ap.maxDepth,
      ap.maxBins)
    new PIORandomForestModel(
      gendersMap = data.gendersMap,
      educationMap = data.educationMap,
      randomForestModel = forest)
  }

  /** Encode the query attributes as a dense feature vector and classify it. */
  def predict(
    model: PIORandomForestModel, // CHANGED
    query: Query): PredictedResult = {
    val features = Vectors.dense(
      model.gendersMap(query.gender),
      query.age.toDouble,
      model.educationMap(query.education))
    new PredictedResult(model.randomForestModel.predict(features))
  }
}
| himanshudhami/PredictionIO | examples/scala-parallel-classification/custom-attributes/src/main/scala/RandomForestAlgorithm.scala | Scala | apache-2.0 | 2,749 |
package com.taig.tmpltr.engine.html
import com.taig.tmpltr._
import play.api.mvc.Content
// Template element for the HTML second-level heading: delegates to the generic
// heading tag `h` with level 2.
class h2( attributes: Attributes, content: Content )
  extends h( 2, attributes, content )
  with Tag.Body[h2, Content]
object h2
extends Tag.Body.Appliable[h2, Content] | Taig/Play-Tmpltr | app/com/taig/tmpltr/engine/html/h2.scala | Scala | mit | 258 |
/*
* Copyright 2017 University of Basel, Graphics and Vision Research Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalismo.faces.image
import scalismo.faces.FacesTestSuite
import scalismo.faces.image.filter.DistanceTransform
import scalismo.geometry.EuclideanVector2D
class DistanceTransformTests extends FacesTestSuite {

  /** Slow and explicit reference implementation of the distance transform: for each pixel,
   *  exhaustively search the whole image for the nearest foreground ("object") pixel. */
  def stupidDistanceTransform(image: PixelImage[Boolean]): PixelImage[Double] = {
    PixelImage(image.width, image.height, (x, y) => {
      // Distance from (x, y) to every pixel position in the image.
      val distToEachImagePoint = PixelImage.view(image.width, image.height, (x1, y1) => EuclideanVector2D(x - x1, y - y1).norm)
      // Mask out background pixels so only distances to object pixels remain.
      val maskedDistances = distToEachImagePoint.zip(image).mapLazy{case(dist, obj) => if (obj) dist else Double.PositiveInfinity}
      val minDistance = maskedDistances.values.min
      minDistance
    })
  }

  describe("In DistanceTransform") {

    // A single disc of radius 5 centred at (25, 15).
    val simpleImage = PixelImage(50, 50, (x, y) => EuclideanVector2D(x - 25, y - 15).norm <= 5)
    // Two discs: radius 5 at (25, 15) and radius 10 at (45, 45).
    val twoObjects = PixelImage(50, 50, (x, y) => EuclideanVector2D(x - 25, y - 15).norm <= 5 || EuclideanVector2D(x - 45, y - 45).norm <= 10)

    it("our reference distance transform is correct for the simple image") {
      val distanceImage = stupidDistanceTransform(simpleImage)
      // Spot-check a few pixels against hand-computed distances to the disc boundary.
      distanceImage(12, 15) shouldBe 8.0 +- 1e-5
      distanceImage(12, 35) shouldBe math.sqrt(13*13 + 20*20) - 5 +- 0.5
      distanceImage(25, 35) shouldBe 15.0 +- 1e-5
      distanceImage(32, 35) shouldBe math.sqrt(7*7 + 20*20) - 5 +- 0.5
      distanceImage(32, 15) shouldBe 2.0 +- 1e-5
    }

    it("our reference distance transform is correct for two objects") {
      val distanceImage = stupidDistanceTransform(twoObjects)
      // Pixels close to the second disc must now measure against that nearer object.
      distanceImage(12, 15) shouldBe 8.0 +- 1e-5
      distanceImage(12, 35) shouldBe math.sqrt(13*13 + 20*20) - 5 +- 1
      distanceImage(25, 35) shouldBe (EuclideanVector2D(25, 35) - EuclideanVector2D(45, 45)).norm - 10 +- 0.5
      distanceImage(32, 35) shouldBe (EuclideanVector2D(32, 35) - EuclideanVector2D(45, 45)).norm - 10 +- 0.5
      distanceImage(32, 15) shouldBe 2.0 +- 1e-5
    }

    describe("a euclidianDistanceTransform") {
      // The fast implementation must agree exactly with the brute-force reference.
      it("of a simple 2D image is the same as the slow, explicit distance transform") {
        val distanceImage = DistanceTransform.euclidian(simpleImage)
        val stupidDistanceImage = stupidDistanceTransform(simpleImage)
        distanceImage shouldBe stupidDistanceImage
      }

      it("of two objects is the same as the slow, explicit distance transform") {
        val distanceImage = DistanceTransform.euclidian(twoObjects)
        val stupidDistanceImage = stupidDistanceTransform(twoObjects)
        distanceImage shouldBe stupidDistanceImage
      }
    }

    describe("a signedDistanceTransform"){
      // Convention checked below: positive outside the object, negative inside.
      describe("of a simple image") {
        val signedDistance = DistanceTransform.signedEuclidian(simpleImage)
        it("is correct outside") {
          val distanceTransform = DistanceTransform.euclidian(simpleImage)
          assert(
            signedDistance.zip(distanceTransform).values.forall{
              case(signed, dist) => if (dist > 0.0) signed == dist else true}
          )
        }
        it("is correct inside") {
          // Inverting the mask yields the distance-to-background, which must equal -signed.
          val distanceTransform = DistanceTransform.euclidian(simpleImage.map{!_})
          assert(
            signedDistance.zip(distanceTransform).values.forall{
              case(signed, dist) => if (dist < 0.0) signed == -dist else true}
          )
        }
      }

      describe("of two objects") {
        val signedDistance = DistanceTransform.signedEuclidian(twoObjects)
        it("is correct outside") {
          val distanceTransform = DistanceTransform.euclidian(twoObjects)
          assert(
            signedDistance.zip(distanceTransform).values.forall{
              case(signed, dist) => if (dist > 0.0) signed == dist else true}
          )
        }
        it("is correct inside") {
          val distanceTransform = DistanceTransform.euclidian(twoObjects.map{!_})
          assert(
            signedDistance.zip(distanceTransform).values.forall{
              case(signed, dist) => if (dist < 0.0) signed == -dist else true}
          )
        }
      }
    }
  }
}
| unibas-gravis/scalismo-faces | src/test/scala/scalismo/faces/image/DistanceTransformTests.scala | Scala | apache-2.0 | 4,795 |
package utils.exceptions
/** Raised when a request carries a token that is rejected; rendered as HTTP 401. */
class InvalidToken(val message: String = "") extends KiwiERPException {
  // Machine-readable error code exposed to the client.
  val error = "invalid_token"
  // HTTP status: 401 Unauthorized.
  val status = 401
}
| KIWIKIGMBH/kiwierp | kiwierp-backend/app/utils/exceptions/InvalidToken.scala | Scala | mpl-2.0 | 152 |
package argonaut
import Json._
/** An ordered trail of [[ContextElement]]s describing a position within a JSON structure. */
sealed abstract class Context {
  // Elements of the trail; `+:` prepends, so the head is the most recently added element.
  val toList: List[ContextElement]

  /** Prepend an element, producing a new context. */
  def +:(e: ContextElement): Context =
    Context.build(e :: toList)
}
object Context extends Contexts {
  /** The context with no elements. */
  def empty: Context =
    new Context {
      val toList = Nil
    }
}
trait Contexts {
  // Internal factory that wraps an element list in a Context instance.
  private[argonaut] def build(x: List[ContextElement]): Context =
    new Context {
      val toList = x
    }
}
/**
 * A single step within a [[Context]]: a descent into either a JSON array (by index)
 * or a JSON object (by field name), together with the JSON value found there.
 */
sealed abstract class ContextElement extends Product with Serializable {
  /** The JSON value at this step. */
  def json: Json =
    this match {
      case ArrayContext(_, j) => j
      case ObjectContext(_, j) => j
    }

  /** The field name, defined only when this step descends into a JSON object. */
  def field: Option[JsonField] =
    this match {
      case ObjectContext(f, _) => Some(f)
      case ArrayContext(_, _) => None
    }

  /** The array index, defined only when this step descends into a JSON array. */
  def index: Option[Int] =
    this match {
      case ArrayContext(n, _) => Some(n)
      case ObjectContext(_, _) => None
    }
}
// A step into a JSON array: element index `n` and the value `j` found there.
private case class ArrayContext(n: Int, j: Json) extends ContextElement
// A step into a JSON object: field name `f` and the value `j` found there.
private case class ObjectContext(f: JsonField, j: Json) extends ContextElement
object ContextElement extends ContextElements

trait ContextElements {
  /** Create a context step for array index `n` holding value `j`. */
  def arrayContext(n: Int, j: Json): ContextElement =
    ArrayContext(n, j)

  /** Create a context step for object field `f` holding value `j`. */
  def objectContext(f: JsonField, j: Json): ContextElement =
    ObjectContext(f, j)
}
| jedws/argonaut | argonaut/src/main/scala/argonaut/Context.scala | Scala | bsd-3-clause | 1,256 |
/*
* Copyright 2019 CJWW Development
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.user
import com.cjwwdev.auth.connectors.AuthConnector
import com.cjwwdev.auth.models.CurrentUser
import com.cjwwdev.config.ConfigurationLoader
import com.cjwwdev.featuremanagement.services.FeatureService
import common.helpers.AuthController
import common.{FeatureManagement, RedirectUrls}
import javax.inject.Inject
import models.accounts.{BasicDetails, DeversityEnrolment, Settings}
import models.deversity.{OrgDetails, TeacherDetails}
import models.feed.FeedItem
import play.api.mvc.{Action, AnyContent, ControllerComponents, Request}
import services.DashboardService
import views.html.user.{Dashboard, OrgDashboard}
import scala.concurrent.{ExecutionContext, Future}
// Production wiring of DashboardController: all collaborators are injected by Guice.
class DefaultDashboardController @Inject()(val dashboardService: DashboardService,
                                           val controllerComponents: ControllerComponents,
                                           val authConnector: AuthConnector,
                                           val config: ConfigurationLoader,
                                           val featureService: FeatureService,
                                           implicit val ec: ExecutionContext) extends DashboardController with RedirectUrls
trait DashboardController extends AuthController with FeatureManagement {
  val dashboardService: DashboardService

  // Gathers the data needed for the organisation dashboard view.
  // NOTE(review): the `Some(details)` generator fails the Future with a MatchError when
  // getOrgBasicDetails returns None — confirm a recovery/error page is applied upstream.
  private def buildOrgDashboard(implicit req: Request[_], currentUser: CurrentUser): Future[(OrgDetails, List[TeacherDetails])] = {
    for {
      Some(details) <- dashboardService.getOrgBasicDetails
      teacherList <- dashboardService.getTeacherList
    } yield (details, teacherList)
  }

  // Gathers the data needed for the individual-user dashboard view.
  // NOTE(review): same MatchError risk as above when getBasicDetails yields None.
  private def buildUserDashboard(implicit req: Request[_],
                                 currentUser: CurrentUser): Future[(BasicDetails, Settings, List[FeedItem], Option[DeversityEnrolment])] = {
    for {
      Some(details) <- dashboardService.getBasicDetails
      settings <- dashboardService.getSettings
      feed <- dashboardService.getFeed
      enrolment <- dashboardService.getDeversityEnrolment
    } yield (details, settings, feed, enrolment)
  }

  // Renders the dashboard matching the authenticated user's credential type:
  // organisations see the org dashboard, individuals see the personal one.
  def show : Action[AnyContent] = isAuthorised { implicit req => implicit user =>
    user.credentialType match {
      case "organisation" => buildOrgDashboard map { case (details, teacherList) =>
        Ok(OrgDashboard(details, teacherList, deversityEnabled))
      }
      case "individual" => buildUserDashboard map { case (details, settings, feed, enrolment) =>
        Ok(Dashboard(feed, details, settings, deversityEnabled, enrolment))
      }
    }
  }
}
| cjww-development/auth-service | app/controllers/user/DashboardController.scala | Scala | apache-2.0 | 3,184 |
package org.ensime.config
import org.scalatest._
import org.ensime.sexp._
import org.ensime.sexp.formats._
import scalariform.formatter.preferences._
class ScalariformFormatSpec extends FlatSpec {
  // Protocol with the ScalariformFormat mixin under test.
  object ScalariformProtocol extends DefaultSexpProtocol with ScalariformFormat
  import ScalariformProtocol._

  // Reference preferences used by both the parse and the serialise test.
  val prefs = FormattingPreferences().
    setPreference(DoubleIndentClassDeclaration, true).
    setPreference(IndentSpaces, 13)

  "ScalariformFormat" should "parse some example config" in {
    val text = """(:doubleIndentClassDeclaration t
:indentSpaces 13)"""

    val recover = text.parseSexp.convertTo[FormattingPreferences]

    // Compare the underlying maps: FormattingPreferences itself has no useful equality.
    assert(recover.preferencesMap == prefs.preferencesMap)
  }

  it should "create valid output" in {
    assert(prefs.toSexp === SexpList(
      SexpSymbol(":doubleIndentClassDeclaration"), SexpSymbol("t"),
      SexpSymbol(":indentSpaces"), SexpNumber(13)
    ))
  }
}
| jacobono/ensime-server | core/src/test/scala/org/ensime/config/ScalariformFormatSpec.scala | Scala | gpl-3.0 | 938 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.nlp
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable
import cc.factorie.util.{Cubbie, Attr}
import cc.factorie.variable.CategoricalVar
import cc.factorie.util.UniqueId
import cc.factorie.app.nlp.coref.WithinDocCoref
/** A portion of the string contents of a Document.
@author Andrew McCallum */
trait DocumentSubstring {
  /** The Document of which this DocumentSubstring is a part. */
  def document: Document

  /** The character offset into Document.string at which this DocumentSubstring begins (inclusive). */
  def stringStart: Int

  /** The character offset into Document.string at which this DocumentSubstring ends (exclusive):
      the last character of the DocumentSubstring is Document.string(this.stringEnd-1). */
  def stringEnd: Int

  /** The substring of the Document encompassed by this DocumentSubstring. */
  def string: String
}
/** A Document holds a String containing the original raw string contents
of a natural language document to be processed. The Document also holds
a sequence of Sections, each of which is delineated by character offsets
into the Document's string, and each of which contains a sequence of Tokens,
Sentences and other TokenSpans which may be annotated.
Documents may be constructed with their full string contents, or they may
have their string contents augmented by the appendString method.
Documents also have an optional "name" which can be set by Document.setName.
This is typically used to hold a filename in the file system, or some other similar identifier.
The Document.stringLength method may be a faster alternative to Document.string.length
when you are in the middle of multiple appendString calls because it will
efficiently use the underlying string buffer length, rather than flushing the buffer
to create a string.
The canonical sequence of Sections in the Document is available through
the Document.sections method.
By default the canonical sequence of Sections holds a single Section that covers the
entire string contents of the Document (even as the Document grows). This canonical sequence
of Sections may be modified by the user, but this special all-encompassing Section
instance will always be available as Document.asSection.
Even though Tokens, Sentences and TokenSpans are really stored in the Sections,
Document has basic convenience methods for obtaining iterable collections of these
by concatenating them from the canonical sequence of Sections. These iterable
collections are of type Iterable[Token], not Seq[Token], however.
If you need the Tokens as a Seq[Token] rather than an Iterable[Token], or you need
more advanced queries for TokenSpan types, you should use methods on a Section,
not on the Document. In this case typical processing looks like:
"for (section <- document.sections) section.tokens.someMethodOnSeq()...".
@author Andrew McCallum */
class Document extends DocumentSubstring with Attr with UniqueId with Serializable {
  /** Create a new Document, initializing it to have contents given by the argument. */
  def this(stringContents:String) = { this(); _string = stringContents }
  /** Return the "name" assigned to this Document by the 'setName' method.
      This may be any String, but is typically a filename or other similar identifier. */
  def name: String = { val dn = this.attr[DocumentName]; if (dn ne null) dn.string else null }
  /** Set the value that will be returned by the 'name' method.
      It accomplishes this by setting the DocumentName attr on Document.
      If the String argument is null, it will remove the DocumentName attr if present. */
  def setName(s:String): this.type = { if (s ne null) this.attr += DocumentName(s) else this.attr.remove[DocumentName]; this }
  /** The unique identifier for this Document, e.g. used for database lookup, etc.
      Defined to be the Document's name; we are relying on the user to set the name to a unique value. */
  def uniqueId = name
  // Exactly one of the following two is null at any time, the other non-null.  The StringBuffer
  // is used while multiple appendString() calls are being made; _string caches the final result.
  private var _string: String = ""
  private var _stringbuf: StringBuffer = null
  /** Append the string 's' to this Document.
      @return the length of the Document's string before string 's' was appended. */
  def appendString(s:String): Int = this.synchronized {
    if (_stringbuf eq null) _stringbuf = new StringBuffer(_string)
    val result = _stringbuf.length
    _stringbuf.append(s)
    _string = null
    result
  }
  /** The string contents of this Document.  Materializes (and caches) the StringBuffer if appends occurred. */
  def string: String = {
    this.synchronized {
      if (_string eq null) _string = _stringbuf.toString
      _stringbuf = null
    }
    _string
  }
  /** The number of characters in this Document's string.
      Use this instead of Document.string.length because it is more efficient when the Document's string is growing with appendString. */
  def stringLength: Int = this.synchronized {
    // Synchronized like appendString/string: a concurrent appendString could otherwise
    // null out _string between the test below and the dereference, causing an NPE.
    if (_string ne null) _string.length else _stringbuf.length
  }
  // For the DocumentSubstring trait
  /** A method required by the DocumentSubstring trait, which in this case simply returns this Document itself. */
  def document: Document = this
  /** A method required by the DocumentSubstring trait, which in this case simply returns 0. */
  def stringStart: Int = 0
  /** A method required by the DocumentSubstring trait, which in this case simply returns Document.stringLength. */
  def stringEnd: Int = stringLength
  // Managing sections.  These are the canonical Sections, but alternative Sections can be attached as Attr's.
  /** A predefined Section that covers the entirety of the Document string, and even grows as the length of this Document may grow.
      If the user does not explicitly add Sections to the document, this Section is the only one returned by the "sections" method. */
  lazy val asSection: Section = new Section { def document: Document = Document.this; def stringStart = 0; def stringEnd = document.stringEnd }
  private lazy val _sections: mutable.Buffer[Section] = new ArrayBuffer[Section] += asSection
  /** The canonical list of Sections containing the tokens of the document.
      The user may create and add Sections covering various substrings within the Document.
      If the user does not explicitly add any Sections, by default there will be one Section that covers the entire Document string;
      this one Section is the one returned by "Document.asSection".
      Note that Sections may overlap with each other, representing alternative tokenizations or annotations. */
  def sections: Seq[Section] = _sections
  /** Add a new Section to this Document's canonical list of Sections.
      If the only previously existing Section is the default (asSection), then remove it before adding the argument. */
  def +=(s: Section) = { if (_sections.length == 1 && _sections(0) == asSection) _sections.clear(); _sections += s }
  /** Remove a Section from this Document's canonical list of Sections. */
  def -=(s: Section) = _sections -= s
  /** Remove all Sections from this Document's canonical list of Sections. */
  def clearSections(): Unit = _sections.clear()
  // A few iterators that combine the results from the Sections
  /** Return an Iterable collection of all Tokens in all canonical Sections of this Document. */
  def tokens: Iterable[Token] = if (sections.length == 1) sections.head.tokens else new Iterable[Token] { def iterator = for (section <- sections.iterator; token <- section.tokens.iterator) yield token }
  /** Return an Iterable collection of all Sentences in all canonical Sections of this Document. */
  def sentences: Iterable[Sentence] = if (sections.length == 1) sections.head.sentences else new Iterable[Sentence] { def iterator = for (section <- sections.iterator; sentence <- section.sentences.iterator) yield sentence }
  /** An efficient way to get the total number of Tokens in the canonical Sections of this Document. */
  // Fixed: the single-Section fast path must trigger on length == 1 (not == 0, which would call
  // sections.head on an empty buffer and throw).  The foldLeft correctly yields 0 for no sections.
  def tokenCount: Int = if (sections.length == 1) sections.head.length else sections.foldLeft(0)((result, section) => result + section.length)
  /** An efficient way to get the total number of Sentences in the canonical Sections of this Document. */
  // Fixed: same inverted guard as tokenCount (was length == 0).
  def sentenceCount: Int = if (sections.length == 1) sections.head.sentences.length else sections.foldLeft(0)((result, section) => result + section.sentences.length)
  /** The collection of DocumentAnnotators that have been run on this Document,
      For keeping records of which DocumentAnnotators have been run on this document, producing which annotations.
      A Map from the annotation class to the DocumentAnnotator that produced it,
      for example from classOf[cc.factorie.app.nlp.pos.PennPos] to classOf[cc.factorie.app.nlp.pos.ChainPosTagger].
      Note that this map records annotations placed not just on the Document itself, but also its constituents,
      such as NounPhraseNumberLabel on NounPhrase, PennPos on Token, ParseTree on Sentence, etc. */
  lazy val annotators = new collection.mutable.LinkedHashMap[Class[_], Class[_]]
  /** Return true if an annotation of class 'c' been placed somewhere within this Document. */
  def hasAnnotation(c:Class[_]): Boolean = annotators.keys.exists(k => c.isAssignableFrom(k))
  /** Optionally return the DocumentAnnotator that produced the annotation of class 'c' within this Document. */
  // Simplified: the former .collect({case k:Class[_] => ...}) always matched, so it was just a map.
  def annotatorFor(c:Class[_]): Option[Class[_]] = annotators.keys.find(k => c.isAssignableFrom(k)).map(annotators)
  //  /** Return a String containing the Token strings in the document, with sentence and span boundaries indicated with SGML. */
  //  def sgmlString(spanLists:SpanList[_,_,_]*): String = {
  //    val buf = new StringBuffer
  //    for (section <- sections; token <- section.tokens) {
  //      if (token.isSentenceStart) buf.append("<sentence>")
  //      token.startsSpans.foreach(span => buf.append("<"+span.name+">"))
  //      buf.append(token.string)
  //      token.endsSpans.foreach(span => buf.append("</"+span.name+">"))
  //      if (token.isSentenceEnd) buf.append("</sentence>")
  //      buf.append(" ")
  //    }
  //    buf.toString
  //  }
  // Common attributes, will return null if not present
  /** The WithinDocCoref solution for this Document, or null if not present. */
  def coref: WithinDocCoref = this.attr[WithinDocCoref]
  /** The gold-standard coref target, or null if no coref (or no target) is present. */
  def targetCoref: WithinDocCoref = { val coref = this.attr[WithinDocCoref]; if (coref eq null) null else coref.target }
  /** Return the WithinDocCoref solution for this Document.  If not already present create it. */
  def getCoref: WithinDocCoref = this.attr.getOrElseUpdate[WithinDocCoref](new WithinDocCoref(this))
  /** Return the gold-standard WithinDocCoref.target solution for this Document.  If not already present create it. */
  def getTargetCoref: WithinDocCoref = { val coref = this.getCoref; if (coref.target eq null) coref.target = new WithinDocCoref(this); coref.target }
  /** Return a String containing the Token strings in the document, formatted with one-word-per-line
      and various tab-separated attributes appended on each line, generated as specified by the argument. */
  def owplString(attributes:Iterable[(Token)=>Any]): String = {
    val buf = new StringBuffer
    for (section <- sections; token <- section.tokens) {
      if (token.isSentenceStart) buf.append("\n")
      buf.append("%d\t%d\t%s\t".format(token.position+1, token.positionInSentence+1, token.string))
      //buf.append(token.stringStart); buf.append("\t")
      //buf.append(token.stringEnd)
      for (af <- attributes) {
        buf.append("\t")
        af(token) match {
          case cv:CategoricalVar[String @unchecked] => buf.append(cv.categoryValue.toString)
          case null => {}
          case v:Any => buf.append(v.toString)
        }
      }
      buf.append("\n")
    }
    buf.toString
  }
  /** Return a String containing the Token strings in the document, formatted with one-word-per-line
      and various tab-separated attributes appended on each line, generated from the 'annotator.tokenAnnotationString' method. */
  def owplString(annotator:DocumentAnnotator): String = annotator match {
    case pipeline:DocumentAnnotationPipeline => owplString(pipeline.annotators.map(a => a.tokenAnnotationString(_)))
    case annotator:DocumentAnnotator => owplString(Seq(annotator.tokenAnnotationString(_)))
  }
  /** Return the Section that contains the pair of string offsets into the document.
      When several Sections qualify, the one with the smallest stringStart wins (sorted below). */
  def getSectionByOffsets(strStart:Int, strEnd:Int):Option[Section] =
    this.sections.map(sec => (sec.stringStart, sec.stringEnd, sec)).sortBy(_._1)
      .find{case(start, end, _) => start <= strStart && end >= strEnd}.map(_._3)
}
/** Attribute attached to a Document carrying its "name" (typically a filename or similar identifier). */
case class DocumentName(string: String) {
  /** Renders as the bare name itself, without the case-class wrapper. */
  override def toString = string
}
// TODO Consider removing DocumentCubbie because this implementation is inefficient,
// and it isn't sensible that everyone would want the same selection of saved items.
/** A Cubbie for serializing a Document, with separate slots for the Tokens, Sentences, and TokenSpans.
Note that it does not yet serialize Sections, and relies on Document.asSection being the only Section. */
//class DocumentCubbie[TC<:TokenCubbie,SC<:SentenceCubbie,TSC<:TokenSpanCubbie](val tc:()=>TC, val sc:()=>SC, val tsc:()=>TSC) extends Cubbie with AttrCubbieSlots {
// val name = StringSlot("name")
// val string = StringSlot("string")
// val tokens = CubbieListSlot("tokens", tc)
// val sentences = CubbieListSlot("sentences", sc)
// val spans = CubbieListSlot("spans", tsc)
// def storeDocument(doc:Document): this.type = {
// name := doc.name
// string := doc.string
// if (doc.asSection.length > 0) tokens := doc.tokens.toSeq.map(t => tokens.constructor().storeToken(t))
//// if (doc.spans.length > 0) spans := doc.spans.map(s => spans.constructor().store(s))
// if (doc.asSection.sentences.length > 0) sentences := doc.sentences.toSeq.map(s => sentences.constructor().storeSentence(s))
// storeAttr(doc)
// this
// }
// def fetchDocument: Document = {
// val doc = new Document(string.value).setName(name.value)
// if (tokens.value ne null) tokens.value.foreach(tc => doc.asSection += tc.fetchToken)
// //if (spans.value ne null) spans.value.foreach(sc => doc += sc.fetch(doc))
// if (sentences.value ne null) sentences.value.foreach(sc => sc.fetchSentence(doc.asSection))
// fetchAttr(doc)
// doc
// }
//}
// TODO Consider moving this to file util/Attr.scala
//trait AttrCubbieSlots extends Cubbie {
// val storeHooks = new cc.factorie.util.Hooks1[Attr]
// val fetchHooks = new cc.factorie.util.Hooks1[AnyRef]
// def storeAttr(a:Attr): this.type = { storeHooks(a); this }
// def fetchAttr(a:Attr): Attr = { fetchHooks(a); a }
//}
//
//trait DateAttrCubbieSlot extends AttrCubbieSlots {
// val date = DateSlot("date")
// storeHooks += ((a:Attr) => date := a.attr[java.util.Date])
// //fetchHooks += ((a:Attr) => a.attr += date.value)
// fetchHooks += { case a:Attr => a.attr += date.value }
//}
| hlin117/factorie | src/main/scala/cc/factorie/app/nlp/Document.scala | Scala | apache-2.0 | 15,987 |
package edu.rice.habanero.benchmarks.facloc
import java.util.function.Consumer
import edu.rice.habanero.actors.{JetlangActor, JetlangActorState, JetlangPool}
import edu.rice.habanero.benchmarks.facloc.FacilityLocationConfig.{Box, Point, Position}
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
/**
*
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
object FacilityLocationJetlangActorBenchmark {
/** Entry point: delegates to the shared benchmark runner with this benchmark instance. */
def main(args: Array[String]): Unit =
  BenchmarkRunner.runBenchmark(args, new FacilityLocationJetlangActorBenchmark)
/** Benchmark driver: wires config parsing, one iteration of the actor run, and pool shutdown. */
private final class FacilityLocationJetlangActorBenchmark extends Benchmark {
  def initialize(args: Array[String]): Unit = FacilityLocationConfig.parseArgs(args)

  def printArgInfo(): Unit = FacilityLocationConfig.printArgs()

  def runIteration(): Unit = {
    // Cost threshold above which a quadrant opens its facility and partitions.
    val costThreshold = FacilityLocationConfig.ALPHA * FacilityLocationConfig.F
    val gridBox = new Box(0, 0, FacilityLocationConfig.GRID_SIZE, FacilityLocationConfig.GRID_SIZE)
    // The root quadrant covers the whole grid and starts with one known (its own) facility.
    val root = new QuadrantActor(
      null, Position.ROOT, gridBox, costThreshold, 0,
      new java.util.ArrayList[Point](), 1, -1, new java.util.ArrayList[Point]())
    root.start()
    val producer = new ProducerActor(root)
    producer.start()
    // Block until every actor in the system has exited.
    JetlangActorState.awaitTermination()
  }

  def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double): Unit = {
    if (lastIteration) {
      JetlangPool.shutdown()
    }
  }
}
/** Base type of the producer/quadrant message protocol.
    Sealed (and private) so every message variant is guaranteed to be defined right here. */
private sealed abstract class Msg
/** Announces a facility opened at `point`; `fromChild` is true while the news travels upward. */
private case class FacilityMsg(positionRelativeToParent: Int, depth: Int, point: Point, fromChild: Boolean) extends Msg
/** Root quadrant asks the producer for the next customer. */
private case class NextCustomerMsg() extends Msg
/** A customer at `point`, carrying the producer so the root can request the next one. */
private case class CustomerMsg(producer: JetlangActor[AnyRef], point: Point) extends Msg
/** Shutdown request, propagated from the producer down through the quadrant tree. */
private case class RequestExitMsg() extends Msg
/** A child's exit acknowledgement, carrying its facility and customer tallies. */
private case class ConfirmExitMsg(facilities: Int, supportCustomers: Int) extends Msg
/** Generates random customer points and feeds them to the root quadrant, one per request. */
private class ProducerActor(consumer: JetlangActor[AnyRef]) extends JetlangActor[AnyRef] {
  private val selfActor = this
  private var itemsProduced = 0

  override def onPostStart(): Unit = produceCustomer()

  /** Send one random point inside the grid and bump the production counter. */
  private def produceCustomer(): Unit = {
    val point = Point.random(FacilityLocationConfig.GRID_SIZE)
    consumer.send(CustomerMsg(selfActor, point))
    itemsProduced += 1
  }

  override def process(message: AnyRef): Unit = message match {
    case _: NextCustomerMsg =>
      if (itemsProduced < FacilityLocationConfig.NUM_POINTS) produceCustomer()
      else {
        // All points produced: ask the tree to shut down, then stop ourselves.
        consumer.send(RequestExitMsg())
        exit()
      }
  }
}
/** One quadrant of the grid.  A quadrant is a leaf (closed) until the accumulated service cost
    of its customers crosses `threshold`, at which point it opens its facility and partitions
    into four child quadrants (one per Position constant). */
private class QuadrantActor(parent: QuadrantActor,
                            positionRelativeToParent: Int,
                            val boundary: Box,
                            threshold: Double,
                            depth: Int,
                            initLocalFacilities: java.util.List[Point],
                            initKnownFacilities: Int,
                            initMaxDepthOfKnownOpenFacility: Int,
                            initCustomers: java.util.List[Point]) extends JetlangActor[AnyRef] {
  private val selfActor = this
  // The facility associated with this quadrant if it were to open: the center of its box.
  private val facility: Point = boundary.midPoint()
  // All facilities known locally: those inherited from ancestors plus this quadrant's candidate.
  val localFacilities = new java.util.ArrayList[Point]()
  localFacilities.addAll(initLocalFacilities)
  localFacilities.add(facility)
  private var knownFacilities = initKnownFacilities
  private var maxDepthOfKnownOpenFacility = initMaxDepthOfKnownOpenFacility
  private var terminatedChildCount = 0
  // The customers assigned to this quadrant while it remains a leaf.
  private val supportCustomers = new java.util.ArrayList[Point]()
  private var childrenFacilities = 0
  private var facilityCustomers = 0
  // Both null while closed (leaf); both non-null once partitioned (open).
  private var children: List[QuadrantActor] = null
  private var childrenBoundaries: List[Box] = null
  // Accumulated service cost of this quadrant's customers.
  private var totalCost = 0.0

  // Claim the initial customers that fall inside this quadrant's boundary.
  // (Plain iterator instead of List.forEach: the original anonymous Consumer also overrode
  // andThen to return `this`, silently discarding the chained consumer - the Consumer default
  // method should never have been overridden.)
  {
    val it = initCustomers.iterator()
    while (it.hasNext) {
      val p = it.next()
      if (boundary.contains(p)) addCustomer(p)
    }
  }

  override def process(msg: AnyRef): Unit = {
    msg match {
      case customer: CustomerMsg =>
        val point: Point = customer.point
        if (children == null) {
          // Leaf: absorb the customer; partition once the cost threshold is crossed.
          addCustomer(point)
          if (totalCost > threshold) {
            partition()
          }
        } else {
          // Already partitioned: route the customer to the first child whose box contains it.
          // Fixes an off-by-one in the original (`while (index <= 4)`) that evaluated
          // childrenBoundaries(4) - an IndexOutOfBoundsException - whenever no child matched.
          val index = childrenBoundaries.indexWhere(_.contains(point))
          if (index >= 0) children(index).send(customer)
        }
        if (parent eq null) {
          // Root drives the producer: request the next customer.
          customer.producer.send(NextCustomerMsg())
        }
      case facilityMsg: FacilityMsg =>
        val point = facilityMsg.point
        knownFacilities += 1
        localFacilities.add(point)
        if (facilityMsg.fromChild) {
          // News from below: pass it up, then across to the diagonally opposite sibling.
          notifyParentOfFacility(point, facilityMsg.depth)
          if (facilityMsg.depth > maxDepthOfKnownOpenFacility) {
            maxDepthOfKnownOpenFacility = facilityMsg.depth
          }
          val childPos = facilityMsg.positionRelativeToParent
          val siblingPos: Int =
            if (childPos == Position.TOP_LEFT) Position.BOT_RIGHT
            else if (childPos == Position.TOP_RIGHT) Position.BOT_LEFT
            else if (childPos == Position.BOT_RIGHT) Position.TOP_LEFT
            else Position.TOP_RIGHT
          children(siblingPos).send(FacilityMsg(Position.UNKNOWN, depth, point, false))
        } else if (children ne null) {
          // News from above: broadcast to all children (only relevant once partitioned).
          children.foreach(_.send(FacilityMsg(Position.UNKNOWN, depth, point, false)))
        }
      case exitMsg: RequestExitMsg =>
        if (children ne null) {
          // Propagate the shutdown request; exit only after all four children confirm.
          children.foreach(_.send(exitMsg))
        } else {
          safelyExit()
        }
      case exitMsg: ConfirmExitMsg =>
        terminatedChildCount += 1
        childrenFacilities += exitMsg.facilities
        facilityCustomers += exitMsg.supportCustomers
        if (terminatedChildCount == 4) {
          safelyExit()
        }
    }
  }

  /** Record `point` as a customer of this quadrant and add its service cost to the total. */
  private def addCustomer(point: Point): Unit = {
    supportCustomers.add(point)
    totalCost += findCost(point)
  }

  /** Distance from `point` to the nearest known facility (localFacilities is never empty). */
  private def findCost(point: Point): Double = {
    var result = Double.MaxValue
    val it = localFacilities.iterator()
    while (it.hasNext) {
      val distance = it.next().getDistance(point)
      if (distance < result) result = distance
    }
    result
  }

  /** Tell the parent (if any) that a facility has opened at `p` at the given depth. */
  private def notifyParentOfFacility(p: Point, depth: Int): Unit = {
    if (parent ne null) {
      parent.send(FacilityMsg(positionRelativeToParent, depth, p, true))
    }
  }

  /** Open this quadrant's facility, split into four children, and hand them the customers. */
  private def partition(): Unit = {
    notifyParentOfFacility(facility, depth)
    maxDepthOfKnownOpenFacility = math.max(maxDepthOfKnownOpenFacility, depth)
    // Children are created and listed in Position order (TOP_LEFT, TOP_RIGHT, BOT_LEFT,
    // BOT_RIGHT) so that indexed lookups by Position constant stay valid.
    val firstBoundary = new Box(boundary.x1, facility.y, facility.x, boundary.y2)
    val secondBoundary = new Box(facility.x, facility.y, boundary.x2, boundary.y2)
    val thirdBoundary = new Box(boundary.x1, boundary.y1, facility.x, facility.y)
    val fourthBoundary = new Box(facility.x, boundary.y1, boundary.x2, facility.y)
    // Helper: build one child with this quadrant's accumulated knowledge and start it.
    def spawn(position: Int, box: Box): QuadrantActor = {
      val child = new QuadrantActor(
        selfActor, position, box, threshold, depth + 1,
        localFacilities, knownFacilities, maxDepthOfKnownOpenFacility, supportCustomers)
      child.start()
      child
    }
    children = List(
      spawn(Position.TOP_LEFT, firstBoundary),
      spawn(Position.TOP_RIGHT, secondBoundary),
      spawn(Position.BOT_LEFT, thirdBoundary),
      spawn(Position.BOT_RIGHT, fourthBoundary))
    childrenBoundaries = List(firstBoundary, secondBoundary, thirdBoundary, fourthBoundary)
    // The children have claimed their share of the customers.
    supportCustomers.clear()
  }

  /** Report totals to the parent (or print them at the root) and stop this actor. */
  private def safelyExit(): Unit = {
    if (parent ne null) {
      val numFacilities = if (children ne null) childrenFacilities + 1 else childrenFacilities
      val numCustomers = facilityCustomers + supportCustomers.size
      parent.send(ConfirmExitMsg(numFacilities, numCustomers))
    } else {
      val numFacilities = childrenFacilities + 1
      println(" Num Facilities: " + numFacilities + ", Num customers: " + facilityCustomers)
    }
    exit()
  }
}
}
| shamsmahmood/savina | src/main/scala/edu/rice/habanero/benchmarks/facloc/FacilityLocationJetlangActorBenchmark.scala | Scala | gpl-2.0 | 10,699 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.io.{InterruptedIOException, IOException, UncheckedIOException}
import java.nio.channels.ClosedByInterruptException
import java.util.UUID
import java.util.concurrent.{CountDownLatch, ExecutionException, TimeUnit}
import java.util.concurrent.atomic.AtomicReference
import java.util.concurrent.locks.{Condition, ReentrantLock}
import scala.collection.mutable.{Map => MutableMap}
import scala.util.control.NonFatal
import com.google.common.util.concurrent.UncheckedExecutionException
import org.apache.hadoop.fs.Path
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.QueryExecution
import org.apache.spark.sql.execution.command.StreamingExplainCommand
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming._
import org.apache.spark.util.{Clock, UninterruptibleThread, Utils}
/** States for [[StreamExecution]]'s lifecycle.
    Sealed: every lifecycle state is defined right here, so matches on [[State]] are
    compiler-checked for exhaustiveness and no state can be added from another file. */
sealed trait State
/** Fields of the execution are still being set up; the stream thread has not begun batches. */
case object INITIALIZING extends State
/** The stream is running (set via compareAndSet(INITIALIZING, ACTIVE) in runStream). */
case object ACTIVE extends State
/** The stream has stopped, either cleanly or with an error (set in runStream's finally). */
case object TERMINATED extends State
/** The stream is being reconfigured — not referenced in this file's visible code; semantics per subclass. */
case object RECONFIGURING extends State
/**
* Manages the execution of a streaming Spark SQL query that is occurring in a separate thread.
* Unlike a standard query, a streaming query executes repeatedly each time new data arrives at any
* [[Source]] present in the query plan. Whenever new data arrives, a [[QueryExecution]] is created
* and the results are committed transactionally to the given [[Sink]].
*
* @param deleteCheckpointOnStop whether to delete the checkpoint if the query is stopped without
* errors
*/
abstract class StreamExecution(
override val sparkSession: SparkSession,
override val name: String,
private val checkpointRoot: String,
analyzedPlan: LogicalPlan,
val sink: BaseStreamingSink,
val trigger: Trigger,
val triggerClock: Clock,
val outputMode: OutputMode,
deleteCheckpointOnStop: Boolean)
extends StreamingQuery with ProgressReporter with Logging {
import org.apache.spark.sql.streaming.StreamingQueryListener._
// Polling delay (ms) from the streamingPollingDelay conf — presumably the wait between
// data-availability checks in the batch loop; confirm in the concrete subclass.
protected val pollingDelayMs: Long = sparkSession.sessionState.conf.streamingPollingDelay

// Minimum number of log entries (batches) to retain; must be positive.
protected val minLogEntriesToMaintain: Int = sparkSession.sessionState.conf.minBatchesToRetain
require(minLogEntriesToMaintain > 0, "minBatchesToRetain has to be positive")

/**
 * A lock used to wait/notify when batches complete. Use a fair lock to avoid thread starvation.
 */
protected val awaitProgressLock = new ReentrantLock(true)
protected val awaitProgressLockCondition = awaitProgressLock.newCondition()

// Counted down in runStream: initializationLatch once state flips to ACTIVE, startLatch once
// the QueryStartedEvent has been posted, terminationLatch at the very end of cleanup.
private val initializationLatch = new CountDownLatch(1)
private val startLatch = new CountDownLatch(1)
private val terminationLatch = new CountDownLatch(1)

// Fully-qualified URI string form of the user-supplied checkpoint root directory.
val resolvedCheckpointRoot = {
  val checkpointPath = new Path(checkpointRoot)
  val fs = checkpointPath.getFileSystem(sparkSession.sessionState.newHadoopConf())
  checkpointPath.makeQualified(fs.getUri, fs.getWorkingDirectory).toUri.toString
}

// The analyzed logical plan of the streaming query; forced in runStream so that the
// sources get created before batches start.
def logicalPlan: LogicalPlan

/**
 * Tracks how much data we have processed and committed to the sink or state store from each
 * input source.
 * Only the scheduler thread should modify this field, and only in atomic steps.
 * Other threads should make a shallow copy if they are going to access this field more than
 * once, since the field's value may change at any time.
 */
@volatile
var committedOffsets = new StreamProgress

/**
 * Tracks the offsets that are available to be processed, but have not yet be committed to the
 * sink.
 * Only the scheduler thread should modify this field, and only in atomic steps.
 * Other threads should make a shallow copy if they are going to access this field more than
 * once, since the field's value may change at any time.
 */
@volatile
var availableOffsets = new StreamProgress

/** The current batchId or -1 if execution has not yet been initialized. */
protected var currentBatchId: Long = -1
/** Metadata associated with the whole query */
// Read from the "metadata" checkpoint file if present; otherwise a fresh id is generated
// and persisted — this is what makes `id` stable across restarts.
protected val streamMetadata: StreamMetadata = {
  val metadataPath = new Path(checkpointFile("metadata"))
  val hadoopConf = sparkSession.sessionState.newHadoopConf()
  StreamMetadata.read(metadataPath, hadoopConf).getOrElse {
    val newMetadata = new StreamMetadata(UUID.randomUUID.toString)
    StreamMetadata.write(newMetadata, metadataPath, hadoopConf)
    newMetadata
  }
}

/** Metadata associated with the offset seq of a batch in the query. */
protected var offsetSeqMetadata = OffsetSeqMetadata(
  batchWatermarkMs = 0, batchTimestampMs = 0, sparkSession.conf)

/**
 * A map of current watermarks, keyed by the position of the watermark operator in the
 * physical plan.
 *
 * This state is 'soft state', which does not affect the correctness and semantics of watermarks
 * and is not persisted across query restarts.
 * The fault-tolerant watermark state is in offsetSeqMetadata.
 */
protected val watermarkMsMap: MutableMap[Int, Long] = MutableMap()

// `id` is persisted in streamMetadata (stable across restarts); `runId` is fresh per run.
override val id: UUID = UUID.fromString(streamMetadata.id)
override val runId: UUID = UUID.randomUUID

/**
 * Pretty identified string of printing in logs. Format is
 * If name is set "queryName [id = xyz, runId = abc]" else "[id = xyz, runId = abc]"
 */
protected val prettyIdString =
  Option(name).map(_ + " ").getOrElse("") + s"[id = $id, runId = $runId]"

/**
 * A list of unique sources in the query plan. This will be set when generating logical plan.
 */
@volatile protected var uniqueSources: Seq[BaseStreamingSource] = Seq.empty

/** Defines the internal state of execution */
protected val state = new AtomicReference[State](INITIALIZING)

@volatile
var lastExecution: IncrementalExecution = _

/** Holds the most recent input data for each source. */
protected var newData: Map[BaseStreamingSource, LogicalPlan] = _

// Set only from runStream's catch block; null while the query is healthy.
@volatile
protected var streamDeathCause: StreamingQueryException = null

/* Get the call site in the caller thread; will pass this into the micro batch thread */
private val callSite = Utils.getCallSite()

/** Used to report metrics to coda-hale. This uses id for easier tracking across restarts. */
lazy val streamMetrics = new MetricsReporter(
  this, s"spark.streaming.${Option(name).getOrElse(id)}")
/**
 * The thread that runs the micro-batches of this stream. Note that this thread must be
 * [[org.apache.spark.util.UninterruptibleThread]] to workaround KAFKA-1894: interrupting a
 * running `KafkaConsumer` may cause endless loop.
 */
// Made a daemon and started in start(); its body delegates to runStream().
val queryExecutionThread: QueryExecutionThread =
  new QueryExecutionThread(s"stream execution thread for $prettyIdString") {
    override def run(): Unit = {
      // To fix call site like "run at <unknown>:0", we bridge the call site from the caller
      // thread to this micro batch thread
      sparkSession.sparkContext.setCallSite(callSite)
      runStream()
    }
  }

/**
 * A write-ahead-log that records the offsets that are present in each batch. In order to ensure
 * that a given batch will always consist of the same data, we write to this log *before* any
 * processing is done. Thus, the Nth record in this log indicated data that is currently being
 * processed and the N-1th entry indicates which offsets have been durably committed to the sink.
 */
val offsetLog = new OffsetSeqLog(sparkSession, checkpointFile("offsets"))

/**
 * A log that records the batch ids that have completed. This is used to check if a batch was
 * fully processed, and its output was committed to the sink, hence no need to process it again.
 * This is used (for instance) during restart, to help identify which batch to run next.
 */
val commitLog = new CommitLog(sparkSession, checkpointFile("commits"))
/** Whether all fields of the query have been initialized */
private def isInitialized: Boolean = state.get != INITIALIZING

/** Whether the query is currently active or not */
override def isActive: Boolean = state.get != TERMINATED

/** Returns the [[StreamingQueryException]] if the query was terminated by an exception. */
override def exception: Option[StreamingQueryException] = Option(streamDeathCause)

/** Returns the path of a file with `name` in the checkpoint directory. */
protected def checkpointFile(name: String): String =
  new Path(new Path(resolvedCheckpointRoot), name).toUri.toString
/**
 * Starts the execution. This returns only after the thread has started and [[QueryStartedEvent]]
 * has been posted to all the listeners.
 */
def start(): Unit = {
  logInfo(s"Starting $prettyIdString. Use $resolvedCheckpointRoot to store the query checkpoint.")
  val executionThread = queryExecutionThread
  executionThread.setDaemon(true)
  executionThread.start()
  // Block until the stream thread is up and the QueryStartedEvent has been posted.
  startLatch.await()
}
/**
 * Run the activated stream until stopped.
 *
 * @param sparkSessionForStream the isolated (cloned) session the batches must run with;
 *                              created in runStream with adaptive execution and CBO disabled.
 */
protected def runActivatedStream(sparkSessionForStream: SparkSession): Unit
/**
* Activate the stream and then wrap a callout to runActivatedStream, handling start and stop.
*
* Note that this method ensures that [[QueryStartedEvent]] and [[QueryTerminatedEvent]] are
* posted such that listeners are guaranteed to get a start event before a termination.
* Furthermore, this method also ensures that [[QueryStartedEvent]] event is posted before the
* `start()` method returns.
*/
private def runStream(): Unit = {
  // Lifecycle: post QueryStartedEvent, run batches until stop/failure, then (in `finally`)
  // tear down sources, post QueryTerminatedEvent, and wake any threads awaiting progress.
  try {
    sparkSession.sparkContext.setJobGroup(runId.toString, getBatchDescriptionString,
      interruptOnCancel = true)
    sparkSession.sparkContext.setLocalProperty(StreamExecution.QUERY_ID_KEY, id.toString)
    if (sparkSession.sessionState.conf.streamingMetricsEnabled) {
      sparkSession.sparkContext.env.metricsSystem.registerSource(streamMetrics)
    }

    // `postEvent` does not throw non fatal exception.
    postEvent(new QueryStartedEvent(id, runId, name))

    // Unblock starting thread
    startLatch.countDown()

    // While active, repeatedly attempt to run batches.
    SparkSession.setActiveSession(sparkSession)

    updateStatusMessage("Initializing sources")
    // force initialization of the logical plan so that the sources can be created
    logicalPlan

    // Isolated spark session to run the batches with.
    val sparkSessionForStream = sparkSession.cloneSession()
    // Adaptive execution can change num shuffle partitions, disallow
    sparkSessionForStream.conf.set(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key, "false")
    // Disable cost-based join optimization as we do not want stateful operations to be rearranged
    sparkSessionForStream.conf.set(SQLConf.CBO_ENABLED.key, "false")
    offsetSeqMetadata = OffsetSeqMetadata(
      batchWatermarkMs = 0, batchTimestampMs = 0, sparkSessionForStream.conf)

    if (state.compareAndSet(INITIALIZING, ACTIVE)) {
      // Unblock `awaitInitialization`
      initializationLatch.countDown()
      runActivatedStream(sparkSessionForStream)
      updateStatusMessage("Stopped")
    } else {
      // `stop()` is already called. Let `finally` finish the cleanup.
    }
  } catch {
    case e if isInterruptedByStop(e) =>
      // interrupted by stop()
      updateStatusMessage("Stopped")
    case e: IOException if e.getMessage != null
      && e.getMessage.startsWith(classOf[InterruptedException].getName)
      && state.get == TERMINATED =>
      // This is a workaround for HADOOP-12074: `Shell.runCommand` converts `InterruptedException`
      // to `new IOException(ie.toString())` before Hadoop 2.8.
      updateStatusMessage("Stopped")
    case e: Throwable =>
      // Any other failure: record it so `exception` reports it, then log and maybe rethrow.
      streamDeathCause = new StreamingQueryException(
        toDebugString(includeLogicalPlan = isInitialized),
        s"Query $prettyIdString terminated with exception: ${e.getMessage}",
        e,
        committedOffsets.toOffsetSeq(sources, offsetSeqMetadata).toString,
        availableOffsets.toOffsetSeq(sources, offsetSeqMetadata).toString)
      logError(s"Query $prettyIdString terminated with error", e)
      updateStatusMessage(s"Terminated with exception: ${e.getMessage}")
      // Rethrow the fatal errors to allow the user using `Thread.UncaughtExceptionHandler` to
      // handle them
      if (!NonFatal(e)) {
        throw e
      }
  } finally queryExecutionThread.runUninterruptibly {
    // The whole `finally` block must run inside `runUninterruptibly` to avoid being interrupted
    // when a query is stopped by the user. We need to make sure the following codes finish
    // otherwise it may throw `InterruptedException` to `UncaughtExceptionHandler` (SPARK-21248).

    // Release latches to unblock the user codes since exception can happen in any place and we
    // may not get a chance to release them
    startLatch.countDown()
    initializationLatch.countDown()

    try {
      stopSources()
      state.set(TERMINATED)
      currentStatus = status.copy(isTriggerActive = false, isDataAvailable = false)

      // Update metrics and status
      sparkSession.sparkContext.env.metricsSystem.removeSource(streamMetrics)

      // Notify others
      sparkSession.streams.notifyQueryTermination(StreamExecution.this)
      postEvent(
        new QueryTerminatedEvent(id, runId, exception.map(_.cause).map(Utils.exceptionString)))

      // Delete the temp checkpoint only when the query didn't fail
      if (deleteCheckpointOnStop && exception.isEmpty) {
        val checkpointPath = new Path(resolvedCheckpointRoot)
        try {
          val fs = checkpointPath.getFileSystem(sparkSession.sessionState.newHadoopConf())
          fs.delete(checkpointPath, true)
        } catch {
          case NonFatal(e) =>
            // Deleting temp checkpoint folder is best effort, don't throw non fatal exceptions
            // when we cannot delete them.
            logWarning(s"Cannot delete $checkpointPath", e)
        }
      }
    } finally {
      awaitProgressLock.lock()
      try {
        // Wake up any threads that are waiting for the stream to progress.
        awaitProgressLockCondition.signalAll()
      } finally {
        awaitProgressLock.unlock()
      }
      terminationLatch.countDown()
    }
  }
}
/**
 * Whether `e` is an interruption caused by an explicit `stop()` call.
 * Returns true only once the query has already transitioned to TERMINATED;
 * an interruption observed in any other state is treated as a real failure.
 */
private def isInterruptedByStop(e: Throwable): Boolean = {
  if (state.get == TERMINATED) {
    StreamExecution.isInterruptionException(e)
  } else {
    false
  }
}
/** Forwards a streaming-query listener event to the session's stream manager. */
override protected def postEvent(event: StreamingQueryListener.Event): Unit = {
  sparkSession.streams.postListenerEvent(event)
}
/**
 * Stops all streaming sources safely. Stopping is best effort: a failure to
 * stop one source is logged and does not prevent stopping the others.
 */
protected def stopSources(): Unit = {
  for (source <- uniqueSources) {
    try source.stop()
    catch {
      case NonFatal(e) =>
        logWarning(s"Failed to stop streaming source: $source. Resources may have leaked.", e)
    }
  }
}
/**
 * Blocks the current thread until processing for data from the given `source` has reached at
 * least the given `Offset`. This method is intended for use primarily when writing tests.
 */
private[sql] def awaitOffset(sourceIndex: Int, newOffset: Offset, timeoutMs: Long): Unit = {
  assertAwaitThread()
  // Re-evaluated on every loop iteration: true while the committed offset for
  // the source is still missing or not yet equal to the one we wait for.
  def notDone = {
    // Snapshot to avoid races between the `contains` check and the lookup.
    val localCommittedOffsets = committedOffsets
    if (sources == null) {
      // sources might not be initialized yet
      false
    } else {
      val source = sources(sourceIndex)
      !localCommittedOffsets.contains(source) || localCommittedOffsets(source) != newOffset
    }
  }

  while (notDone) {
    awaitProgressLock.lock()
    try {
      // Timed wait so query failure is noticed even without a progress signal.
      awaitProgressLockCondition.await(timeoutMs, TimeUnit.MILLISECONDS)
      if (streamDeathCause != null) {
        throw streamDeathCause
      }
    } finally {
      awaitProgressLock.unlock()
    }
  }
  logDebug(s"Unblocked at $newOffset for ${sources(sourceIndex)}")
}
/**
 * A flag to indicate that a batch has completed with no new data available.
 * `processAllAvailable` resets and polls this flag under `awaitProgressLock`;
 * `@volatile` because the writer runs on a different thread — presumably the
 * query execution thread (confirm against the batch-running code).
 */
@volatile protected var noNewData = false
/**
 * Assert that the await APIs are not called from the stream thread itself.
 * Doing so could dead-lock: e.g. calling any await API from
 * `StreamingQueryListener.onQueryStarted` would block the stream thread forever.
 */
private def assertAwaitThread(): Unit =
  if (Thread.currentThread eq queryExecutionThread) {
    throw new IllegalStateException(
      "Cannot wait for a query state from the same thread that is running the query")
  }
/**
 * Await until all fields of the query have been initialized.
 */
def awaitInitialization(timeoutMs: Long): Unit = {
  assertAwaitThread()
  require(timeoutMs > 0, "Timeout has to be positive")
  // Fail fast if the query already died before we start waiting.
  if (streamDeathCause != null) {
    throw streamDeathCause
  }
  initializationLatch.await(timeoutMs, TimeUnit.MILLISECONDS)
  // Re-check: the query may have failed while we were waiting.
  if (streamDeathCause != null) {
    throw streamDeathCause
  }
}
/**
 * Blocks until all currently available data has been processed, the query
 * stops, or the query fails (in which case the failure is rethrown).
 */
override def processAllAvailable(): Unit = {
  assertAwaitThread()
  if (streamDeathCause != null) {
    throw streamDeathCause
  }
  // Nothing to wait for if the query has already terminated.
  if (!isActive) return
  awaitProgressLock.lock()
  try {
    // Reset the flag; it is raised again once a batch completes with no new data.
    noNewData = false
    while (true) {
      // Timed wait so that termination/failure is noticed even without a signal.
      awaitProgressLockCondition.await(10000, TimeUnit.MILLISECONDS)
      if (streamDeathCause != null) {
        throw streamDeathCause
      }
      if (noNewData || !isActive) {
        return
      }
    }
  } finally {
    awaitProgressLock.unlock()
  }
}
/** Blocks until the query terminates, rethrowing the failure that killed it, if any. */
override def awaitTermination(): Unit = {
  assertAwaitThread()
  terminationLatch.await()
  if (streamDeathCause != null) {
    throw streamDeathCause
  }
}
/**
 * Blocks for at most `timeoutMs` until the query terminates.
 *
 * @return true if the query terminated within the timeout, false otherwise;
 *         rethrows the failure that killed the query, if any.
 */
override def awaitTermination(timeoutMs: Long): Boolean = {
  assertAwaitThread()
  require(timeoutMs > 0, "Timeout has to be positive")
  terminationLatch.await(timeoutMs, TimeUnit.MILLISECONDS)
  if (streamDeathCause != null) throw streamDeathCause
  !isActive
}
/** Expose for tests */
def explainInternal(extended: Boolean): String = {
  // `lastExecution` is only set after the first batch has been planned.
  if (lastExecution == null) {
    "No physical plan. Waiting for data."
  } else {
    val explain = StreamingExplainCommand(lastExecution, extended = extended)
    sparkSession.sessionState.executePlan(explain).executedPlan.executeCollect()
      .map(_.getString(0)).mkString("\\n")
  }
}
/** Prints the query's explain output (see `explainInternal`) to stdout. */
override def explain(extended: Boolean): Unit = {
  // scalastyle:off println
  println(explainInternal(extended))
  // scalastyle:on println
}
/** Shorthand for `explain(extended = false)`. */
override def explain(): Unit = explain(extended = false)
/** One-line description with the query's pretty id and current state. */
override def toString: String = {
  s"Streaming Query $prettyIdString [state = $state]"
}
/**
 * Multi-line debug description of the query used in error reporting.
 * The logical plan is appended only when requested — it may not be safe to
 * render before the query is fully initialized.
 */
private def toDebugString(includeLogicalPlan: Boolean): String = {
  val debugString =
    s"""|=== Streaming Query ===
        |Identifier: $prettyIdString
        |Current Committed Offsets: $committedOffsets
        |Current Available Offsets: $availableOffsets
        |
        |Current State: $state
        |Thread State: ${queryExecutionThread.getState}""".stripMargin
  if (includeLogicalPlan) {
    debugString + s"\\n\\nLogical Plan:\\n$logicalPlan"
  } else {
    debugString
  }
}
/**
 * Human-readable description of the current batch, rendered as an HTML
 * snippet (`<br/>`-separated). Batches before the first one show as "init".
 */
protected def getBatchDescriptionString: String = {
  val batchDescription = if (currentBatchId < 0) "init" else currentBatchId.toString
  val namePrefix = Option(name).map(_ + "<br/>").getOrElse("")
  namePrefix + s"id = $id<br/>runId = $runId<br/>batch = $batchDescription"
}
}
object StreamExecution {
  // Local property key used to tag Spark jobs with the owning query's id.
  val QUERY_ID_KEY = "sql.streaming.queryId"

  /**
   * Whether `e` is one of the known interruption exceptions, either directly
   * or wrapped (recursively) in one of the wrapper exceptions listed below.
   */
  def isInterruptionException(e: Throwable): Boolean = e match {
    // InterruptedException - thrown when an I/O operation is interrupted
    // ClosedByInterruptException - thrown when an I/O operation upon a channel is interrupted
    case _: InterruptedException | _: InterruptedIOException | _: ClosedByInterruptException =>
      true
    // The cause of the following exceptions may be one of the above exceptions:
    //
    // UncheckedIOException - thrown by codes that cannot throw a checked IOException, such as
    //                        BiFunction.apply
    // ExecutionException - thrown by codes running in a thread pool and these codes throw an
    //                      exception
    // UncheckedExecutionException - thrown by codes that cannot throw a checked
    //                               ExecutionException, such as BiFunction.apply
    case e2 @ (_: UncheckedIOException | _: ExecutionException | _: UncheckedExecutionException)
      if e2.getCause != null =>
      // Unwrap and test the cause recursively.
      isInterruptionException(e2.getCause)
    case _ =>
      false
  }
}
/**
 * A special thread to run the stream query. Some code requires running in the
 * QueryExecutionThread and will use `classOf[QueryExecutionThread]` to check.
 */
abstract class QueryExecutionThread(name: String) extends UninterruptibleThread(name)
| sahilTakiar/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala | Scala | apache-2.0 | 21,915 |
/*
* Copyright 2013-2016 Tsukasa Kitachi
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package configs.testutil.instance
import java.io.File
import java.nio.file.{Path, Paths}
import scalaprops.{Gen, ScalapropsScalaz}
import scalaz.Equal
object io {
  /**
   * Generates relative paths built from one or more non-empty alphabetic
   * segments (head + tail passed to `Paths.get`).
   */
  implicit lazy val pathGen: Gen[Path] =
    ScalapropsScalaz.nonEmptyListGen(Gen.nonEmptyString(Gen.alphaChar)).map(p => Paths.get(p.head, p.tail.toList: _*))

  // Equality delegates to Path#equals (universal equality).
  implicit lazy val pathEqual: Equal[Path] =
    Equal.equalA[Path]

  // Files are derived from the path generator.
  implicit lazy val fileGen: Gen[File] =
    pathGen.map(_.toFile)

  implicit lazy val fileEqual: Equal[File] =
    Equal.equalA[File]
}
| kxbmap/configs | core/src/test/scala/configs/testutil/instance/io.scala | Scala | apache-2.0 | 1,142 |
package com.artclod.mathml.scalar.apply
import org.junit.runner.RunWith
import play.api.test._
import play.api.test.Helpers._
import org.specs2.mutable._
import com.artclod.mathml._
import com.artclod.mathml.scalar._
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
// LATER try out http://rlegendi.github.io/specs2-runner/ and remove RunWith
// Unit spec for unary minus (ApplyMinusU): evaluation, free variables,
// constant folding (c), simplification (s), differentiation (d) and rendering.
// NOTE: spec description strings (including the "constains" typos) are runtime
// values and are intentionally left untouched here.
@RunWith(classOf[JUnitRunner])
class ApplyMinusUSpec extends Specification {

  "eval" should {
    "return negative of value" in {
      ApplyMinusU(`6`).eval().get must beEqualTo(-6)
    }
  }

  "variables" should {
    "be empty if element is constant" in {
      ApplyMinusU(`2`).variables must beEmpty
    }

    "be x if element constains an x" in {
      ApplyMinusU(x).variables must beEqualTo(Set("x"))
    }

    "be y if element constains a y" in {
      ApplyMinusU(y).variables must beEqualTo(Set("y"))
    }
  }

  "c" should {
    "return 0 if value is 0" in {
      ApplyMinusU(`0`).c.get must beEqualTo(`0`)
    }

    "return 1 if value is -1" in {
      ApplyMinusU(`-1`).c.get must beEqualTo(`1`)
    }

    "return negative of a value" in {
      ApplyMinusU(`3`).c.get must beEqualTo(`-3`)
    }

    "fail if not a constant " in {
      ApplyMinusU(x).c must beNone
    }
  }

  "s" should {
    "return constant if value is constant" in {
      ApplyMinusU(`-4`).s must beEqualTo(`4`)
    }

    "remain unchanged if nothing can be simplified" in {
      ApplyMinusU(x).s must beEqualTo(ApplyMinusU(x))
    }
  }

  "d" should {
    "return negative of values derivative" in {
      ApplyMinusU(F).dx must beEqualTo(ApplyMinusU(Fdx))
    }
  }

  "toText" should {
    "handle -4" in {
      ApplyMinusU(4).toMathJS must beEqualTo("-4")
    }
  }
}
// spark-shell / REPL transcript, not a compilable file: several definitions
// below (notably `extract`) are re-entered and shadow earlier ones, which is
// normal in an interactive session.

// --- word-count style demo over JSON log files ---
//val logFile = "IdeaProjects/glogs/2015-02-28-7.json" // Should be some file on your system
val logFile = "IdeaProjects/glogs/*.json" // Should be some file on your system
val logData = sc.textFile(logFile, 2).cache()
val numAs = logData.filter(line => line.contains("a")).count()
val numBs = logData.filter(line => line.contains("b")).count()
println("Lines with a: %s, Lines with b: %s".format(numAs, numBs))

// --- regex-based field extraction and grouping ---
def extract(name: String)(input: String): Option[String] = {
  val pattern = s""""$name":"(\\\\w*)"""".r
  pattern.findFirstMatchIn(input).map(_.group(1))
}
def resultList = logData.map(extract("type")(_))
def resultsWithNones = resultList.map { case Some(x) => x; case None => "{NONE}" }
def groupedList = resultsWithNones.groupBy(p => p)
val rez = groupedList.map { case (k: String, v: List[String]) => (k, v.size) }

// --- json4s variant of extract (shadows the previous definition) ---
def extract(name: String)(input: String): Option[String] = {
  implicit val formats = DefaultFormats
  (parse(input) \\ name).toSome.map(_.extract[String])
}

// --- back to the regex variant, now over a union of per-file RDDs ---
def extract(name: String)(input: String): Option[String] = {
  val pattern = s""""$name":"(\\\\w*)"""".r
  pattern.findFirstMatchIn(input).map(_.group(1))
}
val files = (new java.io.File("IdeaProjects/glogs/")).listFiles.filter(f => f.getName.endsWith(".json"))
val logDataPF = files.map(f => sc.textFile(f.getAbsolutePath))
val logData = sc.union(logDataPF)
def resultList = logData.map(extract("type")(_))
val rez = resultList.map(v => (v, 1)).reduceByKey((a, b) => a + b)

// --- Spark SQL / DataFrame exploration of the same data ---
val sqlContext = new org.apache.spark.sql.SQLContext(sc)
val df = sqlContext.read.json("/Users/olivertupran/IdeaProjects/glogs/2015-01-01-*.json")
df.show()
df.select("name").show()
import sqlContext.implicits._
val interest = sc.makeRDD(List("DVSB", "MaxMillion", "ProjectDayDrum", "oliver")).toDF("name")
interest.registerTempTable("interest")
val query = """SELECT pt.actor.login, pt.type, count(*) as count
FROM interest ti LEFT JOIN pt ON (ti.name = pt.actor.login)
WHERE pt.type='ForkEvent'
GROUP BY actor.login, type
ORDER BY count desc
"""
sqlContext.sql(query).show
val query = """SELECT ti.name, pt.name, pt.type as count
FROM interest ti LEFT JOIN pt ON (ti.name = pt.actor.login)
--WHERE pt.type='ForkEvent'
GROUP BY ti.name, pt.actor.login, pt.type
ORDER BY count desc
"""
sqlContext.sql(query).show
| tupol/spark-learning | src/snippets/test.scala | Scala | apache-2.0 | 2,294 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtInteger}
import uk.gov.hmrc.ct.ct600.v2.retriever.CT600BoxRetriever
case class B15(value: Int) extends CtBoxIdentifier(name = "Income within Sch D Case VI") with CtInteger

// TODO: implement the calculation below (B12 + B13 + B14) once supported.
object B15 extends Calculated[B15, CT600BoxRetriever] {
  /**
   * NOT YET IMPLEMENTED: always throws scala.NotImplementedError (`???`).
   * The intended calculation (kept commented out) sums boxes B12, B13 and B14.
   */
  override def calculate(fieldValueRetriever: CT600BoxRetriever): B15 = {
    // B15(fieldValueRetriever.retrieveB12() + fieldValueRetriever.retrieveB13() + fieldValueRetriever.retrieveB14())
    ???
  }
}
| keithhall/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v2/B15.scala | Scala | apache-2.0 | 1,139 |
package at.vizu.s2n.generator.expression
import at.vizu.s2n.generator.{GeneratorContext, GeneratorUtils}
import at.vizu.s2n.types.symbol.{BaseTypes, TType}
/**
* Phil on 29.11.15.
*/
case class DoWhileExpression(baseTypes: BaseTypes, condExpr: Expression, body: Expression) extends Expression {

  /** A do-while loop is a statement: it evaluates to Unit. */
  override def exprTpe: TType = baseTypes.unit

  /** Renders `do <body> while (<cond>);`, merging both sub-expression contexts. */
  override def generate: GeneratorContext = {
    val cond = condExpr.content
    val loopBody = body.content
    val rendered = s"do ${loopBody.value} while (${cond.value});"
    GeneratorUtils.mergeGeneratorContexts(Vector(cond, loopBody), givenContent = rendered)
  }

  /** The generated statement already ends with its own semicolon. */
  override def skipSemiColon: Boolean = true

  /** A do-while produces no value, so "returning" it is just emitting it. */
  override def generateReturn: GeneratorContext = generate
}
| viZu/nasca | src/main/scala/at/vizu/s2n/generator/expression/DoWhileExpression.scala | Scala | apache-2.0 | 732 |
package com.lorandszakacs.sg.model
import java.net.URL
import com.lorandszakacs.util.time._
import com.lorandszakacs.util.mongodb.Annotations
/**
* @author Lorand Szakacs, lsz@lorandszakacs.com
* @since 16 Mar 2015
*
*/
object M {
  /** Factory abstraction over the two concrete M kinds (SG and HF). */
  sealed trait MFactory[T <: M] {
    def apply(photoSetURL: URL, name: Name, photoSets: List[PhotoSet]): T

    // Human-readable tag of the produced kind, e.g. "SG".
    def name: String
  }

  object SGFactory extends MFactory[SG] {
    // Photo sets are normalized to oldest-first order on construction.
    override def apply(photoSetURL: URL, name: Name, photoSets: List[PhotoSet]): SG =
      SG(photoSetURL = photoSetURL, name = name, photoSets = photoSets.sortBy(_.date))

    override def name: String = "SG"
  }

  object HFFactory extends MFactory[HF] {
    // Photo sets are normalized to oldest-first order on construction.
    override def apply(photoSetURL: URL, name: Name, photoSets: List[PhotoSet]): HF = {
      HF(photoSetURL = photoSetURL, name = name, photoSets = photoSets.sortBy(_.date))
    }

    override def name: String = "HF"
  }
}
/**
 * Aggregate of all known Ms, partitioned by kind.
 *
 * @param all
 *   is a union of [[sgs]] and [[hfs]]
 */
case class Ms(
  sgs: List[SG],
  hfs: List[HF],
  all: List[M],
) extends Product with Serializable {

  /** The most recent M, if any exist (relies on `all` being newest-first). */
  def newestM: Option[M] = all.headOption

  /** Look up any M by name, regardless of kind. */
  def ml(name: Name): Option[M] = all.collectFirst { case m if m.name == name => m }

  /** Look up an SG by name. */
  def sg(name: Name): Option[SG] = sgs.collectFirst { case s if s.name == name => s }

  /** Look up an HF by name. */
  def hf(name: Name): Option[HF] = hfs.collectFirst { case h if h.name == name => h }

  def sgNames: List[Name] = sgs.map(_.name)

  def hfNames: List[Name] = hfs.map(_.name)

  def allNames: List[Name] = all.map(_.name)
}
sealed trait M extends Product with Serializable {
  // The concrete self type (SG or HF), so the fluent setters below return
  // the precise subtype rather than the base trait.
  type MType <: M

  def photoSetURL: URL

  def name: Name

  def photoSets: List[PhotoSet]

  def isHF: Boolean

  def isSG: Boolean

  def asSG: Option[SG]

  // Unsafe downcast: throws if this is not an SG.
  def makeSG: SG

  def asHF: Option[HF]

  // Unsafe downcast: throws if this is not an HF.
  def makeHF: HF

  def stringifyType: String

  def updatePhotoSets(newPhotoSets: List[PhotoSet]): MType

  final def numberOfSets: Int = photoSets.length

  final def numberOfPhotos: Int = photoSets.map(_.photos.length).sum

  final def photoSetsOldestFirst: List[PhotoSet] =
    this.photoSets.sortBy(_.date)

  final def photoSetsNewestFirst: List[PhotoSet] =
    this.photoSetsOldestFirst.reverse

  final def setsByNewestFirst: MType = updatePhotoSets(this.photoSets.sortBy(_.date).reverse)

  final def setsByOldestFirst: MType = updatePhotoSets(this.photoSets.sortBy(_.date))

  override def toString: String =
    s"""|---------${this.getClass.getSimpleName}: ${name.name} : ${photoSets.length}---------
        |url=${photoSetURL.toExternalForm}
        |${photoSetsNewestFirst.mkString("", "\\n", "")}
        |""".stripMargin
}
object Name {
  /** Names are stored normalized: trimmed and lower-cased. */
  def apply(name: String): Name = new Name(name.trim.toLowerCase)
}

final class Name private (
  val name: String,
) {

  override def toString: String = s"Name($name)"

  /** Display form: first letter capitalized. */
  def externalForm: String = s"${name.capitalize}"

  /**
   * HFs lose prefix, or suffix underscores in names
   * when they become SGs, therefore this is useful to determine
   * if one has become an SG.
   *
   * @return
   */
  def stripUnderscore: Name = {
    // Same strip order as the original chain: "_", then "__", then "___",
    // applied first as prefixes, then as suffixes.
    val runs = List("_", "__", "___")
    val withoutPrefix = runs.foldLeft(name)(_ stripPrefix _)
    Name(runs.foldLeft(withoutPrefix)(_ stripSuffix _))
  }

  override def equals(other: Any): Boolean = other match {
    case that: Name => that.name == this.name
    case _          => false
  }

  override def hashCode(): Int = 31 * name.hashCode
}
object PhotoSetTitle {
  /** Titles are stored normalized: trimmed, upper-cased, with spaces removed. */
  def apply(name: String): PhotoSetTitle = {
    val normalized = name.trim.toUpperCase.replace(" ", "").replace("\\t", " ")
    new PhotoSetTitle(normalized)
  }
}

final class PhotoSetTitle private (
  val name: String,
) {

  override def toString: String = s"PhotoSetTitle($name)"

  /** Display form: lower-cased with a leading capital. */
  def externalForm: String = s"${name.toLowerCase.capitalize}"

  override def equals(other: Any): Boolean = other match {
    case that: PhotoSetTitle => that.name == this.name
    case _                   => false
  }

  override def hashCode(): Int = 31 * name.hashCode
}
/** The SG kind of M; all kind-discriminating members are fixed accordingly. */
final case class SG(
  photoSetURL: URL,
  @Annotations.Key("_id") name: Name,
  photoSets: List[PhotoSet],
) extends M with Product with Serializable {
  override type MType = SG

  override def isSG: Boolean = true

  override def isHF: Boolean = false

  override def stringifyType: String = "SG"

  override def asSG: Option[SG] = Some(this)

  override def makeSG: SG = this

  override def asHF: Option[HF] = None

  override def makeHF: HF = throw new AssertionError("attempted to cast a SG to a HF")

  override def updatePhotoSets(newPhotoSets: List[PhotoSet]): SG = copy(photoSets = newPhotoSets)
}
/** The HF kind of M; all kind-discriminating members are fixed accordingly. */
final case class HF(
  photoSetURL: URL,
  @Annotations.Key("_id") name: Name,
  photoSets: List[PhotoSet],
) extends M with Product with Serializable {
  override type MType = HF

  override def isHF: Boolean = true

  override def isSG: Boolean = false

  override def stringifyType: String = "HF"

  override def asHF: Option[HF] = Some(this)

  override def makeHF: HF = this

  override def asSG: Option[SG] = None

  override def makeSG: SG = throw new AssertionError("attempted to cast HF as SG")

  override def updatePhotoSets(newPhotoSets: List[PhotoSet]): HF = copy(photoSets = newPhotoSets)
}
final case class PhotoSet(
  url: URL,
  title: PhotoSetTitle,
  date: LocalDate,
  photos: List[Photo] = Nil,
  // Not persisted; transient marker for whether the set was scraped as an HF set.
  @Annotations.Ignore()
  isHFSet: Option[Boolean] = None,
) extends Product with Serializable {

  // The set's URL doubles as its identity.
  def id: String = url.toExternalForm

  override def toString: String =
    s"""
       |title = ${title.name}
       |date = ${Util.dateTimeFormat.format(date)}
       |url = ${url.toExternalForm}
       |${isHFSet.map(b => s"isHF = $b").getOrElse("")}
       |${photos.mkString("{\\n\\t", "\\n\\t", "\\n}")}
       |${"_________________"}
    """.stripMargin
}
/** A single photo: the full-size URL, its thumbnail, and its position in the set. */
final case class Photo(
  url: URL,
  thumbnailURL: URL,
  index: Int,
) extends Product with Serializable {

  override def toString: String = s"$url :: $thumbnailURL"
}
private[model] object Util {
  /**
   * Shared date formatter used by `PhotoSet#toString`.
   *
   * Uses "yyyy" (calendar year) instead of the original "YYYY" (week-based
   * year): with "YYYY", dates near the new year format with the wrong year,
   * e.g. 2019-12-31 would render as "2020-12-31".
   */
  final val dateTimeFormat: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd")
}
| lorandszakacs/sg-downloader | sg-repo/src/main/scala/com/lorandszakacs/sg/model/sgs.scala | Scala | apache-2.0 | 6,056 |
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2013 Alexey Aksenov ezh@ezh.msk.ru
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Global License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company Β«MEZHGALAKTICHESKIJ TORGOVYJ ALIANSΒ»,
* Limited Liability Company Β«MEZHGALAKTICHESKIJ TORGOVYJ ALIANSΒ» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Global License for more details.
* You should have received a copy of the GNU Affero General Global License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Global License.
*
* In accordance with Section 7(b) of the GNU Affero General Global License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: ezh@ezh.msk.ru
*/
package org.digimead.tabuddy.desktop.logic.action
/*
import scala.collection.mutable
import org.digimead.digi.lib.aop.log
import org.digimead.digi.lib.log.api.XLoggable
import org.digimead.tabuddy.desktop.core.Messages
import org.digimead.tabuddy.desktop.core.Resources
import org.digimead.tabuddy.desktop.core.Resources.resources2implementation
import org.digimead.tabuddy.desktop.logic.Data
import org.digimead.tabuddy.desktop.logic.operation.OperationModelNew
import org.digimead.tabuddy.desktop.logic.operation.OperationModelOpen
import org.digimead.tabuddy.desktop.logic.payload.Payload
import org.digimead.tabuddy.desktop.logic.payload.Payload.payload2implementation
import org.digimead.tabuddy.desktop.core.support.App
import org.digimead.tabuddy.desktop.core.support.App.app2implementation
import org.digimead.tabuddy.desktop.core.support.SymbolValidator
import org.digimead.tabuddy.desktop.core.support.Validator
import org.digimead.tabuddy.desktop.core.support.WritableValue
import org.digimead.tabuddy.desktop.core.support.WritableValue.wrapper2underlying
import org.digimead.tabuddy.model.Model
import org.digimead.tabuddy.model.Model.model2implementation
import org.eclipse.core.databinding.observable.value.IValueChangeListener
import org.eclipse.core.databinding.observable.value.ValueChangeEvent
import org.eclipse.core.runtime.jobs.Job
import org.eclipse.jface.action.ControlContribution
import org.eclipse.jface.databinding.viewers.ObservableListContentProvider
import org.eclipse.jface.layout.RowLayoutFactory
import org.eclipse.jface.viewers.ComboViewer
import org.eclipse.jface.viewers.ISelectionChangedListener
import org.eclipse.jface.viewers.IStructuredSelection
import org.eclipse.jface.viewers.LabelProvider
import org.eclipse.jface.viewers.SelectionChangedEvent
import org.eclipse.jface.viewers.StructuredSelection
import org.eclipse.swt.SWT
import org.eclipse.swt.events.KeyAdapter
import org.eclipse.swt.events.KeyEvent
import org.eclipse.swt.events.ModifyEvent
import org.eclipse.swt.events.ModifyListener
import org.eclipse.swt.layout.GridData
import org.eclipse.swt.layout.GridLayout
import org.eclipse.swt.layout.RowData
import org.eclipse.swt.widgets.Combo
import org.eclipse.swt.widgets.Composite
import org.eclipse.swt.widgets.Control
import org.eclipse.swt.widgets.Label
import org.eclipse.swt.widgets.Widget
class ContributionSelectModel extends ControlContribution(ContributionSelectModel.id) with XLoggable {
val id = getClass.getName
@volatile protected var combo: Option[ComboViewer] = None
@volatile protected var label: Option[Label] = None
/** Id text value. */
protected val idValue = WritableValue("")
ContributionSelectModel.instance += (ContributionSelectModel.this) -> {}
def comboMinimumWidth = 80
def comboMaximumWidth = (window.getShell().getBounds().width / 4).toInt
/** Create contribution control. */
override protected def createControl(parent: Composite): Control = {
val parentShell = App.findShell(parent)
val container = new Composite(parent, SWT.NONE)
val layout = RowLayoutFactory.fillDefaults().wrap(false).spacing(0).create()
layout.marginLeft = 3
layout.center = true
container.setLayout(layout)
val label = createLabel(container)
ContributionSelectModel.this.label = Option(label)
val comboViewer = createCombo(container)
ContributionSelectModel.this.combo = Option(comboViewer)
//
// initialize combo
//
val context = App.getWindowContext(parent.getShell())
// propagate idValue -> context Data.Id.modelIdUserInput
idValue.addChangeListener { (id, event) =>
if (id == Messages.default_text || id == Payload.defaultModel.eId.name)
context.set(Data.Id.modelIdUserInput, "")
else
context.set(Data.Id.modelIdUserInput, id)
}
// acquire Data.modelName -> combo
Data.modelName.addChangeListener { (name, event) =>
if (name == Payload.defaultModel.eId.name) {
comboViewer.getCombo.setEnabled(true)
} else {
comboViewer.getCombo.setEnabled(false)
comboViewer.setSelection(new StructuredSelection(name), true)
}
}
// acquire Data.availableGraphs -> combo
comboViewer.setInput(Data.availableGraphs.underlying)
Data.availableGraphs.addChangeListener { (event) => App.exec { resizeCombo() } }
idValue.value = Messages.default_text
resizeCombo()
container
}
protected def createLabel(parent: Composite): Label = {
val container = new Composite(parent, SWT.NONE)
container.setLayout(new GridLayout(1, false))
val label = new Label(container, SWT.NONE)
label.setAlignment(SWT.CENTER);
label.setLayoutData(new GridData(SWT.FILL, SWT.FILL, true, true, 1, 1))
label.setText(Messages.localModel_text + ":")
label.setToolTipText(Messages.localModel_tooltip_text)
label.setFont(Resources.fontSmall)
label
}
/** Create combo box for toolbar. */
protected def createCombo(parent: Composite): ComboViewer = {
val viewer = new ComboViewer(parent, SWT.NONE)
val validator = SymbolValidator(viewer.getCombo, true) {
(validator, event) => validateComboText(validator, event.getSource.asInstanceOf[Combo].getText, event.doit)
}
viewer.getCombo.setToolTipText(Messages.localModel_tooltip_text)
viewer.setContentProvider(new ObservableListContentProvider())
viewer.setLabelProvider(new ContributionSelectModel.ComboLabelProvider())
// there is no built in support for combo text field property binding
// bind combo -> modelComboText
viewer.getCombo.addModifyListener(new ModifyListener() {
def modifyText(e: ModifyEvent) = {
validator.withDecoration(_.hide())
val newValue = e.getSource().asInstanceOf[Combo].getText()
if (idValue.getValue() != newValue)
idValue.setValue(newValue)
}
})
// there is no built in support for combo text field property binding
// bind idValue -> combo
idValue.addValueChangeListener(new IValueChangeListener() {
def handleValueChange(event: ValueChangeEvent) {
val newValue = event.diff.getNewValue().asInstanceOf[String]
combo.foreach { viewer =>
val combo = viewer.getCombo
if (combo.getText() != newValue)
combo.setText(newValue)
}
}
})
viewer.addSelectionChangedListener(new ISelectionChangedListener() {
override def selectionChanged(event: SelectionChangedEvent) = event.getSelection() match {
case selection: IStructuredSelection if !selection.isEmpty() =>
validateComboText(validator, event.getSource.asInstanceOf[ComboViewer].getCombo().getText, true)
case selection =>
}
})
viewer.getCombo.addKeyListener(new KeyAdapter() { override def keyReleased(e: KeyEvent) = if (e.keyCode == SWT.CR) onEnter(e.widget) })
viewer.getCombo.setEnabled(Data.modelName.value == Payload.defaultModel.eId.name)
viewer.getCombo.setLayoutData(new RowData(comboMinimumWidth, SWT.DEFAULT))
viewer
}
/** Get combo text. */
def getComboText() = if (Model.eId == Payload.defaultModel.eId) Messages.default_text else Model.eId.name
/** On Enter key event. */
protected def onEnter(widget: Widget) = if (idValue.value.nonEmpty) {
val id = idValue.value
if (id.nonEmpty)
Payload.listModels.find(marker => marker.isValid && marker.id.name == id) match {
case Some(marker) =>
OperationModelOpen(Some(Model.eId), Symbol(id), false) foreach { operation =>
operation.getExecuteJob() match {
case Some(job) =>
job.setPriority(Job.SHORT)
job.schedule()
case None =>
log.fatal(s"Unable to create job for ${operation}.")
}
}
case None =>
OperationModelNew(Some(id), None, true) foreach { operation =>
operation.getExecuteJob() match {
case Some(job) =>
job.setPriority(Job.SHORT)
job.schedule()
case None =>
log.fatal(s"Unable to create job for ${operation}.")
}
}
}
}
/** Resize combo viewer */
protected def resizeCombo() = for {
combo <- combo
control = combo.getCombo()
} {
val prefferedWidth = control.computeSize(SWT.DEFAULT, SWT.DEFAULT, true).x
val width = math.min(math.max(comboMinimumWidth, prefferedWidth), comboMaximumWidth)
control.setLayoutData(new RowData(width, SWT.DEFAULT))
control.getParent().layout()
}
/** Validates a text in the the combo viewer */
protected def validateComboText(validator: Validator, text: String, valid: Boolean) = if (!valid)
validator.withDecoration { validator.showDecorationError(_) }
else
validator.withDecoration { _.hide() }
protected def window = combo.get.getControl().getShell()
}
object ContributionSelectModel {
/** All SelectModel instances. */
private val instance = new mutable.WeakHashMap[ContributionSelectModel, Unit] with mutable.SynchronizedMap[ContributionSelectModel, Unit]
/** Singleton identificator. */
val id = getClass.getName().dropRight(1)
class ComboLabelProvider extends LabelProvider {
override def getText(element: AnyRef): String = element match {
case value: String if value == Payload.defaultModel.eId.name => Messages.default_text
case value => super.getText(element)
}
}
}
*/ | digimead/digi-TABuddy-desktop | part-logic/src/main/scala/org/digimead/tabuddy/desktop/logic/ui/action/ContributionSelectModel.scala | Scala | agpl-3.0 | 11,692 |
/*
* Copyright 2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package http {
import _root_.net.liftweb.common._
import _root_.net.liftweb.util._
import Helpers._
import _root_.scala.xml._
import _root_.scala.reflect.Manifest
private[liftweb] trait AbstractScreen extends Factory {
  override def toString = screenName

  // Field containers registered on this screen, in registration order.
  @volatile private[this] var _fieldList: List[() => FieldContainer] = Nil

  /**
   * any additional parameters that need to be put in the on the form (e.g., mime type)
   */
  def additionalAttributes: MetaData =
    if (hasUploadField) new UnprefixedAttribute("enctype", Text("multipart/form-data"), Null) else Null

  // Append a field container to this screen (invoked from Field's constructor).
  protected def _register(field: () => FieldContainer) {
    _fieldList = _fieldList ::: List(field)
  }

  // True when any field on the screen participates in a multi-part mime upload.
  protected def hasUploadField: Boolean = screenFields.foldLeft(false)(_ | _.uploadField_?)

  /**
   * A list of fields in this screen
   */
  def screenFields: List[BaseField] = _fieldList.flatMap(_.apply().allFields)

  def screenTop: Box[Elem] = Empty

  def screenBottom: Box[Elem] = Empty

  /**
   * The name of the screen. Override this to change the screen name
   */
  def screenName: String = "Screen"

  def screenNameAsHtml: NodeSeq = Text(screenName)

  def screenTitle: NodeSeq = screenNameAsHtml

  def screenTopText: Box[String] = Empty

  def screenTopTextAsHtml: Box[NodeSeq] = screenTopText.map(Text.apply)

  def cancelButton: Elem = <button>{S.??("Cancel")}</button>

  def finishButton: Elem = <button>{S.??("Finish")}</button>

  implicit def boxOfScreen[T <: AbstractScreen](in: T): Box[T] = Box !! in

  // Validation errors collected from every field on the screen.
  def validate: List[FieldError] = screenFields.flatMap(_.validate)

  // Hook for subclasses to vend a form builder for a given type.
  protected def vendForm[T](implicit man: Manifest[T]): Box[(T, T => Unit) => NodeSeq] = Empty

  // Hook for subclasses to vend the variable backing a field's current value.
  protected def vendAVar[T](dflt: => T): NonCleanAnyVar[T]

  trait Field extends BaseField {
    // Self-register with the enclosing screen at construction time.
    AbstractScreen.this._register(() => this)

    // Current value, stored in a screen-vended variable and seeded with the
    // default after running all setFilters over it.
    private val _currentValue: NonCleanAnyVar[ValueType] =
      vendAVar[ValueType](setFilter.foldLeft(default)((nv, f) => f(nv)))

    def default: ValueType

    def is = _currentValue.is

    /**
     * Set to true if this field is part of a multi-part mime upload
     */
    override def uploadField_? = false

    def get = is

    // New values are passed through every setFilter before being stored.
    def set(v: ValueType) = _currentValue.set(setFilter.foldLeft(v)((nv, f) => f(nv)))

    implicit def manifest: Manifest[ValueType]

    protected def buildIt[T](implicit man: Manifest[T]): Manifest[T] = man

    def help: Box[String] = Empty

    override def helpAsHtml: Box[NodeSeq] = help.map(Text.apply)

    /**
     * Is the field editable
     */
    def editable_? = true

    // Build the form markup: try the screen-level vendor, then the
    // field-level vendor, then the global LiftRules vendor, in that order.
    def toForm: Box[NodeSeq] = {
      val func: Box[(ValueType, ValueType => Unit) => NodeSeq] =
        AbstractScreen.this.vendForm(manifest) or otherFuncVendors(manifest) or
          LiftRules.vendForm(manifest)

      func.map(f => f(is, set _))
    }

    protected def otherFuncVendors(what: Manifest[ValueType]): Box[(ValueType, ValueType => Unit) => NodeSeq] = Empty

    def validate: List[FieldError] = validations.flatMap(_.apply(is))

    def validations: List[ValueType => List[FieldError]] = Nil

    def setFilter: List[ValueType => ValueType] = Nil

    // Stable id derived from the concrete field class name.
    override lazy val uniqueFieldId: Box[String] = Full(Helpers.hash(this.getClass.getName))

    override def toString = is.toString
  }
}
/**
 * Single-screen form snippet. Renders all registered fields as one page,
 * snapshots screen state into request-scoped variables across the
 * POST/redirect/GET cycle, validates on submit, and calls finish().
 */
trait LiftScreen extends AbstractScreen with DispatchSnippet {
// Every snippet invocation renders the form.
def dispatch = {
case _ => ignore => this.toForm
}
// Backing storage for all ScreenVars of this screen, kept in one RequestVar
// keyed by variable name.
private object ScreenVars extends RequestVar[Map[String, (NonCleanAnyVar[_], Any)]](Map())
private object PrevSnapshot extends RequestVar[Box[ScreenSnapshot]](Empty)
// Where to send the user after finish/cancel; captured on first render.
private object Referer extends ScreenVar[String](S.referer openOr "/")
private object FirstTime extends ScreenVar[Boolean](true)
// Immutable capture of the screen's variable state that can be restored
// in a subsequent request (used to survive redirects).
protected class ScreenSnapshot(private[http] val screenVars: Map[String, (NonCleanAnyVar[_], Any)],
private[http] val snapshot: Box[ScreenSnapshot]) {
def restore() {
ScreenVars.set(screenVars)
PrevSnapshot.set(snapshot)
}
}
// Fields are backed by ScreenVars; the random name salt avoids collisions
// between vars of the same declared name.
protected def vendAVar[T](dflt: => T): NonCleanAnyVar[T] = new ScreenVar[T](dflt) {
override protected def __nameSalt = randomString(20)
}
protected def createSnapshot = new ScreenSnapshot(ScreenVars.is, PrevSnapshot.is)
/**
* Keep request-local information around without the nastiness of naming session variables
* or the type-unsafety of casting the results.
* RequestVars share their value through the scope of the current HTTP
* request. They have no value at the beginning of request servicing
* and their value is discarded at the end of request processing. They
* are helpful to share values across many snippets.
*
* @param dflt - the default value of the session variable
*/
abstract class ScreenVar[T](dflt: => T) extends NonCleanAnyVar[T](dflt) {
override protected def findFunc(name: String): Box[T] = ScreenVarHandler.get(name)
override protected def setFunc(name: String, value: T): Unit = ScreenVarHandler.set(name, this, value)
override protected def clearFunc(name: String): Unit = ScreenVarHandler.clear(name)
// Tracks initialization via a shadow "<name>_inited_?" entry; returns the
// previous flag and marks the var as initialized.
override protected def wasInitialized(name: String): Boolean = {
val bn = name + "_inited_?"
val old: Boolean = ScreenVarHandler.get(bn) openOr false
ScreenVarHandler.set(bn, this, true)
old
}
override protected def testWasSet(name: String): Boolean = {
val bn = name + "_inited_?"
ScreenVarHandler.get(name).isDefined || (ScreenVarHandler.get(bn) openOr false)
}
/**
* Different Vars require different mechanisms for synchronization. This method implements
* the Var specific synchronization mechanism
*/
def doSync[F](f: => F): F = f // no sync necessary for RequestVars... always on the same thread
}
// Typed accessors over the ScreenVars map. Note the unchecked cast in get:
// type safety is guaranteed by ScreenVar's keying, not by the compiler.
private object ScreenVarHandler {
def get[T](name: String): Box[T] =
ScreenVars.is.get(name).map(_._2.asInstanceOf[T])
def set[T](name: String, from: ScreenVar[_], value: T): Unit =
ScreenVars.set(ScreenVars.is + (name -> (from, value)))
def clear(name: String): Unit =
ScreenVars.set(ScreenVars.is - name)
}
// Hook invoked once, on the first rendering of the screen; override to
// initialize screen state.
protected def localSetup() {
}
// Renders the screen. On the very first hit, runs localSetup and redirects
// to the same URI with a snapshot restore, so a browser refresh does not
// re-run initialization.
def toForm = {
Referer.is // touch to capture the referer
if (FirstTime) {
FirstTime.set(false)
localSetup()
val localSnapshot = createSnapshot
S.redirectTo(S.uri, () => localSnapshot.restore)
}
val finishId = Helpers.nextFuncName
val cancelId = Helpers.nextFuncName
val theScreen = this
// Finish/Cancel buttons submit their respective hidden forms via JavaScript.
val finishButton = theScreen.finishButton % ("onclick" -> ("document.getElementById(" + finishId.encJs + ").submit()"))
val cancelButton: Elem = theScreen.cancelButton % ("onclick" -> ("document.getElementById(" + cancelId.encJs + ").submit()"))
val url = S.uri
renderAll(theScreen.screenTop,
theScreen.screenFields.map(f => ScreenFieldInfo(f, f.displayHtml, f.helpAsHtml, f.toForm)),
Full(cancelButton),
Full(finishButton), theScreen.screenBottom, finishId, cancelId, theScreen)
}
// Everything needed to render one field row: identity, label, help text, input markup.
protected case class ScreenFieldInfo(field: FieldIdentifier, text: NodeSeq, help: Box[NodeSeq], input: Box[NodeSeq])
// Binds the screen to the "wizard" template: one <form> carrying the fields
// plus a second, hidden <form> for cancel. Per-field error notices are
// matched to fields via uniqueFieldId.
protected def renderAll(screenTop: Box[Elem],
fields: List[ScreenFieldInfo],
cancel: Box[Elem],
finish: Box[Elem],
screenBottom: Box[Elem],
finishId: String, cancelId: String, theScreen: AbstractScreen): NodeSeq = {
val notices: List[(NoticeType.Value, NodeSeq, Box[String])] = S.getAllNotices
// Render one <wizard:line> per field, splicing in label, input and any
// error notices attached to that field's id.
def bindFieldLine(xhtml: NodeSeq): NodeSeq = {
fields.flatMap {
f =>
val myNotices = notices.filter(fi => fi._3.isDefined && fi._3 == f.field.uniqueFieldId)
bind("wizard", xhtml, "label" -> f.text, "form" -> f.input,
"help" -> NodeSeq.Empty,
FuncBindParam("field_errors", xml => {
myNotices match {
case Nil => NodeSeq.Empty
case xs => bind("wizard", xml, "error" ->
(innerXml => xs.flatMap {case (_, msg, _) => bind("wizard", innerXml, "bind" -> msg)}))
}
}))
}
}
def url = S.uri
val snapshot = createSnapshot
// The main form: restore the snapshot first (formGroup -1), then the field
// functions, then doFinish last (formGroup 4) so field values are applied
// before finishing.
def bindFields(xhtml: NodeSeq): NodeSeq =
(<form id={finishId} action={url} method="post">{S.formGroup(-1)(SHtml.hidden(() =>
snapshot.restore()))}{bind("wizard", xhtml, "line" -> bindFieldLine _)}{S.formGroup(4)(SHtml.hidden(() =>
{doFinish(); val localSnapshot = createSnapshot; S.redirectTo(url, () => localSnapshot.restore)}))}</form> %
theScreen.additionalAttributes) ++
<form id={cancelId} action={url} method="post">{SHtml.hidden(() => {
snapshot.restore();
S.redirectTo(Referer.is)
})}</form>
Helpers.bind("wizard", allTemplate,
"screen_number" -> Text("1"),
"total_screens" -> Text("1"),
FuncBindParam("wizard_top", xml => NodeSeq.Empty),
FuncBindParam("screen_top", xml => (screenTop.map(top => bind("wizard", xml, "bind" -%> top)) openOr NodeSeq.Empty)),
FuncBindParam("wizard_bottom", xml => NodeSeq.Empty),
FuncBindParam("screen_bottom", xml => (screenBottom.map(bottom => bind("wizard", xml, "bind" -%> bottom)) openOr NodeSeq.Empty)),
"prev" -> (Unparsed("&nbsp;") : NodeSeq),
"next" -> ((finish) openOr Unparsed("&nbsp;")),
"cancel" -> (cancel openOr Unparsed("&nbsp;")),
"errors" -> NodeSeq.Empty, // FIXME deal with errors
FuncBindParam("fields", bindFields _))
}
// Where to look for a user-supplied template before falling back to the default.
protected def allTemplatePath: List[String] = LiftScreenRules.allTemplatePath.vend
// Default built-in template used when no external template is found.
protected def allTemplateNodeSeq: NodeSeq =
<div>
<wizard:wizard_top> <div> <wizard:bind/> </div> </wizard:wizard_top>
<wizard:screen_top> <div> <wizard:bind/> </div> </wizard:screen_top>
<wizard:errors> <div> <ul> <wizard:item> <li> <wizard:bind/> </li> </wizard:item> </ul> </div> </wizard:errors>
<div> <wizard:fields>
<table>
<wizard:line>
<tr>
<td>
<wizard:label error_style="error"/> <wizard:help/> <wizard:field_errors> <ul> <wizard:error> <li> <wizard:bind/> </li> </wizard:error> </ul> </wizard:field_errors>
</td>
<td> <wizard:form/> </td>
</tr>
</wizard:line>
</table>
</wizard:fields> </div>
<div> <table> <tr> <td> <wizard:prev/> </td> <td> <wizard:cancel/> </td> <td> <wizard:next/> </td> </tr> </table> </div>
<wizard:screen_bottom> <div> <wizard:bind/> </div> </wizard:screen_bottom>
<wizard:wizard_bottom> <div> <wizard:bind/> </div> </wizard:wizard_bottom>
</div>
protected def allTemplate: NodeSeq = TemplateFinder.findAnyTemplate(allTemplatePath) openOr allTemplateNodeSeq
/**
* What additional attributes should be put on the form element.
*/
protected def formAttrs: MetaData = scala.xml.Null
// User-supplied completion logic; only invoked when validation passes.
protected def finish(): Unit
// Validate; on success snapshot, finish, and redirect; on failure surface
// the errors as notices (they are rendered next to their fields).
protected def doFinish() {
validate match {
case Nil =>
val snapshot = createSnapshot
PrevSnapshot.set(Full(snapshot))
finish()
redirectBack()
case xs => S.error(xs)
}
}
protected def redirectBack() {
S.redirectTo(Referer.is)
}
}
/**
 * Mixin giving a screen field an Int value type (defaulting to 0), with
 * helpers for building minimum- and maximum-value validations.
 */
trait IntField extends FieldIdentifier {
self: AbstractScreen#Field =>
type ValueType = Int
def default = 0
lazy val manifest = buildIt[Int]
/** Validation failing with `msg` when the value is below `len`. */
def minVal(len: Int, msg: => String): Int => List[FieldError] = { value =>
if (value >= len) Nil
else List(FieldError(this, Text(msg)))
}
/** Validation failing with `msg` when the value exceeds `len`. */
def maxVal(len: Int, msg: => String): Int => List[FieldError] = { value =>
if (value <= len) Nil
else List(FieldError(this, Text(msg)))
}
}
/** Mixin giving a screen field a Boolean value type, defaulting to false. */
trait BooleanField extends FieldIdentifier {
self: AbstractScreen#Field =>
type ValueType = Boolean
lazy val manifest = buildIt[Boolean]
def default = false
}
/**
 * Mixin giving a screen field a String value type (defaulting to ""), with
 * helpers for building minimum- and maximum-length validations.
 */
trait StringField extends FieldIdentifier {
self: AbstractScreen#Field =>
type ValueType = String
def default = ""
lazy val manifest = buildIt[String]
/** Validation failing with `msg` when the string is shorter than `len`. */
def minLen(len: Int, msg: => String): String => List[FieldError] = { text =>
if (text.length >= len) Nil
else List(FieldError(this, Text(msg)))
}
/** Validation failing with `msg` when the string is longer than `len`. */
def maxLen(len: Int, msg: => String): String => List[FieldError] = { text =>
if (text.length <= len) Nil
else List(FieldError(this, Text(msg)))
}
}
// Global, overridable configuration for LiftScreen rendering.
object LiftScreenRules extends Factory with FormVendor {
private def m[T](implicit man: Manifest[T]): Manifest[T] = man
// Template search path used by LiftScreen.allTemplate; override through the
// FactoryMaker mechanism to point screens at a custom template.
val allTemplatePath: FactoryMaker[List[String]] = new FactoryMaker[List[String]](() => List("templates-hidden", "wizard-all")) {}
}
}
}
| jeppenejsum/liftweb | framework/lift-base/lift-webkit/src/main/scala/net/liftweb/http/LiftScreen.scala | Scala | apache-2.0 | 13,146 |
package minCostFlow
/**
 * Strategy interface for min-cost-flow algorithms: implementations take a
 * graph and produce a flow assignment.
 */
trait Solver {
// Compute the flow for `graph`; the guarantees (feasibility, optimality)
// are determined by the implementing algorithm.
def computeFlow(graph: Graph): Graph.Flow
}
| yfcai/tutorial-assignment | main/minCostFlow.Solver.scala | Scala | unlicense | 82 |
package fp
import java.util.concurrent.Executors._
import fp.EqualSpecification.equalLaws
import fp.MonadSpecification.monadLaws
import fp.MonoidSpecification.monoidLaws
import fp.Par._
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.Prop.forAll
import org.scalacheck.{Arbitrary, Properties}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.ExecutionContext._
/**
 * ScalaCheck properties for Par: algebraic laws (equality, monoid, monad)
 * plus executor-backed checks of fork, parMap and parFoldMap.
 */
object ParSpecification extends Properties("Par") {
// Generate Par values by lifting arbitrary plain values with Par.unit.
implicit def arbPar[A: Arbitrary]: Arbitrary[Par[A]] = Arbitrary {
for (a <- arbitrary[A]) yield Par.unit(a)
}
include(equalLaws[Par[Int]])
include(monoidLaws[Par[Int]])
include(monadLaws[Par])
// fork(a) must be observably equal to a, even on a single-threaded pool
// (a classic deadlock trap for naive Par implementations).
property("fork") = forAll { (a: Par[Int]) =>
implicit val executor = fromExecutorService(newFixedThreadPool(1))
val result = a equal fork(a)
executor.shutdown()
result
}
// parMap must agree with sequential map. Note the executor is shut down
// only after the result is forced.
property("parMap") = forAll { (as: Vector[Int], f: Int => Int) =>
implicit val executor = fromExecutorService(newFixedThreadPool(2))
val result = run(parMap(as)(f)) == as.map(f)
executor.shutdown()
result
}
// parFoldMap must agree with the sequential foldMap.
property("parFoldMap") = forAll { (as: Vector[Int], f: Int => Int) =>
implicit val executor = fromExecutorService(newCachedThreadPool())
val result = run(parFoldMap(as)(f)) == as.foldMap(f)
executor.shutdown()
result
}
}
| adamgfraser/fp | src/test/scala/fp/ParSpecification.scala | Scala | apache-2.0 | 1,336 |
/*
* Copyright (c) 2009 Michel Alexandre Salim. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. The names of the authors may not be used to endorse or promote
* products derived from this software without specific, prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
package info.hircus.kanren
/**
 * Core of a miniKanren logic-programming embedding: substitutions, logic
 * variables, goals and the standard relational combinators. Many
 * definitions carry the original Scheme source in comments.
 */
object MiniKanren {
/* Type definitions */
import java.util.HashMap
/**
* A constraint is a list of pairs, each pair consisting of a logical variable and a list of
* variables/values it is not allowed to unify with
*/
type Constraints = List[(Var, List[Any])]
/**
* This abstract class specifies the basic operations any substitution must satisfy.
*/
abstract class Subst {
/**
* Extend a substitution with a new mapping from v -> x. Might fail in some substitution implementations.
*/
def extend(v: Var, x: Any): Option[Subst]
/**
* Add a constraint for the specified variable
*/
def c_extend(v: Var, x: Any): Subst = this
/**
* Given a variable, look up its constraints
*/
def constraints(v: Var): List[Any] = Nil
/**
* Given a variable, look up its bound value
*/
def lookup(v: Var): Option[Any]
/**
* The length of a substitution, i.e. the number of var -> value mappings it contains
*/
def length: Int
/**
* Unifies two terms
* This default implementation always succeeds; substitution classes with constraints
* must override this, but may call this implementation once the unification is verified to be safe
*
* @param term1 Any value
* @param term2 Any value
* @return Some substitution
*/
def unify(term1: Any, term2: Any): Option[Subst] = {
// Walk both terms to their current bindings before comparing.
val t1 = walk(term1, this)
val t2 = walk(term2, this)
if (t1 == t2) return Some(this)
else if (t1.isInstanceOf[Var])
return this.extend(t1.asInstanceOf[Var], t2)
else if (t2.isInstanceOf[Var])
return this.extend(t2.asInstanceOf[Var], t1)
else if (pairp(t1) && pairp(t2)) {
// Pairs unify component-wise, threading the substitution through.
val ls1 = t1.asInstanceOf[(Any,Any)]
val ls2 = t2.asInstanceOf[(Any,Any)]
this.unify(ls1._1, ls2._1) match {
case None => return None
case Some(s2: Subst) =>
return s2.unify(ls1._2, ls2._2)
}
}
// NOTE(review): this branch is unreachable — the t1 == t2 case already
// returned at the top of the method.
else if (t1 == t2) return Some(this)
else return None
}
}
import info.hircus.kanren.Substitution._
/**
* A goal is a function that, given a substitution, produces a stream of substitution.
* This stream is empty if the goal fails; otherwise, it may contain any number of
* substitutions
*/
type Goal = (Subst) => Stream[Subst]
// Canonical empty substitutions: plain, and constraint-carrying.
val empty_s = EmptySubst
val empty_cs = ConstraintSubst0(Nil)
/**
* A logic variable
* It consists of two parts: a user-supplied name, and a count that is automatically incremented.
* The count makes sure that each created variable is unique.
*/
case class Var(name: Symbol, count: Int)
// Per-name counters backing make_var. NOTE(review): for an unseen name,
// HashMap.get returns null, which Scala unboxes to 0, so counts start at 0;
// also this mutable map is not synchronized — confirm single-threaded use.
private val m = new HashMap[Symbol, Int]()
/**
* Creates a logic variable, with the requested name, and a count that is automatically incremented
*
* @param name The name of the variable
* @return a logic variable
*/
def make_var(name: Symbol) = {
val count = m.get(name)
m.put(name, count+1)
Var(name, count)
}
/* Monads */
/**
* A goal that always succeeds, returning a stream containing only its input substitution
*/
def succeed: Goal = { s: Subst =>
Stream.cons(s, Stream.empty)
}
/**
* A goal that always fails, returning an empty stream of substitution
*/
def fail: Goal = { s: Subst => Stream.empty }
// True when x is a Scala pair — the cons-cell encoding used for lists.
def pairp(x: Any): Boolean =
x.isInstanceOf[(Any,Any)]
/*
* (define walk
* (lambda (v s)
* (cond
* ((var? v)
* (cond
* ((assq v s) =>
* (lambda (a)
* (let ((v^ (rhs a)))
* (walk v^ s))))
* (else v)))
* (else v))))
*
*
*/
// Chase a variable through the substitution until a non-variable value or
// an unbound variable is reached; non-variables are returned unchanged.
def walk(v: Any, s: Subst): Any =
if (v.isInstanceOf[Var]) s.lookup(v.asInstanceOf[Var]) match {
case Some(x) => walk(x, s)
case None => v
} else v
/*
* (define walk*
* (lambda (v s)
* (let ((v (walk v s)))
* (cond
* ((var? v) v)
* ((pair? v)
* (cons
* (walk* (car v) s)
* (walk* (cdr v) s)))
* (else v)))))
*/
// Deep walk: like walk, but also resolves variables inside pairs recursively.
def walk_*(v: Any, s: Subst): Any = {
val v1 = walk(v, s)
if (v1.isInstanceOf[Var]) v1
else if (pairp(v1)) {
val ls = v1.asInstanceOf[(Any,Any)]
(walk_*(ls._1, s), walk_*(ls._2, s))
} else v1
}
/* (define reify-s
* (lambda (v s)
* (let ((v (walk v s)))
* (cond
* ((var? v) (ext-s v (reify-name (size-s s)) s))
* ((pair? v) (reify-s (cdr v) (reify-s (car v) s)))
* (else s)))))
*
* (define reify-name
* (lambda (n)
* (string->symbol
* (string-append "_" "." (number->string n)))))
*/
// Fresh display name for the n-th unbound variable: _.0, _.1, ...
def reify_name(n: Int) =
Symbol("_." + n)
// Build a substitution mapping every unbound variable in v to a reified name.
def reify_s(v: Any, s: Subst): Subst= {
val v1 = walk(v, s)
if (v1.isInstanceOf[Var])
s.extend(v1.asInstanceOf[Var], reify_name(s.length)) match {
case Some(s1) => s1
/* never happens as reification does not use any constraints
* but the compiler does not know that
*/
case _ => s
}
else if (pairp(v1)) {
val ls = v1.asInstanceOf[(Any,Any)]
reify_s(ls._2, reify_s(ls._1, s))
} else s
}
/* (define reify
* (lambda (v)
* (walk* v (reify-s v empty-s))))
*/
def reify(v: Any) = walk_*(v, reify_s(v, empty_s))
/* Logic system */
/* (define bind
* (lambda (a-inf g)
* (case-inf a-inf
* (mzero)
* ((a) (g a))
* ((a f) (mplus (g a)
* (lambdaf@ () (bind (f) g)))))))
*/
// Feed every substitution in the stream through goal g, concatenating results.
def bind(a_inf: Stream[Subst], g: Goal): Stream[Subst] =
a_inf flatMap g
// Like bind, but interleaves g's result streams (fair disjunction of answers).
def bind_i(a_inf: Stream[Subst], g: Goal): Stream[Subst] =
a_inf match {
case Stream.empty => a_inf
case Stream.cons(a, f) => f match {
case Stream.empty => g(a)
case _ => mplus_i(g(a), bind(f, g))
}
}
/* (define mplus
* (lambda (a-inf f)
* (case-inf a-inf
* (f)
* ((a) (choice a f))
* ((a f0) (choice a
* (lambdaf@ () (mplus (f0) f)))))))
*/
// Plain stream append: exhausts a_inf before producing anything from f.
def mplus(a_inf: Stream[Subst],
f: => Stream[Subst]): Stream[Subst] =
a_inf append f
/**
* Like mplus, but interleaves the two input streams
* Allows a goal to proceed even if the first subgoal is bottom
*
* @param a_inf a stream of substitutions
* @param f a second stream of substitutions to append
* @return an interleaved stream of substitutions
*/
def mplus_i(a_inf: Stream[Subst],
f: => Stream[Subst]): Stream[Subst] = a_inf match {
case Stream.empty => f
case Stream.cons(a, f0) => f0 match {
case Stream.empty => Stream.cons(a, f)
case _ => Stream.cons(a, mplus_i(f, f0))
}
}
/* (define-syntax anye
* (syntax-rules ()
* ((_ g1 g2)
* (lambdag@ (s)
* (mplus (g1 s)
* (lambdaf@ () (g2 s)))))))
*/
// Disjunction of two goals: all answers of g1, then all answers of g2.
def any_e(g1: Goal, g2: Goal): Goal = { s: Subst =>
mplus(g1(s), g2(s)) }
/* (define-syntax all
* (syntax-rules ()
* ((_) succeed)
* ((_ g) (lambdag@ (s) (g s)))
* ((_ g^ g ...) (lambdag@ (s) (bind (g^ s) (all g ...))))))
*/
// Conjunction of goals, parameterized by the bind flavor (sequential or
// interleaving). Zero goals is trivially succeed.
def all_aux(bindfn: (Stream[Subst], Goal) => Stream[Subst])(gs: Goal*): Goal = {
gs.toList match {
case Nil => succeed
case g :: Nil => g
case g :: gs2 =>
{ s: Subst => bindfn(g(s), all(gs2: _*)) }
}
}
def all = all_aux(bind) _
def all_i = all_aux(bind_i) _
/**
* Faster than all, if only two goals are used
*/
def both(g0: Goal, g1: Goal): Goal = { s: Subst =>
g0(s) flatMap g1 }
/* (define-syntax ife
* (syntax-rules ()
* ((_ g0 g1 g2)
* (lambdag@ (s)
* (mplus ((all g0 g1) s)
* (lambdaf@ () (g2 s)))))))
*/
/**
* if_e produces a goal that, given a substitution, produces a stream of substitutions
* starting with the result of running a combination of the first two goals on the substitution,
* followed by running the alternate goal.
*
* @param testg The first, 'test' goal. Guards the consequent
* @param conseqg The 'consequent' goal
* @param altg The alternate goal. Call-by-name as otherwise, in a situation with many nested if_e
* (e.g. using any_o), the stack overflows.
*/
def if_e(testg: Goal, conseqg: =>Goal, altg: =>Goal): Goal = {
s: Subst =>
mplus(both(testg, conseqg)(s),
altg(s))
}
// Interleaving variant of if_e (fair between consequent and alternative).
def if_i(testg: Goal, conseqg: =>Goal, altg: =>Goal): Goal = {
s: Subst =>
mplus_i(both(testg, conseqg)(s),
altg(s))
}
// Committed-choice: once the test succeeds at all, the alternative is
// discarded and the consequent runs on every test answer.
def if_a(testg: Goal, conseqg: =>Goal, altg: =>Goal): Goal = {
s: Subst => {
val s_inf = testg(s)
s_inf match {
case Stream.empty => altg(s)
case Stream.cons(s_1, s_inf_1) => s_inf_1 match {
case Stream.empty => conseqg(s_1)
case _ => bind(s_inf, conseqg) } }
} }
// Like if_a, but additionally keeps only the FIRST answer of the test.
def if_u(testg: Goal, conseqg: =>Goal, altg: =>Goal): Goal = {
s: Subst => {
testg(s) match {
case Stream.empty => altg(s)
case Stream.cons(s_1, s_inf) => conseqg(s_1) }
} }
// Builds cond-style goals from (test, consequent) clauses using the given
// if-combinator; no clauses means fail.
def cond_aux(ifer: (Goal, =>Goal, =>Goal) => Goal)(gs: (Goal,Goal)*): Goal =
{ gs.toList match {
case Nil => fail
case (g0, g1) :: gs2 => gs2 match {
case Nil => both(g0, g1)
case _ => ifer(g0, g1,
cond_aux(ifer)(gs2: _*))
} } }
def cond_e = cond_aux(if_e _) _
def cond_i = cond_aux(if_i _) _
def cond_a = cond_aux(if_a _) _
def cond_u = cond_aux(if_u _) _
// Syntax sugar: a === b builds a unification goal, a =/= b a disequality goal.
class Unifiable(a: Any) {
def ===(b: Any): Goal = mkEqual(a, b)
def =/=(b: Any): Goal = neverEqual(a, b)
}
implicit def unifiable(a: Any) = new Unifiable(a)
// Goal that succeeds iff t1 unifies with t2 in the current substitution.
def mkEqual(t1: Any, t2: Any): Goal = { s: Subst => {
s.unify(t1, t2) match {
case Some(s2) => succeed(s2)
case None => fail(s) // does not matter which substitution
}
} }
// Disequality goal: fails if the walked terms are already equal, otherwise
// records constraints on whichever sides are still variables.
def neverEqual(t1: Any, t2: Any): Goal = { s: Subst => {
val v1 = walk(t1, s)
val v2 = walk(t2, s)
if (v1 == v2) fail(s)
else {
val s1 = if (v1.isInstanceOf[Var]) s.c_extend(v1.asInstanceOf[Var], v2) else s
val s2 = if (v2.isInstanceOf[Var]) s1.c_extend(v2.asInstanceOf[Var], v1) else s1
succeed(s2)
}
} }
/* (define-syntax run
* (syntax-rules ()
* ((_ n^ (x) g ...)
* (let ((n n^) (x (var 'x)))
* (if (or (not n) (> n 0))
* (map-inf n
* (lambda (s) (reify (walk* x s)))
* ((all g ...) empty-s))
* '())))))
*/
/**
* Runs the given goals and produce up to n results for the specified variable
*
* @param n max number of results. A negative number specifies that all available results should be returned
* @param v the variable to be inspected
* @param g0 a goal; multiple goals might be specified
*/
def run(n: Int, v: Var) = run_aux(n, v, empty_s) _
// Variants of run over the other substitution implementations.
def crun(n: Int, v: Var) = run_aux(n, v, empty_cs) _
def maprun(n: Int, v: Var) = run_aux(n, v, empty_msubst) _
def cljrun(n: Int, v: Var) = run_aux(n, v, empty_cljsubst) _
// Shared driver: conjoin the goals, run against the seed substitution,
// reify v in each answer, and truncate to n results when n is non-negative.
private def run_aux(n: Int, v: Var, subst: Subst)(g0: Goal, gs: Goal*): List[Any] = {
val g = gs.toList match {
case Nil => g0
case gls => all((g0::gls): _*)
}
val allres = g(subst) map {s: Subst => reify(walk_*(v, s)) }
(if (n < 0) allres else (allres take n)) toList
}
}
| michel-slm/minikanren-scala | src/info/hircus/kanren/MiniKanren.scala | Scala | bsd-3-clause | 12,813 |
import scala.annotation._
// also works with subsets of {@annotation.meta.field @annotation.meta.getter @annotation.meta.setter}
// Static annotation taking a function-typed argument (Foo => Int); part of a
// compiler regression test — do not alter the shape of this declaration.
class baz(out: Foo => Int) extends StaticAnnotation
// Applies @baz with a lambda argument referring to the annotated class's own
// member — the scenario this regression test exercises.
class Foo {
@baz(out = _.value) val value: Int = 5
}
| lrytz/scala | test/files/pos/t10497.scala | Scala | apache-2.0 | 238 |
import sbt._
// sbt 0.7-style build definition for the util project.
class UtilProject(info: ProjectInfo) extends DefaultProject(info) {
// Publish artifacts in Maven repository layout.
override def managedStyle = ManagedStyle.Maven
val scalaTools = "org.scala-lang" % "scala-compiler" % "2.8.0" % "compile"
// Keep the Scala jars on the published classpath instead of filtering them out.
override def filterScalaJars = false
// Extra resolvers hosting some of the dependencies below.
val lagRepo = "lag.net" at "http://www.lag.net/repo/"
val lagNest = "lag.net/nest" at "http://www.lag.net/nest/"
val guava = "com.google.guava" % "guava" % "r06"
val commonsCollections = "commons-collections" % "commons-collections" % "3.2.1"
// Test-only dependencies.
val mockito = "org.mockito" % "mockito-all" % "1.8.5" % "test" withSources()
val specs = "org.scala-tools.testing" %% "specs" % "1.6.5" % "test" withSources()
val junit = "junit" % "junit" % "3.8.2" % "test"
val vscaladoc = "org.scala-tools" % "vscaladoc" % "1.1-md-3" % "provided"
// Compile with unchecked/deprecation warnings and UTF-8 sources.
override def compileOptions = super.compileOptions ++ Seq(Unchecked) ++
compileOptions("-encoding", "utf8") ++
compileOptions("-deprecation")
}
| mccv/util | project/build/Project.scala | Scala | apache-2.0 | 947 |
package org.infinispan.spark.domain
/**
* @author gustavonalle
*/
// Immutable value describing a runner: name, whether they finished,
// finish time in seconds, and age.
case class Runner(name: String, finished: Boolean, finishTimeSeconds: Int, age: Int)
| rnowling/infinispan-spark | src/test/scala/org/infinispan/spark/domain/Runner.scala | Scala | apache-2.0 | 155 |
package com.arcusys.valamis.lesson.scorm.model
import com.arcusys.valamis.lesson.scorm.model.manifest._
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
// Verifies that a PostConditionRule retains the condition set and action it
// was constructed with.
class PostConditionRuleTest extends FlatSpec with ShouldMatchers {
// Sample condition items combined into the rule's condition set.
val condition1 = new ConditionRuleItem(ConditionType.ActivityAttempted)
val condition2 = new ConditionRuleItem(ConditionType.ObjectiveStatusKnown)
val conditions = Seq(condition1, condition2)
"Post-condition rule" can "be constructed" in {
val set = new RuleConditionSet(conditions, ConditionCombination.All)
val rule = new PostConditionRule(set, PostConditionAction.Continue)
// Constructor arguments must be exposed unchanged.
rule.conditions should equal(set)
rule.action should equal(PostConditionAction.Continue)
}
}
| igor-borisov/valamis | valamis-scorm-lesson/src/test/scala/com/arcusys/valamis/lesson/scorm/model/PostConditionRuleTest.scala | Scala | gpl-3.0 | 745 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package top.template
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.base.Constructor
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.parsing.types.AnnotType
/**
* @author Alexander Podkhalyuzin
* Date: 08.02.2008
*/
/*
* TemplateParents ::= Constr {with AnnotType}
*/
// Parses the grammar rule: TemplateParents ::= Constr {with AnnotType}
object ClassParents {
// Returns true iff class parents were parsed; on success the consumed range
// is marked as a CLASS_PARENTS PSI element.
def parse(builder: ScalaPsiBuilder): Boolean = {
val classParentsMarker = builder.mark
// A constructor invocation is mandatory; without it there are no parents.
if (!Constructor.parse(builder)) {
classParentsMarker.drop()
return false
}
//Look for mixin
while (builder.getTokenType == ScalaTokenTypes.kWITH) {
builder.advanceLexer() //Ate with
if (!AnnotType.parse(builder, isPattern = false)) {
// Error recovery: report the bad type but still close the marker and
// treat the parents as parsed so the rest of the file can continue.
builder error ScalaBundle.message("wrong.simple.type")
classParentsMarker.done(ScalaElementTypes.CLASS_PARENTS)
return true
}
}
classParentsMarker.done(ScalaElementTypes.CLASS_PARENTS)
true
}
}
package edu.berkeley.velox.server
import org.apache.curator.framework.api.CuratorWatcher
import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.WatchedEvent
import edu.berkeley.velox.catalog.Catalog
import edu.berkeley.velox.util.zk.DistributedCountdownLatch
import scala.collection.JavaConverters._
import org.apache.curator.utils.ZKPaths
import edu.berkeley.velox.server.{ZookeeperConnectionUtils => ZKUtils}
import com.typesafe.scalalogging.slf4j.Logging
import edu.berkeley.velox.trigger.TriggerManager
// TODO(crankshaw) this file has _A LOT_ of duplicated code
// Server Watchers
/**
 * One-shot ZooKeeper watcher for new databases appearing in the catalog.
 * ZK watches fire once, so process() immediately re-registers a fresh
 * watcher before handling the event.
 */
class ServerDBWatcher(client: CuratorFramework,
schemaChangeBarrier: DistributedCountdownLatch) extends CuratorWatcher with Logging {
override def process(event: WatchedEvent) {
// Find name of new DB and re-add watcher
val catalogDBs = client.getChildren.usingWatcher(new ServerDBWatcher(client, schemaChangeBarrier)).forPath(ZKUtils.CATALOG_ROOT)
.asScala
.toSet
val localDBs = Catalog.listLocalDatabases
// The new database is whatever the catalog has that we don't.
val diff = catalogDBs -- localDBs
// NOTE(review): this looks like trace output logged at error level — confirm
// whether logger.debug was intended.
logger.error(s"SERVER: local: $localDBs, catalog: $catalogDBs, diff: $diff")
if (diff.size == 1) {
val newDBName = diff.toList(0)
Catalog._createDatabaseTrigger(newDBName)
// Watch the new database's node for table additions.
client.getChildren.usingWatcher(new ServerTableWatcher(newDBName, client, schemaChangeBarrier))
.forPath(ZKUtils.makeDBPath(newDBName))
} else if (diff.size == 0) {
// we already know about all the databases in the catalog.
logger.warn(s"Server DB watcher activated but all dbs accounted for: $catalogDBs")
} else {
// More than one unseen database means we missed events — fail loudly.
throw new IllegalStateException(s"DB schema addition issue. DIFF = ${diff.mkString(",")}")
}
// set table watcher on new database
// Signal the cluster-wide barrier that this node processed the change.
schemaChangeBarrier.decrement()
}
}
/**
* Watches for changes to the tables of a specific database
* @param dbname The database to watch
*/
/**
 * Watches for changes to the tables of a specific database
 * Re-registers itself on every event (ZK watches are one-shot), diffs the
 * catalog's tables against the locally-known ones, and creates the single
 * new table locally.
 * @param dbname The database to watch
 */
class ServerTableWatcher(dbname: String,
client: CuratorFramework,
schemaChangeBarrier: DistributedCountdownLatch) extends CuratorWatcher with Logging{
override def process(event: WatchedEvent) {
// Find name of new DB and re-add watcher
val catalogTables = client.getChildren.usingWatcher(new ServerTableWatcher(dbname, client, schemaChangeBarrier))
.forPath(ZKPaths.makePath(ZKUtils.CATALOG_ROOT, dbname))
.asScala
.toSet
val localTables = Catalog.listLocalTables(dbname)
val diff = catalogTables -- localTables
if (diff.size == 1) {
val newTableName = diff.toList(0)
// The table's schema is stored as the znode's data payload.
val schemaBytes = client.getData.forPath(ZKUtils.makeTablePath(dbname, newTableName))
Catalog._createTableTrigger(dbname, newTableName, ZKUtils.bytesToSchema(schemaBytes))
} else if (diff.size == 0) {
// we already know about all the tables in the catalog.
logger.warn(s"Server Table watcher activated but all tables accounted for: $dbname, $catalogTables")
} else {
// TODO how should we handle this error?
throw new IllegalStateException(s"Table Schema addition issue: DIFF = ${diff.mkString(",")}")
}
// Signal the cluster-wide barrier that this node processed the change.
schemaChangeBarrier.decrement()
}
}
// Watches for new triggers.
// (db, table) watches a specific db.table for new triggers
// (db, null) watches a specific db for new tables
// (null, null) watches for new dbs
// Watches for new triggers.
// (db, table) watches a specific db.table for new triggers
// (db, null) watches a specific db for new tables
// (null, null) watches for new dbs
// Each event re-registers a fresh watcher at the same level (ZK watches are
// one-shot) and then walks down the hierarchy, installing watchers on any
// newly-discovered children.
class TriggerWatcher(client: CuratorFramework,
schemaChangeBarrier: DistributedCountdownLatch,
dbName: String = null,
tblName: String = null) extends CuratorWatcher {
override def process(event: WatchedEvent) {
if (dbName != null && tblName != null) {
// Find names of new triggers of a single table, and re-add new trigger watcher
val newTriggers = client.getChildren.usingWatcher(new TriggerWatcher(client, schemaChangeBarrier, dbName, tblName))
.forPath(ZKUtils.makePath(ZKUtils.TRIGGER_ROOT, dbName, tblName)).asScala.toSet
processNewTriggers(dbName, tblName, TriggerManager.getNewTriggers(dbName, tblName, newTriggers))
} else if (dbName != null) {
// Find names of new tables of a single db, and re-add new table watcher
val newTables = client.getChildren.usingWatcher(new TriggerWatcher(client, schemaChangeBarrier, dbName, null))
.forPath(ZKUtils.makePath(ZKUtils.TRIGGER_ROOT, dbName)).asScala.toSet
processNewTables(dbName, TriggerManager.getNewTables(dbName, newTables))
} else {
// Find names of new DBs, and re-add new DB watcher
val newDBs = client.getChildren.usingWatcher(new TriggerWatcher(client, schemaChangeBarrier, null, null))
.forPath(ZKUtils.TRIGGER_ROOT).asScala.toSet
processNewDBs(TriggerManager.getNewDBs(newDBs))
}
// Signal the cluster-wide barrier that this node processed the change.
schemaChangeBarrier.decrement()
}
// For each new db: install a table-level watcher and process its tables.
private def processNewDBs(dbs: Set[String]) {
dbs.foreach(newDB => {
val newTables = client.getChildren.usingWatcher(new TriggerWatcher(client, schemaChangeBarrier, newDB, null))
.forPath(ZKUtils.makePath(ZKUtils.TRIGGER_ROOT, newDB))
.asScala.toSet
processNewTables(newDB, TriggerManager.getNewTables(newDB, newTables))
})
}
// For each new table: install a trigger-level watcher and process its triggers.
private def processNewTables(db: String, tables: Set[String]) {
tables.foreach(newTable => {
val newTriggers = client.getChildren.usingWatcher(new TriggerWatcher(client, schemaChangeBarrier, db, newTable))
.forPath(ZKUtils.makePath(ZKUtils.TRIGGER_ROOT, db, newTable))
.asScala.toSet
processNewTriggers(db, newTable, TriggerManager.getNewTriggers(db, newTable, newTriggers))
})
}
// Register each new trigger locally; the trigger payload is the znode data.
private def processNewTriggers(db: String, table: String, triggers: Set[String]) {
triggers.foreach(newTrigger => {
val triggerBytes = client.getData.forPath(ZKUtils.makePath(ZKUtils.TRIGGER_ROOT, db, table, newTrigger))
TriggerManager._addTrigger(db, table, newTrigger, triggerBytes)
})
}
} // end TriggerWatcher
| pbailis/fast-tpcc-repo | core/src/main/scala/edu/berkeley/velox/server/SchemaWatchers.scala | Scala | apache-2.0 | 5,950 |
package weld
/** Managed wrapper around a native Weld error object allocated via JNI. */
class WeldError extends WeldManaged(WeldJNI.weld_error_new()) {
  // Frees the native error object when this wrapper is closed.
  override protected def doClose(): Unit = {
    WeldJNI.weld_error_free(handle)
  }
  // GC-time fallback that frees the native handle (see WeldError.Cleaner).
  override protected def cleaner = new WeldError.Cleaner(handle)
  /** Native error code.
   *  NOTE(review): checkAccess() presumably rejects use after close — defined
   *  in WeldManaged, confirm there. */
  def code: Int = {
    checkAccess()
    WeldJNI.weld_error_code(handle)
  }
  /** Native error message (same access check as [[code]]). */
  def message: String = {
    checkAccess()
    WeldJNI.weld_error_message(handle)
  }
}
object WeldError {
  /** Cleaner runnable: frees the native error handle if the wrapper is never
   *  explicitly closed. Must not capture the WeldError instance itself. */
  private[weld] class Cleaner(handle: Long) extends Runnable {
    override def run(): Unit = WeldJNI.weld_error_free(handle)
  }
}
/** Exception carrying a Weld error code; `code` is -1 when no native error
 *  code is available. */
class WeldException(val code: Int, message: String) extends RuntimeException(message) {
  /** Builds an exception without a native error code. */
  def this(message: String) = this(-1, message)
  /** Builds an exception from a native [[WeldError]], optionally appending
   *  extra detail on a new line. */
  def this(error: WeldError, code: Option[String] = None) =
    this(error.code, error.message + code.fold("")("\\n" + _))
}
| hvanhovell/weld-java | src/main/scala/weld/WeldError.scala | Scala | bsd-3-clause | 820 |
/*
* Copyright (C) 2015 Cotiviti Labs (nexgen.admin@cotiviti.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.signalcollect.triplerush.dictionary
import java.io.{ FileInputStream, InputStream }
import java.util.concurrent.Executors
import java.util.zip.GZIPInputStream
import scala.collection.JavaConversions.asScalaIterator
import scala.util.Random
import org.apache.jena.graph.{ Triple => JenaTriple }
import org.apache.jena.riot.{ Lang, RDFDataMgr }
import org.apache.jena.riot.lang.{ PipedRDFIterator, PipedTriplesStream }
import org.mapdb.DBMaker
/** Benchmark: loads a gzipped N-Triples file (args(0)) and measures how fast
 *  its RDF terms can be inserted into a HashDictionary, printing progress and
 *  timing information along the way. */
object DictionaryBenchmark extends App {
  // All generated IRIs share this prefix; suffixes are random alphanumerics.
  val prefix = "http://www.signalcollect.com/triplerush#"
  val suffixLength = 10
  /** Returns a random alphanumeric string of the given length. */
  def generateSuffix(length: Int): String = {
    Random.alphanumeric.take(length).mkString
  }
  /** Builds one random IRI by appending a random suffix to `prefix`. */
  def generateString: String = {
    new java.lang.StringBuilder(prefix.length + suffixLength).append(prefix).append(generateSuffix(suffixLength)).toString
  }
  // Tuning knobs; they are only reported in the benchmark label below
  // (the MapDB-backed configuration is commented out).
  val string2IdNodeSize = 128
  val id2StringNodeSize = 32
  val asyncQueueSize = 4096
  //  val dbMaker = DBMaker
  //    .memoryUnsafeDB
  //    .closeOnJvmShutdown
  //    .transactionDisable
  //    .asyncWriteEnable
  //    .asyncWriteQueueSize(asyncQueueSize)
  //    .compressionEnable
  val warmupStrings = 10
  val timedStrings = 100000000
  val maxId = warmupStrings + timedStrings
  val dictionary = new HashDictionary()//, dbMaker
  val startTime = System.currentTimeMillis
  // Stream the gzipped N-Triples file given as the first program argument.
  val inputStream = new FileInputStream(args(0))
  val gzipInputStream: InputStream = new GZIPInputStream(inputStream)
  val tripleIterator = new PipedRDFIterator[JenaTriple]
  val sink = new PipedTriplesStream(tripleIterator)
  // Parsing runs on a dedicated thread and feeds triples into `tripleIterator`.
  val executor = Executors.newSingleThreadExecutor
  val parser = new Runnable {
    def run: Unit = RDFDataMgr.parse(sink, gzipInputStream, Lang.NTRIPLES)
  }
  executor.submit(parser)
  var triplesAdded = 0
  // Flattens each triple into its three RDF terms, logging progress every
  // 10000 triples.
  val stringIterator = tripleIterator.flatMap { triple =>
    triplesAdded += 1
    if (triplesAdded % 10000 == 0) {
      val seconds = ((System.currentTimeMillis - startTime) / 100.0).round / 10.0
      println(s"$triplesAdded triples loaded after $seconds seconds: $dictionary")
    }
    List(triple.getSubject.toString, triple.getPredicate.toString, triple.getObject.toString(true))
  }
  // Warm up with generated strings, then run the timed insertion pass.
  addStrings(warmupStrings, generatingIterator())
  addStrings(timedStrings, stringIterator,
    Some(s"PUTS: id2StringNodeSize=$id2StringNodeSize string2IdNodeSize=$string2IdNodeSize asyncQueueSize=$asyncQueueSize"))
  println(dictionary)
  tripleIterator.close
  gzipInputStream.close
  inputStream.close
  executor.shutdownNow
  /** Iterator of freshly generated random IRIs, bounded by `size`
   *  (effectively unbounded by default); logs progress every 10000 strings. */
  def generatingIterator(size: Int = Int.MaxValue): Iterator[String] = {
    val startTime = System.currentTimeMillis
    new Iterator[String] {
      var count = 0
      def next = {
        count += 1
        if (count % 10000 == 0) {
          val seconds = ((System.currentTimeMillis - startTime) / 100.0).round / 10.0
          println(s"Generating iterator: $count strings loaded after $seconds seconds")
        }
        generateString
      }
      def hasNext: Boolean = {
        count < size
      }
    }
  }
  /** Inserts `howMany` strings from `iter` into the dictionary; when `timed`
   *  carries a label, the insertion is wrapped in [[time]]. */
  def addStrings(howMany: Int, iter: Iterator[String], timed: Option[String] = None): Unit = {
    def run(s: Iterator[String]): Unit = {
      var i = 0
      while (i < howMany) {
        dictionary(s.next)
        i += 1
      }
    }
    timed match {
      case Some(name) =>
        println(s"Adding $howMany entries ...")
        time {
          run(iter)
        }(name, Some(howMany))
      case None =>
        run(iter)
    }
  }
  /** Times `code`, printing the total duration and, when `entries` is given,
   *  the average duration per entry. Returns the result of `code`. */
  def time[R](code: => R)(name: String = "Time", entries: Option[Int] = None): R = {
    val start = System.currentTimeMillis
    val result = code
    val end = System.currentTimeMillis
    val time = end - start
    println(s"$name: $time ms")
    entries.map { e =>
      val msPerEntry = time.toDouble / e
      println(s"$msPerEntry ms per entry")
    }
    result
  }
}
| uzh/triplerush | src/test/scala/com/signalcollect/triplerush/dictionary/DictionaryBenchmark.scala | Scala | apache-2.0 | 4,454 |
package org.jetbrains.plugins.scala
package lang
package psi
import com.intellij.lang.ASTNode
import com.intellij.openapi.progress.ProgressManager
import com.intellij.psi._
import com.intellij.psi.codeStyle.CodeStyleManager
import com.intellij.psi.impl.source.codeStyle.CodeEditUtil
import com.intellij.psi.scope._
import com.intellij.psi.stubs.StubElement
import org.jetbrains.plugins.scala.editor.importOptimizer._
import org.jetbrains.plugins.scala.extensions.{PsiElementExt, _}
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScReferencePattern
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScBlockStatement
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScPatternDefinition, ScTypeAliasDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScPackaging
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.usages.{ImportExprUsed, ImportSelectorUsed, ImportUsed, ImportWildcardSelectorUsed}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.{ScImportExpr, ScImportSelector, ScImportStmt}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator.ScDesignatorType
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/** Mixed into PSI elements that can directly contain `import` statements
 *  (files, packagings, blocks). Provides querying, insertion and deletion of
 *  imports, delegating layout decisions to ScalaImportOptimizer. */
trait ScImportsHolder extends ScalaPsiElement {
  /** All import statements that are direct children of this holder.
   *  Prefers stub data when available to avoid building the full AST. */
  def getImportStatements: Seq[ScImportStmt] = {
    this match {
      case s: ScalaStubBasedElementImpl[_, _] =>
        val stub: StubElement[_] = s.getStub
        if (stub != null) {
          return stub.getChildrenByType(ScalaElementTypes.IMPORT_STMT, JavaArrayFactoryUtil.ScImportStmtFactory).toSeq
        }
      case _ =>
    }
    findChildrenByClassScala(classOf[ScImportStmt]).toSeq
  }
  /** Feeds declarations of the import statements preceding `lastParent` to the
   *  processor; returns false as soon as the processor is satisfied. */
  override def processDeclarations(processor: PsiScopeProcessor,
                                  state : ResolveState,
                                  lastParent: PsiElement,
                                  place: PsiElement): Boolean = {
    if (lastParent != null) {
      var run = ScalaPsiUtil.getPrevStubOrPsiElement(lastParent)
      // updateResolveCaches()
      // Walk backwards over preceding siblings; only imports contribute.
      while (run != null) {
        ProgressManager.checkCanceled()
        if (run.isInstanceOf[ScImportStmt] &&
          !run.processDeclarations(processor, state, lastParent, place)) return false
        run = ScalaPsiUtil.getPrevStubOrPsiElement(run)
      }
    }
    true
  }
  /** Collects the import statements that precede `lastParent` (closest first). */
  def getImportsForLastParent(lastParent: PsiElement): Seq[ScImportStmt] = {
    val buffer: ArrayBuffer[ScImportStmt] = new ArrayBuffer[ScImportStmt]()
    if (lastParent != null) {
      var run = ScalaPsiUtil.getPrevStubOrPsiElement(lastParent)
      while (run != null) {
        ProgressManager.checkCanceled()
        run match {
          case importStmt: ScImportStmt => buffer += importStmt
          case _ =>
        }
        run = ScalaPsiUtil.getPrevStubOrPsiElement(run)
      }
    }
    buffer.toVector
  }
  /** Recursively collects every ImportUsed token in this subtree: whole import
   *  expressions, wildcard selectors and individual selectors. */
  def getAllImportUsed: mutable.Set[ImportUsed] = {
    val res: mutable.Set[ImportUsed] = new mutable.HashSet[ImportUsed]
    def processChild(element: PsiElement) {
      for (child <- element.getChildren) {
        child match {
          case imp: ScImportExpr =>
            if (/*!imp.singleWildcard && */imp.selectorSet.isEmpty) {
              res += ImportExprUsed(imp)
            }
            else if (imp.isSingleWildcard) {
              res += ImportWildcardSelectorUsed(imp)
            }
            for (selector <- imp.selectors) {
              res += ImportSelectorUsed(selector)
            }
          case _ => processChild(child)
        }
      }
    }
    processChild(this)
    res
  }
  /** Imports appearing before the first type definition / explicit packaging;
   *  descends into a leading non-explicit packaging. */
  def importStatementsInHeader: Seq[ScImportStmt] = {
    val buf = new ArrayBuffer[ScImportStmt]
    for (child <- getChildren) {
      child match {
        case x: ScImportStmt => buf += x
        case p: ScPackaging if !p.isExplicit && buf.isEmpty => return p.importStatementsInHeader
        case _: ScTypeDefinition | _: ScPackaging => return buf.toVector
        case _ =>
      }
    }
    buf.toVector
  }
  /** Adds an import for `clazz` unless `ref` already resolves to it (directly,
   *  via a parameterless type alias, or via a Predef/scala value binding). */
  def addImportForClass(clazz: PsiClass, ref: PsiElement = null) {
    ref match {
      case ref: ScReferenceElement =>
        if (!ref.isValid || ref.isReferenceTo(clazz)) return
        ref.bind().foreach {
          case ScalaResolveResult(t: ScTypeAliasDefinition, _) if t.typeParameters.isEmpty =>
            t.aliasedType.foreach {
              case ScDesignatorType(c: PsiClass) if c == clazz => return
              case _ =>
            }
          case ScalaResolveResult(c: PsiClass, _) if c.qualifiedName == clazz.qualifiedName => return
          case ScalaResolveResult.withActual(p: ScReferencePattern) =>
            p.nameContext match {
              case ContainingClass(o: ScObject) if Set("scala.Predef", "scala").contains(o.qualifiedName) => return
              case ScPatternDefinition.expr(ResolvesTo(`clazz`)) => return
              case _ =>
            }
          case _ =>
        }
      case _ =>
    }
    addImportForPath(clazz.qualifiedName, ref)
  }
  /** Adds an import for `elem`, qualified by `cClass` when given; skipped if
   *  `ref` already resolves to `elem`. */
  def addImportForPsiNamedElement(elem: PsiNamedElement, ref: PsiElement, cClass: Option[PsiClass] = None) {
    def needImport = ref match {
      case null => true
      case ref: ScReferenceElement => ref.isValid && !ref.isReferenceTo(elem)
      case _ => false
    }
    if (needImport) {
      cClass match {
        case Some(clazz) =>
          val qualName = clazz.qualifiedName
          if (qualName != null) {
            addImportForPath(qualName + "." + elem.name, ref)
          }
        case _ =>
          val qualName = ScalaNamesUtil.qualifiedName(elem).orNull
          if (qualName != null) {
            addImportForPath(qualName, ref)
          }
      }
    }
  }
  /** Inserts imports for the given fully-qualified `paths`, merging them into
   *  an existing import range (chosen relative to `refsContainer` when code
   *  style says to import close to the reference) or creating a new first
   *  import block. Same-package paths and unresolvable qualifiers are dropped. */
  def addImportsForPaths(paths: Seq[String], refsContainer: PsiElement = null): Unit = {
    import ScalaImportOptimizer._
    implicit val manager: PsiManager = getManager
    // True when `path`'s qualifier equals this holder's package — no import needed.
    def samePackage(path: String) = {
      val ref = createReferenceFromText(path)
      val pathQualifier = Option(ref).flatMap(_.qualifier.map(_.getText)).getOrElse("")
      val ourPackageName = this.parentOfType(classOf[ScPackaging], strict = false)
        .map(_.fullPackageName)
      ourPackageName.contains(pathQualifier)
    }
    def firstChildNotCommentWhitespace =
      this.children.dropWhile(el => el.isInstanceOf[PsiComment] || el.isInstanceOf[PsiWhiteSpace]).headOption
    // A leading non-explicit packaging with no imports of our own: delegate to it.
    firstChildNotCommentWhitespace.foreach {
      case pack: ScPackaging if !pack.isExplicit && this.children.filterByType[ScImportStmt].isEmpty =>
        pack.addImportsForPaths(paths, refsContainer)
        return
      case _ =>
    }
    val file = this.getContainingFile match {
      case sf: ScalaFile => sf
      case _ => return
    }
    val settings = OptimizeImportSettings(getProject)
    val optimizer: ScalaImportOptimizer = findOptimizerFor(file) match {
      case Some(o: ScalaImportOptimizer) => o
      case _ => return
    }
    val place = getImportStatements.lastOption.getOrElse(getFirstChild.getNextSibling)
    val importInfosToAdd = paths
      .filterNot(samePackage)
      .flatMap(createInfoFromPath(_, place))
      .filter(hasValidQualifier(_, place))
    val importRanges = optimizer.collectImportRanges(this, createInfo(_), Set.empty)
    val needToInsertFirst =
      if (importRanges.isEmpty) true
      else refsContainer == null && hasCodeBeforeImports
    if (needToInsertFirst) {
      // No usable range: seed a new one with a placeholder import, then let the
      // optimizer replace it with the properly ordered import infos.
      val dummyImport = createImportFromText("import dummy.dummy")
      val usedNames = collectUsedImportedNames(this)
      val inserted = insertFirstImport(dummyImport, getFirstChild).asInstanceOf[ScImportStmt]
      val psiAnchor = PsiAnchor.create(inserted)
      val rangeInfo = RangeInfo(psiAnchor, psiAnchor, importInfosToAdd, usedImportedNames = usedNames, isLocal = false)
      val infosToAdd = optimizedImportInfos(rangeInfo, settings)
      optimizer.replaceWithNewImportInfos(rangeInfo, infosToAdd, settings, file)
    }
    else {
      val sortedRanges = importRanges.toSeq.sortBy(_.startOffset)
      val selectedRange =
        if (refsContainer != null && ScalaCodeStyleSettings.getInstance(getProject).isAddImportMostCloseToReference)
          sortedRanges.reverse.find(_.endOffset < refsContainer.getTextRange.getStartOffset)
        else sortedRanges.headOption
      selectedRange match {
        case Some(rangeInfo @ (RangeInfo(rangeStart, _, infosFromRange, _, _))) =>
          val resultInfos = insertImportInfos(importInfosToAdd, infosFromRange, rangeStart, settings)
          optimizer.replaceWithNewImportInfos(rangeInfo, resultInfos, settings, file)
        case _ =>
      }
    }
  }
  /** Convenience wrapper around [[addImportsForPaths]] for a single path. */
  def addImportForPath(path: String, ref: PsiElement = null): Unit = {
    addImportsForPaths(Seq(path), ref)
  }
  // True when the import's qualifier resolves at `place`.
  private def hasValidQualifier(importInfo: ImportInfo, place: PsiElement): Boolean = {
    val ref = createReferenceFromText(importInfo.prefixQualifier, this, place)
    ref.multiResolve(false).nonEmpty
  }
  // Parses `path` into ImportInfo(s), escaping Scala keywords in the FQN.
  private def createInfoFromPath(path: String, place: PsiElement): Seq[ImportInfo] = {
    val importText = s"import ${ScalaNamesUtil.escapeKeywordsFqn(path)}"
    val importStmt = createImportFromTextWithContext(importText, this, place)
    ScalaImportOptimizer.createInfo(importStmt)
  }
  /** True when a statement appears before the first import (or there are no
   *  imports at all among the leading children). */
  private def hasCodeBeforeImports: Boolean = {
    val firstChild = childBeforeFirstImport.getOrElse(getFirstChild)
    var nextChild = firstChild
    while (nextChild != null) {
      nextChild match {
        case _: ScImportStmt => return false
        case _: ScBlockStatement => return true
        case _ => nextChild = nextChild.getNextSibling
      }
    }
    true
  }
  /** Inserts the very first import, after the opening brace when one exists. */
  protected def insertFirstImport(importSt: ScImportStmt, first: PsiElement): PsiElement = {
    childBeforeFirstImport match {
      case Some(elem) if first != null && elem.getTextRange.getEndOffset > first.getTextRange.getStartOffset =>
        addImportAfter(importSt, elem)
      case _ =>
        addImportBefore(importSt, first)
    }
  }
  /** Re-indents `element` to the code-style indent for its line, adjusting or
   *  inserting the preceding whitespace node. */
  protected def indentLine(element: PsiElement): Unit = {
    val indent = CodeStyleManager.getInstance(getProject).getLineIndent(getContainingFile, element.getTextRange.getStartOffset)
    if (indent == null) return
    // it's better to work with PSI at this stage
    element.getPrevSibling match {
      case ws: PsiWhiteSpace =>
        // Strip trailing spaces/tabs from the whitespace, then append the
        // computed indent.
        val oldTextNoIndent = ws.getText.reverse.dropWhile(c => c == ' ' || c == '\\t').reverse
        val newText = oldTextNoIndent + indent
        if (newText != ws.getText) {
          val indented = ScalaPsiElementFactory.createNewLine(newText)
          ws.replace(indented)
        }
      case _ =>
        if (!indent.isEmpty) {
          val indented = ScalaPsiElementFactory.createNewLine(s"$indent")
          addBefore(indented, element)
        }
    }
  }
  // The opening brace, if this holder is brace-delimited (e.g. a block).
  protected def childBeforeFirstImport: Option[PsiElement] = {
    Option(getNode.findChildByType(ScalaTokenTypes.tLBRACE)).map(_.getPsi)
  }
  /** Appends `element` as the last child of this holder. */
  def addImport(element: PsiElement): PsiElement = {
    CodeEditUtil.addChildren(getNode, element.getNode, element.getNode, null).getPsi
  }
  /** Inserts `element` before `anchor` and fixes its indentation. */
  def addImportBefore(element: PsiElement, anchor: PsiElement): PsiElement = {
    val anchorNode = anchor.getNode
    val result = CodeEditUtil.addChildren(getNode, element.getNode, element.getNode, anchorNode).getPsi
    indentLine(result)
    result
  }
  /** Inserts `element` after `anchor` (appends when `anchor` is last). */
  def addImportAfter(element: PsiElement, anchor: PsiElement): PsiElement = {
    if (anchor.getNode == getNode.getLastChildNode) return addImport(element)
    addImportBefore(element, anchor.getNode.getTreeNext.getPsi)
  }
  def plainDeleteImport(stmt: ScImportExpr) {
    stmt.deleteExpr()
  }
  def plainDeleteSelector(sel: ScImportSelector) {
    sel.deleteSelector()
  }
  /** Removes a whole import statement, cleaning up a trailing semicolon and
   *  collapsing the surrounding whitespace so no blank line is left behind. */
  def deleteImportStmt(stmt: ScImportStmt) {
    def remove(node: ASTNode) = getNode.removeChild(node)
    // Drops one newline from a whitespace node that spans 2+ lines.
    def shortenWhitespace(node: ASTNode) {
      if (node == null) return
      if (node.getText.count(_ == '\\n') >= 2) {
        val nl = createNewLine(node.getText.replaceFirst("[\\n]", ""))(getManager)
        getNode.replaceChild(node, nl.getNode)
      }
    }
    def removeWhitespace(node: ASTNode) {
      if (node == null) return
      if (node.getPsi.isInstanceOf[PsiWhiteSpace]) {
        if (node.getText.count(_ == '\\n') < 2) remove(node)
        else shortenWhitespace(node)
      }
    }
    def removeSemicolonAndWhitespace(node: ASTNode) {
      if (node == null) return
      if (node.getElementType == ScalaTokenTypes.tSEMICOLON) {
        removeWhitespace(node.getTreeNext)
        remove(node)
      }
      else removeWhitespace(node)
    }
    val node = stmt.getNode
    val next = node.getTreeNext
    val prev = node.getTreePrev
    removeSemicolonAndWhitespace(next)
    remove(node)
    shortenWhitespace(prev)
  }
}
| triplequote/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/ScImportsHolder.scala | Scala | apache-2.0 | 13,347 |
package com.tpl.lib.gui.config
/** GUI configuration for `modId`, loaded from the named JSON config.
 *  NOTE(review): `loadConfig` and `data` appear to come from the JsonLoading
 *  mixin and `loadConfig` is expected to populate `data` before the fields
 *  below read it — confirm against JsonLoading. */
class GuiConfig(name: String, modId: String)
  extends LabelWidgetLoading
  with PlayerInventoryLoading
  with PowerGaugeLoading
  with FluidGaugeLoading
  with JsonLoading
{
  // Side effect at construction time: loads the JSON config into `data`.
  loadConfig(name, modId)
  // Raw widget definitions keyed by name; cast assumes the config's shape.
  val widgets = data("widgets").asInstanceOf[Map[String, Any]]
  val textures = new TextureGuiConfig(modId)
  val window = new WindowGuiConfig(data("window").asInstanceOf[Map[String, Any]], textures, modId)
}
| piotrb/hamcraft | src/main/scala/com/tpl/lib/gui/config/GuiConfig.scala | Scala | bsd-2-clause | 446 |
package scorex.lagonaki
import java.net.InetSocketAddress
import akka.actor.{ActorRef, ActorSystem}
import akka.testkit.{ImplicitSender, TestKitBase, TestProbe}
import akka.util.Timeout
import org.scalamock.scalatest.PathMockFactory
import org.scalatest.Matchers
import scorex.app.Application
import scorex.block.Block
import scorex.block.Block._
import scorex.consensus.ConsensusModule
import scorex.network.NetworkController.{DataFromPeer, RegisterMessagesHandler, SendToNetwork}
import scorex.network.message.{BasicMessagesRepo, Message, MessageSpec}
import scorex.network.{ConnectedPeer, SendToChosen}
import scorex.transaction.TransactionModule
import scala.concurrent.duration._
import scala.language.{implicitConversions, postfixOps}
/** Shared base for actor tests: provides an ActorSystem, a mocked network
 *  controller probe, a peer fixture, and helpers for driving the actor under
 *  test (`actorRef`, supplied by subclasses) with network messages. */
abstract class ActorTestingCommons extends TestKitBase
  with org.scalatest.path.FreeSpecLike
  with Matchers
  with ImplicitSender
  with PathMockFactory {
  protected implicit val testTimeout = Timeout(500 milliseconds)
  protected val testDuration = testTimeout.duration
  implicit final lazy val system = ActorSystem(getClass.getSimpleName)
  protected lazy val networkController = TestProbe("NetworkController")
  protected def networkControllerMock = networkController.ref
  // Handler registrations are noise for assertions; drop them from the probe.
  networkController.ignoreMsg {
    case RegisterMessagesHandler(_, _) => true
  }
  /** Runs `fun` as a spec named after the test class, with cleanup (see below). */
  protected final def testSafely(fun: => Unit): Unit = getClass.getSimpleName testSafely fun
  // Wrapper enabling `"name" testSafely { ... }`: verifies mock expectations
  // and shuts the actor system down even when the body throws.
  protected final class ActorTestingStringWrapper(s: String) {
    def testSafely(fun: => Unit): Unit = {
      s - {
        try {
          fun
        } finally {
          try verifyExpectations
          finally shutdown()
        }
      }
    }
  }
  protected final implicit def convertTo(s: String): ActorTestingStringWrapper = new ActorTestingStringWrapper(s)
  protected val peerId = 9977
  protected lazy val peerHandler = TestProbe("PeerHandler")
  protected lazy val peer = ConnectedPeer(new InetSocketAddress(peerId), peerHandler.ref)
  // The actor under test; concrete test classes must provide it.
  protected val actorRef: ActorRef
  /** Delivers `data` to the actor under test as if it arrived from `fromPeer`. */
  protected def dataFromNetwork[C](spec: MessageSpec[C], data: C, fromPeer: ConnectedPeer = peer): Unit =
    actorRef ! DataFromPeer(spec.messageCode, data, fromPeer)
  // Helpers converting small ints to single-byte block ids for fixtures.
  protected def blockIds(ids: Int*): BlockIds = ids.map(toBlockId)
  protected implicit def toBlockIds(ids: Seq[Int]): BlockIds = blockIds(ids:_*)
  protected implicit def toBlockId(i: Int): BlockId = Array(i.toByte)
  /** Builds a mock Block whose uniqueId is derived from `id`. */
  protected def mockBlock[Id](id: Id)(implicit conv: Id => BlockId): Block = {
    trait BlockMock extends Block {
      override type ConsensusDataType = Unit
      override type TransactionDataType = Unit
      override val uniqueId: BlockId = id
    }
    mock[BlockMock]
  }
  // Type class extracting a comparable value from message payloads so that
  // expectNetworkMessage can assert on them.
  protected trait TestDataExtraction[T] {
    def extract(actual: T) : Any
  }
  protected implicit object BlockIdsExtraction extends TestDataExtraction[BlockIds] {
    override def extract(blockIds: BlockIds): Seq[Int] = blockIds.map(BlockIdExtraction.extract)
  }
  protected implicit object BlockIdExtraction extends TestDataExtraction[BlockId] {
    override def extract(blockId: BlockId): Int = blockId(0)
  }
  /** Asserts that the network controller probe receives a SendToNetwork with
   *  the expected spec and payload, targeted at our test peer. */
  protected def expectNetworkMessage[Content : TestDataExtraction](expectedSpec: MessageSpec[Content], expectedData: Any): Unit =
    networkController.expectMsgPF(hint = expectedData.toString) {
      case SendToNetwork(Message(spec, Right(data: Content@unchecked), None), SendToChosen(peers)) =>
        peers should contain (peer)
        spec shouldEqual expectedSpec
        implicitly[TestDataExtraction[Content]].extract(data) shouldEqual expectedData
    }
  // Application stub wired to the mocked network controller.
  trait ApplicationMock extends Application {
    implicit val transactionModule = mock[TransactionModule[Unit]]
    implicit val consensusModule = mock[ConsensusModule[Unit]]
    final override val basicMessagesSpecsRepo: BasicMessagesRepo = new BasicMessagesRepo()
    final override lazy val networkController: ActorRef = networkControllerMock
  }
} | alexeykiselev/WavesScorex | src/test/scala/scorex/lagonaki/ActorTestingCommons.scala | Scala | cc0-1.0 | 3,899 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution
import java.util.concurrent.TimeoutException
import scala.util.control.NonFatal
import monix.execution.schedulers.TrampolineExecutionContext.immediate
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, MonixInternals, Promise}
import scala.util.{Failure, Success, Try}
/** Utilities for Scala's standard `concurrent.Future`. */
/** Utilities for Scala's standard `concurrent.Future`. */
object FutureUtils {
  /** Utility that returns a new Future that either completes with
    * the original Future's result or with a TimeoutException in case
    * the maximum wait time was exceeded.
    *
    * @param atMost specifies the maximum wait time until the future is
    *        terminated with a TimeoutException
    * @param s is the Scheduler, needed for completing our internal promise
    * @return a new future that will either complete with the result of our
    *         source or fail in case the timeout is reached.
    */
  def timeout[A](source: Future[A], atMost: FiniteDuration)(implicit s: Scheduler): Future[A] = {
    // Include the duration in the message: a bare `new TimeoutException`
    // carries a null message, which makes timeout failures hard to diagnose.
    val err = new TimeoutException(s"Future timed out after $atMost")
    val promise = Promise[A]()
    val task = s.scheduleOnce(atMost.length, atMost.unit, new Runnable { def run() = { promise.tryFailure(err); () } })
    source.onComplete { r =>
      // canceling task to prevent wasted CPU resources and memory leaks
      // if the task has been executed already, this has no effect
      task.cancel()
      promise.tryComplete(r)
    }
    promise.future
  }
  /** Utility that returns a new Future that either completes with
    * the original Future's result or after the timeout specified by
    * `atMost` it tries to complete with the given `fallback`.
    * Whatever `Future` finishes first after the timeout, will win.
    *
    * @param atMost specifies the maximum wait time until the future is
    *        terminated with a TimeoutException
    * @param fallback the fallback future that gets triggered after timeout
    * @param s is the Scheduler, needed for completing our internal promise
    * @return a new future that will either complete with the result of our
    *         source or with the fallback in case the timeout is reached
    */
  def timeoutTo[A](source: Future[A], atMost: FiniteDuration, fallback: => Future[A])(implicit
    s: Scheduler): Future[A] = {
    val promise = Promise[Option[Try[A]]]()
    val task = s.scheduleOnce(atMost.length, atMost.unit, new Runnable { def run() = { promise.trySuccess(None); () } })
    source.onComplete { r =>
      // canceling task to prevent wasted CPU resources and memory leaks
      // if the task has been executed already, this has no effect
      task.cancel()
      promise.trySuccess(Some(r))
      ()
    }
    promise.future.flatMap {
      case Some(res) => Future.fromTry(res)
      case None =>
        // evaluate fallback only here to exclude possibility of race condition
        // between source and fallback when they are finishing at the same time
        fallback
    }
  }
  /** Utility that lifts a `Future[A]` into a `Future[Try[A]]`, exposing
    * error explicitly.
    */
  def materialize[A](source: Future[A])(implicit ec: ExecutionContext): Future[Try[A]] = {
    if (source.isCompleted) {
      Future.successful(source.value.get)
    } else {
      val p = Promise[Try[A]]()
      source.onComplete(p.success)(immediate)
      p.future
    }
  }
  /** Given a mapping functions that operates on successful results as well as
    * errors, transforms the source by applying it.
    *
    * Similar to `Future.transform` from Scala 2.12.
    */
  def transform[A, B](source: Future[A], f: Try[A] => Try[B])(implicit ec: ExecutionContext): Future[B] = {
    source match {
      case ref: CancelableFuture[_] =>
        // CancelableFuture already implements transform
        ref.asInstanceOf[CancelableFuture[A]].transform(f)(ec)
      case _ =>
        val p = Promise[B]()
        source.onComplete { result =>
          val b =
            try f(result)
            catch { case t if NonFatal(t) => Failure(t) }
          p.complete(b)
        }
        p.future
    }
  }
  /** Given a mapping functions that operates on successful results
    * as well as errors, transforms the source by applying it.
    *
    * Similar to `Future.transformWith` from Scala 2.12.
    */
  def transformWith[A, B](source: Future[A], f: Try[A] => Future[B])(implicit ec: ExecutionContext): Future[B] = {
    source match {
      case ref: CancelableFuture[_] =>
        // CancelableFuture already implements transformWith
        ref.asInstanceOf[CancelableFuture[A]].transformWith(f)(ec)
      case _ =>
        MonixInternals.transformWith(source, f)(ec)
    }
  }
  /** Utility that transforms a `Future[Try[A]]` into a `Future[A]`,
    * hiding errors, being the opposite of [[materialize]].
    */
  def dematerialize[A](source: Future[Try[A]])(implicit ec: ExecutionContext): Future[A] = {
    if (source.isCompleted)
      source.value.get match {
        case Failure(error) => Future.failed(error)
        case Success(value) =>
          value match {
            case Success(success) => Future.successful(success)
            case Failure(error) => Future.failed(error)
          }
      }
    else {
      val p = Promise[A]()
      source.onComplete({
        case Failure(error) => p.failure(error)
        case Success(result) => p.complete(result)
      })(immediate)
      p.future
    }
  }
  /** Creates a future that completes with the specified `result`, but only
    * after the specified `delay`.
    */
  def delayedResult[A](delay: FiniteDuration)(result: => A)(implicit s: Scheduler): Future[A] = {
    val p = Promise[A]()
    s.scheduleOnce(delay.length, delay.unit, new Runnable { def run() = { p.complete(Try(result)); () } })
    p.future
  }
  /** Provides extension methods for `Future`. */
  object extensions {
    /** Provides utility methods added on Scala's `concurrent.Future` */
    implicit class FutureExtensions[A](val source: Future[A]) extends AnyVal {
      /** [[FutureUtils.timeout]] exposed as an extension method. */
      def timeout(atMost: FiniteDuration)(implicit s: Scheduler): Future[A] =
        FutureUtils.timeout(source, atMost)
      /** [[FutureUtils.timeoutTo]] exposed as an extension method. */
      def timeoutTo[U >: A](atMost: FiniteDuration, fallback: => Future[U])(implicit s: Scheduler): Future[U] =
        FutureUtils.timeoutTo(source, atMost, fallback)
      /** [[FutureUtils.materialize]] exposed as an extension method. */
      def materialize(implicit ec: ExecutionContext): Future[Try[A]] =
        FutureUtils.materialize(source)
      /** [[FutureUtils.dematerialize]] exposed as an extension method. */
      def dematerialize[U](implicit ev: A <:< Try[U], ec: ExecutionContext): Future[U] =
        FutureUtils.dematerialize(source.asInstanceOf[Future[Try[U]]])
    }
    /** Provides utility methods for Scala's `concurrent.Future` companion object. */
    implicit class FutureCompanionExtensions(val f: Future.type) extends AnyVal {
      /** [[FutureUtils.delayedResult]] exposed as an extension method. */
      def delayedResult[A](delay: FiniteDuration)(result: => A)(implicit s: Scheduler): Future[A] =
        FutureUtils.delayedResult(delay)(result)
    }
  }
}
| alexandru/monifu | monix-execution/shared/src/main/scala_2.11/monix/execution/FutureUtils.scala | Scala | apache-2.0 | 7,967 |
package com.twitter.finagle.pool
import collection.mutable.Queue
import scala.annotation.tailrec
import com.twitter.util.{Future, Time, Duration}
import com.twitter.finagle.{Service, ServiceFactory, ServiceProxy, ServiceClosedException}
import com.twitter.finagle.util.{Timer, Cache}
import com.twitter.finagle.stats.{NullStatsReceiver, StatsReceiver}
/**
* A pool that temporarily caches items from the underlying one, up to
* the given timeout amount of time.
*/
private[finagle] class CachingPool[Req, Rep](
  factory: ServiceFactory[Req, Rep],
  cacheSize: Int,
  ttl: Duration,
  timer: com.twitter.util.Timer = Timer.default,
  statsReceiver: StatsReceiver = NullStatsReceiver)
  extends ServiceFactory[Req, Rep]
{
  // Evicted entries are released back to the underlying factory.
  private[this] val cache =
    new Cache[Service[Req, Rep]](cacheSize, ttl, timer, Some(_.release()))
  // Volatile: read by WrappedService.release without holding the pool lock.
  @volatile private[this] var isOpen = true
  private[this] val sizeGauge =
    statsReceiver.addGauge("pool_cached") { cache.size }
  // On release, healthy services of an open pool go back into the cache;
  // everything else is released for real.
  private[this] class WrappedService(underlying: Service[Req, Rep])
    extends ServiceProxy[Req, Rep](underlying)
  {
    override def release() =
      if (this.isAvailable && CachingPool.this.isOpen)
        cache.put(underlying)
      else
        underlying.release()
  }
  // Pops cached services, discarding (and releasing) unavailable ones until a
  // healthy one or an empty cache is found.
  @tailrec
  private[this] def get(): Option[Service[Req, Rep]] = {
    cache.get() match {
      case s@Some(service) if service.isAvailable => s
      case Some(service) /* unavailable */ => service.release(); get()
      case None => None
    }
  }
  /** Returns a cached service when one is available, otherwise asks the
   *  underlying factory for a fresh one. Fails once the pool is closed. */
  def make(): Future[Service[Req, Rep]] = synchronized {
    if (!isOpen) Future.exception(new ServiceClosedException) else {
      get() match {
        case Some(service) =>
          Future.value(new WrappedService(service))
        case None =>
          factory.make() map { new WrappedService(_) }
      }
    }
  }
  /** Closes the pool: releases all cached services and the underlying factory. */
  def close() = synchronized {
    isOpen = false
    cache.evictAll()
    factory.close()
  }
  override def isAvailable = isOpen
  override val toString = "caching_pool_%s".format(factory.toString)
}
| enachb/finagle_2.9_durgh | finagle-core/src/main/scala/com/twitter/finagle/pool/CachingPool.scala | Scala | apache-2.0 | 2,024 |
/*
* Copyright 2013 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.storehaus.cache
/** Proxy for MutableCaches. Methods not overridden in extensions will be
  * forwarded to the proxied `self` member. Mutating methods return `this`
  * (the proxy) rather than `self`, preserving the proxy's type. */
trait MutableCacheProxy[K, V] extends MutableCache[K, V] with CacheProxied[MutableCache[K, V]] {
  override def get(k: K): Option[V] = self.get(k)
  override def +=(kv: (K, V)): this.type = {
    self.+=(kv)
    this
  }
  override def multiInsert(kvs: Map[K, V]): this.type = {
    self.multiInsert(kvs)
    this
  }
  override def hit(k: K): Option[V] = self.hit(k)
  override def evict(k: K): Option[V] = self.evict(k)
  override def iterator: Iterator[(K, V)] = self.iterator
  override def empty: MutableCache[K, V] = self.empty
  override def clear: this.type = {
    self.clear
    this
  }
  override def contains(k: K): Boolean = self.contains(k)
  override def -=(k: K): this.type = {
    self.-=(k)
    this
  }
  override def multiRemove(ks: Set[K]): this.type = {
    self.multiRemove(ks)
    this
  }
  override def getOrElseUpdate(k: K, v: => V): V = self.getOrElseUpdate(k, v)
  override def filter(pred: ((K, V)) => Boolean): MutableCache[K, V] = self.filter(pred)
}
| twitter/storehaus | storehaus-cache/src/main/scala/com/twitter/storehaus/cache/MutableCacheProxy.scala | Scala | apache-2.0 | 1,735 |
package org.sparkpipe.rdd
import java.io.{FileInputStream, File => JavaFile}
import java.nio.file.Files
import java.nio.file.attribute.{BasicFileAttributes, FileTime}
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
import org.apache.commons.codec.digest.DigestUtils
import org.apache.hadoop.fs.Path
import org.apache.spark.{Partition, TaskContext}
import org.apache.spark.rdd.RDD
import org.sparkpipe.util.io.os
/**
 * :: Experimental ::
 * Bundle of the statistics that can be collected for a single file.
 *
 * @param path absolute file path as a URI
 * @param file name of the file
 * @param size size in bytes
 * @param permissions permission string, applicable only on Linux
 * @param datecreated creation timestamp in milliseconds, if applicable
 * @param datemodified modification timestamp in milliseconds, if applicable
 * @param checksum MD5 hex digest for the local file system and
 * MD5-of-0MD5-of-512CRC32C for HDFS; other file systems are not supported
 * (will return an empty string). Consider moving from generating an MD5 sum
 * to using CRC32 if working with very large files.
 */
private[rdd] case class FileStatistics(
  path: String,
  file: String,
  size: Long,
  permissions: String,
  datecreated: Long,
  datemodified: Long,
  checksum: String
)
/**
 * :: Experimental ::
 * `FileStatisticsRDD` returns statistics for each entry assuming that entry is a valid file path,
 * either HDFS or local file system, otherwise it will throw an exception. Statistics include
 * full file path, name, size, date modified, etc.
 * By default, does not generate a checksum for files, since it is relatively expensive. The
 * checksum field in that case will be an empty string.
 */
private[rdd] class FileStatisticsRDD[T: ClassTag](
  prev: RDD[T],
  private val withChecksum: Boolean = false
) extends FileRDD[FileStatistics](prev) {
  override def getPartitions: Array[Partition] = firstParent[T].partitions

  override def compute(split: Partition, context: TaskContext): Iterator[FileStatistics] = {
    // We assume that every entry is a valid file path. If an entry does not comply with being
    // a file path, we throw an exception without trying to process the rest.
    val conf = getConf()
    // buffer for statistics objects
    val buff: ArrayBuffer[FileStatistics] = new ArrayBuffer()
    // compute stats for each entry
    for (elem <- firstParent[T].iterator(split, context)) {
      val path = new Path(elem.toString())
      val fs = path.getFileSystem(conf)
      // resolve path relatively to current directory
      val resolvedPath = fs.resolvePath(path)
      val statuses = Option(fs.globStatus(resolvedPath))
      // `globStatus` may return null or an empty array; both mean the path is unresolvable
      if (statuses.isEmpty || statuses.get.isEmpty) {
        throw new IllegalArgumentException("Cannot resolve status for a file path: " +
          resolvedPath)
      }
      val statusArr = statuses.get
      if (statusArr.length > 1) {
        logWarning("Found more than one status for a file path \\"" + resolvedPath +
          "\\". Will use the first status")
      }
      val status = statusArr(0)
      val updatedPath = status.getPath()
      val updatedFile = new JavaFile(updatedPath.toString().stripPrefix("file:"))
      val updatedScheme = fs.getScheme()
      // collect statistics from the FileStatus instance
      // - URI of the path
      val pathStr = updatedPath.toString()
      // - name of the file
      val name = updatedPath.getName()
      // - size in bytes
      val size: Long = status.getLen()
      // - permissions, as string representation of `Permission` object
      val permission: String = status.getPermission().toString()
      // - modified date
      val dateModified: Long = status.getModificationTime()
      // - creation date, works only for local Linux / OS X file system, otherwise return -1
      val dateCreated: Long = if (updatedScheme == "file" && (os.isUnix() || os.isMac())) {
        val attrs = Files.readAttributes(updatedFile.toPath, "creationTime")
        // attribute "creationTime" is an instance of `java.nio.file.attribute.FileTime`
        attrs.get("creationTime").asInstanceOf[FileTime].toMillis()
      } else {
        logWarning("Created date for a file currently only supports local file system " +
          "either Linux or OS X")
        -1L
      }
      // - checksum: md5hex for the local file system and MD5-of-0MD5-of-512CRC32C for HDFS
      //   (default). When checksums are disabled or the scheme is unsupported we return the
      //   empty string promised by `FileStatistics`'s documentation, never a null reference.
      val checksum = if (!withChecksum) {
        logDebug("Checksum usage is off for path " + updatedPath.toString())
        ""
      } else {
        if (updatedScheme == "file") {
          var stream: FileInputStream = null
          try {
            stream = new FileInputStream(updatedFile)
            DigestUtils.md5Hex(stream)
          } finally {
            if (stream != null) {
              stream.close()
            }
          }
        } else if (updatedScheme == "hdfs") {
          val sumWithAlg = fs.getFileChecksum(updatedPath).toString()
          // split `sumWithAlg` into the algorithm name and the checksum; if there are fewer
          // than 2 parts return the original string, otherwise the second part is the checksum
          val parts = sumWithAlg.split(":", 2)
          if (parts.length == 2) {
            parts(1)
          } else {
            sumWithAlg
          }
        } else {
          logWarning("Checksum is not supported for file scheme " + updatedScheme)
          ""
        }
      }
      // add newly created statistics
      buff.append(FileStatistics(
        pathStr, name, size, permission, dateCreated, dateModified, checksum))
    }
    // return iterator of file statistics
    buff.toIterator
  }
}
| sadikovi/sparkpipe | src/main/scala/org/sparkpipe/rdd/FileStatisticsRDD.scala | Scala | mit | 6,391 |
package co.blocke.scalajack
package mongo
import org.bson._
import model.JackFlavor
import co.blocke.scalajack.Converters._
import co.blocke.scalajack.json._
import json4s._
import yaml._
import delimited._
import org.json4s.JValue
/**
 * Extension methods bridging ScalaJack flavors and MongoDB's `BsonValue`:
 * read/transform/render helpers on `BsonValue`, plus conversions from JSON,
 * YAML and json4s values into Bson.
 */
object Converters:
  extension (b: BsonValue)
    // Read a T from Bson, apply `fn`, then render the result in the target flavor.
    inline def mapMongoTo[T, S](toFlavor: JackFlavor[S])(fn: T => T)(implicit sjB: JackFlavor[BsonValue]): S = toFlavor.render[T](fn(sjB.read[T](b)))
  // mongoTo... flavors need T to be able to handle _id (DBKey) fields.
  extension (b: BsonValue)
    inline def mongoToJson[T](implicit sjJ: JackFlavor[JSON], sjB: JackFlavor[BsonValue]): JSON = sjJ.render( sjB.read[T](b) )
    inline def mongoToYaml[T](implicit sjY: JackFlavor[YAML], sjB: JackFlavor[BsonValue]): YAML = sjY.render( sjB.read[T](b) )
    inline def mongoToJson4s[T](implicit sjV: JackFlavor[JValue], sjB: JackFlavor[BsonValue]): JValue = sjV.render( sjB.read[T](b) )
    // Plain deserialization from Bson into T.
    inline def fromMongo[T](implicit sjB: JackFlavor[BsonValue]): T = sjB.read[T](b)
    // Read a T from Bson, apply `fn`, and re-render as Bson.
    inline def mapMongo[T](fn: T => T)(implicit sjB: JackFlavor[BsonValue]): BsonValue = sjB.render[T](fn(sjB.read[T](b)))
  // Tie in other converters...
  extension (j: JSON)
    inline def jsonToMongo[T](implicit sjB: JackFlavor[BsonValue], sjJ: JackFlavor[JSON]): BsonValue = sjB.render( sjJ.read[T](j) )
  extension (y: YAML)
    inline def yamlToMongo[T](implicit sjB: JackFlavor[BsonValue], sjY: JackFlavor[YAML]): BsonValue = sjB.render( sjY.read[T](y) )
  extension (j: JValue)
    inline def json4sToMongo[T](implicit sjB: JackFlavor[BsonValue], sjV: JackFlavor[JValue]): BsonValue = sjB.render( sjV.read[T](j) )
extension[T] (a: T)
inline def toMongo(implicit sjB: JackFlavor[BsonValue]): BsonValue = sjB.render(a) | gzoller/ScalaJack | mongo/src/main/scala/co.blocke.scalajack/mongo/Converters.scala | Scala | mit | 1,856 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution.command
import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
import org.apache.spark.sql.execution.command.{CarbonDropTableCommand, DropDatabaseCommand, RunnableCommand}
/**
 * Wraps Spark's `DropDatabaseCommand` so that, on a cascaded drop, Carbon
 * tables are dropped through Carbon's own drop path and the database
 * directory is removed from the Carbon metastore afterwards.
 */
case class CarbonDropDatabaseCommand(command: DropDatabaseCommand)
  extends RunnableCommand {
  override val output = command.output
  override def run(sparkSession: SparkSession): Seq[Row] = {
    val dbName = command.databaseName
    // DropHiveDB command will fail if cascade is false and one or more table exists in database
    val rows = command.run(sparkSession)
    if (command.cascade) {
      // NOTE(review): `filterNot` keeps tables whose database does NOT match
      // `dbName`, yet each one is then dropped with `Some(dbName)`. Dropping
      // the tables of the cascaded database would normally use `filter` here —
      // confirm the semantics of `getAllTables()` before changing.
      val tablesInDB = CarbonEnv.getInstance(sparkSession).carbonMetastore.getAllTables()
        .filterNot(_.database.exists(_.equalsIgnoreCase(dbName)))
      tablesInDB.foreach { tableName =>
        // ifExists = true, so missing tables are silently skipped
        CarbonDropTableCommand(true, Some(dbName), tableName.table).run(sparkSession)
      }
    }
    // Remove the database directory from the Carbon store location.
    CarbonEnv.getInstance(sparkSession).carbonMetastore.dropDatabaseDirectory(dbName)
    rows
  }
}
| ksimar/incubator-carbondata | integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonHiveCommands.scala | Scala | apache-2.0 | 1,822 |
package com.banno.salat.avro.test
import com.banno.salat.avro._
import global._
import java.io.ByteArrayOutputStream
import org.apache.avro.io.{ DatumReader, DatumWriter, DecoderFactory, EncoderFactory }
import org.apache.avro.Schema
object BasicCaseClassSpec extends SalatAvroSpec {
  import models._

  // Fully-qualified record name that Avro derives for the test case class;
  // used repeatedly in the JSON assertions below.
  private val EdwardRecord = "com.banno.salat.avro.test.models.Edward"

  "a grater" should {
    "make an avro schema for a basic case class" in {
      val schema = grater[Edward].asAvroSchema
      println(schema)
      schema.getName must_== "union"
      val recordSchema = schema.getTypes().get(0)
      recordSchema.getName must_== "Edward"
      recordSchema.getNamespace must_== "com.banno.salat.avro.test.models"
      // Required fields map to plain Avro types...
      recordSchema must containField("a", Schema.Type.STRING)
      recordSchema must containField("b", Schema.Type.INT)
      recordSchema must containField("c", Schema.Type.DOUBLE)
      // ...while optional fields become a union of the type with NULL.
      recordSchema must containField("aa", List(Schema.Type.STRING, Schema.Type.NULL))
      recordSchema must containField("bb", List(Schema.Type.INT, Schema.Type.NULL))
      recordSchema must containField("cc", List(Schema.Type.DOUBLE, Schema.Type.NULL))
      recordSchema must containField("aaa", List(Schema.Type.STRING, Schema.Type.NULL))
      recordSchema must containField("bbb", List(Schema.Type.INT, Schema.Type.NULL))
      recordSchema must containField("ccc", List(Schema.Type.DOUBLE, Schema.Type.NULL))
    }

    "make a datum writer for a basic case class" in {
      val json = serializeToJSON(ed)
      println(json)
      // Defined fields serialize under their union branch by type name...
      json must /(EdwardRecord) /("a" -> ed.a)
      json must /(EdwardRecord) /("b" -> ed.b)
      json must /(EdwardRecord) /("c" -> ed.c)
      json must /(EdwardRecord) /("aa") /("string" -> ed.aa.get)
      json must /(EdwardRecord) /("bb") /("int" -> ed.bb.get)
      json must /(EdwardRecord) /("cc") /("double" -> ed.cc.get)
      // ...and None fields serialize as JSON null.
      json must /(EdwardRecord) /("aaa" -> null)
      json must /(EdwardRecord) /("bbb" -> null)
      json must /(EdwardRecord) /("ccc" -> null)
    }

    "make a datum reader for a basic case class" in {
      // A full serialize/deserialize round trip must reproduce an equal value.
      val original = ed
      val roundTripped: Edward = serializeAndDeserialize(original)
      println(roundTripped)
      roundTripped must_== original
    }
  }
}
| Banno/salat-avro | src/test/scala/BasicCaseClassSpec.scala | Scala | apache-2.0 | 2,422 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.amp
import java.time.{LocalDate, LocalDateTime}
import models.amp.Amp
import models.eab.Eab
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatestplus.mockito.MockitoSugar
import play.api.libs.json.Json
import services.ProxyCacheService
import utils.{AmlsSpec, AuthorisedFixture, DependencyMocks}
/**
 * Exercises [[ProxyCacheService]]: the AMP and EAB sections are fetched from
 * and saved to the cache, surfaced to callers as a JSON wrapper containing
 * the `data` payload plus `hasChanged` / `hasAccepted` flags.
 */
class ProxyCacheServiceSpec extends AmlsSpec with MockitoSugar
  with ScalaFutures
  with IntegrationPatience {
  val dateVal = LocalDateTime.now
  //AMP
  // Complete AMP answers used by the fixtures below.
  val completeAmpData = Json.obj(
    "typeOfParticipant" -> Seq("artGalleryOwner"),
    "soldOverThreshold" -> true,
    "dateTransactionOverThreshold" -> LocalDate.now,
    "identifyLinkedTransactions" -> true,
    "percentageExpectedTurnover" -> "fortyOneToSixty"
  )
  // Expected wrapper shape returned by the service for the AMP section.
  val completeAmpJson = Json.obj(
    "data" -> completeAmpData,
    "hasChanged" -> false,
    "hasAccepted" -> false
  )
  val completeAmpModel = Amp(completeAmpData)
  //EAB
  val completeEstateAgencyActPenalty = Json.obj(
    "penalisedEstateAgentsAct" -> true,
    "penalisedEstateAgentsActDetail" -> "details"
  )
  val completePenalisedProfessionalBody = Json.obj(
    "penalisedProfessionalBody" -> true,
    "penalisedProfessionalBodyDetail" -> "details"
  )
  // NOTE(review): "null" below is the literal string, not JSON null — confirm intended.
  val completeRedressScheme = Json.obj(
    "redressScheme" -> "propertyRedressScheme",
    "redressSchemeDetail" -> "null"
  )
  val completeMoneyProtectionScheme = Json.obj(
    "clientMoneyProtectionScheme" -> true
  )
  val completeServiceList = Seq(
    "assetManagement",
    "auctioneering",
    "businessTransfer",
    "commercial",
    "developmentCompany",
    "landManagement",
    "lettings",
    "relocation",
    "residential",
    "socialHousingProvision")
  val completeServices = Json.obj("eabServicesProvided" -> completeServiceList )
  // Full EAB payload, merged from the individual sections above.
  val completeEabData = completeServices ++
    completeEstateAgencyActPenalty ++
    completePenalisedProfessionalBody ++
    completeRedressScheme ++
    completeMoneyProtectionScheme
  val completeEabJson = Json.obj(
    "data" -> completeEabData,
    "hasChanged" -> false,
    "hasAccepted" -> false
  )
  val completeEabModel = Eab(completeEabData)
  val credId = "someId"
  // Shared fixture: an authorised request plus a service wired to the mock cache.
  trait Fixture extends AuthorisedFixture with DependencyMocks {
    self =>
    val request = addToken(authRequest)
    val svc = new ProxyCacheService(mockCacheConnector)
  }
  "AMP" when {
    "get" when {
      "cache data exists" when {
        "returns the amp section" in new Fixture {
          mockCacheFetch[Amp](Some(completeAmpModel), Some(Amp.key))
          whenReady(svc.getAmp(credId)) { result =>
            result mustBe Some(completeAmpJson)
          }
        }
      }
      "cache data does not exist" when {
        "returns null" in new Fixture {
          mockCacheFetch[Amp](None, Some(Amp.key))
          whenReady(svc.getAmp(credId)) { result =>
            result mustBe None
          }
        }
      }
    }
    "setAmp" when {
      "given valid json" when {
        "updates an existing model" in new Fixture {
          mockCacheFetch[Amp](Some(completeAmpModel), Some(Amp.key))
          mockCacheSave[Amp]
          whenReady(svc.setAmp(credId, completeAmpJson)) { result =>
            result mustBe mockCacheMap
          }
        }
        "saves a new model" in new Fixture {
          mockCacheFetch[Amp](None, Some(Amp.key))
          mockCacheSave[Amp]
          whenReady(svc.setAmp(credId, completeAmpJson)) { result =>
            result mustBe mockCacheMap
          }
        }
      }
    }
  }
  "EAB" when {
    "get" when {
      "cache data exists" when {
        "returns the eab section" in new Fixture {
          mockCacheFetch[Eab](Some(completeEabModel), Some(Eab.key))
          whenReady(svc.getEab(credId)) { result =>
            result mustBe Some(completeEabJson)
          }
        }
      }
      "cache data does not exist" when {
        "returns null" in new Fixture {
          mockCacheFetch[Eab](None, Some(Eab.key))
          whenReady(svc.getEab(credId)) { result =>
            result mustBe None
          }
        }
      }
    }
    "setEab" when {
      "given valid json" when {
        "updates an existing model" in new Fixture {
          mockCacheFetch[Eab](Some(completeEabModel), Some(Eab.key))
          mockCacheSave[Eab]
          whenReady(svc.setEab(credId, completeEabJson)) { result =>
            result mustBe mockCacheMap
          }
        }
        "saves a new model" in new Fixture {
          mockCacheFetch[Eab](None, Some(Eab.key))
          mockCacheSave[Eab]
          whenReady(svc.setEab(credId, completeEabJson)) { result =>
            result mustBe mockCacheMap
          }
        }
      }
    }
  }
}
| hmrc/amls-frontend | test/services/amp/ProxyCacheServiceSpec.scala | Scala | apache-2.0 | 5,405 |
package net.surguy.less
/**
 * Renders CSS AST nodes back into their textual form.
 *
 * @author Inigo Surguy
 */
object CssGenerator {
  def output(css: Css): String = css match {
    case Stylesheet(directives, rules) =>
      directives.map(output).mkString("\\n") + rules.map(output).mkString("\\n")
    case Directive(directive) => directive.text
    case Ruleset(selector, declarations) =>
      output(selector) + " { " + declarations.map(output).mkString + "}"
    case Selector(terms) => terms.map(_.text).mkString(" ")
    case Declaration(property, value) => property.text + ": " + output(value) + "; "
    case NullCss => ""
    case SimpleValue(v) => v.trim
    case col: RgbColor => toHashColor(col)
    case col: RgbaColor => toHashColor(col)
    case NamedColor(name) => name
    case HashColor(value) => toRgb(value)
  }

  /** Expands a hex colour (3, 4, 6 or 8 digits, no leading '#') into rgb()/rgba() notation. */
  def toRgb(hash: String) = {
    // Two hex digits as a decimal value.
    def full(start: Int, end: Int) = java.lang.Long.parseLong(hash.substring(start, end), 16)
    // One hex digit doubled (CSS shorthand form) as a decimal value.
    def shorthand(start: Int, end: Int) = java.lang.Long.parseLong(hash.substring(start, end)+hash.substring(start, end), 16)
    hash.length match {
      case 3 => "rgb(%s,%s,%s)".format( shorthand(0,1), shorthand(1,2), shorthand(2,3) )
      case 4 => "rgba(%s,%s,%s,%s)".format( shorthand(0,1), shorthand(1,2), shorthand(2,3), shorthand(3,4) )
      case 6 => "rgb(%s,%s,%s)".format( full(0,2), full(2,4), full(4,6) )
      case 8 => "rgba(%s,%s,%s,%s)".format( full(0,2), full(2,4), full(4,6), full(6,8) )
      case _ => "#"+hash
    }
  }

  def toHashColor(col: RgbColor) = "#"+hex(col.r)+hex(col.g)+hex(col.b)
  def toHashColor(col: RgbaColor) = "#"+hex(col.r)+hex(col.g)+hex(col.b)+hex(col.a)

  // Lowercase because that's what Less.js does
  private def hex(s: String) = "%02X".format(s.toInt).toLowerCase
}
| inigo/less-scala | src/main/scala/net/surguy/less/CssGenerator.scala | Scala | apache-2.0 | 1,926 |
package com.twitter.finagle.loadbalancer
import com.twitter.app.GlobalFlag
import com.twitter.finagle._
import com.twitter.finagle.client.Transporter
import com.twitter.finagle.factory.TrafficDistributor
import com.twitter.finagle.stats._
import com.twitter.util.{Activity, Future, Time, Var}
import java.net.SocketAddress
import java.util.logging.{Level, Logger}
/**
 * Allows duplicate SocketAddresses to be threaded through the
 * load balancer while avoiding the cache.
 */
private object SocketAddresses {
  /** Marker for addresses that wrap another (possibly itself wrapped) address. */
  trait Wrapped extends SocketAddress {
    def underlying: SocketAddress
  }

  /** Strips wrappers and weights until only the plain address remains. */
  @annotation.tailrec
  def unwrap(addr: SocketAddress): SocketAddress = addr match {
    case wrapped: Wrapped => unwrap(wrapped.underlying)
    case WeightedSocketAddress(inner, _) => unwrap(inner)
    case plain => plain
  }
}
/**
 * Global flag gating per-host stats export; consulted by
 * `LoadBalancerFactory.StackModule` when choosing the host stats receiver.
 * The string below is the user-facing flag help text.
 */
object perHostStats extends GlobalFlag(false, "enable/default per-host stats.\\n" +
  "\\tWhen enabled,the configured stats receiver will be used,\\n" +
  "\\tor the loaded stats receiver if none given.\\n" +
  "\\tWhen disabled, the configured stats receiver will be used,\\n" +
  "\\tor the NullStatsReceiver if none given.")
object LoadBalancerFactory {
  // The stack role under which the balancer module is registered.
  val role = Stack.Role("LoadBalancer")
  /**
   * A class eligible for configuring a client's load balancer probation setting.
   * When enabled, the balancer treats removals as advisory and flags them. If
   * a flagged endpoint is also detected as unhealthy by Finagle's session
   * qualifiers (e.g. fail-fast, failure accrual, etc) then the host is removed
   * from the collection.
   */
  case class EnableProbation(enable: Boolean)
  implicit object EnableProbation extends Stack.Param[EnableProbation] {
    val default = EnableProbation(false)
  }
  /**
   * A class eligible for configuring a [[com.twitter.finagle.Stackable]]
   * [[com.twitter.finagle.loadbalancer.LoadBalancerFactory]] per host
   * [[com.twitter.finagle.stats.StatsReceiver]]. If the per-host StatsReceiver is
   * not null, the load balancer will broadcast stats to it (scoped with the
   * "host:port" pair) for each host in the destination. For clients with
   * large host sets in their destination, this can cause unmanageable
   * memory pressure.
   */
  case class HostStats(hostStatsReceiver: StatsReceiver) {
    def mk(): (HostStats, Stack.Param[HostStats]) =
      (this, HostStats.param)
  }
  object HostStats {
    implicit val param = Stack.Param(HostStats(NullStatsReceiver))
  }
  /**
   * A class eligible for configuring a [[com.twitter.finagle.Stackable]]
   * [[com.twitter.finagle.loadbalancer.LoadBalancerFactory]] with a collection
   * of addrs to load balance.
   */
  case class Dest(va: Var[Addr]) {
    def mk(): (Dest, Stack.Param[Dest]) =
      (this, Dest.param)
  }
  object Dest {
    implicit val param = Stack.Param(Dest(Var.value(Addr.Neg)))
  }
  /**
   * A class eligible for configuring a [[com.twitter.finagle.Stackable]]
   * [[com.twitter.finagle.loadbalancer.LoadBalancerFactory]] with a label
   * for use in error messages.
   */
  case class ErrorLabel(label: String) {
    def mk(): (ErrorLabel, Stack.Param[ErrorLabel]) =
      (this, ErrorLabel.param)
  }
  object ErrorLabel {
    implicit val param = Stack.Param(ErrorLabel("unknown"))
  }
  /**
   * A class eligible for configuring a [[com.twitter.finagle.Stackable]]
   * [[com.twitter.finagle.loadbalancer.LoadBalancerFactory]].
   */
  case class Param(loadBalancerFactory: LoadBalancerFactory) {
    def mk(): (Param, Stack.Param[Param]) =
      (this, Param.param)
  }
  object Param {
    implicit val param = Stack.Param(Param(DefaultBalancerFactory))
  }
  /**
   * Creates a [[com.twitter.finagle.Stackable]] [[com.twitter.finagle.loadbalancer.LoadBalancerFactory]].
   * The module creates a new `ServiceFactory` based on the module above it for each `Addr`
   * in `LoadBalancerFactory.Dest`. Incoming requests are balanced using the load balancer
   * defined by the `LoadBalancerFactory.Param` parameter.
   */
  private[finagle] trait StackModule[Req, Rep] extends Stack.Module[ServiceFactory[Req, Rep]] {
    val role = LoadBalancerFactory.role
    val parameters = Seq(
      implicitly[Stack.Param[ErrorLabel]],
      implicitly[Stack.Param[Dest]],
      implicitly[Stack.Param[Param]],
      implicitly[Stack.Param[HostStats]],
      implicitly[Stack.Param[param.Stats]],
      implicitly[Stack.Param[param.Logger]],
      implicitly[Stack.Param[param.Monitor]],
      implicitly[Stack.Param[param.Reporter]])
    def make(params: Stack.Params, next: Stack[ServiceFactory[Req, Rep]]) = {
      val ErrorLabel(errorLabel) = params[ErrorLabel]
      val Dest(dest) = params[Dest]
      val Param(loadBalancerFactory) = params[Param]
      val EnableProbation(probationEnabled) = params[EnableProbation]
      val param.Stats(statsReceiver) = params[param.Stats]
      val param.Logger(log) = params[param.Logger]
      val param.Label(label) = params[param.Label]
      val param.Monitor(monitor) = params[param.Monitor]
      val param.Reporter(reporter) = params[param.Reporter]
      // Use the underlying receiver when the configured one is a rollup wrapper.
      val rawStatsReceiver = statsReceiver match {
        case sr: RollupStatsReceiver => sr.self
        case sr => sr
      }
      // Determine which stats receiver to use based on `perHostStats`
      // flag and the configured `HostStats` param. Report per-host stats
      // only when the flag is set.
      val hostStatsReceiver =
        if (!perHostStats()) NullStatsReceiver
        else params[LoadBalancerFactory.HostStats].hostStatsReceiver
      // Creates a ServiceFactory from the `next` in the stack and ensures
      // that `sockaddr` is an available param for `next`. Note, in the default
      // client stack, `next` represents the endpoint stack which will result
      // in a connection being established when materialized.
      def newEndpoint(sockaddr: SocketAddress): ServiceFactory[Req, Rep] = {
        // Per-host stats are scoped by "host:port" and broadcast alongside
        // the client-level receiver.
        val stats = if (hostStatsReceiver.isNull) statsReceiver else {
          val scope = sockaddr match {
            case WeightedInetSocketAddress(addr, _) =>
              "%s:%d".format(addr.getHostName, addr.getPort)
            case other => other.toString
          }
          val host = hostStatsReceiver.scope(label).scope(scope)
          BroadcastStatsReceiver(Seq(host, statsReceiver))
        }
        val composite = reporter(label, Some(sockaddr)) andThen monitor
        // While constructing a single endpoint stack is fairly cheap,
        // creating a large number of them can be expensive. On server
        // set change, if the set of endpoints is large, and we
        // initialized endpoint stacks eagerly, it could delay the load
        // balancer readiness significantly. Instead, we spread that
        // cost across requests by moving endpoint stack creation into
        // service acquisition (apply method below).
        new ServiceFactory[Req, Rep] {
          // Both fields are guarded by this factory's own lock (all access
          // happens inside `synchronized` blocks below).
          var underlying: ServiceFactory[Req, Rep] = null
          var isClosed = false
          def apply(conn: ClientConnection): Future[Service[Req, Rep]] = {
            synchronized {
              if (isClosed) return Future.exception(new ServiceClosedException)
              if (underlying == null) underlying = next.make(params +
                Transporter.EndpointAddr(SocketAddresses.unwrap(sockaddr)) +
                param.Stats(stats) +
                param.Monitor(composite))
            }
            underlying(conn)
          }
          def close(deadline: Time): Future[Unit] = synchronized {
            isClosed = true
            if (underlying == null) Future.Done
            else underlying.close(deadline)
          }
          override def status: Status = synchronized {
            if (underlying == null)
              if (!isClosed) Status.Open
              else Status.Closed
            else underlying.status
          }
          override def toString: String = sockaddr.toString
        }
      }
      val balancerStats = rawStatsReceiver.scope("loadbalancer")
      val balancerExc = new NoBrokersAvailableException(errorLabel)
      def newBalancer(endpoints: Activity[Set[ServiceFactory[Req, Rep]]]) =
        loadBalancerFactory.newBalancer(endpoints, balancerStats, balancerExc)
      // Translate Addr updates into an Activity of socket-address sets,
      // logging name-resolution state transitions along the way.
      val destActivity: Activity[Set[SocketAddress]] = Activity(dest.map {
        case Addr.Bound(set, _) =>
          Activity.Ok(set)
        case Addr.Neg =>
          log.info(s"$label: name resolution is negative (local dtab: ${Dtab.local})")
          Activity.Ok(Set.empty)
        case Addr.Failed(e) =>
          log.log(Level.INFO, s"$label: name resolution failed (local dtab: ${Dtab.local})", e)
          Activity.Failed(e)
        case Addr.Pending =>
          if (log.isLoggable(Level.FINE)) {
            log.fine(s"$label: name resolution is pending")
          }
          Activity.Pending
      }: Var[Activity.State[Set[SocketAddress]]])
      // Instead of simply creating a newBalancer here, we defer to the
      // traffic distributor to interpret `WeightedSocketAddresses`.
      Stack.Leaf(role, new TrafficDistributor[Req, Rep](
        dest = destActivity,
        newEndpoint = newEndpoint,
        newBalancer = newBalancer,
        eagerEviction = !probationEnabled,
        statsReceiver = balancerStats
      ))
    }
  }
  private[finagle] def module[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] =
    new StackModule[Req, Rep] {
      val description = "Balances requests across a collection of endpoints."
    }
}
/**
 * A load balancer that balances among multiple connections,
 * useful for managing concurrency in pipelining protocols.
 *
 * Each endpoint can open multiple connections. For N endpoints,
 * each opens M connections, load balancer balances among N*M
 * options. Thus, it increases concurrency of each endpoint.
 */
object ConcurrentLoadBalancerFactory {
  import LoadBalancerFactory._
  // Marker wrapper distinguishing the i-th replica of a base address.
  private case class ReplicatedSocketAddress(underlying: SocketAddress, i: Int)
    extends SocketAddresses.Wrapped
  // Expands a base address into `num` weighted replicas; addresses that are
  // already wrapped are passed through untouched.
  private def replicate(num: Int): SocketAddress => Set[SocketAddress] = {
    case sa: SocketAddresses.Wrapped => Set(sa)
    case sa =>
      val (base, w) = WeightedSocketAddress.extract(sa)
      for (i: Int <- (0 until num).toSet) yield
        WeightedSocketAddress(ReplicatedSocketAddress(base, i), w)
  }
  /**
   * A class eligible for configuring the number of connections
   * a single endpoint has.
   */
  case class Param(numConnections: Int) {
    def mk(): (Param, Stack.Param[Param]) = (this, Param.param)
  }
  object Param {
    implicit val param = Stack.Param(Param(4))
  }
  private[finagle] def module[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] =
    new StackModule[Req, Rep] {
      val description = "Balance requests across multiple connections on a single " +
        "endpoint, used for pipelining protocols"
      override def make(params: Stack.Params, next: Stack[ServiceFactory[Req, Rep]]) = {
        val Param(numConnections) = params[Param]
        val Dest(dest) = params[Dest]
        // Rewrite the destination so each bound address appears `numConnections` times.
        val newDest = dest.map {
          case bound@Addr.Bound(set, meta) =>
            bound.copy(addrs = set.flatMap(replicate(numConnections)))
          case addr => addr
        }
        super.make(params + Dest(newDest), next)
      }
    }
}
/**
 * A thin interface around a Balancer's constructor that allows Finagle to pass in
 * context from the stack to the balancers at construction time.
 *
 * @see [[Balancers]] for a collection of available balancers.
 */
abstract class LoadBalancerFactory {
  /**
   * Returns a new balancer which is represented by a [[com.twitter.finagle.ServiceFactory]].
   *
   * @param endpoints The load balancer's collection is usually populated concurrently.
   * So the interface to build a balancer is wrapped in an [[com.twitter.util.Activity]]
   * which allows us to observe this process for changes.
   *
   * @param statsReceiver The StatsReceiver which balancers report stats to. See
   * [[com.twitter.finagle.loadbalancer.Balancer]] to see which stats are exported
   * across implementations.
   *
   * @param emptyException The exception returned when a balancer's collection is empty.
   */
  def newBalancer[Req, Rep](
    endpoints: Activity[Set[ServiceFactory[Req, Rep]]],
    statsReceiver: StatsReceiver,
    emptyException: NoBrokersAvailableException
  ): ServiceFactory[Req, Rep]
}
/**
 * We expose the ability to configure balancers per-process via flags. However,
 * this is generally not a good idea as Finagle processes usually contain many clients.
 * This will likely go away in the future or be no-op and, therefore, should not be
 * depended on. Instead, configure your balancers via the `configured` method on
 * clients:
 *
 * {{{
 * val balancer = Balancers.aperture(...)
 * Protocol.configured(LoadBalancerFactory.Param(balancer))
 * }}}
 */
@deprecated("Use com.twitter.finagle.loadbalancer.Balancers per-client.", "2015-06-15")
object defaultBalancer extends GlobalFlag("choice", "Default load balancer")
package exp {
  // Experimental flag: selects the load metric consulted by
  // `DefaultBalancerFactory` when building the default P2C balancer.
  object loadMetric extends GlobalFlag("leastReq",
    "Metric used to measure load across endpoints (leastReq | ewma)")
}
/** Process-wide default balancer factory, configured via the flags above. */
object DefaultBalancerFactory extends LoadBalancerFactory {
  private val log = Logger.getLogger(getClass.getName)

  // Chooses between the two power-of-two-choices variants based on the
  // `exp.loadMetric` flag.
  private def p2c(): LoadBalancerFactory =
    if (exp.loadMetric() == "ewma") Balancers.p2cPeakEwma()
    else Balancers.p2c()

  // Resolve the `defaultBalancer` flag once; unknown values fall back to
  // "choice" with a warning.
  private val underlying =
    defaultBalancer() match {
      case "heap" => Balancers.heap()
      case "choice" => p2c()
      case other =>
        log.warning(s"""Invalid load balancer ${other}, using "choice" balancer.""")
        p2c()
    }

  def newBalancer[Req, Rep](
    endpoints: Activity[Set[ServiceFactory[Req, Rep]]],
    statsReceiver: StatsReceiver,
    emptyException: NoBrokersAvailableException
  ): ServiceFactory[Req, Rep] =
    underlying.newBalancer(endpoints, statsReceiver, emptyException)
}
| a-manumohan/finagle | finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/LoadBalancerFactory.scala | Scala | apache-2.0 | 13,913 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import scala.collection.immutable.TreeSet
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, CodeGenerator, ExprCode, FalseLiteral, GenerateSafeProjection, GenerateUnsafeProjection, Predicate => BasePredicate}
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util.TypeUtils
import org.apache.spark.sql.types._
object InterpretedPredicate {
  /** Binds `expression` against `inputSchema`, then wraps it as a predicate. */
  def create(expression: Expression, inputSchema: Seq[Attribute]): InterpretedPredicate = {
    val bound = BindReferences.bindReference(expression, inputSchema)
    create(bound)
  }
  /** Wraps an already-bound boolean expression as an interpreted predicate. */
  def create(expression: Expression): InterpretedPredicate =
    new InterpretedPredicate(expression)
}
case class InterpretedPredicate(expression: Expression) extends BasePredicate {
  /** Evaluates the wrapped expression and narrows the result to Boolean. */
  override def eval(r: InternalRow): Boolean =
    expression.eval(r).asInstanceOf[Boolean]
  /** Propagates the partition index to any nondeterministic sub-expressions. */
  override def initialize(partitionIndex: Int): Unit = {
    super.initialize(partitionIndex)
    expression.foreach {
      case nondet: Nondeterministic => nondet.initialize(partitionIndex)
      case _ => ()
    }
  }
}
/**
 * An [[Expression]] that returns a boolean value.
 */
trait Predicate extends Expression {
  // All predicates evaluate to BooleanType, regardless of their input types.
  override def dataType: DataType = BooleanType
}
trait PredicateHelper {
  /** Flattens arbitrarily nested [[And]] expressions into a flat list of conjuncts. */
  protected def splitConjunctivePredicates(condition: Expression): Seq[Expression] =
    condition match {
      case And(lhs, rhs) =>
        splitConjunctivePredicates(lhs) ++ splitConjunctivePredicates(rhs)
      case leaf =>
        leaf :: Nil
    }
  /** Flattens arbitrarily nested [[Or]] expressions into a flat list of disjuncts. */
  protected def splitDisjunctivePredicates(condition: Expression): Seq[Expression] =
    condition match {
      case Or(lhs, rhs) =>
        splitDisjunctivePredicates(lhs) ++ splitDisjunctivePredicates(rhs)
      case leaf =>
        leaf :: Nil
    }
  /**
   * Substitutes any attribute found in `aliases` with its aliased expression.
   * Uses transformUp to prevent infinite recursion when a replacement
   * expression redefines the same ExprId.
   */
  protected def replaceAlias(
      condition: Expression,
      aliases: AttributeMap[Expression]): Expression =
    condition.transformUp {
      case attr: Attribute => aliases.getOrElse(attr, attr)
    }
  /**
   * Returns true if `expr` can be evaluated using only the output of `plan`. This method
   * can be used to determine when it is acceptable to move expression evaluation within a query
   * plan.
   *
   * For example consider a join between two relations R(a, b) and S(c, d).
   *
   *  - `canEvaluate(EqualTo(a,b), R)` returns `true`
   *  - `canEvaluate(EqualTo(a,c), R)` returns `false`
   *  - `canEvaluate(Literal(1), R)` returns `true` as literals CAN be evaluated on any plan
   */
  protected def canEvaluate(expr: Expression, plan: LogicalPlan): Boolean =
    expr.references.subsetOf(plan.outputSet)
  /**
   * Returns true iff `expr` could be evaluated as a condition within join.
   * NOTE: case order is significant here — the deterministic guard and the
   * subquery cases must be tried before the generic fallthrough.
   */
  protected def canEvaluateWithinJoin(expr: Expression): Boolean = expr match {
    // Non-deterministic expressions are not allowed as join conditions.
    case e if !e.deterministic => false
    // ListQuery/Exists are only evaluable after RewritePredicateSubquery turns
    // them into LeftSemi/LeftAnti/ExistenceJoin — never inside a Join directly.
    case _: ListQuery | _: Exists => false
    // A non-correlated subquery will be replaced by a literal.
    case e: SubqueryExpression => e.children.isEmpty
    case _: AttributeReference => true
    case _: Unevaluable => false
    case e => e.children.forall(canEvaluateWithinJoin)
  }
}
// Three-valued logical negation (see truth table below).
@ExpressionDescription(
  usage = "_FUNC_ expr - Logical not.")
case class Not(child: Expression)
  extends UnaryExpression with Predicate with ImplicitCastInputTypes with NullIntolerant {
  override def toString: String = s"NOT $child"
  override def inputTypes: Seq[DataType] = Seq(BooleanType)
  // +---------+-----------+
  // | CHILD   | NOT CHILD |
  // +---------+-----------+
  // | TRUE    | FALSE     |
  // | FALSE   | TRUE      |
  // | UNKNOWN | UNKNOWN   |
  // +---------+-----------+
  // Null (UNKNOWN) input never reaches here: NullIntolerant short-circuits it.
  protected override def nullSafeEval(input: Any): Any = !input.asInstanceOf[Boolean]
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    defineCodeGen(ctx, ev, c => s"!($c)")
  }
  override def sql: String = s"(NOT ${child.sql})"
}
/**
 * Evaluates to `true` if `values` are returned in `query`'s result set.
 */
case class InSubquery(values: Seq[Expression], query: ListQuery)
  extends Predicate with Unevaluable {
  // Multi-column IN is compared as a single struct value; anonymous columns
  // get positional names (_0, _1, ...).
  @transient private lazy val value: Expression = if (values.length > 1) {
    CreateNamedStruct(values.zipWithIndex.flatMap {
      case (v: NamedExpression, _) => Seq(Literal(v.name), v)
      case (v, idx) => Seq(Literal(s"_$idx"), v)
    })
  } else {
    values.head
  }
  // Checks arity first, then structural type compatibility (ignoring
  // nullability) between the LHS value and the subquery output.
  override def checkInputDataTypes(): TypeCheckResult = {
    if (values.length != query.childOutputs.length) {
      TypeCheckResult.TypeCheckFailure(
        s"""
           |The number of columns in the left hand side of an IN subquery does not match the
           |number of columns in the output of subquery.
           |#columns in left hand side: ${values.length}.
           |#columns in right hand side: ${query.childOutputs.length}.
           |Left side columns:
           |[${values.map(_.sql).mkString(", ")}].
           |Right side columns:
           |[${query.childOutputs.map(_.sql).mkString(", ")}].""".stripMargin)
    } else if (!DataType.equalsStructurally(
      query.dataType, value.dataType, ignoreNullability = true)) {
      // Column-by-column report; uses strict != so it may also list pairs
      // that differ only in nullability.
      val mismatchedColumns = values.zip(query.childOutputs).flatMap {
        case (l, r) if l.dataType != r.dataType =>
          Seq(s"(${l.sql}:${l.dataType.catalogString}, ${r.sql}:${r.dataType.catalogString})")
        case _ => None
      }
      TypeCheckResult.TypeCheckFailure(
        s"""
           |The data type of one or more elements in the left hand side of an IN subquery
           |is not compatible with the data type of the output of the subquery
           |Mismatched columns:
           |[${mismatchedColumns.mkString(", ")}]
           |Left side:
           |[${values.map(_.dataType.catalogString).mkString(", ")}].
           |Right side:
           |[${query.childOutputs.map(_.dataType.catalogString).mkString(", ")}].""".stripMargin)
    } else {
      TypeUtils.checkForOrderingExpr(value.dataType, s"function $prettyName")
    }
  }
  override def children: Seq[Expression] = values :+ query
  override def nullable: Boolean = children.exists(_.nullable)
  override def foldable: Boolean = children.forall(_.foldable)
  override def toString: String = s"$value IN ($query)"
  override def sql: String = s"(${value.sql} IN (${query.sql}))"
}
/**
 * Evaluates to `true` if `list` contains `value`.
 */
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "expr1 _FUNC_(expr2, expr3, ...) - Returns true if `expr` equals to any valN.",
  arguments = """
    Arguments:
      * expr1, expr2, expr3, ... - the arguments must be same type.
  """,
  examples = """
    Examples:
      > SELECT 1 _FUNC_(1, 2, 3);
       true
      > SELECT 1 _FUNC_(2, 3, 4);
       false
      > SELECT named_struct('a', 1, 'b', 2) _FUNC_(named_struct('a', 1, 'b', 1), named_struct('a', 1, 'b', 3));
       false
      > SELECT named_struct('a', 1, 'b', 2) _FUNC_(named_struct('a', 1, 'b', 2), named_struct('a', 1, 'b', 3));
       true
  """)
// scalastyle:on line.size.limit
case class In(value: Expression, list: Seq[Expression]) extends Predicate {
  require(list != null, "list should not be null")
  // All list elements must be structurally the same type as `value`
  // (nullability ignored), and the type must be orderable.
  override def checkInputDataTypes(): TypeCheckResult = {
    val mismatchOpt = list.find(l => !DataType.equalsStructurally(l.dataType, value.dataType,
      ignoreNullability = true))
    if (mismatchOpt.isDefined) {
      TypeCheckResult.TypeCheckFailure(s"Arguments must be same type but were: " +
        s"${value.dataType.catalogString} != ${mismatchOpt.get.dataType.catalogString}")
    } else {
      TypeUtils.checkForOrderingExpr(value.dataType, s"function $prettyName")
    }
  }
  override def children: Seq[Expression] = value +: list
  // When every list element is a Literal, the optimizer can turn this into InSet.
  lazy val inSetConvertible = list.forall(_.isInstanceOf[Literal])
  private lazy val ordering = TypeUtils.getInterpretedOrdering(value.dataType)
  override def nullable: Boolean = children.exists(_.nullable)
  override def foldable: Boolean = children.forall(_.foldable)
  override def toString: String = s"$value IN ${list.mkString("(", ",", ")")}"
  // SQL IN semantics: null value => null; match found => true;
  // no match but a null in the list => null; otherwise false.
  override def eval(input: InternalRow): Any = {
    val evaluatedValue = value.eval(input)
    if (evaluatedValue == null) {
      null
    } else {
      var hasNull = false
      list.foreach { e =>
        val v = e.eval(input)
        if (v == null) {
          hasNull = true
        } else if (ordering.equiv(v, evaluatedValue)) {
          // Nonlocal return: short-circuits the scan on the first match.
          return true
        }
      }
      if (hasNull) {
        null
      } else {
        false
      }
    }
  }
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val javaDataType = CodeGenerator.javaType(value.dataType)
    val valueGen = value.genCode(ctx)
    val listGen = list.map(_.genCode(ctx))
    // inTmpResult has 3 possible values:
    // -1 means no matches found and there is at least one value in the list evaluated to null
    val HAS_NULL = -1
    // 0 means no matches found and all values in the list are not null
    val NOT_MATCHED = 0
    // 1 means one value in the list is matched
    val MATCHED = 1
    val tmpResult = ctx.freshName("inTmpResult")
    val valueArg = ctx.freshName("valueArg")
    // All the blocks are meant to be inside a do { ... } while (false); loop.
    // The evaluation of variables can be stopped when we find a matching value.
    val listCode = listGen.map(x =>
      s"""
         |${x.code}
         |if (${x.isNull}) {
         |  $tmpResult = $HAS_NULL; // ${ev.isNull} = true;
         |} else if (${ctx.genEqual(value.dataType, valueArg, x.value)}) {
         |  $tmpResult = $MATCHED; // ${ev.isNull} = false; ${ev.value} = true;
         |  continue;
         |}
       """.stripMargin)
    // Large lists are split across generated helper functions; tmpResult is
    // threaded through as an argument/return value.
    val codes = ctx.splitExpressionsWithCurrentInputs(
      expressions = listCode,
      funcName = "valueIn",
      extraArguments = (javaDataType, valueArg) :: (CodeGenerator.JAVA_BYTE, tmpResult) :: Nil,
      returnType = CodeGenerator.JAVA_BYTE,
      makeSplitFunction = body =>
        s"""
           |do {
           |  $body
           |} while (false);
           |return $tmpResult;
         """.stripMargin,
      foldFunctions = _.map { funcCall =>
        s"""
           |$tmpResult = $funcCall;
           |if ($tmpResult == $MATCHED) {
           |  continue;
           |}
         """.stripMargin
      }.mkString("\\n"))
    ev.copy(code =
      code"""
         |${valueGen.code}
         |byte $tmpResult = $HAS_NULL;
         |if (!${valueGen.isNull}) {
         |  $tmpResult = $NOT_MATCHED;
         |  $javaDataType $valueArg = ${valueGen.value};
         |  do {
         |    $codes
         |  } while (false);
         |}
         |final boolean ${ev.isNull} = ($tmpResult == $HAS_NULL);
         |final boolean ${ev.value} = ($tmpResult == $MATCHED);
       """.stripMargin)
  }
  override def sql: String = {
    val valueSQL = value.sql
    val listSQL = list.map(_.sql).mkString(", ")
    s"($valueSQL IN ($listSQL))"
  }
}
/**
 * Optimized version of In clause, when all filter values of In clause are
 * static.
 */
case class InSet(child: Expression, hset: Set[Any]) extends UnaryExpression with Predicate {
  require(hset != null, "hset could not be null")
  override def toString: String = s"$child INSET ${hset.mkString("(", ",", ")")}"
  @transient private[this] lazy val hasNull: Boolean = hset.contains(null)
  override def nullable: Boolean = child.nullable || hasNull
  // Same three-valued semantics as In.eval: a non-matching probe against a
  // set containing null yields null (UNKNOWN), not false.
  protected override def nullSafeEval(value: Any): Any = {
    if (set.contains(value)) {
      true
    } else if (hasNull) {
      null
    } else {
      false
    }
  }
  @transient lazy val set: Set[Any] = child.dataType match {
    case t: AtomicType if !t.isInstanceOf[BinaryType] => hset
    case _: NullType => hset
    case _ =>
      // for structs use interpreted ordering to be able to compare UnsafeRows with non-UnsafeRows
      TreeSet.empty(TypeUtils.getInterpretedOrdering(child.dataType)) ++ (hset - null)
  }
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    nullSafeCodeGen(ctx, ev, c => {
      // The (possibly ordering-based) set is embedded as a reference object.
      val setTerm = ctx.addReferenceObj("set", set)
      val setIsNull = if (hasNull) {
        s"${ev.isNull} = !${ev.value};"
      } else {
        ""
      }
      s"""
         |${ev.value} = $setTerm.contains($c);
         |$setIsNull
       """.stripMargin
    })
  }
  override def sql: String = {
    val valueSQL = child.sql
    val listSQL = hset.toSeq.map(Literal(_).sql).mkString(", ")
    s"($valueSQL IN ($listSQL))"
  }
}
@ExpressionDescription(
  usage = "expr1 _FUNC_ expr2 - Logical AND.")
case class And(left: Expression, right: Expression) extends BinaryOperator with Predicate {
  override def inputType: AbstractDataType = BooleanType
  override def symbol: String = "&&"
  override def sqlOperator: String = "AND"
  // +---------+---------+---------+---------+
  // | AND     | TRUE    | FALSE   | UNKNOWN |
  // +---------+---------+---------+---------+
  // | TRUE    | TRUE    | FALSE   | UNKNOWN |
  // | FALSE   | FALSE   | FALSE   | FALSE   |
  // | UNKNOWN | UNKNOWN | FALSE   | UNKNOWN |
  // +---------+---------+---------+---------+
  override def eval(input: InternalRow): Any = {
    val input1 = left.eval(input)
    // `input1` is Any; Scala's null-safe `==` makes this comparison false for
    // null (UNKNOWN), so a FALSE left operand short-circuits the whole AND.
    if (input1 == false) {
      false
    } else {
      val input2 = right.eval(input)
      if (input2 == false) {
        false
      } else {
        if (input1 != null && input2 != null) {
          true
        } else {
          null
        }
      }
    }
  }
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val eval1 = left.genCode(ctx)
    val eval2 = right.genCode(ctx)
    // The result should be `false`, if any of them is `false` whenever the other is null or not.
    if (!left.nullable && !right.nullable) {
      // Non-nullable fast path: no isNull bookkeeping is generated.
      ev.copy(code = code"""
        ${eval1.code}
        boolean ${ev.value} = false;
        if (${eval1.value}) {
          ${eval2.code}
          ${ev.value} = ${eval2.value};
        }""", isNull = FalseLiteral)
    } else {
      ev.copy(code = code"""
        ${eval1.code}
        boolean ${ev.isNull} = false;
        boolean ${ev.value} = false;
        if (!${eval1.isNull} && !${eval1.value}) {
        } else {
          ${eval2.code}
          if (!${eval2.isNull} && !${eval2.value}) {
          } else if (!${eval1.isNull} && !${eval2.isNull}) {
            ${ev.value} = true;
          } else {
            ${ev.isNull} = true;
          }
        }
      """)
    }
  }
}
@ExpressionDescription(
  usage = "expr1 _FUNC_ expr2 - Logical OR.")
case class Or(left: Expression, right: Expression) extends BinaryOperator with Predicate {
  override def inputType: AbstractDataType = BooleanType
  override def symbol: String = "||"
  override def sqlOperator: String = "OR"
  // +---------+---------+---------+---------+
  // | OR      | TRUE    | FALSE   | UNKNOWN |
  // +---------+---------+---------+---------+
  // | TRUE    | TRUE    | TRUE    | TRUE    |
  // | FALSE   | TRUE    | FALSE   | UNKNOWN |
  // | UNKNOWN | TRUE    | UNKNOWN | UNKNOWN |
  // +---------+---------+---------+---------+
  override def eval(input: InternalRow): Any = {
    val input1 = left.eval(input)
    // Null-safe `==`: a TRUE left operand short-circuits; null falls through.
    if (input1 == true) {
      true
    } else {
      val input2 = right.eval(input)
      if (input2 == true) {
        true
      } else {
        if (input1 != null && input2 != null) {
          false
        } else {
          null
        }
      }
    }
  }
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val eval1 = left.genCode(ctx)
    val eval2 = right.genCode(ctx)
    // The result should be `true`, if any of them is `true` whenever the other is null or not.
    if (!left.nullable && !right.nullable) {
      // NOTE(review): this mutation is redundant — the copy below already
      // passes isNull = FalseLiteral. Consider removing for consistency with
      // And.doGenCode, which has no such assignment.
      ev.isNull = FalseLiteral
      ev.copy(code = code"""
        ${eval1.code}
        boolean ${ev.value} = true;
        if (!${eval1.value}) {
          ${eval2.code}
          ${ev.value} = ${eval2.value};
        }""", isNull = FalseLiteral)
    } else {
      ev.copy(code = code"""
        ${eval1.code}
        boolean ${ev.isNull} = false;
        boolean ${ev.value} = true;
        if (!${eval1.isNull} && ${eval1.value}) {
        } else {
          ${eval2.code}
          if (!${eval2.isNull} && ${eval2.value}) {
          } else if (!${eval1.isNull} && !${eval2.isNull}) {
            ${ev.value} = false;
          } else {
            ${ev.isNull} = true;
          }
        }
      """)
    }
  }
}
/**
 * Base class for binary comparison operators (=, <, <=, >, >=, <=>).
 * Subclasses supply `symbol` and (interpreted) evaluation; codegen is shared.
 */
abstract class BinaryComparison extends BinaryOperator with Predicate {
  // Note that we need to give a superset of allowable input types since orderable types are not
  // finitely enumerable. The allowable types are checked below by checkInputDataTypes.
  override def inputType: AbstractDataType = AnyDataType
  override def checkInputDataTypes(): TypeCheckResult = super.checkInputDataTypes() match {
    case TypeCheckResult.TypeCheckSuccess =>
      TypeUtils.checkForOrderingExpr(left.dataType, this.getClass.getSimpleName)
    case failure => failure
  }
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    if (CodeGenerator.isPrimitiveType(left.dataType)
        && left.dataType != BooleanType // java boolean doesn't support > or < operator
        && left.dataType != FloatType
        && left.dataType != DoubleType) {
      // faster version
      defineCodeGen(ctx, ev, (c1, c2) => s"$c1 $symbol $c2")
    } else {
      // Fall back to a generated compare() call, then compare against 0.
      defineCodeGen(ctx, ev, (c1, c2) => s"${ctx.genComp(left.dataType, c1, c2)} $symbol 0")
    }
  }
  // Interpreted ordering for the operand type; used by subclasses' nullSafeEval.
  protected lazy val ordering: Ordering[Any] = TypeUtils.getInterpretedOrdering(left.dataType)
}
object BinaryComparison {
  /** Extractor exposing the left and right operands of any comparison. */
  def unapply(e: BinaryComparison): Option[(Expression, Expression)] = {
    val operands = (e.left, e.right)
    Some(operands)
  }
}
/** An extractor that matches both standard 3VL equality and null-safe equality. */
object Equality {
  def unapply(e: BinaryComparison): Option[(Expression, Expression)] = e match {
    case eq: EqualTo => Some((eq.left, eq.right))
    case nse: EqualNullSafe => Some((nse.left, nse.right))
    case _ => None
  }
}
// TODO: although map type is not orderable, technically map type should be able to be used
// in equality comparison
@ExpressionDescription(
  usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` equals `expr2`, or false otherwise.",
  arguments = """
    Arguments:
      * expr1, expr2 - the two expressions must be same type or can be casted to a common type,
          and must be a type that can be used in equality comparison. Map type is not supported.
          For complex types such array/struct, the data types of fields must be orderable.
  """,
  examples = """
    Examples:
      > SELECT 2 _FUNC_ 2;
       true
      > SELECT 1 _FUNC_ '1';
       true
      > SELECT true _FUNC_ NULL;
       NULL
      > SELECT NULL _FUNC_ NULL;
       NULL
  """)
case class EqualTo(left: Expression, right: Expression)
    extends BinaryComparison with NullIntolerant {
  override def symbol: String = "="
  // +---------+---------+---------+---------+
  // | =       | TRUE    | FALSE   | UNKNOWN |
  // +---------+---------+---------+---------+
  // | TRUE    | TRUE    | FALSE   | UNKNOWN |
  // | FALSE   | FALSE   | TRUE    | UNKNOWN |
  // | UNKNOWN | UNKNOWN | UNKNOWN | UNKNOWN |
  // +---------+---------+---------+---------+
  // Null operands are handled before this is called (NullIntolerant).
  protected override def nullSafeEval(left: Any, right: Any): Any = ordering.equiv(left, right)
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    defineCodeGen(ctx, ev, (c1, c2) => ctx.genEqual(left.dataType, c1, c2))
  }
}
// TODO: although map type is not orderable, technically map type should be able to be used
// in equality comparison
@ExpressionDescription(
  usage = """
    expr1 _FUNC_ expr2 - Returns same result as the EQUAL(=) operator for non-null operands,
      but returns true if both are null, false if one of the them is null.
  """,
  arguments = """
    Arguments:
      * expr1, expr2 - the two expressions must be same type or can be casted to a common type,
          and must be a type that can be used in equality comparison. Map type is not supported.
          For complex types such array/struct, the data types of fields must be orderable.
  """,
  examples = """
    Examples:
      > SELECT 2 _FUNC_ 2;
       true
      > SELECT 1 _FUNC_ '1';
       true
      > SELECT true _FUNC_ NULL;
       false
      > SELECT NULL _FUNC_ NULL;
       true
  """)
case class EqualNullSafe(left: Expression, right: Expression) extends BinaryComparison {
  override def symbol: String = "<=>"
  // <=> never yields UNKNOWN, so the result is never null.
  override def nullable: Boolean = false
  // +---------+---------+---------+---------+
  // | <=>     | TRUE    | FALSE   | UNKNOWN |
  // +---------+---------+---------+---------+
  // | TRUE    | TRUE    | FALSE   | UNKNOWN |
  // | FALSE   | FALSE   | TRUE    | UNKNOWN |
  // | UNKNOWN | UNKNOWN | UNKNOWN | TRUE    |
  // +---------+---------+---------+---------+
  override def eval(input: InternalRow): Any = {
    val input1 = left.eval(input)
    val input2 = right.eval(input)
    if (input1 == null && input2 == null) {
      true
    } else if (input1 == null || input2 == null) {
      false
    } else {
      ordering.equiv(input1, input2)
    }
  }
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val eval1 = left.genCode(ctx)
    val eval2 = right.genCode(ctx)
    val equalCode = ctx.genEqual(left.dataType, eval1.value, eval2.value)
    ev.copy(code = eval1.code + eval2.code + code"""
        boolean ${ev.value} = (${eval1.isNull} && ${eval2.isNull}) ||
           (!${eval1.isNull} && !${eval2.isNull} && $equalCode);""", isNull = FalseLiteral)
  }
}
@ExpressionDescription(
  usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is less than `expr2`.",
  arguments = """
    Arguments:
      * expr1, expr2 - the two expressions must be same type or can be casted to a common type,
          and must be a type that can be ordered. For example, map type is not orderable, so it
          is not supported. For complex types such array/struct, the data types of fields must
          be orderable.
  """,
  examples = """
    Examples:
      > SELECT 1 _FUNC_ 2;
       true
      > SELECT 1.1 _FUNC_ '1';
       false
      > SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-07-30 04:17:52');
       false
      > SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-08-01 04:17:52');
       true
      > SELECT 1 _FUNC_ NULL;
       NULL
  """)
case class LessThan(left: Expression, right: Expression)
    extends BinaryComparison with NullIntolerant {
  override def symbol: String = "<"
  // Null operands are handled before this is called (NullIntolerant).
  protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.lt(input1, input2)
}
@ExpressionDescription(
  usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is less than or equal to `expr2`.",
  arguments = """
    Arguments:
      * expr1, expr2 - the two expressions must be same type or can be casted to a common type,
          and must be a type that can be ordered. For example, map type is not orderable, so it
          is not supported. For complex types such array/struct, the data types of fields must
          be orderable.
  """,
  examples = """
    Examples:
      > SELECT 2 _FUNC_ 2;
       true
      > SELECT 1.0 _FUNC_ '1';
       true
      > SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-07-30 04:17:52');
       true
      > SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-08-01 04:17:52');
       true
      > SELECT 1 _FUNC_ NULL;
       NULL
  """)
case class LessThanOrEqual(left: Expression, right: Expression)
    extends BinaryComparison with NullIntolerant {
  override def symbol: String = "<="
  // Null operands are handled before this is called (NullIntolerant).
  protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.lteq(input1, input2)
}
@ExpressionDescription(
  usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is greater than `expr2`.",
  arguments = """
    Arguments:
      * expr1, expr2 - the two expressions must be same type or can be casted to a common type,
          and must be a type that can be ordered. For example, map type is not orderable, so it
          is not supported. For complex types such array/struct, the data types of fields must
          be orderable.
  """,
  examples = """
    Examples:
      > SELECT 2 _FUNC_ 1;
       true
      > SELECT 2 _FUNC_ '1.1';
       true
      > SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-07-30 04:17:52');
       false
      > SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-08-01 04:17:52');
       false
      > SELECT 1 _FUNC_ NULL;
       NULL
  """)
case class GreaterThan(left: Expression, right: Expression)
    extends BinaryComparison with NullIntolerant {
  override def symbol: String = ">"
  // Null operands are handled before this is called (NullIntolerant).
  protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.gt(input1, input2)
}
@ExpressionDescription(
  usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is greater than or equal to `expr2`.",
  arguments = """
    Arguments:
      * expr1, expr2 - the two expressions must be same type or can be casted to a common type,
          and must be a type that can be ordered. For example, map type is not orderable, so it
          is not supported. For complex types such array/struct, the data types of fields must
          be orderable.
  """,
  examples = """
    Examples:
      > SELECT 2 _FUNC_ 1;
       true
      > SELECT 2.0 _FUNC_ '2.1';
       false
      > SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-07-30 04:17:52');
       true
      > SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-08-01 04:17:52');
       false
      > SELECT 1 _FUNC_ NULL;
       NULL
  """)
case class GreaterThanOrEqual(left: Expression, right: Expression)
    extends BinaryComparison with NullIntolerant {
  override def symbol: String = ">="
  // Null operands are handled before this is called (NullIntolerant).
  protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.gteq(input1, input2)
}
| facaiy/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala | Scala | apache-2.0 | 27,522 |
package com.naokia.groonga4s.util.request
/**
* utils for query(free word search)
*/
object Query {
  // Characters with special meaning in a Groonga query expression:
  // space, parentheses, single/double quote and backslash.
  private[this] val specialChars: Set[Char] = Set(' ', '(', ')', '\'', '"', '\\')

  /** Returns `target` with every special character prefixed by a backslash. */
  def escape(target: String): String =
    target.flatMap { c =>
      if (specialChars(c)) "\\" + c else c.toString
    }
}
| naokia/groonga4s | src/main/scala/com/naokia/groonga4s/util/request/Query.scala | Scala | apache-2.0 | 206 |
/**
* Copyright 2014 AndrΓ© RouΓ©l
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.before.uadetector.datasource
import scala.annotation.tailrec
import scala.collection.immutable.Map
import scala.collection.immutable.Vector
import scala.collection.mutable.ArrayBuffer
import scala.util.Failure
import scala.util.Success
import scala.util.Try
import com.github.before.uadetector.datasource.regex.Converter
import com.github.before.uadetector.datasource.regex.Pattern
/**
* Describes the INI format of an *UAS* database (Data for UASparser) available
* from http://user-agent-string.info.
*/
object IniFormat {
  // A single classified line of an UAS INI file.
  sealed trait Line
  /** Determines the type of a given line in string representation. */
  object Line {
    // NOTE(review): `Version` is defined elsewhere in this file and is not
    // visible here. The classify/parse contract (classify == true implies
    // parse returns Some) is assumed — the `.get` calls throw otherwise.
    // Classifier order may also be significant if classifiers overlap; confirm.
    def apply(line: String): Line = {
      if (Version.classify(line))
        Version.parse(line).get
      else if (Comment.classify(line))
        Comment.parse(line).get
      else if (Section.classify(line))
        Section.parse(line).get
      else if (Property.classify(line))
        Property(line)
      else
        Unknown(line)
    }
  }
case class Comment(value: String) extends Line with Entry
/** Semicolons (;) at the beginning of the line indicate a comment. */
object Comment {
def classify(line: String): Boolean =
if (line.nonEmpty)
line.head == ';'
else
false
def parse(line: String): Option[Comment] =
if (line.nonEmpty)
Some(Comment(line.substring(1)))
else
None
}
case class Property(line: String = "") extends Line {
private val index = line.indexOf('=')
require(index > 0, "property must contain a name and value separated by an equal signs")
private val split = line.splitAt(index)
val name: String = split._1
val value: String = split._2.substring(1)
}
/**
* The basic element contained in an INI file is the *key* or *property*.
* Every key has a *name* and a *value*, delimited by an equals sign (=). The
* name appears to the left of the equals sign.
*/
object Property {
def classify(line: String): Boolean =
if (line.indexOf('=') > 0)
true
else
false
def parse(line: String): Option[Property] = {
if (classify(line))
Some(Property(line))
else
None
}
}
  /**
   * An intermediate state when collecting properties until they can be
   * transformed into an `Entry`.
   */
  case class Properties(values: Vector[Property]) extends Entry // TODO reuse when folding list of lines to entries
  object Properties {
    import Entry._
    // Converts the properties gathered so far for `section` into its typed
    // Entry once the section's expected property count is reached; until then
    // returns the (still partial) Properties so collection can continue.
    def convert(section: Section, entries: Vector[Property]): Try[Entry] = {
      if (enoughEntries(section, entries))
        section match {
          case Browsers =>
            mapToBrowser(entries)
          case BrowserOperatingSystems =>
            mapToBrowserToOperatingSystem(entries)
          case BrowserRegexs =>
            mapToBrowserRegex(entries)
          case BrowserTypes =>
            mapToBrowserType(entries)
          case Devices =>
            mapToDevice(entries)
          case DeviceRegexs =>
            mapToDeviceRegex(entries)
          case OperatingSystems =>
            mapToOperatingSystem(entries)
          case OperatingSystemRegexs =>
            mapToOperatingSystemRegex(entries)
          case Robots =>
            mapToRobot(entries)
          case u: UnknownSection =>
            Success(FailureType("Entries of unknown sections cannot be mapped"))
          case Global =>
            Success(FailureType("Entries in global section must be mapped separately"))
        }
      else
        Try {
          Properties(entries)
        }
    }
  }
  /**
   * Keys may (but need not) be grouped into arbitrarily named sections. The
   * section name appears on a line by itself, in square brackets (`[` and `]`).
   * All keys after the section declaration are associated with that section.
   * There is no explicit "end of section" delimiter; sections end at the next
   * section declaration, or the end of the file. Sections may not be nested.
   */
  sealed trait Section extends Line with Group {
    // Defaults; concrete section types override as appropriate.
    def isGlobal = false
    def isKnown = false
  }
  // Implicit section for properties appearing before any section header.
  case object Global extends Section {
    override def isGlobal = true
  }
  // A bracketed section header whose name is not one of the known sections.
  case class UnknownSection(name: String) extends Section
object Section {
def classify(line: String): Boolean =
if (line.nonEmpty)
line.head == '[' && line.last == ']'
else
false
def parse(line: String): Option[Section] =
if (line.nonEmpty) {
val name = line.substring(1, line.length - 1)
val section = KnownSection.parse(name).getOrElse(UnknownSection(name))
Some(section)
} else
None
}
  /**
   * If a string could not be determined as `Comment`, `Section` or `Property`
   * it becomes an `Unknown` type.
   */
  // Carries the raw, unclassified line verbatim.
  case class Unknown(value: String) extends Line with Entry
/** Defines known section of an *UAS* INI file */
sealed trait KnownSection extends Section {
def classify(line: String): Boolean =
if (line.nonEmpty)
line == id
else
false
def id: String
override def isKnown = true
}
object KnownSection {
val variants = List(
BrowserOperatingSystems,
BrowserRegexs,
BrowserTypes,
Browsers,
DeviceRegexs,
Devices,
OperatingSystemRegexs,
OperatingSystems,
Robots)
def parse(line: String): Option[KnownSection] = {
val matches = variants.filter(_.classify(line))
if (matches.length == 1)
Some(matches.head)
else
None
}
}
  // The concrete sections of an UAS INI database; each `id` is the exact
  // section name as it appears between square brackets in the file.
  case object BrowserOperatingSystems extends KnownSection {
    val id = "browser_os"
  }
  case object BrowserRegexs extends KnownSection {
    val id = "browser_reg"
  }
  case object BrowserTypes extends KnownSection {
    val id = "browser_type"
  }
  case object Browsers extends KnownSection {
    val id = "browser"
  }
  case object DeviceRegexs extends KnownSection {
    val id = "device_reg"
  }
  case object Devices extends KnownSection {
    val id = "device"
  }
  case object OperatingSystemRegexs extends KnownSection {
    val id = "os_reg"
  }
  case object OperatingSystems extends KnownSection {
    val id = "os"
  }
  case object Robots extends KnownSection {
    val id = "robots"
  }
object Entry {
private class ParseException(msg: String) extends RuntimeException(msg)
def apply(section: Section, lines: Seq[Line]): Entry = {
val entries: Seq[Property] = this.properties(lines)
val parseFailure: PartialFunction[Throwable, Try[Entry]] = { case _ => Failure(new ParseException(s"Cannot parse entries in section $section: $entries")) }
val attempt: Try[Entry] = section match {
case Browsers =>
mapToBrowser(entries).recoverWith(parseFailure)
case BrowserOperatingSystems =>
mapToBrowserToOperatingSystem(entries).recoverWith(parseFailure)
case BrowserRegexs =>
mapToBrowserRegex(entries).recoverWith(parseFailure)
case BrowserTypes =>
mapToBrowserType(entries).recoverWith(parseFailure)
case Devices =>
mapToDevice(entries).recoverWith(parseFailure)
case DeviceRegexs =>
mapToDeviceRegex(entries).recoverWith(parseFailure)
case OperatingSystems =>
mapToOperatingSystem(entries).recoverWith(parseFailure)
case OperatingSystemRegexs =>
mapToOperatingSystemRegex(entries).recoverWith(parseFailure)
case Robots =>
mapToRobot(entries).recoverWith(parseFailure)
case u: UnknownSection => Try {
FailureType("Entries of unknown sections cannot be mapped")
}
case Global => Try {
FailureType("Entries in global section must be mapped separately")
}
}
attempt match {
case Success(s) => s
case Failure(t) => FailureType(t.getLocalizedMessage())
}
}
def extractId(p: Property): Option[Int] = startingDigits(p.name)
def extractValue(p: Property): String = unquote(p.value)
def intOption(s: String): Option[Int] =
if (s.isEmpty) None else Some(s.toInt)
def enoughEntries(section: Section, entries: Seq[Property]): Boolean =
entrySize(section) == entries.size
def entrySize(section: Section): Int = {
section match {
case BrowserOperatingSystems => 2
case BrowserRegexs => 2
case BrowserTypes => 1
case Browsers => 7
case DeviceRegexs => 2
case Devices => 3
case Global => 1
case OperatingSystemRegexs => 2
case OperatingSystems => 6
case Robots => 9
case u: UnknownSection => 1
}
}
    // The mapTo* converters below rely on the fixed field order of the UAS ini
    // format: `entries` must hold exactly `entrySize(section)` properties.
    // Missing ids, too few fields or non-numeric values surface as a `Failure`
    // through the surrounding `Try`.
    def mapToBrowser(entries: Seq[Property]): Try[Browser] = Try {
      Browser(
        BrowserId(extractId(entries(0)).get),
        extractValue(entries(0)).toInt,
        extractValue(entries(1)),
        extractValue(entries(2)),
        extractValue(entries(3)),
        extractValue(entries(4)),
        extractValue(entries(5)),
        extractValue(entries(6)))
    }
    def mapToBrowserRegex(entries: Seq[Property]): Try[BrowserRegex] = Try {
      BrowserRegex(
        extractId(entries(0)).get,
        regexToPattern(extractValue(entries(0))),
        BrowserId(extractValue(entries(1)).toInt))
    }
    def mapToBrowserToOperatingSystem(entries: Seq[Property]): Try[BrowserToOperatingSystem] = Try {
      BrowserToOperatingSystem(
        BrowserId(extractId(entries(0)).get.toInt),
        OperatingSystemId(extractValue(entries(0)).toInt))
    }
    def mapToBrowserType(entries: Seq[Property]): Try[BrowserType] = Try {
      BrowserType(
        extractId(entries(0)).get,
        extractValue(entries(0)))
    }
    def mapToDevice(entries: Seq[Property]): Try[Device] = Try {
      Device(
        DeviceId(extractId(entries(0)).get),
        extractValue(entries(0)),
        extractValue(entries(1)),
        extractValue(entries(2)))
    }
    def mapToDeviceRegex(entries: Seq[Property]): Try[DeviceRegex] = Try {
      DeviceRegex(
        extractId(entries(0)).get,
        regexToPattern(extractValue(entries(0))),
        DeviceId(extractValue(entries(1)).toInt))
    }
    def mapToOperatingSystem(entries: Seq[Property]): Try[OperatingSystem] = Try {
      OperatingSystem(
        OperatingSystemId(extractId(entries(0)).get),
        extractValue(entries(0)),
        extractValue(entries(1)),
        extractValue(entries(2)),
        extractValue(entries(3)),
        extractValue(entries(4)),
        extractValue(entries(5)))
    }
    def mapToOperatingSystemRegex(entries: Seq[Property]): Try[OperatingSystemRegex] = Try {
      OperatingSystemRegex(
        extractId(entries(0)).get,
        regexToPattern(extractValue(entries(0))),
        OperatingSystemId(extractValue(entries(1)).toInt))
    }
    def mapToRobot(entries: Seq[Property]): Try[Robot] = Try {
      Robot(
        RobotId(extractId(entries(0)).get),
        extractValue(entries(0)),
        extractValue(entries(1)),
        extractValue(entries(2)),
        extractValue(entries(3)),
        extractValue(entries(4)),
        extractValue(entries(5)),
        extractValue(entries(6)),
        // the operating system reference of a robot is optional (may be empty)
        operatingSystemId(extractValue(entries(7))),
        extractValue(entries(8)))
    }
def operatingSystemId(s: String): Option[OperatingSystemId] =
intOption(s).flatMap(o => Some(OperatingSystemId(o)))
def properties(lines: Seq[Line]): Seq[Property] = {
val zero = List[Property]()
lines.foldRight(zero)((ln, acc) => ln match {
case e: Property => e :: acc
case _ => acc
})
}
    // Translates a Perl-style regex literal from the ini file into a Java Pattern.
    // NOTE(review): conversion failures currently propagate as exceptions.
    def regexToPattern(regex: String): Pattern = {
      Converter.convertPerlRegexToPattern(regex) // TODO return Option or Try
    }
/**
* Filter beginning numbers from input. If the input does not start with
* digits or the digits could not be parsed into an `Int` the function
* returns `None`.
*/
def startingDigits(s: String): Option[Int] = {
val digits = s.trim.takeWhile(_.isDigit)
lazy val num = Try(digits.toInt)
if (!digits.isEmpty && num.isSuccess)
num.toOption
else
None
}
/**
* Trims leading and trailing whitespace characters from input. Additionally
* surrounding quotes will be removed.
*/
def unquote(s: String): String = {
val b = new StringBuilder(s.trim)
if (b.length > 1 && b.startsWith("\\"") && b.endsWith("\\""))
b.deleteCharAt(0).deleteCharAt(b.length - 1)
b.toString
}
}
private def append(acc: Map[Section, Vector[Line]], ln: Line, section: Section): Map[Section, Vector[Line]] = {
val types = acc.getOrElse(section, Vector())
acc + (section -> (types :+ ln))
}
private[datasource] def asProperty(ln: Line): Property = ln match {
case p: Property => p
case l: Line => throw new Exception(s"$l is not a Property")
}
private[datasource] def isPropertyWithName(name: String)(ln: Line): Boolean = ln match {
case p: Property => p.name == name
case _ => false
}
private[datasource] def firstPropertiesWithName(name: String, lines: Seq[Line]): Seq[Property] = {
val firstWithName = lines takeWhile isPropertyWithName(name)
firstWithName map asProperty
}
  /**
   * Partitions lines into buckets keyed by the section they appear under.
   * Lines before the first section header are collected under `Global`.
   * A section header switches the current bucket and registers an empty
   * bucket when the section has not been seen before.
   */
  private[datasource] def groupLinesBySection(lines: Seq[Line]): Map[Section, Vector[Line]] = {
    @tailrec
    def go(acc: Map[Section, Vector[Line]], lines: Seq[Line], s: Section): Map[Section, Vector[Line]] = {
      if (lines.isEmpty)
        acc
      else {
        // reassigned below when the head is a section header
        var section = s
        val accumulated = lines.head match {
          case c: Comment =>
            append(acc, c, s)
          case p: Property =>
            append(acc, p, s)
          case s: Section =>
            section = s
            if (acc.get(section).isEmpty)
              acc + (section -> Vector())
            else
              acc
          case u: Unknown =>
            append(acc, u, s)
          case v: Version =>
            append(acc, v, s)
        }
        go(accumulated, lines.tail, section)
      }
    }
    go(Map(), lines, Global)
  }
/**
* Reduces a stream of lines into a data structure for *UAS parser*.
*/
def linesToData(lines: Seq[String]): Data = {
val classified = lines map Line.apply
toData(classified)
}
private[datasource] def toData(lines: Seq[Line]): Data = {
toData {
groupLinesBySection(lines)
}
}
  // Maps every section's raw lines to parsed entries, keeping the section keys.
  private[datasource] def toData: Map[Section, Vector[Line]] => Data = _ map {
    case (section: Section, entries: Vector[Line]) =>
      section -> toEntries(section, entries)
  }
  /**
   * Folds the raw lines of one section into `Entry` values. Consecutive
   * properties sharing the same name form the fields of a single entry;
   * comments, versions and unknown lines pass through unchanged. Must be
   * called after `groupLinesBySection` - encountering a `Section` line
   * here is a programming error.
   */
  private[datasource] def toEntries(section: Section, lines: Seq[Line]): Vector[Entry] = {
    if (lines.nonEmpty) {
      var entries = lines
      var buf = new ArrayBuffer[Entry]
      while (entries.nonEmpty) {
        entries = entries.head match {
          case c: Comment =>
            buf.append(c)
            entries.tail
          case e: Property => {
            // all leading properties with the same name belong to one entry
            val fields = firstPropertiesWithName(e.name, entries).toVector
            val fieldsLength = fields.length
            if (fieldsLength > 0) {
              buf.append(Entry(section, fields))
              entries.drop(fieldsLength)
            } else {
              buf.append(Unknown(e.name + "=" + e.value))
              entries.tail
            }
          }
          case s: Section =>
            throw new Error("group by section first")
          case u: Unknown =>
            buf.append(u)
            entries.tail
          case v: Version =>
            buf.append(v)
            entries.tail
        }
      }
      buf.toVector
    } else Vector()
  }
case class Version(value: String) extends Line with DataVersion
/** The prefix `; Version:` indicate the version of `Data`. */
object Version {
val prefix = "; Version:"
def classify(line: String): Boolean =
if (line.nonEmpty)
line startsWith prefix
else
false
def parse(line: String): Option[Version] =
if (classify(line))
Some(Version(line.substring(prefix.length).trim))
else
None
}
} | before/uadetector-scala | src/main/scala/com/github/before/uadetector/datasource/IniFormat.scala | Scala | apache-2.0 | 16,743 |
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\\
* @ @ *
* # # # # (c) 2017 CAB *
* # # # # # # *
* # # # # # # # # # # # # *
* # # # # # # # # # *
* # # # # # # # # # # # # # # # # # # *
* # # # # # # # # # # # # # # # # # *
* # # # # # # # # # # # # # *
* # # # # # # # # # # # # # # *
* # # # # # # # # # # # # # # # # # # *
* @ @ *
\\* * http://github.com/alexcab * * * * * * * * * * * * * * * * * * * * * * * * * */
package manual.sketches
import mathact.core.bricks.linking.{LinkIn, LinkOut}
import mathact.core.bricks.plumbing.wiring.fun.FunWiring
import mathact.core.bricks.plumbing.wiring.obj.{ObjOnStart, ObjWiring}
import mathact.tools.EmptyBlock
import mathact.tools.workbenches.SimpleWorkbench
import scala.concurrent.Future
/** Helper blocks to use in examples
* Created by CAB on 03.01.2017.
*/
// NOTE(review): the function-arrow glyphs in this file appear mangled to 'β' by a
// character-encoding round trip (presumably originally '⇒') — restore before compiling.
trait HelperBlocks { _: SimpleWorkbench β
  //Int value generator
  // Emits the integers 0..10, one per second, through its single outlet.
  object Generator extends EmptyBlock with ObjWiring with ObjOnStart with LinkOut[Int]{ name = "Int Generator"
    private val gen = new Outflow[Int] {
      // runs asynchronously so the one-second sleeps do not block the caller
      def start(): Unit = Future{
        (0 to 10).foreach{ i β
          pour(i)
          Thread.sleep(1000)}}}
    protected def onStart(): Unit = gen.start()
    val out = Outlet(gen) }
  //String logger
  // Logs every received string via the workbench logger.
  object Logger extends EmptyBlock with FunWiring with LinkIn[String]{ name = "Logger"
    val in = In[String]
    in.foreach(v β logger.info("Logger: " + v))}}
| AlexCAB/MathAct | mathact_examples/src/main/scala/manual/sketches/HelperBlocks.scala | Scala | mit | 2,100 |
package models
import play.api.libs.json._
/**
* Created by evalery on 10/03/15.
*/
object Formats {
  // Play-JSON serializer/deserializer for `Alarm`, derived at compile time by the
  // `Json.format` macro from the case class definition.
  // NOTE(review): public implicit vals should carry an explicit type annotation
  // (e.g. `OFormat[Alarm]`) so the API type does not depend on inference.
  implicit val AlarmFormat = Json.format[Alarm]
} | evanther/play-akka-reactivemongo | app/models/JsonFormats.scala | Scala | apache-2.0 | 154 |
/*
* Copyright 2015 - 2016 Red Bull Media House GmbH <http://www.redbullmediahouse.com> - all rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rbmhtechnology.eventuate
import java.util.concurrent.TimeUnit
import akka.actor._
import com.rbmhtechnology.eventuate.PersistOnEvent._
import com.typesafe.config.Config
import scala.concurrent.duration._
import scala.util._
private class EventsourcedActorSettings(config: Config) {
  /** Maximum time the event log may take to acknowledge a write. */
  val writeTimeout: FiniteDuration = {
    val timeoutMillis = config.getDuration("eventuate.log.write-timeout", TimeUnit.MILLISECONDS)
    timeoutMillis.millis
  }
}
/**
* An `EventsourcedActor` is an [[EventsourcedView]] that can also write new events to its event log.
* New events are written with the asynchronous [[persist]] and [[persistN]] methods. They must only
* be used within the `onCommand` command handler. After successful persistence, the `onEvent` handler
* is automatically called with the persisted event(s). The `onEvent` handler is the place where actor
* state may be updated. The `onCommand` handler should not update actor state but only read it e.g.
* for command validation. `EventsourcedActor`s that want to persist new events within the `onEvent`
* handler should additionally mixin the [[PersistOnEvent]] trait and use the
* [[PersistOnEvent.persistOnEvent persistOnEvent]] method.
*
* @see [[EventsourcedView]]
* @see [[PersistOnEvent]]
*/
trait EventsourcedActor extends EventsourcedView with EventsourcedVersion {
  import EventsourcingProtocol._
  // Settings (e.g. write timeout) read from the actor system configuration.
  private val settings =
    new EventsourcedActorSettings(context.system.settings.config)
  // Stash for messages received while a state-synchronizing write is in progress.
  private val messageStash = new MessageStash()
  // User-level stash, managed via the overridden stash()/unstashAll() below.
  private val commandStash = new MessageStash()
  // Events collected by persist/persistN, flushed to the event log as one Write.
  private var writeRequests: Vector[DurableEvent] = Vector.empty
  // Persist handlers, positionally aligned with writeRequests.
  private var writeHandlers: Vector[Handler[Any]] = Vector.empty
  // Monotonically increasing id correlating Write requests with their replies.
  private var writeRequestCorrelationId: Int = 0
  // Correlation ids of writes that were sent but not yet acknowledged.
  private var writesInProgress: Set[Int] = Set.empty
  // True while a state-synchronizing write awaits its reply (see stateSync).
  private var writing: Boolean = false
  // True while a write reply is being processed; guards against stash() misuse.
  private var writeReplyHandling: Boolean = false
  /**
   * State synchronization. If set to `true`, commands see internal state that is consistent
   * with the event log. This is achieved by stashing new commands if this actor is currently
   * writing events. If set to `false`, commands see internal state that is eventually
   * consistent with the event log.
   */
  //#state-sync
  def stateSync: Boolean = true
  //#
  /**
   * Asynchronously persists a sequence of `events` and calls `handler` with the persist result
   * for each event in the sequence. If persistence was successful, `onEvent` is called with a
   * persisted event before `handler` is called. Both, `onEvent` and `handler`, are called on a
   * dispatcher thread of this actor, hence, it is safe to modify internal state within them.
   * The `handler` can also obtain a reference to the initial command sender via `sender()`. The
   * `onLast` handler is additionally called for the last event in the sequence.
   *
   * By default, the event is routed to event-sourced destinations with an undefined `aggregateId`.
   * If this actor's `aggregateId` is defined it is additionally routed to all actors with the same
   * `aggregateId`. Further routing destinations can be defined with the `customDestinationAggregateIds`
   * parameter.
   */
  final def persistN[A](events: Seq[A], onLast: Handler[A] = (_: Try[A]) => (), customDestinationAggregateIds: Set[String] = Set())(handler: Handler[A]): Unit = events match {
    case Seq() =>
    case es :+ e =>
      es.foreach { event =>
        persist(event, customDestinationAggregateIds)(handler)
      }
      // the last event additionally triggers onLast, after the regular handler
      persist(e, customDestinationAggregateIds) { r =>
        handler(r)
        onLast(r)
      }
  }
  /**
   * Asynchronously persists the given `event` and calls `handler` with the persist result. If
   * persistence was successful, `onEvent` is called with the persisted event before `handler`
   * is called. Both, `onEvent` and `handler`, are called on a dispatcher thread of this actor,
   * hence, it is safe to modify internal state within them. The `handler` can also obtain a
   * reference to the initial command sender via `sender()`.
   *
   * By default, the event is routed to event-sourced destinations with an undefined `aggregateId`.
   * If this actor's `aggregateId` is defined it is additionally routed to all actors with the same
   * `aggregateId`. Further routing destinations can be defined with the `customDestinationAggregateIds`
   * parameter.
   */
  final def persist[A](event: A, customDestinationAggregateIds: Set[String] = Set())(handler: Handler[A]): Unit =
    persistDurableEvent(durableEvent(event, customDestinationAggregateIds), handler.asInstanceOf[Handler[Any]])
  /**
   * Internal API.
   */
  private[eventuate] def persistDurableEvent(event: DurableEvent, handler: Handler[Any]): Unit = {
    // queue the event and its handler; flushed on the next writeOrDelay
    writeRequests = writeRequests :+ event
    writeHandlers = writeHandlers :+ handler
  }
  /**
   * Internal API.
   */
  override private[eventuate] def unhandledMessage(msg: Any): Unit = msg match {
    // successful write: deliver each event to onEvent, then its persist handler
    case WriteSuccess(events, cid, iid) => if (writesInProgress.contains(cid) && iid == instanceId) writeReplyHandling(cid) {
      events.foreach { event =>
        receiveEvent(event)
        writeHandlers.head(Success(event.payload))
        writeHandlers = writeHandlers.tail
      }
      if (stateSync) {
        writing = false
        messageStash.unstash()
      }
    }
    // failed write: inform onEvent internally, then fail each persist handler
    case WriteFailure(events, cause, cid, iid) => if (writesInProgress.contains(cid) && iid == instanceId) writeReplyHandling(cid) {
      events.foreach { event =>
        receiveEventInternal(event, cause)
        writeHandlers.head(Failure(cause))
        writeHandlers = writeHandlers.tail
      }
      if (stateSync) {
        writing = false
        messageStash.unstash()
      }
    }
    // persist-on-event: turn recorded invocations into write requests
    case PersistOnEventRequest(persistOnEventSequenceNr: Long, invocations, iid) => if (iid == instanceId) {
      writeOrDelay {
        writeHandlers = Vector.fill(invocations.length)(PersistOnEvent.DefaultHandler)
        writeRequests = invocations.map {
          case PersistOnEventInvocation(event, customDestinationAggregateIds) =>
            durableEvent(event, customDestinationAggregateIds, None, Some(persistOnEventSequenceNr))
        }
      }
    }
    // any other message: process it, writing whatever persist calls it produced
    case cmd =>
      writeOrDelay(super.unhandledMessage(cmd))
  }
  // Runs `body` with the writeReplyHandling flag set and always removes the
  // correlation id from the in-progress set afterwards.
  private def writeReplyHandling(correlationId: Int)(body: => Unit): Unit =
    try {
      writeReplyHandling = true
      body
    } finally {
      writeReplyHandling = false
      writesInProgress = writesInProgress - correlationId
    }
  // True when persist calls have queued events that still need to be written.
  private def writePending: Boolean =
    writeRequests.nonEmpty
  // Runs the producer (which may queue write requests) unless a state-synchronizing
  // write is in flight, in which case the current message is stashed instead.
  private def writeOrDelay(writeRequestProducer: => Unit): Unit = {
    if (writing) messageStash.stash() else {
      writeRequestProducer
      val wPending = writePending
      if (wPending) write(nextCorrelationId())
      if (wPending && stateSync) writing = true else if (stateSync) messageStash.unstash()
    }
  }
  // Sends the queued events to the event log and records the correlation id.
  private def write(correlationId: Int): Unit = {
    eventLog ! Write(writeRequests, sender(), self, correlationId, instanceId)
    writesInProgress = writesInProgress + correlationId
    writeRequests = Vector.empty
  }
  // Returns a fresh correlation id for the next Write request.
  private def nextCorrelationId(): Int = {
    writeRequestCorrelationId += 1
    writeRequestCorrelationId
  }
  /**
   * Adds the current command to the user's command stash. Must not be used in the event handler
   * or `persist` handler.
   */
  override def stash(): Unit =
    if (writeReplyHandling || eventHandling) throw new StashError("stash() must not be used in event handler or persist handler") else commandStash.stash()
  /**
   * Prepends all stashed commands to the actor's mailbox and then clears the command stash.
   * Has no effect if the actor is recovering i.e. if `recovering` returns `true`.
   */
  override def unstashAll(): Unit =
    if (!recovering) {
      commandStash ++: messageStash
      commandStash.clear()
      messageStash.unstashAll()
    }
}
| ianclegg/eventuate | eventuate-core/src/main/scala/com/rbmhtechnology/eventuate/EventsourcedActor.scala | Scala | apache-2.0 | 8,503 |
package org.pfcoperez.dailyalgorithm.applications
import org.pfcoperez.dailyalgorithm.numericmethods.random.DistributionRandomGenerator
import org.pfcoperez.dailyalgorithm.numericmethods.random.DistributionRandomGenerator.DensityFunction
object SampleBiasedRandom extends App {
  val sampleSize = 1000
  val range = (-1.0, 3.0)
  val intervalWidth = 0.1
  /**
   * Draws `size` pseudo-random values from `distribution`, deterministically
   * seeded with `seed`. Uses an inner tail-recursive accumulator so the
   * generator state is threaded functionally.
   */
  def genSample(size: Int, seed: Long, distribution: DensityFunction): List[Double] = {
    def genSample(size: Int)(acc: List[Double] = Nil, generator: DistributionRandomGenerator): List[Double] =
      if (size == 0) acc else {
        val (nextGen, nextValue) = generator.next
        genSample(size - 1)(nextValue :: acc, nextGen)
      }
    genSample(size)(Nil, DistributionRandomGenerator(distribution, seed))
  }
  // Use the declared `sampleSize` (was a hard-coded 1000 that ignored the constant above).
  val sample =
    genSample(sampleSize, 12122, DistributionRandomGenerator.DensityFunctions.normal(1.0, 0.25)(range))
  /**
   * Counts how many samples fall into each interval of width `intervalWidth`.
   * Intervals are returned sorted, labelled [lower, upper).
   * NOTE(review): the `range` parameter is currently unused (kept for interface
   * compatibility); samples outside it are binned like any other value.
   */
  def intervalFrequencies(
    sample: Seq[Double])(
    range: (Double, Double),
    intervalWidth: Double): Seq[((Double, Double), Int)] = {
    // math.floor fixes binning of negative samples: `(x / w).toInt` truncates
    // toward zero, merging [-w, 0) and [0, w) into one mislabelled bucket.
    val classifiedSample: Map[Int, Seq[Double]] = sample.groupBy(x => math.floor(x / intervalWidth).toInt)
    classifiedSample.toSeq.sortBy(_._1) map {
      case (intNo, samples) =>
        (intNo * intervalWidth, (intNo + 1) * intervalWidth) -> samples.size
    }
  }
  println {
    s"""
       |
       |
       |Stats:
       |------
     """.stripMargin + "\n" + {
      intervalFrequencies(sample)(range, intervalWidth) map {
        case ((a, b), n) => s"[${a.toString.take(4)}, ${b.toString.take(4)}) : $n"
      } mkString ("\n")
    }
  }
}
| pfcoperez/algorithmaday | src/main/scala/org/pfcoperez/dailyalgorithm/applications/SampleBiasedRandom.scala | Scala | gpl-3.0 | 1,587 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.data
import java.io.IOException
import java.util.{Map => JMap, NoSuchElementException}
import com.typesafe.scalalogging.slf4j.Logging
import org.apache.accumulo.core.client._
import org.apache.accumulo.core.client.admin.TimeType
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken
import org.apache.accumulo.core.data.{Key, Value}
import org.geotools.data._
import org.geotools.data.simple.SimpleFeatureSource
import org.geotools.factory.Hints
import org.geotools.feature.NameImpl
import org.geotools.geometry.jts.ReferencedEnvelope
import org.geotools.referencing.crs.DefaultGeographicCRS
import org.joda.time.Interval
import org.locationtech.geomesa.accumulo
import org.locationtech.geomesa.accumulo.GeomesaSystemProperties
import org.locationtech.geomesa.accumulo.data.AccumuloDataStore._
import org.locationtech.geomesa.accumulo.data.tables._
import org.locationtech.geomesa.accumulo.index._
import org.locationtech.geomesa.accumulo.util.{ExplainingConnectorCreator, GeoMesaBatchWriterConfig}
import org.locationtech.geomesa.features.SerializationType.SerializationType
import org.locationtech.geomesa.features.{SerializationType, SimpleFeatureSerializers}
import org.locationtech.geomesa.security.AuthorizationsProvider
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.{FeatureSpec, NonGeomAttributeSpec}
import org.locationtech.geomesa.utils.time.Time._
import org.opengis.feature.`type`.Name
import org.opengis.feature.simple.SimpleFeatureType
import org.opengis.filter.Filter
import org.opengis.referencing.crs.CoordinateReferenceSystem
import scala.collection.JavaConversions._
import scala.collection.mutable
import scala.util.{Failure, Success, Try}
/**
*
* @param connector Accumulo connector
* @param catalogTable Table name in Accumulo to store metadata about featureTypes. For pre-catalog
* single-table stores this equates to the spatiotemporal table name
* @param authorizationsProvider Provides the authorizations used to access data
* @param writeVisibilities Visibilities applied to any data written by this store
*
* This class handles DataStores which are stored in Accumulo Tables. To be clear, one table may
* contain multiple features addressed by their featureName.
*/
class AccumuloDataStore(val connector: Connector,
val authToken: AuthenticationToken,
val catalogTable: String,
val authorizationsProvider: AuthorizationsProvider,
val writeVisibilities: String,
val queryTimeoutConfig: Option[Long] = None,
val queryThreadsConfig: Option[Int] = None,
val recordThreadsConfig: Option[Int] = None,
val writeThreadsConfig: Option[Int] = None,
val cachingConfig: Boolean = false,
val featureEncoding: SerializationType = DEFAULT_ENCODING)
extends AbstractDataStore(true) with AccumuloConnectorCreator with StrategyHintsProvider with Logging {
  // having at least as many shards as tservers provides optimal parallelism in queries
  val DEFAULT_MAX_SHARD = connector.instanceOperations().getTabletServers.size()
  // explicit config wins; otherwise fall back to the system property, if set
  protected[data] val queryTimeoutMillis: Option[Long] = queryTimeoutConfig
    .orElse(GeomesaSystemProperties.QueryProperties.QUERY_TIMEOUT_MILLIS.option.map(_.toLong))
  // record scans are single-row ranges - increasing the threads too much actually causes performance to decrease
  private val recordScanThreads = recordThreadsConfig.getOrElse(10)
  // threads used by batch writers created with defaultBWConfig
  private val writeThreads = writeThreadsConfig.getOrElse(10)
  // cap on the number of threads for any one query
  // if we let threads get too high, performance will suffer for simultaneous clients
  private val MAX_QUERY_THREADS = 15
  // floor on the number of query threads, even if the number of shards is 1
  private val MIN_QUERY_THREADS = 5
  // equivalent to: s"%~#s%$maxShard#r%${name}#cstr%0,3#gh%yyyyMMddHH#d::%~#s%3,2#gh::%~#s%#id"
  // Default index schema: shard, index/data flag, feature-name constant (keeps
  // shared tables separated), geohash + date key parts, and the feature id.
  def buildDefaultSpatioTemporalSchema(name: String, maxShard: Int = DEFAULT_MAX_SHARD): String =
    new IndexSchemaBuilder("~")
      .randomNumber(maxShard)
      .indexOrDataFlag()
      .constant(name)
      .geoHash(0, 3)
      .date("yyyyMMddHH")
      .nextPart()
      .geoHash(3, 2)
      .nextPart()
      .id()
      .build()
  // force lon/lat axis order globally for geotools CRS handling
  Hints.putSystemDefault(Hints.FORCE_LONGITUDE_FIRST_AXIS_ORDER, true)
  // cache of per-feature metadata validation results ("" means validated OK)
  private val validated = new mutable.HashMap[String, String]()
    with mutable.SynchronizedMap[String, String]
  private val metadata: GeoMesaMetadata =
    new AccumuloBackedMetadata(connector, catalogTable, writeVisibilities, authorizationsProvider)
  // cache of (featureName, visibilities) -> whether the current user can read them
  private val visibilityCheckCache = new mutable.HashMap[(String, String), Boolean]()
    with mutable.SynchronizedMap[(String, String), Boolean]
  private val defaultBWConfig =
    GeoMesaBatchWriterConfig().setMaxWriteThreads(writeThreads)
  private val tableOps = connector.tableOperations()
  // the catalog table must exist before any metadata operations
  ensureTableExists(catalogTable)
  /**
   * Computes and writes the metadata for this feature type
   *
   * @param sft the simple feature type being registered
   * @param fe the serialization (feature encoding) type to record
   * @param spatioTemporalSchemaValue the index schema format string to record
   */
  private def writeMetadata(sft: SimpleFeatureType,
                            fe: SerializationType,
                            spatioTemporalSchemaValue: String) {
    // compute the metadata values
    val attributesValue = SimpleFeatureTypes.encodeType(sft)
    val dtgValue: Option[String] = {
      val userData = sft.getUserData
      if (userData.containsKey(accumulo.index.SF_PROPERTY_START_TIME)) {
        Option(userData.get(accumulo.index.SF_PROPERTY_START_TIME).asInstanceOf[String])
      } else {
        None
      }
    }
    val featureEncodingValue = /*_*/fe.toString/*_*/
    val z3TableValue = Z3Table.formatTableName(catalogTable, sft)
    val spatioTemporalIdxTableValue = SpatioTemporalTable.formatTableName(catalogTable, sft)
    val attrIdxTableValue = AttributeTable.formatTableName(catalogTable, sft)
    val recordTableValue = RecordTable.formatTableName(catalogTable, sft)
    val queriesTableValue = formatQueriesTableName(catalogTable)
    val dtgFieldValue = dtgValue.getOrElse(accumulo.DEFAULT_DTG_PROPERTY_NAME)
    val tableSharingValue = accumulo.index.getTableSharing(sft).toString
    val dataStoreVersion = INTERNAL_GEOMESA_VERSION.toString
    // store each metadata in the associated key
    val attributeMap =
      Map(
        ATTRIBUTES_KEY -> attributesValue,
        SCHEMA_KEY -> spatioTemporalSchemaValue,
        DTGFIELD_KEY -> dtgFieldValue,
        FEATURE_ENCODING_KEY -> featureEncodingValue,
        VISIBILITIES_KEY -> writeVisibilities,
        Z3_TABLE_KEY -> z3TableValue,
        ST_IDX_TABLE_KEY -> spatioTemporalIdxTableValue,
        ATTR_IDX_TABLE_KEY -> attrIdxTableValue,
        RECORD_TABLE_KEY -> recordTableValue,
        QUERIES_TABLE_KEY -> queriesTableValue,
        SHARED_TABLES_KEY -> tableSharingValue,
        VERSION_KEY -> dataStoreVersion
      )
    val featureName = getFeatureName(sft)
    metadata.insert(featureName, attributeMap)
    // write out a visibilities protected entry that we can use to validate that a user can see
    // data in this store
    if (!writeVisibilities.isEmpty) {
      metadata.insert(featureName, VISIBILITIES_CHECK_KEY, writeVisibilities, writeVisibilities)
    }
  }
  /**
   * Used to update the attributes that are marked as indexed
   *
   * @param featureName name of an existing feature type
   * @param attributes the updated attribute spec string
   */
  def updateIndexedAttributes(featureName: String, attributes: String): Unit = {
    val FeatureSpec(existing, _) = SimpleFeatureTypes.parse(getAttributes(featureName).getOrElse(""))
    val FeatureSpec(updated, _) = SimpleFeatureTypes.parse(attributes)
    // check that the only changes are to non-geometry index flags
    val ok = existing.length == updated.length &&
      existing.zip(updated).forall { case (e, u) => e == u ||
        (e.isInstanceOf[NonGeomAttributeSpec] &&
          u.isInstanceOf[NonGeomAttributeSpec] &&
          e.clazz == u.clazz &&
          e.name == u.name) }
    if (!ok) {
      throw new IllegalArgumentException("Attribute spec is not consistent with existing spec")
    }
    metadata.insert(featureName, ATTRIBUTES_KEY, attributes)
    // reconfigure the splits on the attribute table
    val sft = getSchema(featureName)
    val table = getAttributeTable(featureName)
    AttributeTable.configureTable(sft, table, tableOps)
  }
  // Convenience alias for raw Accumulo key/value scan entries.
  type KVEntry = JMap.Entry[Key,Value]
  /**
   * Read Record table name from store metadata
   */
  def getRecordTable(featureType: SimpleFeatureType): String = getRecordTable(featureType.getTypeName)
  /**
   * Read Record table name from store metadata
   */
  def getRecordTable(featureName: String): String = metadata.readRequired(featureName, RECORD_TABLE_KEY)
  /**
   * Read SpatioTemporal Index table name from store metadata
   */
  override def getSpatioTemporalTable(featureType: SimpleFeatureType): String =
    getSpatioTemporalTable(featureType.getTypeName)
  // Read Z3 index table name from store metadata
  override def getZ3Table(featureType: SimpleFeatureType): String = getZ3Table(featureType.getTypeName)
  def getZ3Table(featureName: String): String =
    metadata.readRequired(featureName, Z3_TABLE_KEY)
  /**
   * Read SpatioTemporal Index table name from store metadata
   */
  def getSpatioTemporalTable(featureName: String): String =
    metadata.readRequired(featureName, ST_IDX_TABLE_KEY)
  /**
   * Read Attribute Index table name from store metadata
   */
  override def getAttributeTable(featureType: SimpleFeatureType): String =
    getAttributeTable(featureType.getTypeName)
  /**
   * Read Attribute Index table name from store metadata
   */
  def getAttributeTable(featureName: String): String =
    metadata.readRequired(featureName, ATTR_IDX_TABLE_KEY)
  /**
   * Read Queries table name from store metadata
   */
  def getQueriesTableName(featureType: SimpleFeatureType): String =
    getQueriesTableName(featureType.getTypeName)
  /**
   * Read Queries table name from store metadata
   */
  def getQueriesTableName(featureName: String): String =
    Try(metadata.readRequired(featureName, QUERIES_TABLE_KEY)) match {
      case Success(queriesTableName) => queriesTableName
      // For backwards compatibility with existing tables that do not have queries table metadata
      case Failure(t) if t.getMessage.contains("Unable to find required metadata property") =>
        writeAndReturnMissingQueryTableMetadata(featureName)
      case Failure(t) => throw t
    }
  /**
   * Here just to write missing query metadata (for backwards compatibility with preexisting data).
   */
  private[this] def writeAndReturnMissingQueryTableMetadata(featureName: String): String = {
    val queriesTableValue = formatQueriesTableName(catalogTable)
    metadata.insert(featureName, QUERIES_TABLE_KEY, queriesTableValue)
    queriesTableValue
  }
/**
* Read SpatioTemporal Index table name from store metadata
*/
def getSpatioTemporalMaxShard(sft: SimpleFeatureType): Int = {
val indexSchemaFmt = metadata.read(sft.getTypeName, SCHEMA_KEY)
.getOrElse(throw new RuntimeException(s"Unable to find required metadata property for $SCHEMA_KEY"))
IndexSchema.maxShard(indexSchemaFmt)
}
  // Creates (if missing) and configures every index table this feature type supports.
  def createTablesForType(sft: SimpleFeatureType) {
    Seq(Z3Table, SpatioTemporalTable, AttributeTable, RecordTable).foreach { table =>
      if (table.supports(sft)) {
        val tableName = table.formatTableName(catalogTable, sft)
        ensureTableExists(tableName)
        table.configureTable(sft, tableName, tableOps)
      }
    }
  }
  // Creates the table (with logical time) if it does not exist yet; the
  // TableExistsException catch deliberately swallows the create/create race.
  private def ensureTableExists(table: String) =
    if (!tableOps.exists(table)) {
      try {
        tableOps.create(table, true, TimeType.LOGICAL)
      } catch {
        case e: TableExistsException => // this can happen with multiple threads but shouldn't cause any issues
      }
    }
// Retrieves or computes the indexSchema
def computeSpatioTemporalSchema(sft: SimpleFeatureType): String = {
accumulo.index.getIndexSchema(sft) match {
case None => buildDefaultSpatioTemporalSchema(getFeatureName(sft))
case Some(schema) => schema
}
}
  /**
   * Compute the GeoMesa SpatioTemporal Schema, create tables, and write metadata to catalog.
   * If the schema already exists, log a message and continue without error.
   *
   * @param sft the simple feature type to create
   */
  override def createSchema(sft: SimpleFeatureType) =
    if (getSchema(sft.getTypeName) == null) {
      // inspect, warn and set SF_PROPERTY_START_TIME if appropriate
      // do this before anything else so appropriate tables will be created
      TemporalIndexCheck.extractNewDTGFieldCandidate(sft)
        .foreach(sft.getUserData.put(accumulo.index.SF_PROPERTY_START_TIME, _))
      val spatioTemporalSchema = computeSpatioTemporalSchema(sft)
      checkSchemaRequirements(sft, spatioTemporalSchema)
      createTablesForType(sft)
      writeMetadata(sft, featureEncoding, spatioTemporalSchema)
    }
  // This function enforces the shared ST schema requirements.
  // For a shared ST table, the IndexSchema must start with a partition number and a constant string.
  // TODO: This function should check if the constant is equal to the featureType.getTypeName
  def checkSchemaRequirements(featureType: SimpleFeatureType, schema: String) {
    if(accumulo.index.getTableSharing(featureType)) {
      // NOTE(review): `.get` throws if the schema string fails to parse - presumably
      // unreachable for builder-produced schemas, but verify for user-supplied ones
      val (rowf, _,_) = IndexSchema.parse(IndexSchema.formatter, schema).get
      rowf.lf match {
        case Seq(pf: PartitionTextFormatter, i: IndexOrDataTextFormatter, const: ConstantTextFormatter, r@_*) =>
        case _ => throw new RuntimeException(s"Failed to validate the schema requirements for " +
          s"the feature ${featureType.getTypeName} for catalog table : $catalogTable. " +
          s"We require that features sharing a table have schema starting with a partition and a constant.")
      }
    }
  }
  /**
   * Deletes the tables from Accumulo created from the Geomesa SpatioTemporal Schema, and deletes
   * metadata from the catalog. If the table is an older 0.10.x table, we throw an exception.
   *
   * This version overrides the default geotools removeSchema function and uses 1 thread for
   * querying during metadata deletion.
   *
   * @param featureName the name of the feature
   */
  override def removeSchema(featureName: String) = removeSchema(featureName, 1)
  /**
   * Deletes the tables from Accumulo created from the Geomesa SpatioTemporal Schema, and deletes
   * metadata from the catalog. If the table is an older 0.10.x table, we throw an exception.
   *
   * @param featureName the name of the feature
   * @param numThreads the number of concurrent threads to spawn for querying during metadata deletion
   */
  def removeSchema(featureName: String, numThreads: Int = 1) =
    if (metadata.read(featureName, ST_IDX_TABLE_KEY).nonEmpty) {
      val featureType = getSchema(featureName)
      // shared tables get row-level deletes; stand-alone tables are dropped outright
      if (accumulo.index.getTableSharing(featureType)) {
        deleteSharedTables(featureType)
      } else {
        deleteStandAloneTables(featureType)
      }
      metadata.delete(featureName, numThreads)
      metadata.expireCache(featureName)
    } else {
      // TODO: Apply the SpatioTemporalTable.deleteFeaturesFromTable here?
      // https://geomesa.atlassian.net/browse/GEOMESA-360
      throw new RuntimeException("Cannot delete schema for this version of the data store")
    }
private def deleteSharedTables(sft: SimpleFeatureType) = {
val auths = authorizationsProvider.getAuthorizations
val numThreads = queryThreadsConfig.getOrElse(Math.min(MAX_QUERY_THREADS,
Math.max(MIN_QUERY_THREADS, getSpatioTemporalMaxShard(sft))))
GeoMesaTable.getTablesAndNames(sft, this).foreach { case (table, name) =>
val deleter = connector.createBatchDeleter(name, auths, numThreads, defaultBWConfig)
table.deleteFeaturesForType(sft, deleter)
deleter.close()
}
}
  // NB: We are *not* currently deleting the query table and/or query information.
  // Drops this feature's dedicated index tables entirely (only those that exist).
  private def deleteStandAloneTables(sft: SimpleFeatureType) =
    GeoMesaTable.getTableNames(sft, this).filter(tableOps.exists).foreach(tableOps.delete)
  /**
   * Delete everything (all tables) associated with this datastore (index tables and catalog table)
   * NB: We are *not* currently deleting the query table and/or query information.
   */
  def delete() = {
    val indexTables = getTypeNames.map(getSchema).flatMap(GeoMesaTable.getTableNames(_, this)).distinct
    // Delete index tables first then catalog table in case of error
    indexTables.filter(tableOps.exists).foreach(tableOps.delete)
    tableOps.delete(catalogTable)
  }
  /**
   * Validates the configuration of this data store instance against the stored configuration in
   * Accumulo, if any. This is used to ensure that the visibilities (in particular) do not change
   * from one instance to the next. This will fill in any missing metadata that may occur in an
   * older (0.1.0) version of a table.
   */
  protected def validateMetadata(featureName: String): Unit = {
    metadata.read(featureName, ATTRIBUTES_KEY)
      .getOrElse(throw new IOException(s"Feature '$featureName' has not been initialized. Please call 'createSchema' first."))
    // cached check result: an empty string means the stored metadata matches this config
    val ok = validated.getOrElseUpdate(featureName, checkMetadata(featureName))
    if (!ok.isEmpty) {
      throw new RuntimeException("Configuration of this DataStore does not match the schema values: " + ok)
    }
  }
/**
* Wraps the functionality of checking the metadata against this config.
*
* @param featureName the simple feature type name to check
* @return string with errors, or empty string if the metadata is consistent
*/
private def checkMetadata(featureName: String): String = {
// check the different metadata options
val checks = List(checkVisibilitiesMetadata(featureName))
val errors = checks.flatten.mkString(", ")
// if no errors, check the feature encoding
// NOTE: unlike the checks above, checkFeatureEncodingMetadata throws directly instead of
// contributing to the returned error string
if (errors.isEmpty) {
checkFeatureEncodingMetadata(featureName)
}
errors
}
/**
 * Checks the visibility stored in the metadata table against the configuration of this data store.
 *
 * @param featureName the simple feature type name to check
 * @return Some(error message) when the configured write visibilities differ from the stored
 *         ones, None when they match
 */
private def checkVisibilitiesMetadata(featureName: String): Option[String] = {
  // a missing metadata entry is treated as the empty visibility string
  val storedVisibilities = metadata.read(featureName, VISIBILITIES_KEY).getOrElse("")
  if (storedVisibilities == writeVisibilities) {
    None
  } else {
    Some(s"$VISIBILITIES_KEY = '$writeVisibilities', should be '$storedVisibilities'")
  }
}
/**
* Checks the feature encoding in the metadata against the configuration of this data store.
* We are lenient here - whatever encoding is stored in the table wins - but an entry must exist.
*
* @param featureName the simple feature type name to check
* @throws RuntimeException if no feature encoding entry is stored for the type
*/
def checkFeatureEncodingMetadata(featureName: String): Unit = {
// for feature encoding, we are more lenient - we will use whatever is stored in the table
if (metadata.read(featureName, FEATURE_ENCODING_KEY).getOrElse("").isEmpty) {
throw new RuntimeException(s"No '$FEATURE_ENCODING_KEY' found for feature '$featureName'.")
}
}
/**
 * Checks whether the current user can write - based on whether they can read data in this
 * data store with the configured visibilities.
 *
 * @param featureName the simple feature type name to check
 * @throws RuntimeException if the current user's authorizations do not cover the
 *                          visibilities stored for this feature type
 */
protected def checkWritePermissions(featureName: String): Unit = {
  val visibilities = metadata.read(featureName, VISIBILITIES_KEY).getOrElse("")
  // nothing to verify when no visibilities are configured
  if (visibilities.nonEmpty) {
    // canonical sorted auth string, used as the cache key for the permission check
    val authString = authorizationsProvider.getAuthorizations.getAuthorizations
        .map(new String(_)).sorted.mkString(",")
    if (!checkWritePermissions(featureName, authString)) {
      throw new RuntimeException(s"The current user does not have the required authorizations to " +
          s"write $featureName features. Required authorizations: '$visibilities', " +
          s"actual authorizations: '$authString'")
    }
  }
}
/**
* Wraps logic for checking write permissions for a given set of auths, memoizing the result
* per (feature, auths) pair.
*
* @param featureName the simple feature type name to check
* @param authString canonical sorted comma-separated auth string (cache key)
* @return true if the user can read the visibility-protected check row (and hence can write)
*/
private def checkWritePermissions(featureName: String, authString: String) = {
// if cache contains an entry, use that
// NOTE(review): getOrElse + put is not an atomic check-then-act; concurrent callers may
// perform the metadata read twice, but the result is stable so this is harmless
visibilityCheckCache.getOrElse((featureName, authString), {
// check the 'visibilities check' metadata - it has visibilities applied, so if the user
// can read that row, then they can read any data in the data store
// NOTE(review): due to type erasure this only checks 'is a Some'; presumably equivalent
// to .isDefined if readRequiredNoCache returns Option[String] - TODO confirm
val visCheck = metadata.readRequiredNoCache(featureName, VISIBILITIES_CHECK_KEY)
.isInstanceOf[Some[String]]
visibilityCheckCache.put((featureName, authString), visCheck)
visCheck
})
}
/**
* Implementation of AbstractDataStore getTypeNames
*
* @return all feature type names registered in the catalog, or empty if the catalog
*         table does not exist yet
*/
override def getTypeNames: Array[String] =
if (tableOps.exists(catalogTable)) {
metadata.getFeatureTypes
}
else {
Array()
}
// NB: By default, AbstractDataStore is "isWriteable". This means that createFeatureSource returns
// a featureStore
// Validates the stored metadata first, then optionally wraps the store with caching behavior.
override def getFeatureSource(typeName: Name): SimpleFeatureSource = {
validateMetadata(typeName.getLocalPart)
if (cachingConfig) {
new AccumuloFeatureStore(this, typeName) with CachingFeatureSource
} else {
new AccumuloFeatureStore(this, typeName)
}
}
// String overload - delegates to the Name-based variant.
override def getFeatureSource(typeName: String): SimpleFeatureSource =
getFeatureSource(new NameImpl(typeName))
/**
* Reads the index schema format out of the metadata.
*
* @param featureName the simple feature type name
* @return the stored index schema format, or the empty string if none is stored
*/
def getIndexSchemaFmt(featureName: String) =
metadata.read(featureName, SCHEMA_KEY).getOrElse(EMPTY_STRING)
/**
* Updates the index schema format - WARNING don't use this unless you know what you're doing.
* Expires the metadata cache so subsequent reads see the new value.
*
* @param sft the simple feature type name
* @param schema the new index schema format string
*/
def setIndexSchemaFmt(sft: String, schema: String) = {
metadata.insert(sft, SCHEMA_KEY, schema)
metadata.expireCache(sft)
}
/**
* Gets the internal geomesa version number for a given feature type.
*
* @param sft the feature type
* @return the internal version number
*/
def getGeomesaVersion(sft: SimpleFeatureType): Int = getGeomesaVersion(sft.getTypeName)
/**
* Gets the internal geomesa version number for a given feature type name.
*
* @param sft the simple feature type name
* @return the internal version number (0 or 1 is inferred for pre-version tables)
*/
def getGeomesaVersion(sft: String): Int =
metadata.read(sft, VERSION_KEY).map(_.toInt).getOrElse {
// back compatible checks for before we wrote the explicit version
if (metadata.read(sft, ST_IDX_TABLE_KEY).isEmpty) {
0 // version 0 corresponds to the old 'non-catalog' table format
} else {
1 // version 1 corresponds to the split tables with unsorted STIDX
}
}
/**
* Update the geomesa version and expire the metadata cache.
*
* @param sft the simple feature type name
* @param version the new internal version number
*/
def setGeomesaVersion(sft: String, version: Int): Unit = {
metadata.insert(sft, VERSION_KEY, version.toString)
metadata.expireCache(sft)
}
/**
* Reads the attributes out of the metadata.
*
* @param featureName the simple feature type name
* @return the stored attribute spec string, if any
*/
private def getAttributes(featureName: String) = metadata.read(featureName, ATTRIBUTES_KEY)
/**
 * Reads the feature encoding from the metadata.
 *
 * @param sft the feature type to read the encoding for
 * @return the serialization type stored for this feature type
 * @throws RuntimeException if the feature encoding is missing or invalid
 */
def getFeatureEncoding(sft: SimpleFeatureType): SerializationType = {
  val name = metadata.readRequired(sft.getTypeName, FEATURE_ENCODING_KEY)
  try {
    SerializationType.withName(name)
  } catch {
    // fix: chain the original exception as the cause so the failure is debuggable
    case e: NoSuchElementException =>
      throw new RuntimeException(s"Invalid Feature Encoding '$name'.", e)
  }
}
// We assume that they want the bounds for everything.
// Falls back to whole-world bounds when no (or a malformed) stored envelope is found.
override def getBounds(query: Query): ReferencedEnvelope = {
val env = metadata.read(query.getTypeName, SPATIAL_BOUNDS_KEY).getOrElse(WHOLE_WORLD_BOUNDS)
// stored format is "minX:maxX:minY:maxY"; anything else is treated as unknown
val minMaxXY = env.split(":")
val curBounds = minMaxXY.size match {
case 4 => env
case _ => WHOLE_WORLD_BOUNDS
}
val sft = getSchema(query.getTypeName)
val crs = sft.getCoordinateReferenceSystem
stringToReferencedEnvelope(curBounds, crs)
}
// Returns the stored temporal bounds, or all-time if none have been written.
def getTimeBounds(typeName: String): Interval = {
metadata.read(typeName, TEMPORAL_BOUNDS_KEY)
.map(stringToTimeBounds)
.getOrElse(ALL_TIME_BOUNDS)
}
// Size (in bytes) of the record table backing the given feature type.
def getRecordTableSize(featureName: String): Long = {
metadata.getTableSize(getRecordTable(featureName))
}
/**
 * Parses a "startMillis:endMillis" string (as written by writeTemporalBounds) into an Interval.
 *
 * @param value colon-separated pair of epoch-millisecond longs
 * @return the parsed interval
 * @throws IllegalArgumentException if the string does not have exactly two parts or start > end
 */
def stringToTimeBounds(value: String): Interval = {
  val longs = value.split(":").map(java.lang.Long.parseLong)
  // fix: validate the arity *before* indexing - the original checked longs(0) <= longs(1)
  // first, which threw ArrayIndexOutOfBoundsException on malformed input instead of a
  // meaningful requirement failure
  require(longs.length == 2, s"Invalid time bounds string: $value")
  require(longs(0) <= longs(1), s"Invalid time bounds (start after end): $value")
  new Interval(longs(0), longs(1))
}
// Parses a "minX:maxX:minY:maxY" string into a ReferencedEnvelope in the given CRS.
// Note the argument order of the ReferencedEnvelope constructor here is (minX, maxX, minY, maxY).
private def stringToReferencedEnvelope(string: String,
crs: CoordinateReferenceSystem): ReferencedEnvelope = {
val minMaxXY = string.split(":")
require(minMaxXY.size == 4)
new ReferencedEnvelope(minMaxXY(0).toDouble, minMaxXY(1).toDouble, minMaxXY(2).toDouble,
minMaxXY(3).toDouble, crs)
}
/**
* Writes spatial bounds for this feature, expanding any previously stored bounds to include
* the new envelope. The value is stored as "minX:maxX:minY:maxY".
*
* @param featureName the simple feature type name
* @param bounds the envelope to merge into the stored bounds
*/
def writeSpatialBounds(featureName: String, bounds: ReferencedEnvelope) {
// prepare to write out properties to the Accumulo SHP-file table
val newbounds = metadata.read(featureName, SPATIAL_BOUNDS_KEY) match {
case Some(env) => getNewBounds(env, bounds)
case None => bounds
}
val minMaxXY = List(newbounds.getMinX, newbounds.getMaxX, newbounds.getMinY, newbounds.getMaxY)
val encoded = minMaxXY.mkString(":")
metadata.insert(featureName, SPATIAL_BOUNDS_KEY, encoded)
}
// Merges the stored bounds string with the new envelope. The stored string is always
// interpreted in WGS84. Note: expandToInclude mutates oldBounds in place.
private def getNewBounds(env: String, bounds: ReferencedEnvelope) = {
val oldBounds = stringToReferencedEnvelope(env, DefaultGeographicCRS.WGS84)
oldBounds.expandToInclude(bounds)
oldBounds
}
/**
* Writes temporal bounds for this feature, expanding any previously stored bounds to include
* the new interval. The value is stored as "startMillis:endMillis".
*
* @param featureName the simple feature type name
* @param timeBounds the interval to merge into the stored bounds
*/
def writeTemporalBounds(featureName: String, timeBounds: Interval) {
val newTimeBounds = metadata.read(featureName, TEMPORAL_BOUNDS_KEY) match {
case Some(currentTimeBoundsString) => getNewTimeBounds(currentTimeBoundsString, timeBounds)
case None => Some(timeBounds)
}
// Only write expanded bounds.
newTimeBounds.foreach { newBounds =>
val encoded = s"${newBounds.getStartMillis}:${newBounds.getEndMillis}"
metadata.insert(featureName, TEMPORAL_BOUNDS_KEY, encoded)
}
}
// Returns the expanded interval when the new bounds actually grow the stored bounds,
// or None when the stored interval already covers them (avoids a redundant write).
def getNewTimeBounds(current: String, newBounds: Interval): Option[Interval] = {
val currentTimeBounds = stringToTimeBounds(current)
val expandedTimeBounds = currentTimeBounds.expandByInterval(newBounds)
if (!currentTimeBounds.equals(expandedTimeBounds)) {
Some(expandedTimeBounds)
} else {
None
}
}
/**
* Implementation of abstract method
*
* @param featureName the simple feature type name
* @return the corresponding feature type (schema) for this feature name,
* or NULL if this feature name does not appear to exist
*/
override def getSchema(featureName: String): SimpleFeatureType = getSchema(new NameImpl(featureName))
/**
 * Reconstructs the feature type from the stored attribute spec and decorates it with the
 * dtg field, index schema, and table-sharing user data read from the metadata.
 *
 * @param name the feature type name
 * @return the feature type, or null if it is not registered in the catalog
 */
override def getSchema(name: Name): SimpleFeatureType = {
  val featureName = name.getLocalPart
  getAttributes(featureName).map { attributes =>
    val sft = SimpleFeatureTypes.createType(name.getURI, attributes)
    val dtgField = metadata.read(featureName, DTGFIELD_KEY).getOrElse(accumulo.DEFAULT_DTG_PROPERTY_NAME)
    val indexSchema = metadata.read(featureName, SCHEMA_KEY).orNull
    // If no data is written, we default to 'false' in order to support old tables.
    val sharingBoolean = metadata.read(featureName, SHARED_TABLES_KEY).getOrElse("false")
    sft.getUserData.put(accumulo.index.SF_PROPERTY_START_TIME, dtgField)
    sft.getUserData.put(accumulo.index.SF_PROPERTY_END_TIME, dtgField)
    sft.getUserData.put(accumulo.index.SFT_INDEX_SCHEMA, indexSchema)
    // fix: use Boolean.valueOf instead of the deprecated boxing constructor
    // new java.lang.Boolean(String); parsing semantics are identical
    accumulo.index.setTableSharing(sft, java.lang.Boolean.valueOf(sharingBoolean))
    sft
  }.orNull
}
// Implementation of Abstract method - builds a reader over the entire feature type.
def getFeatureReader(featureName: String): AccumuloFeatureReader =
getFeatureReader(featureName, new Query(featureName))
// This override is important as it allows us to optimize and plan our search with the Query.
override def getFeatureReader(featureName: String, query: Query): AccumuloFeatureReader = {
val qp = getQueryPlanner(featureName, this)
new AccumuloFeatureReader(qp, query, this)
}
// override the abstract data store method - we already handle all projections, transformations, etc
// NOTE: the transaction parameter is not used by this implementation.
override def getFeatureReader(query: Query, transaction: Transaction): AccumuloFeatureReader =
getFeatureReader(query.getTypeName, query)
/**
* Gets the query plan for a given query. The query plan consists of the tables, ranges, iterators etc
* required to run a query against accumulo.
*/
def getQueryPlan(query: Query): Seq[QueryPlan] = {
require(query.getTypeName != null, "Type name is required in the query")
planQuery(query.getTypeName, query, ExplainNull)
}
/**
* Prints the query plan for a given query to the provided output.
*/
def explainQuery(query: Query, o: ExplainerOutputType = ExplainPrintln): Unit = {
require(query.getTypeName != null, "Type name is required in the query")
planQuery(query.getTypeName, query, o)
}
/**
* Plans the query against an explaining connector creator, writing the plan to the given
* output, and returns the resulting query plans.
*/
private def planQuery(featureName: String, query: Query, o: ExplainerOutputType): Seq[QueryPlan] = {
val cc = new ExplainingConnectorCreator(this, o)
val qp = getQueryPlanner(featureName, cc)
qp.planQuery(query, None, o)
}
/**
* Gets a query planner. Also has side-effect of setting transforms in the query.
*/
private def getQueryPlanner(featureName: String, cc: AccumuloConnectorCreator): QueryPlanner = {
validateMetadata(featureName)
val sft = getSchema(featureName)
val indexSchemaFmt = getIndexSchemaFmt(featureName)
val featureEncoding = getFeatureEncoding(sft)
val version = getGeomesaVersion(sft)
val hints = strategyHints(sft)
new QueryPlanner(sft, featureEncoding, indexSchemaFmt, cc, hints, version)
}
/* create a general purpose writer that is capable of insert, deletes, and updates */
// Validates metadata and write permissions before constructing the writer.
// NOTE: the transaction parameter is not used by this implementation.
override def getFeatureWriter(typeName: String, filter: Filter, transaction: Transaction): SFFeatureWriter = {
validateMetadata(typeName)
checkWritePermissions(typeName)
val sft = getSchema(typeName)
val fe = SimpleFeatureSerializers(sft, getFeatureEncoding(sft))
val ive = IndexValueEncoder(sft, getGeomesaVersion(sft))
new ModifyAccumuloFeatureWriter(sft, fe, ive, this, writeVisibilities, filter)
}
/* optimized for GeoTools API to return writer ONLY for appending (aka don't scan table) */
// NOTE: the transaction parameter is not used by this implementation.
override def getFeatureWriterAppend(typeName: String,
transaction: Transaction): SFFeatureWriter = {
validateMetadata(typeName)
checkWritePermissions(typeName)
val sft = getSchema(typeName)
val fe = SimpleFeatureSerializers(sft, getFeatureEncoding(sft))
val ive = IndexValueEncoder(sft, getGeomesaVersion(sft))
new AppendAccumuloFeatureWriter(sft, fe, ive, this, writeVisibilities)
}
// All filters are handled by the query planner, so nothing is ever "unsupported" here.
override def getUnsupportedFilter(featureName: String, filter: Filter): Filter = Filter.INCLUDE
// ST scan threads: explicit config wins, otherwise shard count clamped into [MIN, MAX].
override def getSuggestedSpatioTemporalThreads(sft: SimpleFeatureType): Int = queryThreadsConfig.getOrElse{
val numShards = getSpatioTemporalMaxShard(sft)
Math.min(MAX_QUERY_THREADS, Math.max(MIN_QUERY_THREADS, numShards))
}
// Attribute index scans are single-threaded.
override def getSuggestedAttributeThreads(sft: SimpleFeatureType): Int = 1
override def getSuggestedRecordThreads(sft: SimpleFeatureType): Int = recordScanThreads
// Z3 scan threads: explicit config wins, otherwise a default of 8.
override def getSuggestedZ3Threads(sft: SimpleFeatureType): Int = queryThreadsConfig.getOrElse(8)
// Scanners are created with the current user's authorizations.
override def getBatchScanner(table: String, numThreads: Int): BatchScanner =
connector.createBatchScanner(table, authorizationsProvider.getAuthorizations, numThreads)
override def getScanner(table: String): Scanner =
connector.createScanner(table, authorizationsProvider.getAuthorizations)
// Accumulo assumes that the failures directory exists. This function assumes that you have already created it.
// Bulk-imports pre-built RFiles from 'dir' into 'tableName'; failed files land in 'failureDir'.
def importDirectory(tableName: String, dir: String, failureDir: String, disableGC: Boolean) {
tableOps.importDirectory(tableName, dir, failureDir, disableGC)
}
/**
* Gets the feature name from a feature type.
*
* @param featureType the feature type
* @return the local (unqualified) type name
*/
private def getFeatureName(featureType: SimpleFeatureType) = featureType.getName.getLocalPart
// Strategy hints are derived from the feature type's user data.
override def strategyHints(sft: SimpleFeatureType) = new UserDataStrategyHints()
}
object AccumuloDataStore {
/**
* Formats the queries table name for Accumulo. The table name is stored in the metadata for
* other usage, which provides compatibility moving forward if the naming convention changes.
*
* @param catalogTable the catalog table name used as the prefix
* @return the derived queries table name
*/
def formatQueriesTableName(catalogTable: String): String =
GeoMesaTable.concatenateNameParts(catalogTable, "queries")
}
| mcharles/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/data/AccumuloDataStore.scala | Scala | apache-2.0 | 33,923 |
package scaffvis.client.store.actions
import scaffvis.shared.model.{Scaffold, _}
import diode.Action
// Diode actions for loading and updating scaffolds and their rendered SVGs in the client store.
object ScaffoldsActions {
// request the children of the given scaffold
case class LoadChildren(scaffoldId: ScaffoldId) extends Action
// store the loaded children of the given scaffold
case class UpdateChildren(scaffoldId: ScaffoldId, children: Seq[Scaffold]) extends Action
// request a single scaffold (resolved as a chain up to the root)
case class LoadScaffold(scaffoldId: ScaffoldId) extends Action
// store a loaded scaffold chain
case class StoreScaffold(chain: Seq[Scaffold]) extends Action
// request SVG renderings for the given scaffolds
case class LoadScaffoldSvg(scaffoldIds: Seq[ScaffoldId]) extends Action
// store loaded SVG renderings; svg is positionally aligned with scaffoldIds
case class UpdateScaffoldSvg(scaffoldIds: Seq[ScaffoldId], svg: Seq[String]) extends Action
}
| velkoborsky/scaffvis | client/src/main/scala/scaffvis/client/store/actions/ScaffoldsActions.scala | Scala | gpl-3.0 | 588 |
package memnets.models
import memnets.core.Library
/**
* Community-contributed models. Currently empty - see below for how to contribute.
*
* If you'd like to contribute a model:
*
* 1) fork the repository and add it here
* 2) refer to the StandardLibrary for best practices
* 3) if your model needs data, figure out a way to generate it in a function
* 4) if it is machine learning data, add it to ml.DataSources/ml.DataGens or use an existing one
*
* If you have a reusable component:
*
* 1) add the class under the appropriate package (e.g., PredPrey is under biology)
*    1b) if a broad category is missing, create a new package, but use an existing one when possible
*    1c) if it is machine learning and can implement the Learner trait, put it in memnets.ml
* 2) make a model here using it
*/
object CommunityLibrary extends Library {}
| MemoryNetworks/memnets | models/src/main/scala/memnets/models/CommunityLibrary.scala | Scala | apache-2.0 | 750 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
/**
* A class for tracking the statistics of a set of numbers (count, mean and variance) in a
* numerically robust way. Includes support for merging two StatCounters. Based on Welford
* and Chan's [[http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance algorithms]]
* for running variance.
*
* Not thread-safe: external synchronization is required for concurrent merges.
*
* @constructor Initialize the StatCounter with the given values.
*/
class StatCounter(values: TraversableOnce[Double]) extends Serializable {
private var n: Long = 0 // Running count of our values
private var mu: Double = 0 // Running mean of our values
private var m2: Double = 0 // Running variance numerator (sum of (x - mean)^2)
private var maxValue: Double = Double.NegativeInfinity // Running max of our values
private var minValue: Double = Double.PositiveInfinity // Running min of our values
merge(values)
/** Initialize the StatCounter with no values. */
def this() = this(Nil)
/** Add a value into this StatCounter, updating the internal statistics. */
def merge(value: Double): StatCounter = {
// Welford's single-pass update: the mean is updated first, and m2 accumulates
// delta * (value - newMean), which keeps the variance numerically stable
val delta = value - mu
n += 1
mu += delta / n
m2 += delta * (value - mu)
maxValue = math.max(maxValue, value)
minValue = math.min(minValue, value)
this
}
/** Add multiple values into this StatCounter, updating the internal statistics. */
def merge(values: TraversableOnce[Double]): StatCounter = {
values.foreach(v => merge(v))
this
}
/** Merge another StatCounter into this one, adding up the internal statistics. */
def merge(other: StatCounter): StatCounter = {
if (other == this) {
merge(other.copy()) // Avoid overwriting fields in a weird order
} else {
if (n == 0) {
// this counter is empty - just copy the other's state
mu = other.mu
m2 = other.m2
n = other.n
maxValue = other.maxValue
minValue = other.minValue
} else if (other.n != 0) {
// Chan's parallel merge; the mean update formula is chosen based on the relative
// sizes of the two counters to minimize floating-point error
val delta = other.mu - mu
if (other.n * 10 < n) {
mu = mu + (delta * other.n) / (n + other.n)
} else if (n * 10 < other.n) {
mu = other.mu - (delta * n) / (n + other.n)
} else {
mu = (mu * n + other.mu * other.n) / (n + other.n)
}
m2 += other.m2 + (delta * delta * n * other.n) / (n + other.n)
n += other.n
maxValue = math.max(maxValue, other.maxValue)
minValue = math.min(minValue, other.minValue)
}
this
}
}
/** Clone this StatCounter */
def copy(): StatCounter = {
val other = new StatCounter
other.n = n
other.mu = mu
other.m2 = m2
other.maxValue = maxValue
other.minValue = minValue
other
}
def count: Long = n
def mean: Double = mu
def sum: Double = n * mu
def max: Double = maxValue
def min: Double = minValue
/** Return the variance of the values. NaN when no values have been merged. */
def variance: Double = {
if (n == 0) {
Double.NaN
} else {
m2 / n
}
}
/**
* Return the sample variance, which corrects for bias in estimating the variance by dividing
* by N-1 instead of N. NaN when fewer than two values have been merged.
*/
def sampleVariance: Double = {
if (n <= 1) {
Double.NaN
} else {
m2 / (n - 1)
}
}
/** Return the standard deviation of the values. */
def stdev: Double = math.sqrt(variance)
/**
* Return the sample standard deviation of the values, which corrects for bias in estimating the
* variance by dividing by N-1 instead of N.
*/
def sampleStdev: Double = math.sqrt(sampleVariance)
override def toString: String = {
"(count: %d, mean: %f, stdev: %f, max: %f, min: %f)".format(count, mean, stdev, max, min)
}
}
object StatCounter {
/** Build a StatCounter from a list of values. */
def apply(values: TraversableOnce[Double]): StatCounter = new StatCounter(values)
/** Build a StatCounter from a list of values passed as variable-length arguments. */
def apply(values: Double*): StatCounter = new StatCounter(values)
}
| gioenn/xSpark | core/src/main/scala/org/apache/spark/util/StatCounter.scala | Scala | apache-2.0 | 4,727 |
package tu.coreservice.thinkinglifecycle
import tu.coreservice.action.Action
import tu.coreservice.action.way2think.Way2Think
import tu.model.knowledge.communication.ShortTermMemory
/**
* Joins several actions into a single Way2Think step.
*
* NOTE(review): all members below are stubs - start/stop always report failure and apply
* returns null (non-idiomatic; an Option or empty ShortTermMemory would be safer) - confirm
* whether this class is still a placeholder before relying on it.
*
* @author max talanov
* date 2012-07-12
* time: 4:04 PM
*/
case class JoinWay2Think(actions: List[Action]) extends Way2Think{
// stub: lifecycle start, always reports failure
def start() = false
// stub: lifecycle stop, always reports failure
def stop() = false
/**
* Way2Think interface.
*
* @param inputContext ShortTermMemory of all inbound parameters.
* @return outputContext (currently always null - see class note)
*/
def apply(inputContext: ShortTermMemory) = null
}
| tu-team/2 | coreservice.thinkinglifecycle/src/main/scala/tu/coreservice/thinkinglifecycle/JoinWay2Think.scala | Scala | gpl-3.0 | 567 |
package example
import scala.collection.mutable.ListBuffer
// Mutable per-game state shared by the simulator: game-over flag and turn bookkeeping.
trait GameState {
var isGameOver = false // TODO(jfrench): We can fix this later to be in simulator or something
// number of turns elapsed so far
var turnCounter = 0
var maxTurns = 30 // Just for brevity... what is the actual limit?
}
| osake/EternalCardGameSimulator | src/main/scala/example/GameState.scala | Scala | gpl-3.0 | 269 |
package info.mukel.codeforces4s.api
/**
* Submission
* Represents a submission, mirroring the Codeforces API object of the same name.
*
* @param id Integer.
* @param contestId Integer.
* @param creationTimeSeconds Integer. Time, when submission was created, in unix-format.
* @param relativeTimeSeconds Integer. Number of seconds, passed after the start of the contest (or a virtual start for virtual parties), before the submission.
* @param problem Problem object.
* @param author Party object.
* @param programmingLanguage String.
* @param verdict Enum: FAILED, OK, PARTIAL, COMPILATION_ERROR, RUNTIME_ERROR, WRONG_ANSWER, PRESENTATION_ERROR, TIME_LIMIT_EXCEEDED, MEMORY_LIMIT_EXCEEDED, IDLENESS_LIMIT_EXCEEDED, SECURITY_VIOLATED, CRASHED, INPUT_PREPARATION_CRASHED, CHALLENGED, SKIPPED, TESTING, REJECTED. Can be absent (None while the submission is still in queue).
* @param testset Enum: SAMPLES, PRETESTS, TESTS, CHALLENGES, TESTS1, ..., TESTS10. Testset used for judging the submission.
* @param passedTestCount Integer. Number of passed tests.
* @param timeConsumedMillis Integer. Maximum time in milliseconds, consumed by solution for one test.
* @param memoryConsumedBytes Integer. Maximum memory in bytes, consumed by solution for one test.
*/
case class Submission(
id : Int,
contestId : Int,
creationTimeSeconds : Int,
relativeTimeSeconds : Int,
problem : Problem,
author : Party,
programmingLanguage : String,
verdict : Option[String], // Enum: FAILED, OK, PARTIAL, COMPILATION_ERROR, RUNTIME_ERROR, WRONG_ANSWER, PRESENTATION_ERROR, TIME_LIMIT_EXCEEDED, MEMORY_LIMIT_EXCEEDED, IDLENESS_LIMIT_EXCEEDED, SECURITY_VIOLATED, CRASHED, INPUT_PREPARATION_CRASHED, CHALLENGED, SKIPPED, TESTING, REJECTED. Can be absent.
testset : String, // Enum: SAMPLES, PRETESTS, TESTS, CHALLENGES, TESTS1, ..., TESTS10. Testset used for judging the submission.
passedTestCount : Int,
timeConsumedMillis : Int,
memoryConsumedBytes : Int
)
/** Possible judging verdicts for a submission, mirroring the Codeforces API enum. */
object SubmissionVerdict extends Enumeration {
  type SubmissionVerdict = Value

  // declared in API order; ids are assigned sequentially starting at 0
  val FAILED, OK, PARTIAL, COMPILATION_ERROR, RUNTIME_ERROR, WRONG_ANSWER,
      PRESENTATION_ERROR, TIME_LIMIT_EXCEEDED, MEMORY_LIMIT_EXCEEDED,
      IDLENESS_LIMIT_EXCEEDED, SECURITY_VIOLATED, CRASHED, INPUT_PREPARATION_CRASHED,
      CHALLENGED, SKIPPED, TESTING, REJECTED = Value
}
/** Test sets a submission can be judged on, mirroring the Codeforces API enum. */
object SubmissionTestset extends Enumeration {
  type SubmissionTestset = Value

  // declared in API order; ids are assigned sequentially starting at 0
  val SAMPLES, PRETESTS, TESTS, CHALLENGES,
      TESTS1, TESTS2, TESTS3, TESTS4, TESTS5,
      TESTS6, TESTS7, TESTS8, TESTS9, TESTS10 = Value
}
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.examples.flatspec.tagging
import org.scalatest.Tag
// Example tag object: tests tagged with this require database access.
object DbTest extends Tag("com.mycompany.tags.DbTest")
import org.scalatest.FlatSpec
import org.scalatest.tagobjects.Slow
// Example FlatSpec demonstrating test tagging (Slow, DbTest) on individual tests.
class SetSpec extends FlatSpec {
behavior of "An empty Set"
it should "have size 0" taggedAs(Slow) in {
assert(Set.empty.size === 0)
}
it should "produce NoSuchElementException when head is invoked" taggedAs(Slow, DbTest) in {
assertThrows[NoSuchElementException] {
Set.empty.head
}
}
}
| dotty-staging/scalatest | examples/src/test/scala/org/scalatest/examples/flatspec/tagging/SetSpec.scala | Scala | apache-2.0 | 1,124 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.lang.scala.subscriptions
import rx.lang.scala._
private [scala] object BooleanSubscription {
// Creates a fresh, not-yet-unsubscribed subscription backed by the RxJava implementation.
def apply(): BooleanSubscription = new BooleanSubscription(new rx.subscriptions.BooleanSubscription())
}
/**
* Represents a [[rx.lang.scala.Subscription]] that can be checked for status.
* Thin wrapper delegating directly to the underlying RxJava BooleanSubscription.
*/
private [scala] class BooleanSubscription private[scala] (boolean: rx.subscriptions.BooleanSubscription)
extends Subscription {
override val asJavaSubscription: rx.subscriptions.BooleanSubscription = boolean
}
/*
new rx.subscriptions.BooleanSubscription() {
override def unsubscribe(): Unit = {
if(unsubscribed.compareAndSet(false, true)) {
if(!boolean.isUnsubscribed) { boolean.unsubscribe() }
}
}
override def isUnsubscribed(): Boolean = unsubscribed.get() || boolean.isUnsubscribed
}
*/ | samuelgruetter/RxScala | src/main/scala/rx/lang/scala/subscriptions/BooleanSubscription.scala | Scala | apache-2.0 | 1,429 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, TensorModule}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.tensor._
import com.intel.analytics.bigdl.utils.Engine
import scala.concurrent.Future
import scala.reflect.ClassTag
/**
* Applies HardTanh to each element of input, HardTanh is defined:
* β§ maxValue, if x > maxValue
* f(x) = β¨ minValue, if x < minValue
* β© x, otherwise
*
* @param minValue minValue in f(x), default is -1.
* @param maxValue maxValue in f(x), default is 1.
* @param inplace inplace model.
*/
@SerialVersionUID(- 8953866090802444183L)
class HardTanh[T: ClassTag, D: ClassTag](
val minValue: Double = -1,
val maxValue: Double = 1,
val inplace: Boolean = false
)(implicit ev: TensorNumeric[T], ev2: TensorNumeric[D])
extends AbstractModule[Tensor[D], Tensor[D], T] {
require(maxValue > minValue, "maxValue must be larger than minValue, " +
s"maxValue ${maxValue}, " +
s"minValue ${minValue}")
val min = ev2.fromType[Double](minValue)
val max = ev2.fromType[Double](maxValue)
override def updateOutput(input: Tensor[D]): Tensor[D] = {
if (inplace) {
output.set(input)
}
else {
output.resizeAs(input)
}
if (input.dim() == 1 || !input.isContiguous() || !output.isContiguous()) {
if (inplace) {
val func = new TensorFunc2[D] {
override def apply(data: Array[D], index: Int): Unit = {
if (ev2.isGreater(min, data(index))) {
data(index) = ev2.fromType[Double](minValue)
} else if (ev2.isGreater(data(index), max)) {
data(index) = ev2.fromType[Double](maxValue)
}
}
}
DenseTensorApply.apply1[D](input, func)
} else {
val func2 = new TensorFunc4[D] {
override def apply(data1: Array[D], index1: Int, data2: Array[D], index2: Int): Unit = {
if (ev2.isGreater(min, data2(index2))) {
data1(index1) = min
} else if (ev2.isGreaterEq(max, data2(index2))) {
data1(index1) = data2(index2)
} else {
data1(index1) = max
}
}
}
DenseTensorApply.apply2[D](output, input, func2)
}
} else {
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val outputData = output.storage().array()
val outputOffset = input.storageOffset() - 1
var i = 0
if (inplace) {
while (i < input.nElement()) {
if (ev2.isGreater(min, inputData(i + inputOffset))) {
inputData.update(i + inputOffset, min)
} else if (ev2.isGreater(inputData(i + inputOffset), max)) {
inputData.update(i + inputOffset, max)
}
i += 1
}
} else {
while (i < input.nElement()) {
if (ev2.isGreater(min, inputData(i + inputOffset))) {
outputData.update(i + outputOffset, min)
} else if (ev2.isGreaterEq(max, inputData(i + inputOffset))) {
outputData.update(i + outputOffset, inputData(i + inputOffset))
} else {
outputData.update(i + outputOffset, max)
}
i += 1
}
}
}
output
}
/**
 * Computes the gradient of the loss with respect to the input.
 *
 * HardTanh's derivative is 1 strictly inside (min, max) and 0 at or outside
 * the bounds (note the `isGreaterEq` comparisons below), so gradInput is a
 * copy of gradOutput with zeros wherever input <= min or input >= max.
 * When `inplace` is true, gradOutput's storage is reused for gradInput and
 * only the clamped positions are overwritten with zero.
 */
override def updateGradInput(input: Tensor[D], gradOutput: Tensor[D]): Tensor[D] = {
  // input and gradOutput must be element-wise aligned.
  require(input.nElement() == gradOutput.nElement(),
    s"the number of input element (${input.nElement()}) " +
      s"should equal the number of " +
      s"gradOutput element (${gradOutput.nElement()}), ")
  if (inplace) {
    // Reuse gradOutput's storage: gradInput aliases gradOutput from here on.
    gradInput.set(gradOutput)
  } else {
    gradInput.resizeAs(input)
  }
  // Slow path: 1-D or non-contiguous tensors are traversed with generic
  // element-wise functors; contiguous tensors take the raw-array fast path below.
  if (input.dim() == 1 || !input.isContiguous() || !gradOutput.isContiguous()
    || !gradInput.isContiguous()) {
    if (inplace) {
      // data1 = gradOutput (aliased by gradInput), data2 = input:
      // zero the gradient where the activation was clamped.
      val func = new TensorFunc4[D] {
        override def apply(data1: Array[D], index1: Int, data2: Array[D], index2: Int): Unit = {
          if (ev2.isGreaterEq(min, data2(index2)) || ev2.isGreaterEq(data2(index2), max)) {
            data1(index1) = ev2.fromType[Double](0)
          }
        }
      }
      DenseTensorApply.apply2[D](gradOutput, input, func)
    } else {
      // data1 = gradInput, data2 = gradOutput, data3 = input:
      // pass the gradient through inside (min, max), zero it elsewhere.
      val func = new TensorFunc6[D] {
        override def apply(data1: Array[D], offset1: Int, data2: Array[D],
          offset2: Int, data3: Array[D], offset3: Int): Unit = {
          if (ev2.isGreaterEq(min, data3(offset3)) || ev2.isGreaterEq(data3(offset3), max)) {
            data1(offset1) = ev2.fromType[Double](0)
          } else {
            data1(offset1) = data2(offset2)
          }
        }
      }
      DenseTensorApply.apply3[D](gradInput, gradOutput, input, func)
    }
  } else {
    // Fast path: operate directly on the backing arrays.
    // The -1 converts storageOffset() to a 0-based array index
    // (storageOffset() appears to be 1-based — consistent with updateOutput above).
    val inputData = input.storage().array()
    val inputOffset = input.storageOffset() - 1
    val gradOutputData = gradOutput.storage().array()
    val gradOutputOffset = gradOutput.storageOffset() - 1
    val gradInputData = gradInput.storage().array()
    val gradInputOffset = gradInput.storageOffset() - 1
    var i = 0
    if (inplace) {
      // gradInputData aliases gradOutputData: only the clamped positions
      // need to be overwritten with zero, the rest is already the gradient.
      while (i < input.nElement()) {
        if (ev2.isGreaterEq(min, inputData(i + inputOffset))
          || ev2.isGreaterEq(inputData(i + inputOffset), max)) {
          gradInputData.update(i + gradInputOffset, ev2.fromType[Double](0))
        }
        i += 1
      }
    } else {
      while (i < input.nElement()) {
        if (ev2.isGreaterEq(min, inputData(i + inputOffset))
          || ev2.isGreaterEq(inputData(i + inputOffset), max)) {
          gradInputData.update(i + gradInputOffset, ev2.fromType[Double](0))
        } else {
          gradInputData.update(i + gradInputOffset, gradOutputData(i + gradOutputOffset))
        }
        i += 1
      }
    }
  }
  gradInput
}
/** Name of this module as shown in model printouts. */
override def toString: String = "nn.HardTanh"
/**
 * Releases cached state via the superclass, except in in-place mode:
 * there output/gradInput share storage with the caller's tensors
 * (see `gradInput.set(gradOutput)` in updateGradInput), so they must
 * not be cleared out from under the caller.
 */
override def clearState(): this.type = {
  if (!inplace) super.clearState()
  this
}
}
/**
 * Factory for [[HardTanh]] modules.
 *
 * @param minValue lower clamp bound (default -1)
 * @param maxValue upper clamp bound (default 1)
 * @param inplace whether to overwrite the input/gradOutput tensors in place
 *                instead of allocating fresh output/gradInput tensors
 * @tparam T numeric type of the module's parameters
 * @tparam D numeric type of the input/output data
 */
object HardTanh {
  def apply[@specialized(Float, Double) T: ClassTag, D: ClassTag](
    minValue: Double = -1,
    maxValue: Double = 1,
    inplace: Boolean = false)
    (implicit ev: TensorNumeric[T], ev2: TensorNumeric[D]): HardTanh[T, D] = {
    new HardTanh[T, D](minValue, maxValue, inplace)
  }
}
| jenniew/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/HardTanh.scala | Scala | apache-2.0 | 7,033 |
package uk.gov.homeoffice.rabbitmq
import java.io.IOException
import scala.util.control.NonFatal
/** Default, shared instance of the policy. */
object RabbitErrorPolicy extends RabbitErrorPolicy

/**
 * An ErrorPolicy mapping a thrown exception to the ErrorAction to take.
 *
 * Case order is significant: `IOException` is itself non-fatal, so it must be
 * matched before the generic `NonFatal` case to yield Retry rather than Reject.
 * The final wildcard is reached only for fatal throwables (e.g. OutOfMemoryError).
 */
trait RabbitErrorPolicy extends ErrorPolicy {
  val enforce: PartialFunction[Throwable, ErrorAction] = {
    case _: IOException => Retry // transient I/O problem: retry
    case NonFatal(_) => Reject // any other recoverable error: reject
    case _ => Alert // fatal error: alert
  }
}
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.builders
import monix.eval.Task
import monix.execution.Ack.{Continue, Stop}
import monix.execution.{Callback, Cancelable}
import monix.reactive.Observable
import monix.reactive.observers.Subscriber
import scala.util.control.NonFatal
/**
 * An observable that generates elements by repeatedly applying the effectful
 * function `f` to a state, starting from `seed`. Each evaluation of `f`
 * yields one element to emit plus an optional next state; the stream ends
 * when `f` returns `None` for the next state, when the downstream subscriber
 * signals `Stop`, or when `f` fails.
 */
private[reactive] final class PaginateEvalObservable[S, A](seed: S, f: S => Task[(A, Option[S])]) extends Observable[A] {
  def unsafeSubscribeFn(subscriber: Subscriber[A]): Cancelable = {
    import subscriber.scheduler
    // Guards against double error signaling: an exception thrown before the
    // loop has been kicked off is streamed to onError; after that point
    // failures are only reported to the scheduler.
    var streamErrors = true
    try {
      val init = seed
      streamErrors = false
      // Run the generator loop as an auto-cancelable Task; the callback is
      // empty because all signaling goes through the subscriber itself.
      Task
        .defer(loop(subscriber, init))
        .executeWithOptions(_.enableAutoCancelableRunLoops)
        .runAsync(Callback.empty)
    } catch {
      case ex if NonFatal(ex) =>
        if (streamErrors) subscriber.onError(ex)
        else subscriber.scheduler.reportFailure(ex)
        Cancelable.empty
    }
  }

  /** One step of the generator: evaluate `f`, emit the element, then recurse or stop. */
  def loop(subscriber: Subscriber[A], state: S): Task[Unit] =
    try f(state).redeemWith(
      { ex =>
        // The generator's Task failed: propagate to the subscriber and stop.
        subscriber.onError(ex)
        Task.unit
      }, {
        case (a, Some(newState)) =>
          // Back-pressure: wait for the subscriber's ack before producing more.
          Task.fromFuture(subscriber.onNext(a)).flatMap {
            case Continue =>
              loop(subscriber, newState)
            case Stop =>
              Task.unit
          }
        case (a, None) =>
          // Final element: emit it and complete (the last ack is not awaited).
          subscriber.onNext(a)
          subscriber.onComplete()
          Task.unit
      }
    )
    catch {
      case ex if NonFatal(ex) =>
        // `f` threw synchronously instead of returning a failed Task.
        Task.raiseError(ex)
    }
}
| monifu/monifu | monix-reactive/shared/src/main/scala/monix/reactive/internal/builders/PaginateEvalObservable.scala | Scala | apache-2.0 | 2,177 |
package fif.use
import scala.language.postfixOps
/**
 * A container whose contents can be produced in sorted order, optionally
 * bounded in size (e.g. a bounded priority queue). Element ordering comes
 * from the `Cmp` evidence; `Structure` is the concrete backing
 * representation chosen by each implementation.
 */
abstract class SortableContainer[A: Cmp] extends Serializable {

  type Structure

  // Maximum number of elements retained, or None for an unbounded container.
  val maxSize: Option[Int]

  /** An empty container. */
  def empty: Structure

  /** Merges two containers, also returning any elements dropped by the merge. */
  def merge(a: Structure, b: Structure): (Structure, Option[Iterable[A]])

  /** Inserts `item`, returning the updated container and a displaced element, if any. */
  def insert(item: A)(existing: Structure): (Structure, Option[A])

  /** All elements, in sorted order. */
  def sort(existing: Structure): Iterable[A]
}
/**
 * Bulk helper operations over [[SortableContainer]] modules.
 */
object SortableContainer {

  /**
   * Inserts all `elements` into `existing`, one at a time, collecting every
   * element the container kicks out along the way (in eviction order).
   *
   * @return the updated structure, paired with `Some` of the evicted
   *         elements, or `None` if nothing was evicted.
   */
  def insert[A](module: SortableContainer[A])(
    existing: module.Structure,
    elements: Iterable[A]
  ): (module.Structure, Option[Iterable[A]]) = {
    // Accumulate evictions by prepending to a List (O(1) per element) and
    // reverse once at the end; the previous Seq `:+` append was O(n) per
    // eviction, i.e. quadratic overall.
    val (newPq, evictedReversed) =
      elements.foldLeft((existing, List.empty[A])) {
        case ((pq, evicted), aItem) =>
          val (resulting, maybeRemoved) = module.insert(aItem)(pq)
          (resulting, maybeRemoved.fold(evicted)(_ :: evicted))
      }
    val kickedOut = evictedReversed.reverse
    (
      newPq,
      if (kickedOut.isEmpty) None
      else Some(kickedOut)
    )
  }

  /** As above, but starting from the module's empty structure. */
  def insert[A](module: SortableContainer[A], elements: Iterable[A]): (module.Structure, Option[Iterable[A]]) =
    insert(module)(module.empty, elements)
}
/*
* Copyright 2015 Databricks Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark.sql.perf
/**
 * The performance results of all given queries for a single iteration.
 *
 * @param timestamp when the entire experiment was started; shared by all
 *                  iterations of the same experiment.
 * @param iteration the index number of the current iteration.
 * @param tags tags of this iteration (variations are stored here).
 * @param configuration configuration properties of this iteration.
 * @param results the performance results of the queries run in this iteration.
 */
case class ExperimentRun(
  timestamp: Long,
  iteration: Int,
  tags: Map[String, String],
  configuration: BenchmarkConfiguration,
  results: Seq[BenchmarkResult])
/**
 * The configuration used for one iteration of an experiment.
 *
 * @param sparkVersion the version of Spark (defaults to the running version).
 * @param sqlConf all configuration properties related to Spark SQL.
 * @param sparkConf all configuration properties of Spark.
 * @param defaultParallelism the default parallelism of the cluster;
 *                           usually the number of cores of the cluster.
 */
case class BenchmarkConfiguration(
  sparkVersion: String = org.apache.spark.SPARK_VERSION,
  sqlConf: Map[String, String],
  sparkConf: Map[String,String],
  defaultParallelism: Int)
/**
 * The result of a single query run.
 *
 * @param name the name of the query.
 * @param mode the ExecutionMode of this run.
 * @param joinTypes the types of join operations in the query.
 * @param tables the tables involved in the query.
 * @param parsingTime the time used to parse the query.
 * @param analysisTime the time used to analyze the query.
 * @param optimizationTime the time used to optimize the query.
 * @param planningTime the time used to plan the query.
 * @param executionTime the time used to execute the query.
 * @param result the result of this run; not necessarily the result of the
 *               query itself — e.g. it can be the number of rows generated
 *               or the sum of hash values of the generated rows.
 * @param breakDown the breakdown results of the query plan tree.
 * @param queryExecution a string rendering of the query execution, if captured.
 * @param failure failure details when the run did not complete, if any.
 */
case class BenchmarkResult(
  name: String,
  mode: String,
  joinTypes: Seq[String] = Nil,
  tables: Seq[String] = Nil,
  parsingTime: Option[Double] = None,
  analysisTime: Option[Double] = None,
  optimizationTime: Option[Double] = None,
  planningTime: Option[Double] = None,
  executionTime: Option[Double] = None,
  result: Option[Long] = None,
  breakDown: Seq[BreakdownResult] = Nil,
  queryExecution: Option[String] = None,
  failure: Option[Failure] = None)
/**
 * The execution time of a subtree of the query plan tree of a specific query.
 *
 * @param nodeName the name of the top physical operator of the subtree.
 * @param nodeNameWithArgs the name and arguments of the top physical operator
 *                         of the subtree.
 * @param index the index of the top physical operator of the subtree in the
 *              original query plan tree. The index starts from 0
 *              (0 represents the top operator of the original plan tree).
 * @param children the indices of this operator's child operators in the plan tree.
 * @param executionTime the execution time of the subtree.
 * @param delta NOTE(review): presumably this node's time excluding its
 *              children's time — confirm against the code that populates it.
 */
case class BreakdownResult(
  nodeName: String,
  nodeNameWithArgs: String,
  index: Int,
  children: Seq[Int],
  executionTime: Double,
  delta: Double)
/** Describes a failed run: a class name and message (presumably of the thrown exception). */
case class Failure(className: String, message: String)
package jsons
import models.{AccessToken, User}
import play.api.libs.json.Json
/** JSON serializers for [[User]] values. */
object UserJson extends KiwiERPJson[User] {

  // Base JSON representation of a user. Field order is preserved in the
  // serialized output, so keep it stable.
  def base(user: User) = Json.obj(
    "createdAt" -> user.createdAt,
    "id" -> user.id,
    "name" -> user.name,
    "userType" -> user.userType,
    "updatedAt" -> user.updatedAt
  )

  // JSON payload describing an issued access token, as returned from
  // an authentication call.
  def authenticate(accessToken: AccessToken) = Json.obj(
    "createdAt" -> accessToken.createdAt,
    "expiresIn" -> accessToken.expiresIn,
    "token" -> accessToken.token,
    "tokenType" -> accessToken.tokenType,
    "userId" -> accessToken.userId
  )
}
| KIWIKIGMBH/kiwierp | kiwierp-backend/app/jsons/UserJson.scala | Scala | mpl-2.0 | 573 |
package io.getquill.dsl.macroz
import scala.reflect.macros.whitebox.{ Context => MacroContext }
/**
 * Macro bundle combining the lifting and action-expansion macro
 * implementations behind the DSL's macro entry points.
 */
private[dsl] class DslMacro(val c: MacroContext)
  extends LiftingMacro
  with ExpandActionMacro
// Negative compiler test: the `implicitly[Foo[A]]` below is expected to fail.
// Deriving Foo[A] via fooGen needs Foo for A's generic representation, which
// (through B and C) recursively requires Foo[A] again — a divergent implicit
// search. The inline `// error` marker tells the test framework where the
// compile error must be reported; do not move or remove it.
object Test {
  // Type class capturing a generic representation `Repr` of `T`.
  trait Generic[T] {
    type Repr
  }
  object Generic {
    type Aux[T, R] = Generic[T] { type Repr = R }
  }

  trait GNil

  trait Foo[T]
  object Foo {
    implicit val fooUnit: Foo[Unit] = ???
    implicit val fooInt: Foo[Int] = ???
    implicit def fooPair[T, U](implicit fooT: Foo[T], fooU: Foo[U]): Foo[(T, U)] = ???
    // Derives Foo[T] from Foo of T's generic representation R.
    implicit def fooGen[T, R](implicit gen: Generic.Aux[T, R], fr: Foo[R]): Foo[T] = ???
  }

  // A, B and C form a reference cycle: A -> B -> C -> A.
  case class A(b: B, c: C, i: Int)
  object A {
    implicit val genA: Generic[A] { type Repr = (B, (C, (Int, Unit))) } = ???
  }

  case class B(c0: C, c1: C, c2: C, i: Int)
  object B {
    implicit val genB: Generic[B] { type Repr = (C, (C, (C, (Int, Unit)))) } = ???
  }

  case class C(b: A, i: Int)
  object C {
    implicit val genC: Generic[C] { type Repr = (A, (Int, Unit)) } = ???
  }

  implicitly[Foo[A]] // error
}
| som-snytt/dotty | tests/neg/byname-implicits-26.scala | Scala | apache-2.0 | 862 |
import org.specs2.mutable.Specification
import com.julianpeeters.avro.toolbox.provider.ToolBoxTypeProvider
import java.io.File
import com.novus.salat._
import global._
import com.mongodb.casbah.Imports._
/**
 * Verifies that a case class generated at runtime from an Avro schema file
 * round-trips through Salat's Mongo DBObject serialization.
 */
class AvroSchemaFileSpec extends Specification {

  "A case class that was generated at runtime from the schema in an Avro Schema File" should {
    "serialize and deserialize correctly with a Salat" in {
      // Generate a case class from the Avro schema file at runtime.
      val file = new File("src/test/resources/AvroTypeProviderTestSchemaFile.avsc")
      val runtimeClass = ToolBoxTypeProvider.schemaToCaseClass(file)
      val record = runtimeClass.runtimeInstance
      // Capture the singleton type of the generated instance so a Salat
      // grater can be summoned for it.
      type MyAVSCRecord = record.type
      // Salat needs the toolbox's class loader to resolve the runtime class.
      ctx.registerClassLoader(runtimeClass.loader)
      // Round-trip through a DBObject and check structural equality.
      val dbo = grater[MyAVSCRecord].asDBObject(record)
      val sameRecord = grater[MyAVSCRecord].asObject(dbo)
      sameRecord must ===(record)
    }
  }
}
| julianpeeters/avro-scala-toolbox-type-provider | src/test/scala/AvroSchemaFileSpec.scala | Scala | apache-2.0 | 864 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.