code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
package com.cloudant.clouseau
import org.apache.commons.configuration.SystemConfiguration
import scalang.Node
import org.apache.lucene.document._
import org.apache.lucene.search.{ FieldDoc, ScoreDoc }
import org.specs2.mutable.SpecificationWithJUnit
import org.apache.lucene.util.BytesRef
import org.apache.lucene.facet.params.FacetIndexingParams
import org.apache.lucene.facet.sortedset.SortedSetDocValuesFacetFields
import org.apache.lucene.facet.taxonomy.CategoryPath
import scalang.Pid
import scala.Some
import java.io.File
import scala.collection.JavaConversions._
// Specs2 integration suite for the Clouseau IndexService actor. Exercises
// searching, sorting, highlighting, include_fields, faceting/drilldown,
// bookmarks, grouping, partitioned search, snapshots, and the idle-timeout
// close behaviour. Each example runs against a fresh index (see the
// index_service traits below) and talks to the service over scalang calls.
class IndexServiceSpec extends SpecificationWithJUnit {
// Examples share on-disk index directories keyed by timestamp; run serially.
sequential
"an index" should {
"not be closed if close_if_idle and idle_check_interval_secs not set" in new index_service {
indexNotClosedAfterTimeout(node, service)
}
"not be closed if idle_check_interval_secs set and close_if_idle set to false" in new index_service_with_idle_timeout_and_close_if_idle_false {
indexNotClosedAfterTimeout(node, service)
}
"not be closed if close_if_idle set to false" in new index_service_with_idle_timeout_only {
indexNotClosedAfterTimeout(node, service)
}
"be closed after idle timeout" in new index_service_with_idle_timeout_and_close_if_idle {
indexClosedAfterTimeOut(node, service)
}
"not be closed if there is any activity before two consecutive idle checks" in new index_service_with_idle_timeout_and_close_if_idle {
indexNotClosedAfterActivityBetweenTwoIdleChecks(node, service)
}
"perform basic queries" in new index_service {
isSearchable(node, service, "foo", "foo")
}
"be able to search uppercase _id" in new index_service {
isSearchable(node, service, "FOO", "FOO")
}
"be able to search uppercase _id with prefix" in new index_service {
isSearchable(node, service, "FOO", "FO*")
}
"be able to search uppercase _id with wildcards" in new index_service {
isSearchable(node, service, "FOO", "F?O*")
}
"be able to search uppercase _id with range" in new index_service {
isSearchable(node, service, "FOO", "[FOO TO FOO]")
}
"be able to search uppercase _id with regexp" in new index_service {
isSearchable(node, service, "FOO", "/FOO/")
}
"be able to search uppercase _id with fuzzy" in new index_service {
isSearchable(node, service, "FOO", "FO~")
}
"be able to search uppercase _id with perfield" in new index_service_perfield {
isSearchable(node, service, "FOO", "FOO")
}
"perform sorting" in new index_service {
val doc1 = new Document()
doc1.add(new StringField("_id", "foo", Field.Store.YES))
val doc2 = new Document()
doc2.add(new StringField("_id", "bar", Field.Store.YES))
node.call(service, UpdateDocMsg("foo", doc1)) must be equalTo 'ok
node.call(service, UpdateDocMsg("bar", doc2)) must be equalTo 'ok
// First one way.
(node.call(service, SearchRequest(options =
Map('sort -> "_id<string>")))
must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List(
Hit(_, List(("_id", "bar"))),
Hit(_, List(("_id", "foo")))
)))) => ok
})
// Then t'other.
(node.call(service, SearchRequest(options =
Map('sort -> "-_id<string>")))
must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List(
Hit(_, List(("_id", "foo"))),
Hit(_, List(("_id", "bar")))
)))) => ok
})
// Can sort even if doc is missing that field
(node.call(service, SearchRequest(options =
Map('sort -> "foo<string>")))
must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List(
Hit(_, List(("_id", "foo"))),
Hit(_, List(("_id", "bar")))
)))) => ok
})
}
"support highlighting" in new index_service {
val doc1 = new Document()
doc1.add(new StringField("_id", "foo", Field.Store.YES))
doc1.add(new StringField("field1", "bar", Field.Store.YES))
node.call(service, UpdateDocMsg("foo", doc1)) must be equalTo 'ok
(node.call(service, SearchRequest(options =
Map('highlight_fields -> List("field1"), 'query -> "field1:bar")))
must beLike {
case ('ok, List(_, ('total_hits, 1),
('hits, List(
Hit(_, List(("_id", "foo"), ("field1", "bar"),
("_highlights", List(("field1", List("<em>bar</em>"))))))
)))) => ok
})
}
"when limit=0 return only the number of hits" in new index_service {
val doc1 = new Document()
doc1.add(new StringField("_id", "foo", Field.Store.YES))
val doc2 = new Document()
doc2.add(new StringField("_id", "bar", Field.Store.YES))
node.call(service, UpdateDocMsg("foo", doc1)) must be equalTo 'ok
node.call(service, UpdateDocMsg("bar", doc2)) must be equalTo 'ok
node.call(service, SearchRequest(options =
Map('limit -> 0))) must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List()))) => ok
}
}
"support include_fields" in new index_service {
val doc1 = new Document()
doc1.add(new StringField("_id", "foo", Field.Store.YES))
doc1.add(new StringField("field1", "f11", Field.Store.YES))
doc1.add(new StringField("field2", "f21", Field.Store.YES))
doc1.add(new StringField("field3", "f31", Field.Store.YES))
val doc2 = new Document()
doc2.add(new StringField("_id", "bar", Field.Store.YES))
doc2.add(new StringField("field1", "f12", Field.Store.YES))
doc2.add(new StringField("field2", "f22", Field.Store.YES))
doc2.add(new StringField("field3", "f32", Field.Store.YES))
node.call(service, UpdateDocMsg("foo", doc1)) must be equalTo 'ok
node.call(service, UpdateDocMsg("bar", doc2)) must be equalTo 'ok
//Include only field1
(node.call(service, SearchRequest(options =
Map('include_fields -> List("field1"))))
must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List(
Hit(_, List(("_id", "foo"), ("field1", "f11"))),
Hit(_, List(("_id", "bar"), ("field1", "f12")))
)))) => ok
})
//Include only field1 and field2
(node.call(service, SearchRequest(options =
Map('include_fields -> List("field1", "field2"))))
must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List(
Hit(_, List(("_id", "foo"), ("field1", "f11"), ("field2", "f21"))),
Hit(_, List(("_id", "bar"), ("field1", "f12"), ("field2", "f22")))
)))) => ok
})
//Include no field
(node.call(service, SearchRequest(options =
Map('include_fields -> List())))
must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List(
Hit(_, List(("_id", "foo"))),
Hit(_, List(("_id", "bar")))
)))) => ok
})
}
"support faceting and drilldown" in new index_service {
val facets = new SortedSetDocValuesFacetFields(FacetIndexingParams.DEFAULT)
val doc1 = new Document()
doc1.add(new StringField("_id", "foo", Field.Store.YES))
doc1.add(new StringField("ffield", "f1", Field.Store.YES))
facets.addFields(doc1, List(new CategoryPath("ffield", "f1")))
val doc2 = new Document()
doc2.add(new StringField("_id", "foo2", Field.Store.YES))
doc2.add(new StringField("ffield", "f1", Field.Store.YES))
facets.addFields(doc2, List(new CategoryPath("ffield", "f1")))
val doc3 = new Document()
doc3.add(new StringField("_id", "foo3", Field.Store.YES))
doc3.add(new StringField("ffield", "f3", Field.Store.YES))
facets.addFields(doc3, List(new CategoryPath("ffield", "f3")))
node.call(service, UpdateDocMsg("foo", doc1)) must be equalTo 'ok
node.call(service, UpdateDocMsg("foo2", doc2)) must be equalTo 'ok
node.call(service, UpdateDocMsg("foo3", doc3)) must be equalTo 'ok
//counts
(node.call(service, SearchRequest(options =
Map('counts -> List("ffield"))))
must beLike {
case ('ok, List(_, ('total_hits, 3), _,
('counts, List((
List("ffield"), 0.0, List(
(List("ffield", "f1"), 2.0, List()),
(List("ffield", "f3"), 1.0, List()))
))))) => ok
})
//drilldown - one value
(node.call(service, SearchRequest(options =
Map('counts -> List("ffield"), 'drilldown -> List(List("ffield", "f1")))))
must beLike {
case ('ok, List(_, ('total_hits, 2), _,
('counts, List((
List("ffield"), 0.0, List(
(List("ffield", "f1"), 2.0, List()))
))))) => ok
})
//drilldown - multivalued
(node.call(service, SearchRequest(options =
Map('counts -> List("ffield"), 'drilldown -> List(List("ffield", "f1", "f3")))))
must beLike {
case ('ok, List(_, ('total_hits, 3), _,
('counts, List((
List("ffield"), 0.0, List(
(List("ffield", "f1"), 2.0, List()),
(List("ffield", "f3"), 1.0, List()))
))))) => ok
})
}
"support bookmarks" in new index_service {
// NOTE(review): `foo` and `bar` below are shadowed inside the patterns
// further down — a lowercase identifier in a pattern is a fresh binding,
// not an equality check against these vals. Backticks (`foo`) would be
// needed to actually compare; confirm whether that was the intent.
val foo = new BytesRef("foo")
val bar = new BytesRef("bar")
val doc1 = new Document()
doc1.add(new StringField("_id", "foo", Field.Store.YES))
val doc2 = new Document()
doc2.add(new StringField("_id", "bar", Field.Store.YES))
node.call(service, UpdateDocMsg("foo", doc1)) must be equalTo 'ok
node.call(service, UpdateDocMsg("bar", doc2)) must be equalTo 'ok
node.call(service, SearchRequest(options =
Map('limit -> 1))) must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List(Hit(List(1.0, 0), List(("_id", "foo"))))))) => ok
}
node.call(service, SearchRequest(options =
Map('limit -> 1, 'after -> (1.0, 0)))) must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List(Hit(List(1.0, 1), List(("_id", "bar"))))))) => ok
}
node.call(service, SearchRequest(options =
Map('limit -> 1, 'sort -> "_id<string>"))) must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List(Hit(List(_, 1), List(("_id", "bar"))))))) => ok
}
node.call(service, SearchRequest(options =
Map('limit -> 1, 'after -> List(new BytesRef("bar"), 1),
'sort -> "_id<string>"))) must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List(Hit(List(_, 0), List(("_id", "foo"))))))) => ok
}
node.call(service, SearchRequest(options =
Map('limit -> 1, 'after -> List(null, 0),
'sort -> "nonexistent<string>"))) must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List(Hit(List('null, 1), List(("_id", "bar"))))))) => ok
}
node.call(service, SearchRequest(options =
Map('limit -> 1, 'sort -> List("<score>")))) must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List(Hit(List(1.0, 0), List(("_id", "foo"))))))) => ok
}
node.call(service, SearchRequest(options =
Map('limit -> 1, 'sort -> List("<doc>")))) must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List(Hit(List(0, 0), List(("_id", "foo"))))))) => ok
}
node.call(service, SearchRequest(options =
Map('limit -> 1, 'sort -> List("<score>", "_id<string>")))) must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List(Hit(List(1.0, bar, 1), List(("_id", "bar"))))))) => ok
}
node.call(service, SearchRequest(options =
Map('limit -> 1, 'sort -> List("<doc>", "_id<string>")))) must beLike {
case ('ok, List(_, ('total_hits, 2),
('hits, List(Hit(List(0, foo, 0), List(("_id", "foo"))))))) => ok
}
}
"support only group by string" in new index_service {
val foo = new BytesRef("foo")
val bar = new BytesRef("bar")
val doc1 = new Document()
doc1.add(new StringField("_id", "foo", Field.Store.YES))
doc1.add(new DoubleField("num", 1.0, Field.Store.YES))
val doc2 = new Document()
doc2.add(new StringField("_id", "bar", Field.Store.YES))
// NOTE(review): this adds a second "num" value to doc1, leaving doc2 with
// no "num" field at all — presumably `doc2.add(...)` was intended.
// Confirm against the List(2.0) expectations below before changing.
doc1.add(new DoubleField("num", 2.0, Field.Store.YES))
node.call(service, UpdateDocMsg("foo", doc1)) must be equalTo 'ok
node.call(service, UpdateDocMsg("bar", doc2)) must be equalTo 'ok
node.call(service, Group1Msg("_id:foo", "_id", true, "num", 0, 10)) must beLike {
case ('ok, List((foo, List(2.0)))) => ok
}
node.call(service, Group1Msg("_id:foo", "_id<string>", true, "num", 0, 10)) must beLike {
case ('ok, List((foo, List(2.0)))) => ok
}
node.call(service, Group1Msg("_id:foo", "num<number>", true, "num", 0, 10)) must beLike {
case ('error, ('bad_request, "Group by number not supported. Group by string terms only.")) => ok
}
node.call(service, Group1Msg("_id:foo", "_id<number>", true, "num", 0, 10)) must beLike {
case ('error, ('bad_request, "Group by number not supported. Group by string terms only.")) => ok
}
}
"support sort by distance in group search" in new index_service {
val foo = new BytesRef("foo")
val bar = new BytesRef("bar")
val zzz = new BytesRef("zzz")
val doc1 = new Document()
doc1.add(new StringField("_id", "foo", Field.Store.YES))
doc1.add(new DoubleField("lon", 0.5, Field.Store.YES))
doc1.add(new DoubleField("lat", 57.15, Field.Store.YES))
val doc2 = new Document()
doc2.add(new StringField("_id", "bar", Field.Store.YES))
// NOTE(review): these two adds target doc1, not doc2, so doc2 ("bar") has
// no lon/lat and doc1 carries two sets of coordinates — presumably
// `doc2.add(...)` was intended; verify the distance-ordering expectations.
doc1.add(new DoubleField("lon", 10, Field.Store.YES))
doc1.add(new DoubleField("lat", 57.15, Field.Store.YES))
val doc3 = new Document()
doc3.add(new StringField("_id", "zzz", Field.Store.YES))
doc3.add(new DoubleField("lon", 3, Field.Store.YES))
doc3.add(new DoubleField("lat", 57.15, Field.Store.YES))
node.call(service, UpdateDocMsg("foo", doc1)) must be equalTo 'ok
node.call(service, UpdateDocMsg("bar", doc2)) must be equalTo 'ok
node.call(service, UpdateDocMsg("zzz", doc3)) must be equalTo 'ok
node.call(service, Group1Msg("*:*", "_id", true, "<distance,lon,lat,0.2,57.15,km>", 0, 10)) must beLike {
case ('ok, List((foo, _), (zzz, _), (bar, _))) => ok
}
node.call(service, Group1Msg("*:*", "_id", true, "<distance,lon,lat,12,57.15,km>", 0, 10)) must beLike {
case ('ok, List((bar, _), (zzz, _), (foo, _))) => ok
}
}
"supports partitioned databases" in new index_service {
val doc1 = new Document()
val id1 = "foo:hello"
doc1.add(new StringField("_id", id1, Field.Store.YES))
doc1.add(new StringField("field", "fieldvalue", Field.Store.YES))
doc1.add(new StringField("_partition", "foo", Field.Store.YES))
val doc2 = new Document()
val id2 = "bar:world"
doc2.add(new StringField("_id", id2, Field.Store.YES))
doc2.add(new StringField("field", "fieldvalue", Field.Store.YES))
doc2.add(new StringField("_partition", "bar", Field.Store.YES))
node.call(service, UpdateDocMsg(id1, doc1)) must be equalTo 'ok
node.call(service, UpdateDocMsg(id2, doc2)) must be equalTo 'ok
val req = SearchRequest(
options = Map(
'query -> "field:fieldvalue",
'partition -> "foo"
)
)
(node.call(service, req)
must beLike {
case ('ok, (List(_, ('total_hits, 1), _))) => ok
})
}
"ignores partitioned key if partition missing" in new index_service {
val doc1 = new Document()
val id1 = "foo:hello"
doc1.add(new StringField("_id", id1, Field.Store.YES))
doc1.add(new StringField("field", "fieldvalue", Field.Store.YES))
doc1.add(new StringField("_partition", "foo", Field.Store.YES))
val doc2 = new Document()
val id2 = "bar:world"
doc2.add(new StringField("_id", id2, Field.Store.YES))
doc2.add(new StringField("field", "fieldvalue", Field.Store.YES))
doc2.add(new StringField("_partition", "bar", Field.Store.YES))
node.call(service, UpdateDocMsg(id1, doc1)) must be equalTo 'ok
node.call(service, UpdateDocMsg(id2, doc2)) must be equalTo 'ok
val req = SearchRequest(
options = Map(
'query -> "field:fieldvalue"
)
)
(node.call(service, req)
must beLike {
case ('ok, (List(_, ('total_hits, 2), _))) => ok
})
}
"can make a snapshot" in new index_service {
val doc1 = new Document()
val id1 = "foo:hello"
doc1.add(new StringField("_id", id1, Field.Store.YES))
node.call(service, UpdateDocMsg(id1, doc1)) must be equalTo 'ok
node.call(service, SetUpdateSeqMsg(10)) must be equalTo 'ok
// Commit is asynchronous; the sleep gives the service time to flush
// segments to disk before snapshotting. NOTE(review): fixed sleeps make
// this spec timing-sensitive on slow CI hosts.
node.send(service, 'maybe_commit)
Thread.sleep(1000)
val snapshotDir = new File(new File("target", "indexes"), System.currentTimeMillis().toString)
snapshotDir.exists must beFalse
node.call(service, ('create_snapshot, snapshotDir.getAbsolutePath)) must be equalTo 'ok
snapshotDir.exists must beTrue
snapshotDir.list.sorted must be equalTo Array("_0.cfe", "_0.cfs", "_0.si", "segments_1")
}
}
// Indexes a single doc with _id = `value`, then asserts that the query string
// `query` (interpolated into "_id:<query>") finds exactly one hit.
private def isSearchable(node: Node, service: Pid,
value: String, query: String) {
val doc = new Document()
doc.add(new StringField("_id", value, Field.Store.YES))
doc.add(new NumericDocValuesField("timestamp", System.currentTimeMillis()))
node.call(service, UpdateDocMsg(value, doc)) must be equalTo 'ok
val req = SearchRequest(options = Map('query -> "_id:%s".format(query)))
(node.call(service, req)
must beLike {
case ('ok, (List(_, ('total_hits, 1), _))) => ok
})
}
// Indexes and searches, then waits longer than two idle-check periods
// (2s interval -> 4.2s) and asserts the service process is still alive.
private def indexNotClosedAfterTimeout(node: Node, service: Pid) {
val value, query = "foo"
val doc = new Document()
doc.add(new StringField("_id", value, Field.Store.YES))
doc.add(new NumericDocValuesField("timestamp", System.currentTimeMillis()))
node.call(service, UpdateDocMsg(value, doc)) must be equalTo 'ok
val req = SearchRequest(options = Map('query -> "_id:%s".format(query)))
(node.call(service, req)
must beLike {
case ('ok, (List(_, ('total_hits, 1), _))) => ok
})
Thread.sleep(4200)
(node.isAlive(service) must beTrue)
}
// As above, but expects the service to have been closed by the idle checker
// after two consecutive idle intervals with no activity.
private def indexClosedAfterTimeOut(node: Node, service: Pid) {
val value, query = "foo"
val doc = new Document()
doc.add(new StringField("_id", value, Field.Store.YES))
doc.add(new NumericDocValuesField("timestamp", System.currentTimeMillis()))
node.call(service, UpdateDocMsg(value, doc)) must be equalTo 'ok
val req = SearchRequest(options = Map('query -> "_id:%s".format(query)))
(node.call(service, req)
must beLike {
case ('ok, (List(_, ('total_hits, 1), _))) => ok
})
Thread.sleep(4200)
(node.isAlive(service) must beFalse)
}
// Touches the index between two idle checks (at the 3s mark) and verifies the
// service survives the next check (5s mark) but is closed once two quiet
// intervals have elapsed after the last activity (6.2s mark).
private def indexNotClosedAfterActivityBetweenTwoIdleChecks(node: Node,
service: Pid) {
var value, query = "foo"
var doc = new Document()
doc.add(new StringField("_id", value, Field.Store.YES))
doc.add(new NumericDocValuesField("timestamp", System.currentTimeMillis()))
node.call(service, UpdateDocMsg(value, doc)) must be equalTo 'ok
val req = SearchRequest(options = Map('query -> "_id:%s".format(query)))
(node.call(service, req)
must beLike {
case ('ok, (List(_, ('total_hits, 1), _))) => ok
})
Thread.sleep(3000)
value = "foo2"
query = "foo2"
doc = new Document()
doc.add(new StringField("_id", value, Field.Store.YES))
doc.add(new NumericDocValuesField("timestamp", System.currentTimeMillis()))
node.call(service, UpdateDocMsg(value, doc)) must be equalTo 'ok
Thread.sleep(2000)
(node.isAlive(service) must beTrue)
Thread.sleep(1200)
(node.isAlive(service) must beFalse)
}
}
/**
 * Base fixture for IndexService specs: starts a fresh index actor before each
 * example and asks it to delete itself afterwards. Subclasses customise the
 * analyzer via `options()` or the configuration via `config`/`args`.
 */
trait index_service extends RunningNode {
  val config = new SystemConfiguration()
  val args = new ConfigurationArgs(config)
  var service: Pid = null
  // Millisecond timestamp keeps each spec run in its own index directory.
  val path = System.currentTimeMillis().toString

  override def before {
    // Clear out any stale index files left under target/indexes/<path>.
    val indexDir = new File(new File("target", "indexes"), path)
    if (indexDir.exists) {
      indexDir.listFiles.foreach(_.delete)
    }
    service = IndexService.start(node, config, path, options()) match {
      case (_, pid: Pid) => pid
    }
  }

  /** Analyzer configuration passed to IndexService.start; overridable. */
  def options(): Any = "standard"

  override def after {
    if (service != null) {
      node.send(service, 'delete)
    }
    super.after
  }
}
/** Fixture variant using the per-field analyzer with English as default. */
trait index_service_perfield extends index_service {
  override def options(): Any =
    Map("name" -> "perfield", "default" -> "english")
}
// Fixture variant: idle checks every 2 seconds AND close_if_idle enabled, so
// an untouched index should be closed by the idle checker (see the
// indexClosedAfterTimeOut helper, which waits 4.2s for two quiet checks).
trait index_service_with_idle_timeout_and_close_if_idle extends index_service {
override val config = new SystemConfiguration()
config.addProperty("clouseau.close_if_idle", true)
config.addProperty("clouseau.idle_check_interval_secs", 2)
override val args = new ConfigurationArgs(config)
}
// Fixture variant: idle checks run every 2 seconds but close_if_idle is left
// unset, so the index must never be closed for being idle.
trait index_service_with_idle_timeout_only extends index_service {
override val config = new SystemConfiguration()
config.addProperty("clouseau.idle_check_interval_secs", 2)
override val args = new ConfigurationArgs(config)
}
// Fixture variant: idle checks every 2 seconds with close_if_idle explicitly
// disabled — the index must survive idle periods.
trait index_service_with_idle_timeout_and_close_if_idle_false extends index_service {
override val config = new SystemConfiguration()
config.addProperty("clouseau.close_if_idle", false)
config.addProperty("clouseau.idle_check_interval_secs", 2)
override val args = new ConfigurationArgs(config)
}
| cloudant-labs/clouseau | src/test/scala/com/cloudant/clouseau/IndexServiceSpec.scala | Scala | apache-2.0 | 22,647 |
package dao
import play.api.libs.json._
import play.modules.reactivemongo.ReactiveMongoApi
import play.modules.reactivemongo.json._
import reactivemongo.api.commands.WriteResult
import reactivemongo.play.json.collection.JSONCollection
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/**
 * Generic MongoDB repository for documents of type T, backed by ReactiveMongo
 * JSON collections. Implementors supply the collection name and an OFormat
 * codec; every operation is asynchronous and returns a Future.
 */
trait Repository[T] {

  /** Name of the underlying MongoDB collection. */
  val collectionName: String

  /** JSON (de)serialisation codec for T. */
  implicit val format: OFormat[T]

  /** Resolves the JSON collection from the driver on each call. */
  def collection()(implicit reactiveMongoApi: ReactiveMongoApi) =
    reactiveMongoApi.database.map(_.collection[JSONCollection](collectionName))

  /** Inserts `doc` as a new document. */
  def save(doc: T)(implicit reactiveMongoApi: ReactiveMongoApi): Future[WriteResult] =
    collection.flatMap(_.insert(doc))

  /** Applies `update` to documents matched by `selector`; never inserts. */
  def update(selector: JsObject, update: JsObject)(implicit reactiveMongoApi: ReactiveMongoApi): Future[WriteResult] =
    collection.flatMap(_.update(selector, update, upsert = false))

  /** $set-merges `doc` into the matched document, inserting when absent. */
  def upsert(selector: JsObject, doc: T)(implicit reactiveMongoApi: ReactiveMongoApi): Future[WriteResult] =
    collection.flatMap(_.update(selector, Json.obj("$set" -> doc), upsert = true))

  /** Deletes every document matching `query`. */
  def remove(query: JsObject)(implicit reactiveMongoApi: ReactiveMongoApi): Future[WriteResult] =
    collection.flatMap(_.remove(query))

  /** Lists all documents matching `query` (defaults to the whole collection). */
  def list(query: JsObject = Json.obj())(implicit reactiveMongoApi: ReactiveMongoApi): Future[Seq[T]] =
    collection.flatMap(_.find(query).cursor[T]().collect[Seq]())

  /** Lists every document in the collection. */
  def list(implicit reactiveMongoApi: ReactiveMongoApi): Future[Seq[T]] = list()

  /** Finds the first document matching `query`, if any. */
  def findByOpt(query: JsObject)(implicit reactiveMongoApi: ReactiveMongoApi): Future[Option[T]] =
    collection.flatMap(_.find(query).one[T])
}
| denis-mludek/zencroissants | server/app/dao/Repository.scala | Scala | mit | 1,745 |
package models
import java.util.ArrayList
/** A route, modelled as the mutable ordered list of its station names. */
class Route(var stations: ArrayList[String] = new ArrayList[String]())
| diegofruizs/sdm | app/models/Route.scala | Scala | mit | 150 |
/* Expand: Lift Scored structure out to the top level
*
* AST and Denotations both contain constructs which hide Scored[A]
* instances at lower levels. This reduces exponential blowup, since
* it represents Cartesian product structure in an unexpanded manner.
*
* In the end, however, one usually needs to expand these low level
* Scored instances to produce a flat list of top level options. This
* is currently necessary in two places:
*
* 1. For Den, before showing alternatives to the user.
* 2. In TestParse, to make it easier to write parsing unit tests.
*/
package tarski
import tarski.AST._
import tarski.Arounds._
import tarski.Denotations._
import tarski.Scores._
import utility.Locations._
import scala.language.implicitConversions
object Expand {
// An Expand[A] maps a value to the Scored stream of its fully expanded
// alternatives, lifting any Scored nodes buried inside the structure.
type Expand[A] = A => Scored[A]
@inline def expand[A](x: A)(implicit e: Expand[A]) = e(x)
// Structural expanders: expand each component and take the Cartesian product.
implicit def expandPair[A,B](ab: (A,B))(implicit a: Expand[A], b: Expand[B]): Scored[(A,B)] =
product(a(ab._1),b(ab._2))
implicit def expandOption[A](xs: Option[A])(implicit e: Expand[A]): Scored[Option[A]] =
product(xs map e)
implicit def expandList[A](xs: List[A])(implicit e: Expand[A]): Scored[List[A]] =
product(xs map e)
// List flavours from Arounds keep their separator/shape; only elements expand.
implicit def expandCommaList[A](xs: CommaList[A])(implicit e: Expand[A]): Scored[CommaList[A]] = xs match {
case EmptyList => known(xs)
case SingleList(x) => e(x) map (SingleList(_))
case CommaList2(l,sep) => product(l map e) map (CommaList2(_,sep))
}
implicit def expandKList[A](xs: KList[A])(implicit e: Expand[A]): Scored[KList[A]] = xs match {
case EmptyList => known(xs)
case SingleList(x) => e(x) map (SingleList(_))
case CommaList2(l,sep) => product(l map e) map (CommaList2(_,sep))
case AndList2(l,sep) => product(l map e) map (AndList2(_,sep))
case JuxtList2(l) => product(l map e) map (JuxtList2(_))
}
// Source ranges carry no alternatives; expansion is the identity.
implicit def expandSRange(r: SRange): Scored[SRange] = known(r)
implicit def expandGrouped[A](x: Grouped[A])(implicit e: Expand[A]): Scored[Grouped[A]] =
expand(x.x) map (Grouped(_,x.a))
// AST statement expander. ScoredAStmt is where embedded alternatives live;
// flatMap multiplies them out. All other cases just recurse into subtrees.
implicit def expandAStmt(s: AStmt): Scored[AStmt] = s match {
case ScoredAStmt(s,_) => s flatMap (expand(_))
case SemiAStmt(s,sr) => expand(s) map (SemiAStmt(_,sr))
case ParenAStmt(x,a) => expand(x) map (ParenAStmt(_,a))
case VarAStmt(m,t,vs) => productWith(expand(t),expand(vs))(VarAStmt(m,_,_))
case BlockAStmt(b,a) => product(b map (expand(_))) map (BlockAStmt(_,a))
case ExpAStmt(e) => expand(e) map ExpAStmt
case AssertAStmt(ar,c,m) => productWith(expand(c),expand(m))(AssertAStmt(ar,_,_))
case ReturnAStmt(rr,e) => expand(e) map (ReturnAStmt(rr,_))
case ThrowAStmt(tr,e) => expand(e) map (ThrowAStmt(tr,_))
case SyncAStmt(sr,e,a,s) => productWith(expand(e),expand(s))(SyncAStmt(sr,_,a,_))
case TryAStmt(tr,s,cs,f) => productWith(expand(s),expand(cs),expand(f))(TryAStmt(tr,_,_,_))
case IfAStmt(ir,c,a,x) => productWith(expand(c),expand(x))(IfAStmt(ir,_,a,_))
case IfElseAStmt(ir,c,a,x,er,y) => productWith(expand(c),expand(x),expand(y))(IfElseAStmt(ir,_,a,_,er,_))
case WhileAStmt(wr,flip,c,a,s) => productWith(expand(c),expand(s))(WhileAStmt(wr,flip,_,a,_))
case DoAStmt(dr,s,wr,flip,c,a) => productWith(expand(s),expand(c))(DoAStmt(dr,_,wr,flip,_,a))
case ForAStmt(fr,i,a,s) => productWith(expand(i),expand(s))(ForAStmt(fr,_,a,_))
case _:EmptyAStmt|_:HoleAStmt|_:TokAStmt|_:BreakAStmt|_:ContinueAStmt => known(s)
}
implicit def expandAVarDecl(d: AVarDecl): Scored[AVarDecl] = d match {
case AVarDecl(x,xr,n,i) => expand(i) map (AVarDecl(x,xr,n,_))
}
implicit def expandCatchInfo(c: CatchInfo): Scored[CatchInfo] = c match {
case CatchInfo(cr,m,t,i,a,c) => expand(t) map (CatchInfo(cr,m,_,i,a,c))
}
implicit def expandForInfo(f: ForInfo): Scored[ForInfo] = f match {
case For(i,sr0,c,sr1,u) => productWith(expand(i),expand(c),expand(u))(For(_,sr0,_,sr1,_))
case Foreach(m,t,v,vr,n,cr,e) => productWith(expand(t),expand(e))(Foreach(m,_,v,vr,n,cr,_))
}
// AST expression expander; mirrors expandAStmt (ScoredAExp holds alternatives).
implicit def expandAExp(e: AExp): Scored[AExp] = e match {
case ScoredAExp(s,_) => s flatMap (expand(_))
case ParenAExp(e,a) => expand(e) map (ParenAExp(_,a))
case FieldAExp(e,dot,t,f,fr) => productWith(expand(e),expand(t))(FieldAExp(_,dot,_,f,fr))
case MethodRefAExp(e,ccr,t,f,fr) => productWith(expand(e),expand(t))(MethodRefAExp(_,ccr,_,f,fr))
case NewRefAExp(e,cc,t,newr) => productWith(expand(e),expand(t))(NewRefAExp(_,cc,_,newr))
case TypeApplyAExp(e,t,tr,after) => productWith(expand(e),expand(t))(TypeApplyAExp(_,_,tr,after))
case ApplyAExp(e,xs,l) => productWith(expand(e),expand(xs))(ApplyAExp(_,_,l))
case NewAExp(qe,newr,t,e,ns) => productWith(expand(qe),expand(t),expand(e))(NewAExp(_,newr,_,_,ns))
case AAnonClassExp(e,as,aa,b) => productWith(expand(e),expand(as))(AAnonClassExp(_,_,aa,b))
case UnaryAExp(op,opr,e) => expand(e) map (UnaryAExp(op,opr,_))
case BinaryAExp(op,opr,e0,e1) => productWith(expand(e0),expand(e1))(BinaryAExp(op,opr,_,_))
case CastAExp(t,a,e) => productWith(expand(t),expand(e))(CastAExp(_,a,_))
case CondAExp(c,qr,x,cr,y) => productWith(expand(c),expand(x),expand(y))(CondAExp(_,qr,_,cr,_))
case AssignAExp(op,opr,x,y) => productWith(expand(x),expand(y))(AssignAExp(op,opr,_,_))
case ArrayAExp(e,a) => expand(e) map (ArrayAExp(_,a))
case InstanceofAExp(e,ir,t) => productWith(expand(e),expand(t))(InstanceofAExp(_,ir,_))
case _:NameAExp|_:WildAExp|_:ALit => known(e)
}
// Denotation expanders (Callable / Stmt / Exp and friends).
implicit def expandCallable(c: Callable): Scored[Callable] = c match {
// NOTE(review): the asInstanceOf assumes expanding a NotTypeApply always
// yields a NotTypeApply — presumably an invariant of the expanders; a
// violation would surface as a runtime ClassCastException. Confirm.
case TypeApply(f,ts,a,hide) => expandCallable(f) map (f => TypeApply(f.asInstanceOf[NotTypeApply],ts,a,hide))
case MethodDen(x,t,f,fr) => expand(x) map (MethodDen(_,t,f,fr))
case _:ForwardDen => known(c)
case NewDen(nr,x,f,fr,ts) => expand(x) map (NewDen(nr,_,f,fr,ts))
case NewArrayDen(nr,t,tr,ns,ds) => expand(ns) map (NewArrayDen(nr,t,tr,_,ds))
}
implicit def expandStmt(s: Stmt): Scored[Stmt] = s match {
case SemiStmt(x,sr) => expand(x) map (SemiStmt(_,sr))
case _:EmptyStmt|_:HoleStmt|_:BreakStmt|_:ContinueStmt|_:TokStmt => known(s)
case VarStmt(m,t,tr,vs,env) => expand(vs) map (VarStmt(m,t,tr,_,env))
// NOTE(review): similar unchecked-cast assumption — expanding a StmtExp is
// assumed to yield a StmtExp.
case ExpStmt(e,env) => expandExp(e) map (e => ExpStmt(e.asInstanceOf[StmtExp],env))
case BlockStmt(b,a,env) => expand(b) map (BlockStmt(_,a,env))
case MultipleStmt(b) => expand(b) map MultipleStmt
case AssertStmt(ar,c,m,env) => productWith(expand(c),expand(m))(AssertStmt(ar,_,_,env))
case ReturnStmt(rr,e,env) => expand(e) map (ReturnStmt(rr,_,env))
case ThrowStmt(tr,e,env) => expand(e) map (ThrowStmt(tr,_,env))
case IfStmt(ir,c,a,x) => productWith(expand(c),expand(x))(IfStmt(ir,_,a,_))
case IfElseStmt(ir,c,a,x,er,y) => productWith(expand(c),expand(x),expand(y))(IfElseStmt(ir,_,a,_,er,_))
case WhileStmt(wr,c,a,x) => productWith(expand(c),expand(x))(WhileStmt(wr,_,a,_))
case DoStmt(dr,x,wr,c,a) => productWith(expand(x),expand(c))(DoStmt(dr,_,wr,_,a))
case ForStmt(fr,i,c,sr,u,a,x) => productWith(expand(i),expand(c),expand(u),expand(x))(ForStmt(fr,_,_,sr,_,a,_))
case ForeachStmt(fr,m,t,tr,v,vr,e,a,x,env) => productWith(expand(e),expand(x))(ForeachStmt(fr,m,t,tr,v,vr,_,a,_,env))
case SyncStmt(sr,e,a,x) => productWith(expand(e),expand(x))(SyncStmt(sr,_,a,_))
case TryStmt(tr,x,cs,f) => productWith(expand(x),expand(cs),expand(f))(TryStmt(tr,_,_,_))
}
implicit def expandVarDecl(d: VarDecl): Scored[VarDecl] = d match {
case VarDecl(x,xr,d,i,env) => expand(i) map (VarDecl(x,xr,d,_,env))
}
implicit def expandForInit(i: ForInit): Scored[ForInit] = i match {
case VarStmt(m,t,tr,vs,env) => expand(vs) map (VarStmt(m,t,tr,_,env))
case ForExps(xs,sr,env) => expand(xs) map (ForExps(_,sr,env))
}
implicit def expandCatchBlock(c: CatchBlock): Scored[CatchBlock] = c match {
case CatchBlock(m,tr,v,vr,a,s) => expand(s) map (CatchBlock(m,tr,v,vr,a,_))
}
// Explicit-instance helper so callers can expand a List[Stmt] directly.
def expandStmts(ss: List[Stmt]): Scored[List[Stmt]] = expandList(ss)(expandStmt)
implicit def expandExp(e: Exp): Scored[Exp] = e match {
// WhateverExp carries its alternatives in a Scored field; flatten them.
case WhateverExp(_,_,s) => s flatMap expandExp
case _:Lit|_:LocalExp|_:ThisOrSuperExp => known(e)
case FieldExp(x,f,fr) => expand(x) map (FieldExp(_,f,fr))
case CastExp(ty,a,x,g) => expand(x) map (CastExp(ty,a,_,g))
case ImpExp(op,opr,x) => expand(x) map (ImpExp(op,opr,_))
case NonImpExp(op,opr,x) => expand(x) map (NonImpExp(op,opr,_))
case BinaryExp(op,opr,x,y) => productWith(expand(x),expand(y))(BinaryExp(op,opr,_,_))
case InstanceofExp(x,ir,t,tr) => expand(x) map (InstanceofExp(_,ir,t,tr))
case AssignExp(op,opr,x,y) => productWith(expand(x),expand(y))(AssignExp(op,opr,_,_))
case ParenExp(x,a) => expand(x) map (ParenExp(_,a))
// NOTE(review): another unchecked cast — expanded callable assumed to stay
// a NormalCallable.
case ApplyExp(f,xs,a,auto) => productWith(expandCallable(f),expand(xs))((f,xs) => ApplyExp(f.asInstanceOf[NormalCallable],xs,a,auto))
case IndexExp(x,i,a) => productWith(expand(x),expand(i))(IndexExp(_,_,a))
case CondExp(c,qr,x,cr,y,ty) => productWith(expand(c),expand(x),expand(y))(CondExp(_,qr,_,cr,_,ty))
case ArrayExp(nr,t,tr,xs,a) => expand(xs) map (ArrayExp(nr,t,tr,_,a))
case EmptyArrayExp(nr,t,tr,is) => expand(is) map (EmptyArrayExp(nr,t,tr,_))
case AnonClassExp(c,as,ar,b) => productWith(expandCallable(c),expand(as))( (c,args) => AnonClassExp(c,args,ar,b))
}
}
| eddysystems/eddy | tarski/src/tarski/Expand.scala | Scala | bsd-2-clause | 9,391 |
package com.basho.riak.spark.examples.streaming
import java.util.UUID
import kafka.serializer.StringDecoder
import org.apache.spark.sql.Row
import org.apache.spark.streaming.Durations
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.{SparkConf, SparkContext}
import com.basho.riak.spark.streaming._
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat
/**
* Example shows how Spark Streaming can be used with Riak TS Dataframes
* For correct execution:
* kafka broker must be installed and running;
* 'streaming' topic must be created in kafka;
* riak ts, kafka and spark master hostnames should be provided as spark configs (or local versions will be used);
* riak ts table should be created and activated:
*
* CREATE TABLE ts_weather_demo
* (
* weather varchar not null,
* family varchar not null,
* time timestamp not null,
* temperature double,
* humidity double,
* pressure double,
* PRIMARY KEY (
* (weather, family, quantum(time, 1, 'h')), weather, family, time
* )
* )
**/
object StreamingTSExample {
// Entry point: wires a Kafka direct stream into Riak TS via Spark Streaming.
def main(args: Array[String]): Unit = {
val sparkConf = new SparkConf(true)
.setAppName("Simple Spark Streaming to Riak TS Demo")
// Fall back to local defaults unless the caller supplied these settings.
setSparkOpt(sparkConf, "spark.master", "local")
setSparkOpt(sparkConf, "spark.riak.connection.host", "127.0.0.1:8087")
setSparkOpt(sparkConf, "kafka.broker", "127.0.0.1:9092")
val sc = new SparkContext(sparkConf)
// 15-second micro-batches.
val streamCtx = new StreamingContext(sc, Durations.seconds(15))
val kafkaProps = Map[String, String](
"metadata.broker.list" -> sparkConf.get("kafka.broker"),
// Random client id so repeated demo runs don't collide.
"client.id" -> UUID.randomUUID().toString
)
// NOTE(review): the class comment above says the 'streaming' topic must
// exist, but the code subscribes to "ingest-ts" — confirm which is right.
KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](streamCtx, kafkaProps, Set[String]("ingest-ts")
) map { case (key, value) =>
// NOTE(review): an ObjectMapper is constructed per record inside this
// closure; presumably to avoid serializing it with the task. Consider
// mapPartitions if throughput matters — confirm.
val mapper = new ObjectMapper()
mapper.registerModule(DefaultScalaModule)
// Decode the JSON payload into a field map, then into a TS row matching
// the ts_weather_demo schema documented above.
val wr = mapper.readValue(value, classOf[Map[String,String]])
Row(
wr("weather"),
wr("family"),
DateTime.parse(wr("time"),DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss.SSS")).getMillis,
wr("temperature"),
wr("humidity"),
wr("pressure"))
} saveToRiakTS "ts_weather_demo"
streamCtx.start()
println("Spark streaming context started. Spark UI could be found at http://SPARK_MASTER_HOST:4040")
println("NOTE: if you're running job on the 'local' master open http://localhost:4040")
// Blocks forever; the demo is stopped externally (Ctrl-C / cluster kill).
streamCtx.awaitTermination()
}
// Sets `option` on the conf unless the caller already provided a value.
// Returns the same (mutated) SparkConf for convenience.
private def setSparkOpt(sparkConf: SparkConf, option: String, defaultOptVal: String): SparkConf = {
val optval = sparkConf.getOption(option).getOrElse(defaultOptVal)
sparkConf.set(option, optval)
}
}
| basho/spark-riak-connector | examples/src/main/scala/com/basho/riak/spark/examples/streaming/StreamingTSExample.scala | Scala | apache-2.0 | 2,995 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ly.stealth.mesos.kafka.interface.impl
import kafka.utils.{ZkUtils => KafkaZkUtils, ZKStringSerializer}
import kafka.common.TopicAndPartition
import kafka.controller.{LeaderIsrAndControllerEpoch, ReassignedPartitionsContext}
import ly.stealth.mesos.kafka.interface.ZkUtilsProxy
import org.I0Itec.zkclient.ZkClient
import scala.collection.{Map, Set, mutable}
/**
 * Kafka 0.8 implementation of [[ZkUtilsProxy]]. Every method is a thin
 * delegation to the static `kafka.utils.ZkUtils` helpers, sharing a single
 * [[ZkClient]] connection created from the given ZooKeeper URL.
 */
class ZkUtils(zkUrl: String) extends ZkUtilsProxy {
  // Used for both the session timeout and the connection timeout below.
  private val DEFAULT_TIMEOUT_MS = 30000
  // NOTE(review): this client is never closed here, so the owner of this
  // proxy instance controls the connection's lifetime.
  private val zkClient = new ZkClient(zkUrl, DEFAULT_TIMEOUT_MS, DEFAULT_TIMEOUT_MS, ZKStringSerializer)
  override def getAllTopics(): Seq[String] = KafkaZkUtils.getAllTopics(zkClient)
  override def getReplicaAssignmentForTopics(topics: Seq[String]): mutable.Map[TopicAndPartition, Seq[Int]]
    = KafkaZkUtils.getReplicaAssignmentForTopics(zkClient, topics)
  override def getPartitionsBeingReassigned(): Map[TopicAndPartition, ReassignedPartitionsContext]
    = KafkaZkUtils.getPartitionsBeingReassigned(zkClient)
  override def getReplicasForPartition(
    topic: String,
    partition: Int
  ): Seq[Int] = KafkaZkUtils.getReplicasForPartition(zkClient, topic, partition)
  override def updatePartitionReassignmentData(partitionsToBeReassigned: Map[TopicAndPartition, Seq[Int]]): Unit
    = KafkaZkUtils.updatePartitionReassignmentData(zkClient, partitionsToBeReassigned)
  override def createPersistentPath(
    path: String,
    data: String
  ): Unit = KafkaZkUtils.createPersistentPath(zkClient, path, data)
  override def getPartitionAssignmentForTopics(topics: Seq[String]): mutable.Map[String, Map[Int, Seq[Int]]]
    = KafkaZkUtils.getPartitionAssignmentForTopics(zkClient, topics)
  override def getPartitionLeaderAndIsrForTopics(topicAndPartitions: Set[TopicAndPartition]): mutable.Map[TopicAndPartition, LeaderIsrAndControllerEpoch]
    = KafkaZkUtils.getPartitionLeaderAndIsrForTopics(zkClient, topicAndPartitions)
  override def getSortedBrokerList(): Seq[Int]
    = KafkaZkUtils.getSortedBrokerList(zkClient)
}
| tc-dc/kafka-mesos | src/scala/iface/0_8/ly/stealth/mesos/kafka/interface/impl/ZkUtils.scala | Scala | apache-2.0 | 2,789 |
package com.komanov.mysql.streaming
import com.wix.mysql.EmbeddedMysql
import com.wix.mysql.EmbeddedMysql._
import com.wix.mysql.config.Charset
import com.wix.mysql.config.MysqldConfig._
import com.wix.mysql.config.SchemaConfig._
import com.wix.mysql.distribution.Version
object MysqlRunner {

  // Lazily started embedded MySQL 5.6 instance. Initialization boots the
  // server with the configured port/credentials/charset and loads the demo
  // schema (tables created via Query.CreateSql) before handing it back.
  private lazy val mysql: EmbeddedMysql = {
    val serverConfig = aMysqldConfig(Version.v5_6_latest)
      .withPort(Drivers.Port)
      .withUser(Drivers.UserName, Drivers.Password)
      .withCharset(Charset.UTF8MB4)
      .build()

    val server = anEmbeddedMysql(serverConfig).start()
    server.addSchema(
      aSchemaConfig(Drivers.DataBase)
        .withCommands(Query.CreateSql)
        .build())
    server
  }

  /** Forces initialization of the lazy embedded server; idempotent. */
  def run(): Unit = {
    mysql.toString
  }
}
| dkomanov/mysql-streaming | mysql-streaming-core/src/main/scala/com/komanov/mysql/streaming/MysqlRunner.scala | Scala | mit | 770 |
package be.cmpg.walk.neighbourhoodScoring
import org.specs2.mutable.Specification
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
import java.nio.file.Paths
import be.cmpg.graph.Gene
import be.cmpg.graph.NetworkReader
import be.cmpg.graph.Network
import collection.immutable.ListMap
import java.io.FileWriter
@RunWith(classOf[JUnitRunner])
class UniformNetworkTest extends Specification {

  "A NeighbourhoodTreeGenerator" should {
    // NOTE(review): "acore" in the description below is presumably a typo for
    // "score"; kept as-is because the description is part of the test output.
    "give back a acore of 5 if every gene in the network has a score of 5" in {
      val mutationDirectoryPath = Paths.get("src/test/resources/be/cmpg/NeighbourhoodScoring/uniformScoresMutations/")
      val geneReferenceFilePath = Paths.get("src/test/resources/be/cmpg/NeighbourhoodScoring/referenceGenomes/EcoliK12MG1655.gff")
      val mutationList = new MutationListGenerator(mutationDirectoryPath, geneReferenceFilePath).getExtendedMutationList
      val mutationScoringMap = new MutationScoresGenerator(mutationList).getScoresCount
      // Fixed: the previous raw"src\\test\\..." literal kept the doubled
      // backslashes verbatim (raw interpolation performs no escaping), which is
      // not a valid path separator on POSIX systems. Forward slashes work on
      // every platform and match the Paths.get(...) calls above.
      val network = new Network(NetworkReader.fromFile("src/test/resources/be/cmpg/NeighbourhoodScoring/graph/Network.txt"))
      val startGene = new Gene("b001")
      val searchdepth = 3
      // TODO(review): mutationScoringMap is computed but never used, and the
      // third constructor argument is null — confirm whether the scores map
      // should be passed to NeighbourhoodTreeGenerator instead.
      val normalizedScore = new NeighbourhoodTreeGenerator(startGene, searchdepth, null).expand
      normalizedScore._1 must be equalTo 5
    }
  }
}
/**
* Copyright (C) 2016 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.test
// Runs the platform processor test definitions listed in tests-platform.xml
// through the shared ProcessorTestBase harness; the individual test cases
// live entirely in that XML descriptor.
class PlatformProcessorTest extends
ProcessorTestBase("oxf:/ops/unit-tests/tests-platform.xml")
package edu.arizona.sista.struct
/**
* Stores a mutable number of type T
* User: mihais
* Date: 3/18/13
*/
class MutableNumber[T](var value:T) extends Serializable {
  // Hash by the wrapped value only, so two holders of equal values collide
  // as required by the equals contract below.
  override def hashCode = value.hashCode

  /**
   * Two MutableNumbers are equal iff their current values are equal.
   * T is erased at runtime, so we match on the existential type instead of
   * `MutableNumber[T]` (which was an unchecked, erasure-defeated pattern);
   * behavior is identical because equality is delegated to the values.
   */
  override def equals(other: Any): Boolean = {
    other match {
      case that: MutableNumber[_] => value == that.value
      case _ => false
    }
  }

  // Render exactly as the wrapped value renders.
  override def toString: String = value.toString
}
| michaelcapizzi/processors | src/main/scala/edu/arizona/sista/struct/MutableNumber.scala | Scala | apache-2.0 | 415 |
/*
This file is part of Intake24.
Copyright 2015, 2016 Newcastle University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package uk.ac.ncl.openlab.intake24.services
import uk.ac.ncl.openlab.intake24.errors.UnexpectedDatabaseError
trait NdnsCompoundFoodGroupsService {
  /**
   * Fetches compound food group data for the given set of NDNS food codes.
   *
   * Returns `Left(UnexpectedDatabaseError)` on a database failure, otherwise
   * a nested map of doubles. NOTE(review): presumably keyed by NDNS code with
   * an inner map of food-group id to proportion — confirm against the
   * implementing class before relying on this.
   */
  def getCompoundFoodGroupsData(ndnsCodes: Set[Int]): Either[UnexpectedDatabaseError, Map[Int, Map[Int, Double]]]
}
| digitalinteraction/intake24 | FoodDataServices/src/main/scala/uk/ac/ncl/openlab/intake24/services/NdnsCompoundFoodGroupsService.scala | Scala | apache-2.0 | 873 |
import com.typesafe.sbt.packager.archetypes.JavaAppPackaging
import sbt._
import Keys._
import org.scalatra.sbt._
// sbt build definition for the Scalatra-based KOAuth consumer sample.
// NOTE(review): the Build trait style is the legacy sbt 0.13 project/Build.scala
// approach — fine here, but deprecated in newer sbt versions.
object KoauthsamplescalatraBuild extends Build {
  val Organization = "com.hunorkovacs"
  val Name = "koauth-sample-scala-consumer-scalatra"
  val Version = "1.1.1-SNAPSHOT"
  val ScalaVersion = "2.11.6"
  val ScalatraVersion = "2.4.0.RC1"
  lazy val project = Project (
    Name,
    file("."),
    // Start from the Scalatra plugin's JRebel-enabled settings, then layer
    // project coordinates, resolvers and dependencies on top.
    settings = ScalatraPlugin.scalatraWithJRebel ++ Seq(
      organization := Organization,
      name := Name,
      version := Version,
      scalaVersion := ScalaVersion,
      resolvers += Classpaths.typesafeReleases,
      resolvers += "Scalaz Bintray Repo" at "http://dl.bintray.com/scalaz/releases",
      libraryDependencies ++= Seq(
        "org.scalatra" %% "scalatra" % ScalatraVersion,
        "ch.qos.logback" % "logback-classic" % "1.1.2" % "runtime",
        // "container" scope is used by the xsbt-web-plugin's embedded Jetty.
        "org.eclipse.jetty" % "jetty-webapp" % "9.1.5.v20140505" % "container;compile",
        "org.eclipse.jetty" % "jetty-plus" % "9.1.5.v20140505" % "container",
        "javax.servlet" % "javax.servlet-api" % "3.1.0",
        "io.spray" %% "spray-client" % "1.3.3",
        "com.typesafe.akka" %% "akka-actor" % "2.3.11",
        "com.hunorkovacs" %% "koauth" % "1.1.0"
      )
    )
  ).enablePlugins(JavaAppPackaging)
}
| kovacshuni/koauth-sample-scala-consumer-scalatra-heroku | project/build.scala | Scala | apache-2.0 | 1,306 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.bridge.scala.internal
import java.util.{Collections, List => JList}
import org.apache.flink.api.common.time.Time
import org.apache.flink.api.dag.Transformation
import org.apache.flink.streaming.api.scala.{StreamExecutionEnvironment, _}
import org.apache.flink.table.api.TableConfig
import org.apache.flink.table.catalog.FunctionCatalog
import org.apache.flink.table.module.ModuleManager
import org.apache.flink.table.operations.ModifyOperation
import org.apache.flink.table.utils.{CatalogManagerMocks, ExecutorMock, PlannerMock}
import org.apache.flink.types.Row
import org.hamcrest.CoreMatchers.equalTo
import org.junit.Assert.assertThat
import org.junit.Test
/**
* Tests for [[StreamTableEnvironmentImpl]].
*/
class StreamTableEnvironmentImplTest {

  @Test
  def testAppendStreamDoesNotOverwriteTableConfig(): Unit = {
    assertConversionPreservesRetention((tEnv, table) => tEnv.toAppendStream[Row](table))
  }

  @Test
  def testRetractStreamDoesNotOverwriteTableConfig(): Unit = {
    assertConversionPreservesRetention((tEnv, table) => tEnv.toRetractStream[Row](table))
  }

  /**
   * Shared scenario for both conversion tests (the two originals were
   * line-for-line duplicates apart from the conversion call): configure idle
   * state retention on the table config, run the given stream conversion, and
   * assert that the configured retention window was not overwritten.
   *
   * @param convert the table-to-stream conversion under test
   */
  private def assertConversionPreservesRetention(
      convert: (StreamTableEnvironmentImpl, org.apache.flink.table.api.Table) => Unit): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val elements = env.fromElements(1, 2, 3)
    val tEnv: StreamTableEnvironmentImpl = getStreamTableEnvironment(env, elements)

    val minRetention = Time.minutes(1)
    val maxRetention = Time.minutes(10)
    tEnv.getConfig.setIdleStateRetentionTime(minRetention, maxRetention)
    val table = tEnv.fromDataStream(elements)
    convert(tEnv, table)

    assertThat(
      tEnv.getConfig.getMinIdleStateRetentionTime,
      equalTo(minRetention.toMilliseconds))
    assertThat(
      tEnv.getConfig.getMaxIdleStateRetentionTime,
      equalTo(maxRetention.toMilliseconds))
  }

  /** Builds a table environment whose planner replays the given stream's transformation. */
  private def getStreamTableEnvironment(
      env: StreamExecutionEnvironment,
      elements: DataStream[Int]) = {
    val config = new TableConfig
    val catalogManager = CatalogManagerMocks.createEmptyCatalogManager()
    val moduleManager = new ModuleManager
    new StreamTableEnvironmentImpl(
      catalogManager,
      moduleManager,
      new FunctionCatalog(config, catalogManager, moduleManager),
      config,
      env,
      new TestPlanner(elements.javaStream.getTransformation),
      new ExecutorMock,
      true,
      this.getClass.getClassLoader)
  }

  /** Stub planner that translates every modify operation to the fixed transformation. */
  private class TestPlanner(transformation: Transformation[_]) extends PlannerMock {
    override def translate(modifyOperations: JList[ModifyOperation])
      : JList[Transformation[_]] = {
      Collections.singletonList(transformation)
    }
  }
}
| tzulitai/flink | flink-table/flink-table-api-scala-bridge/src/test/scala/org/apache/flink/table/api/bridge/scala/internal/StreamTableEnvironmentImplTest.scala | Scala | apache-2.0 | 3,880 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.tree.impl
import scala.collection.mutable
import org.apache.hadoop.fs.{Path, FileSystem}
import org.apache.spark.rdd.RDD
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.mllib.tree.configuration.FeatureType._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.mllib.tree.model.{Bin, Node, Split}
/**
* :: DeveloperApi ::
* This is used by the node id cache to find the child id that a data point would belong to.
* @param split Split information.
* @param nodeIndex The current node index of a data point that this will update.
*/
@DeveloperApi
private[tree] case class NodeIndexUpdater(
    split: Split,
    nodeIndex: Int) {

  /**
   * Determine a child node index based on the feature value and the split.
   * @param binnedFeatures Binned feature values.
   * @param bins Bin information to convert the bin indices to approximate feature values.
   * @return Child node index to update to.
   */
  def updateNodeIndex(binnedFeatures: Array[Int], bins: Array[Array[Bin]]): Int = {
    val featureIndex = split.feature
    // Decide which side of the split this data point falls on.
    val goLeft =
      if (split.featureType == Continuous) {
        // Recover an approximate feature value from the bin's upper bound and
        // compare it against the split threshold.
        val binIndex = binnedFeatures(featureIndex)
        bins(featureIndex)(binIndex).highSplit.threshold <= split.threshold
      } else {
        // Categorical: left child iff the category is in the split's set.
        split.categories.contains(binnedFeatures(featureIndex).toDouble)
      }
    if (goLeft) Node.leftChildIndex(nodeIndex) else Node.rightChildIndex(nodeIndex)
  }
}
/**
* :: DeveloperApi ::
* A given TreePoint would belong to a particular node per tree.
* Each row in the nodeIdsForInstances RDD is an array over trees of the node index
* in each tree. Initially, values should all be 1 for root node.
* The nodeIdsForInstances RDD needs to be updated at each iteration.
* @param nodeIdsForInstances The initial values in the cache
* (should be an Array of all 1's (meaning the root nodes)).
* @param checkpointInterval The checkpointing interval
* (how often should the cache be checkpointed.).
*/
@DeveloperApi
private[spark] class NodeIdCache(
    var nodeIdsForInstances: RDD[Array[Int]],
    val checkpointInterval: Int) {
  // Keep a reference to a previous node Ids for instances.
  // Because we will keep on re-persisting updated node Ids,
  // we want to unpersist the previous RDD.
  private var prevNodeIdsForInstances: RDD[Array[Int]] = null
  // To keep track of the past checkpointed RDDs.
  private val checkpointQueue = mutable.Queue[RDD[Array[Int]]]()
  // Number of times updateNodeIndices has been called; drives the
  // checkpoint cadence below.
  private var rddUpdateCount = 0
  /**
   * Update the node index values in the cache.
   * This updates the RDD and its lineage.
   * TODO: Passing bin information to executors seems unnecessary and costly.
   * @param data The RDD of training rows.
   * @param nodeIdUpdaters A map of node index updaters.
   *                       The key is the indices of nodes that we want to update.
   * @param bins Bin information needed to find child node indices.
   */
  def updateNodeIndices(
      data: RDD[BaggedPoint[TreePoint]],
      nodeIdUpdaters: Array[mutable.Map[Int, NodeIndexUpdater]],
      bins: Array[Array[Bin]]): Unit = {
    if (prevNodeIdsForInstances != null) {
      // Unpersist the previous one if one exists.
      prevNodeIdsForInstances.unpersist()
    }
    prevNodeIdsForInstances = nodeIdsForInstances
    nodeIdsForInstances = data.zip(nodeIdsForInstances).map {
      case (point, node) => {
        // One slot per tree: look up this point's current node in each tree
        // and, if an updater exists for that node, descend to the child.
        // NOTE(review): the `node` array is mutated in place inside the map.
        var treeId = 0
        while (treeId < nodeIdUpdaters.length) {
          val nodeIdUpdater = nodeIdUpdaters(treeId).getOrElse(node(treeId), null)
          if (nodeIdUpdater != null) {
            val newNodeIndex = nodeIdUpdater.updateNodeIndex(
              binnedFeatures = point.datum.binnedFeatures,
              bins = bins)
            node(treeId) = newNodeIndex
          }
          treeId += 1
        }
        node
      }
    }
    // Keep on persisting new ones.
    nodeIdsForInstances.persist(StorageLevel.MEMORY_AND_DISK)
    rddUpdateCount += 1
    // Handle checkpointing if the directory is not None.
    if (nodeIdsForInstances.sparkContext.getCheckpointDir.nonEmpty &&
        (rddUpdateCount % checkpointInterval) == 0) {
      // Let's see if we can delete previous checkpoints.
      var canDelete = true
      while (checkpointQueue.size > 1 && canDelete) {
        // We can delete the oldest checkpoint iff
        // the next checkpoint actually exists in the file system.
        // (checkpointQueue.get(1) peeks at the second-oldest entry.)
        if (checkpointQueue.get(1).get.getCheckpointFile.isDefined) {
          val old = checkpointQueue.dequeue()
          // Since the old checkpoint is not deleted by Spark,
          // we'll manually delete it here.
          val fs = FileSystem.get(old.sparkContext.hadoopConfiguration)
          fs.delete(new Path(old.getCheckpointFile.get), true)
        } else {
          canDelete = false
        }
      }
      nodeIdsForInstances.checkpoint()
      checkpointQueue.enqueue(nodeIdsForInstances)
    }
  }
  /**
   * Call this after training is finished to delete any remaining checkpoints.
   */
  def deleteAllCheckpoints(): Unit = {
    while (checkpointQueue.nonEmpty) {
      val old = checkpointQueue.dequeue()
      for (checkpointFile <- old.getCheckpointFile) {
        val fs = FileSystem.get(old.sparkContext.hadoopConfiguration)
        fs.delete(new Path(checkpointFile), true)
      }
    }
    if (prevNodeIdsForInstances != null) {
      // Unpersist the previous one if one exists.
      prevNodeIdsForInstances.unpersist()
    }
  }
}
@DeveloperApi
private[spark] object NodeIdCache {
  /**
   * Initialize the node Id cache with initial node Id values.
   * @param data The RDD of training rows.
   * @param numTrees The number of trees that we want to create cache for.
   * @param checkpointInterval The checkpointing interval
   *                           (how often should the cache be checkpointed.).
   * @param initVal The initial values in the cache (1 == every row at the root of every tree).
   * @return A node Id cache containing an RDD of initial root node Indices.
   */
  def init(
      data: RDD[BaggedPoint[TreePoint]],
      numTrees: Int,
      checkpointInterval: Int,
      initVal: Int = 1): NodeIdCache = {
    // One entry per row, one slot per tree, all starting at initVal.
    val initialIds = data.map { _ => Array.fill[Int](numTrees)(initVal) }
    new NodeIdCache(initialIds, checkpointInterval)
  }
}
| chenc10/Spark-PAF | mllib/src/main/scala/org/apache/spark/mllib/tree/impl/NodeIdCache.scala | Scala | apache-2.0 | 7,274 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.types.MetadataBuilder
// Verifies that RemoveRedundantAliases / RemoveRedundantProject strip aliases
// and projections that add no information, while keeping those required for
// correctness (metadata-bearing aliases, self-join disambiguation, subquery
// output attributes).
class RemoveRedundantAliasAndProjectSuite extends PlanTest with PredicateHelper {
  object Optimize extends RuleExecutor[LogicalPlan] {
    val batches = Batch(
      "RemoveAliasOnlyProject",
      FixedPoint(50),
      PushProjectionThroughUnion,
      RemoveRedundantAliases,
      RemoveRedundantProject) :: Nil
  }
  test("all expressions in project list are aliased child output") {
    val relation = LocalRelation('a.int, 'b.int)
    val query = relation.select('a as 'a, 'b as 'b).analyze
    val optimized = Optimize.execute(query)
    comparePlans(optimized, relation)
  }
  test("all expressions in project list are aliased child output but with different order") {
    val relation = LocalRelation('a.int, 'b.int)
    val query = relation.select('b as 'b, 'a as 'a).analyze
    val optimized = Optimize.execute(query)
    // The project must survive to preserve the reordered output.
    val expected = relation.select('b, 'a).analyze
    comparePlans(optimized, expected)
  }
  test("some expressions in project list are aliased child output") {
    val relation = LocalRelation('a.int, 'b.int)
    val query = relation.select('a as 'a, 'b).analyze
    val optimized = Optimize.execute(query)
    comparePlans(optimized, relation)
  }
  test("some expressions in project list are aliased child output but with different order") {
    val relation = LocalRelation('a.int, 'b.int)
    val query = relation.select('b as 'b, 'a).analyze
    val optimized = Optimize.execute(query)
    val expected = relation.select('b, 'a).analyze
    comparePlans(optimized, expected)
  }
  test("some expressions in project list are not Alias or Attribute") {
    val relation = LocalRelation('a.int, 'b.int)
    val query = relation.select('a as 'a, 'b + 1).analyze
    val optimized = Optimize.execute(query)
    // Only the redundant alias is removed; the computed column keeps the project.
    val expected = relation.select('a, 'b + 1).analyze
    comparePlans(optimized, expected)
  }
  test("some expressions in project list are aliased child output but with metadata") {
    val relation = LocalRelation('a.int, 'b.int)
    val metadata = new MetadataBuilder().putString("x", "y").build()
    val aliasWithMeta = Alias('a, "a")(explicitMetadata = Some(metadata))
    val query = relation.select(aliasWithMeta, 'b).analyze
    val optimized = Optimize.execute(query)
    // Alias carries explicit metadata, so it must NOT be removed.
    comparePlans(optimized, query)
  }
  test("retain deduplicating alias in self-join") {
    val relation = LocalRelation('a.int)
    val fragment = relation.select('a as 'a)
    val query = fragment.select('a as 'a).join(fragment.select('a as 'a)).analyze
    val optimized = Optimize.execute(query)
    // One side keeps its alias to disambiguate the duplicate attribute ids.
    val expected = relation.join(relation.select('a as 'a)).analyze
    comparePlans(optimized, expected)
  }
  test("alias removal should not break after push project through union") {
    val r1 = LocalRelation('a.int)
    val r2 = LocalRelation('b.int)
    val query = r1.select('a as 'a).union(r2.select('b as 'b)).select('a).analyze
    val optimized = Optimize.execute(query)
    // NOTE(review): unlike the other tests, `expected` is not .analyze'd here —
    // presumably comparePlans normalizes it; confirm if this test ever fails.
    val expected = r1.union(r2)
    comparePlans(optimized, expected)
  }
  test("remove redundant alias from aggregate") {
    val relation = LocalRelation('a.int, 'b.int)
    val query = relation.groupBy('a as 'a)('a as 'a, sum('b)).analyze
    val optimized = Optimize.execute(query)
    val expected = relation.groupBy('a)('a, sum('b)).analyze
    comparePlans(optimized, expected)
  }
  test("remove redundant alias from window") {
    val relation = LocalRelation('a.int, 'b.int)
    val query = relation.window(Seq('b as 'b), Seq('a as 'a), Seq()).analyze
    val optimized = Optimize.execute(query)
    val expected = relation.window(Seq('b), Seq('a), Seq()).analyze
    comparePlans(optimized, expected)
  }
  test("do not remove output attributes from a subquery") {
    val relation = LocalRelation('a.int, 'b.int)
    val query = Subquery(relation.select('a as "a", 'b as "b").where('b < 10).select('a).analyze)
    val optimized = Optimize.execute(query)
    // 'a stays aliased because it is part of the subquery's visible output.
    val expected = Subquery(relation.select('a as "a", 'b).where('b < 10).select('a).analyze)
    comparePlans(optimized, expected)
  }
}
| aokolnychyi/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/RemoveRedundantAliasAndProjectSuite.scala | Scala | apache-2.0 | 5,231 |
/*
* Copyright (C) 2016 Christopher Batey and Dogan Narinc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scassandra.server.priming.json
import java.math.BigInteger
import java.net.InetAddress
import java.util.UUID
import com.typesafe.scalalogging.LazyLogging
import org.scassandra.codec.Consistency
import org.scassandra.codec.Consistency.Consistency
import org.scassandra.codec.datatype.DataType
import org.scassandra.codec.messages.BatchQueryKind.BatchQueryKind
import org.scassandra.codec.messages.BatchType.BatchType
import org.scassandra.codec.messages.{BatchQueryKind, BatchType}
import org.scassandra.cql._
import org.scassandra.server.actors._
import org.scassandra.server.priming._
import org.scassandra.server.priming.batch.{BatchPrimeSingle, BatchQueryPrime, BatchWhen}
import org.scassandra.server.priming.prepared._
import org.scassandra.server.priming.query.{PrimeCriteria, PrimeQuerySingle, Then, When}
import org.scassandra.server.priming.routes.Version
import scodec.bits.ByteVector
import spray.httpx.SprayJsonSupport
import spray.json._
import scala.collection.Set
import scala.util.{Failure, Try, Success => TSuccess}
object PrimingJsonImplicits extends DefaultJsonProtocol with SprayJsonSupport with LazyLogging {
implicit object VariableMatchFormat extends JsonFormat[VariableMatch] {
override def write(obj: VariableMatch): JsValue = {
obj match {
case ExactMatch(value) => {
JsObject(Map(
"type" -> JsString("exact"),
"matcher" -> AnyJsonFormat.write(value)
))
}
case AnyMatch => JsObject(Map("type" -> JsString("any")))
}
}
override def read(json: JsValue): VariableMatch = {
json match {
case obj: JsObject => {
obj.fields("type") match {
case JsString("exact") => ExactMatch(Some(AnyJsonFormat.read(obj.fields("matcher"))))
case JsString("any") => AnyMatch
case _ => AnyMatch
}
}
case _ => AnyMatch
}
}
}
  // Structural (de)serializer for heterogeneous priming values. Case ORDER is
  // significant: e.g. `Some`/`None` must precede the generic `Product` case
  // (Some is itself a Product), and collection matches are erased at runtime.
  implicit object AnyJsonFormat extends JsonFormat[Any] {
    def write(x: Any) = x match {
      case n: Int => JsNumber(n)
      case n: Long => JsNumber(n)
      case short: Short => JsNumber(short)
      case byte: Byte => JsNumber(byte)
      case bd: BigDecimal => JsString(bd.bigDecimal.toPlainString)
      case s: String => JsString(s)
      case seq: Seq[_] => seqFormat[Any].write(seq)
      case m: Map[_, _] =>
        // JSON object keys must be strings; stringify whatever the key type is.
        val keysAsString: Map[String, Any] = m.map({ case (k, v) => (k.toString, v)})
        mapFormat[String, Any].write(keysAsString)
      case set: Set[_] => setFormat[Any].write(set.map(s => s))
      case b: Boolean if b => JsTrue
      case b: Boolean if !b => JsFalse
      // sending as strings to not lose precision
      case double: Double => JsString(double.toString)
      case float: Float => JsString(float.toString)
      case uuid: UUID => JsString(uuid.toString)
      case bigInt: BigInt => JsNumber(bigInt)
      case bigInt: BigInteger => JsNumber(bigInt)
      case bigD: java.math.BigDecimal => JsString(bigD.toPlainString)
      case inet: InetAddress => JsString(inet.getHostAddress)
      case bytes: Array[Byte] => JsString("0x" + bytes2hex(bytes))
      case bytes: ByteVector => JsString("0x" + bytes2hex(bytes.toArray))
      case null => JsNull
      case None => JsNull
      case Some(s) => this.write(s)
      case p: Product => seqFormat[Any].write(p.productIterator.toList) // To support tuples
      case other => serializationError("Do not understand object of type " + other.getClass.getName)
    }
    // Reading is lossy relative to write: numbers come back as BigDecimal,
    // hex strings stay strings, etc.
    def read(value: JsValue) = value match {
      case jsNumber : JsNumber => jsNumber.value
      case JsString(s) => s
      case a: JsArray => listFormat[Any].read(value)
      case o: JsObject => mapFormat[String, Any].read(value)
      case JsTrue => true
      case JsFalse => false
      case x => deserializationError("Do not understand how to deserialize " + x)
    }
    // Lowercase, two-digits-per-byte hex encoding (no "0x" prefix).
    def bytes2hex(bytes: Array[Byte]): String = {
      bytes.map("%02x".format(_)).mkString
    }
  }
implicit object ConsistencyJsonFormat extends RootJsonFormat[Consistency] {
def write(c: Consistency) = JsString(c.toString)
def read(value: JsValue) = value match {
case JsString(consistency) => Consistency.withName(consistency)
case _ => throw new IllegalArgumentException("Expected Consistency as JsString")
}
}
implicit object BatchQueryKindJsonFormat extends RootJsonFormat[BatchQueryKind] {
def write(c: BatchQueryKind) = JsString(c match {
case BatchQueryKind.Simple => "query"
case BatchQueryKind.Prepared => "prepared_statement"
})
def read(value: JsValue) = value match {
case JsString(v) => v match {
case "query" => BatchQueryKind.Simple
case "prepared_statement" => BatchQueryKind.Prepared
}
case _ => throw new IllegalArgumentException("Expected BatchQueryKind as JsString")
}
}
implicit object BatchTypeJsonFormat extends RootJsonFormat[BatchType] {
def write(c: BatchType) = JsString(c.toString)
def read(value: JsValue) = value match {
case JsString(batchType) => BatchType.withName(batchType)
case _ => throw new IllegalArgumentException("Expected BatchType as JsString")
}
}
  // Parses CQL type strings (e.g. "map<text,int>") into codec DataTypes via the
  // shared CqlTypeFactory, and serializes DataTypes back to their string form.
  implicit object DataTypeJsonFormat extends RootJsonFormat[DataType] {
    lazy val cqlTypeFactory = new CqlTypeFactory
    // Bridge from the Java-side CqlType hierarchy to the Scala codec DataType.
    def convertJavaToScalaType(javaType: CqlType): DataType = javaType match {
      // TODO: Update for UDTs when supported.
      case primitive: PrimitiveType => DataType.primitiveTypeMap(primitive.serialise())
      case map: MapType => DataType.Map(convertJavaToScalaType(map.getKeyType), convertJavaToScalaType(map.getValueType))
      case set: SetType => DataType.Set(convertJavaToScalaType(set.getType))
      case list: ListType => DataType.List(convertJavaToScalaType(list.getType))
      case tuple: TupleType => DataType.Tuple(tuple.getTypes.map(convertJavaToScalaType):_*)
    }
    // Wraps the factory's exceptions so callers can decide how to report them.
    def fromString(typeString: String): Try[DataType] = {
      try {
        val cqlType = cqlTypeFactory.buildType(typeString)
        TSuccess(convertJavaToScalaType(cqlType))
      } catch {
        case e: Exception => Failure(e)
      }
    }
    def write(d: DataType) = JsString(d.stringRep)
    def read(value: JsValue) = value match {
      case JsString(string) => fromString(string) match {
        case TSuccess(columnType) => columnType
        case Failure(e) =>
          // Log the underlying parse failure, but surface a client-facing error.
          logger.warn(s"Received invalid column type '$string'", e)
          throw new IllegalArgumentException(s"Not a valid column type '$string'")
      }
      case _ => throw new IllegalArgumentException("Expected ColumnType as JsString")
    }
  }
  /** (De)serialises ResultJsonRepresentation via its string form. */
  implicit object ResultJsonFormat extends RootJsonFormat[ResultJsonRepresentation] {
    def write(result: ResultJsonRepresentation) = JsString(result.string)
    def read(value: JsValue) = value match {
      case JsString(string) => ResultJsonRepresentation.fromString(string)
      case _ => throw new IllegalArgumentException("Expected Result as JsString")
    }
  }
  // --- Query priming payloads ---
  implicit val impThen = jsonFormat6(Then)
  implicit val impWhen = jsonFormat5(When)
  // Explicit field names because "then" is not a legal Scala identifier for auto-derivation here.
  implicit val impPrimeQueryResult = jsonFormat(PrimeQuerySingle, "when", "then")
  implicit val impConnection = jsonFormat1(Connection)
  implicit val impQuery = jsonFormat6(Query)
  // --- Prime conflict reporting ---
  implicit val impPrimeCriteria = jsonFormat3(PrimeCriteria)
  implicit val impConflictingPrimes = jsonFormat1(ConflictingPrimes)
  implicit val impTypeMismatch = jsonFormat3(TypeMismatch)
  implicit val impTypeMismatches = jsonFormat1(TypeMismatches)
  // --- Prepared statement priming ---
  implicit val impWhenPreparedSingle = jsonFormat3(WhenPrepared)
  implicit val impThenPreparedSingle = jsonFormat6(ThenPreparedSingle)
  implicit val impPrimePreparedSingle = jsonFormat(PrimePreparedSingle, "when", "then")
  implicit val impPreparedStatementExecution = jsonFormat6(PreparedStatementExecution)
  implicit val impPreparedStatementPreparation = jsonFormat1(PreparedStatementPreparation)
  implicit val impVersion = jsonFormat1(Version)
  // --- Batch priming ---
  implicit val impBatchQuery = jsonFormat4(BatchQuery)
  implicit val impBatchExecution = jsonFormat5(BatchExecution)
  implicit val impBatchQueryPrime = jsonFormat2(BatchQueryPrime)
  implicit val impBatchWhen = jsonFormat3(BatchWhen)
  implicit val impBatchPrimeSingle = jsonFormat(BatchPrimeSingle, "when", "then")
  // --- Connection management ---
  implicit val impClientConnection = jsonFormat2(ClientConnection)
  implicit val impClientConnections = jsonFormat1(ClientConnections)
  implicit val impClosedConnections = jsonFormat(ClosedConnections, "closed_connections", "operation")
  implicit val impAcceptNewConnectionsEnabled = jsonFormat1(AcceptNewConnectionsEnabled)
  implicit val impRejectNewConnectionsEnabled = jsonFormat1(RejectNewConnectionsEnabled)
  // NOTE(review): "impCriterna" and "impOutcoe" below look like typos for
  // impCriteria/impOutcome. They are resolved implicitly by type, but confirm
  // nothing references them by name before renaming.
  implicit val impCriterna = jsonFormat1(Criteria)
  implicit val impAction = jsonFormat5(Action)
  implicit val impOutcoe = jsonFormat2(Outcome)
  // --- Multi-result prepared priming ---
  implicit val impPrimePreparedMultiThen = jsonFormat2(ThenPreparedMulti)
  implicit val impPrimePreparedMulti = jsonFormat(PrimePreparedMulti, "when", "then")
}
| mikefero/cpp-driver | gtests/src/integration/scassandra/server/server/src/main/scala/org/scassandra/server/priming/json/PrimingJsonImplicits.scala | Scala | apache-2.0 | 9,683 |
/**
* Copyright 2015 Frank Austin Nothaft
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.fnothaft.gnocchi.sql
import net.fnothaft.gnocchi.models.GenotypeState
import org.scalatest.FunSuite
import scala.collection.JavaConverters._
class GenotypeStateMatrixSuite extends FunSuite {

  test("should filter out reference calls") {
    // A genotype state of 0 (homozygous reference) must be dropped by the join.
    val refCall = GenotypeState("1", 1000L, 1001L, "A", "G", "mySample", 0)
    val joined = GenotypeStateMatrix.filterAndJoin(refCall, Map.empty)
    assert(joined.isEmpty)
  }

  test("correctly process nonref calls") {
    // A non-reference call (state 2) must be kept and joined against the
    // sample-id map, picking up the id registered for "mySample".
    val altCall = GenotypeState("1", 1000L, 1001L, "A", "G", "mySample", 2)
    val sampleIds = Map(("mySample" -> 2), ("yourSample" -> 3))
    val joined = GenotypeStateMatrix.filterAndJoin(altCall, sampleIds)
    assert(joined.isDefined)
    val (_, (sampleId, genotypeCount)) = joined.get
    assert(sampleId === 2)
    // The genotype count is floating point; compare within a tight tolerance.
    assert(genotypeCount > 1.99999 && genotypeCount < 2.00001)
  }
}
| fnothaft/gnocchi | gnocchi-core/src/test/scala/net/fnothaft/gnocchi/sql/GenotypeStateMatrixSuite.scala | Scala | apache-2.0 | 1,743 |
package org.eol.globi.collections
import org.anormcypher.{Cypher, Neo4jREST}
import play.api.libs.json.{JsObject, Json, JsArray}
object CollectionBuilder {

  /** Looks up the scientific name and, when present, the first English common
    * name for the EOL taxon with the given external id. Only taxa whose rank
    * matches species/genus/family are considered. Returns None when no row
    * matches.
    */
  def namesForTaxonExternalId(taxonId: Long): Option[(String, Option[String])] = {
    // (Removed an unused local `language` that was never referenced.)
    val query = Cypher(
      """START taxon = node:taxons(externalId='EOL:""" + taxonId +
        """')
        | WHERE has(taxon.rank) AND taxon.rank =~ '(.*[Ss]pecies)|(.*[Gg]enus)|(.*[Ff]amily)'
        | RETURN taxon.name as name, taxon.commonNames? as commonNames""".stripMargin)
    val rez = query.apply().map(row => {
      val commonNameList: Option[String] = row[Option[String]]("commonNames")
      (row[String]("name"), firstEnglishCommonName(commonNameList))
    })
    // headOption replaces the previous isEmpty/head pair.
    rez.headOption
  }

  /** Returns the first common name tagged with language "en", if any. */
  def firstEnglishCommonName(commonNameList: Option[String]): Option[String] =
    // collectFirst replaces filter/map + deprecated postfix `headOption`.
    parseCommonNames(commonNameList).flatten.collectFirst { case (name, "en") => name }

  /** Splits an EOL common-names string ("name@en | nombre@es | ...") into
    * (name, language) pairs; entries that do not match `name@lang` become None.
    */
  def parseCommonNames(commonNamesString: Option[String]): Array[Option[(String, String)]] = {
    val names: Array[String] = commonNamesString.getOrElse("").split( """\s+\|\s+""")
    val pattern = """(.*)@(\w\w)""".r
    names map {
      case pattern(name, lang) => Some(name.trim, lang)
      case _ => None
    }
  }

  /** Implicit Neo4j REST connection. Note: a new client value is constructed
    * on every implicit summon.
    */
  implicit def connection: Neo4jREST = {
    Neo4jREST("neo4j.globalbioticinteractions.org", 80, "/db/data/")
  }

  /** Streams the numeric EOL page ids of taxa that the given taxon eats or
    * preys on, based on ATE/PREYS_ON relationships in the graph.
    */
  def preyOf(id: Long): Stream[Long] = {
    val query = Cypher(
      """START taxon = node:taxonPaths(""" + buildLucenePathQuery(Seq(id)) +
        """)
        |MATCH taxon-[:ATE|PREYS_ON]->otherTaxon
        | WHERE has(otherTaxon.externalId) AND otherTaxon.externalId =~ 'EOL:.*'
        | RETURN replace(otherTaxon.externalId, "EOL:", "") as preyId""".stripMargin)
    query.apply().map(row => {
      row[String]("preyId").toLong
    })
  }

  /** Builds a single-quoted Lucene clause matching any of the given EOL ids,
    * e.g. 'path:EOL\:1 OR path:EOL\:2'.
    */
  def buildLucenePathQuery(taxonConceptIds: Seq[Long]): String =
    // Redundant intermediate val removed; the expression is the result.
    taxonConceptIds.map(id => """path:EOL\\:""" + id.toString).mkString("'", " OR ", "'")

  /** Renders an EOL collection JSON payload containing the given taxon ids as
    * TaxonConcept collection items.
    */
  def asEOLCollection(name: String, description: String, preyIds: Seq[Long]): JsObject = {
    Json.obj("collection" -> Json.obj("name" -> name
      , "description" -> description
      , "collection_items" -> (
        preyIds map (preyId => {
          Json.obj(
            "collected_item_type" -> "TaxonConcept",
            "collected_item_id" -> preyId
          )
        })
      )
    ))
  }

  /** HTML blurb pointing back to the EOL data page the collection came from. */
  def mkCollectionReference(id: Long, name: String): String = {
    List( """This collection was automatically generated from <a href="http://globalbioticinteractions.org">Global Biotic Interactions</a> (GloBI) data. Please visit <a href="""",
      """http://eol.org/pages/""",
      id,
      """/data">this EOL data page</a> for more detailed information about the GloBI interaction data and to find other trait data for """,
      name,
      """."""
    ).mkString("")
  }

  /** Builds the (title, description) for a collection. The description is a
    * bag of search-friendly questions/phrases built from the taxon's names.
    */
  def mkCollectionInfo(commonName: Option[String], scientificName: String, interactionType: String): (String, String) = {
    val interactionTargetTitle = Map("preysOn" -> "Food")
    val interactionTargetNouns = Map("preysOn" -> List("prey", "food"))
    val interactionVerbs = Map("preysOn" -> List("eat", "prey on", "hunt"))
    // Naive pluralisation: append "s" unless the name already ends with one.
    val pluralCommonNames = List(commonName).flatten.map {
      name => if (name.endsWith("s")) name else name + "s"
    }
    val pluralNames = pluralCommonNames ++ commonizePlural(scientificName)
    val singularNames = List(commonName).flatten ++ commonize(scientificName)
    // flatMap replaces the previous map{...}.flatten pairs (same result, same order).
    val sentences = interactionVerbs(interactionType).flatMap {
      verb => pluralNames.map { name => "what do " + name.toLowerCase + " " + verb + "?"}
    }
    val phrases = interactionTargetNouns(interactionType).flatMap {
      noun => singularNames.map { name => name.toLowerCase + " " + noun}
    }
    // head is safe: the list always contains Some(scientificName).
    val name: String = List(commonName, Some(scientificName)).flatten.head.split(" ").map(_.capitalize).mkString(" ") + " " + interactionTargetTitle(interactionType)
    val description: String = (sentences ++ phrases).mkString(" ")
    (name, description)
  }

  /** Shared "-idae" family-suffix rewrite: "Canidae" -> List("Can" + suffix, "Canidae");
    * names without the suffix are returned unchanged as a single-element list.
    */
  private def commonizeWith(name: String, replacement: String): List[String] = {
    val idae = """(.*)(idae)$""".r
    name match {
      case idae(prefix, _) => List(prefix + replacement, name)
      case _ => List(name)
    }
  }

  /** Singular common form, e.g. "Canidae" -> List("Canid", "Canidae"). */
  def commonize(name: String): List[String] = commonizeWith(name, "id")

  /** Plural common form, e.g. "Canidae" -> List("Canids", "Canidae"). */
  def commonizePlural(name: String): List[String] = commonizeWith(name, "ids")
}
| jhpoelen/foragus | src/main/scala/org/eol/globi/collections/CollectionBuilder.scala | Scala | mit | 4,579 |
package no.digipost.labs.legacy
import org.scalatra.test.scalatest.{ScalatraFunSuite, ScalatraSuite}
import no.digipost.labs.items._
import no.digipost.labs.Settings
import org.bson.types.ObjectId
import no.digipost.labs.items.DbItem
import java.util.Date
class LegacyRedirectResourceTest extends ScalatraFunSuite {

  val itemsRepo = new TestItemsRepository
  val settings = new Settings(Settings.load().config)
  // Seed one legacy news item (old id "n125") and one legacy idea (old id "i12")
  // so the redirect lookups below have rows to resolve against.
  private val itemWithBody = DbItem(new ObjectId(), ItemType.news, new Date, author = "Test Testesen", body = "body", source = Some("Original source"), oldId = Some("n125"))
  val newsDbItem = itemsRepo.insert(itemWithBody)
  private val ideaItemWithBody = DbItem(new ObjectId(), ItemType.idea, new Date, author = "Test Testesen", body = "body", source = Some("Original source"), oldId = Some("i12"))
  val ideaDbItem = itemsRepo.insert(ideaItemWithBody)

  addServlet(new LegacyRedirectResource(settings, new ItemsService(itemsRepo)), "/legacy/*")

  test("should be able to parse old ids from legacy idea id strings") {
    LegacyRedirectResource.parseLegacyIdString("8687;Opplasting-av-flere-dokumenter-samtidig") should equal (Some("8687"))
    LegacyRedirectResource.parseLegacyIdString("/nor/8687;Opplasting-av-flere-dokumenter-samtidig") should equal (Some("8687"))
    LegacyRedirectResource.parseLegacyIdString("8687-Opplasting-av-flere-dokumenter-samtidig") should equal (Some("8687"))
    LegacyRedirectResource.parseLegacyIdString("8687") should equal (Some("8687"))
    LegacyRedirectResource.parseLegacyIdString("51;Elektronisk-budgivning") should equal (Some("51"))
    LegacyRedirectResource.parseLegacyIdString(";Elektronisk-budgivning") should equal (None)
    LegacyRedirectResource.parseLegacyIdString("/nor/125-flyttemeldinger_for_pensjonsavtaler") should equal (Some("125"))
    LegacyRedirectResource.parseLegacyIdString("125-flyttemeldinger_for_pensjonsavtaler") should equal (Some("125"))
    LegacyRedirectResource.parseLegacyIdString("a125-flyttemeldinger_for_pensjonsavtaler") should equal (None)
  }

  test("should return 301 for known old legacy news urls") {
    get("/legacy/pages/125-flyttemeldinger_for_pensjonsavtaler") {
      status should equal (301)
      header("Location") should equal(s"https://localhost:7000/#!/item/${newsDbItem.get._id.toHexString}")
    }
  }

  test("should return 301 for known old legacy news urls with nor") {
    get("/legacy/pages/nor/125-flyttemeldinger_for_pensjonsavtaler") {
      status should equal (301)
      header("Location") should equal(s"https://localhost:7000/#!/item/${newsDbItem.get._id.toHexString}")
    }
  }

  // Fixed typo in the test name: "shoukd" -> "should".
  test("should return 301 for known old legacy idea urls") {
    get("/legacy/ideer/12;Rekommandert-sending-digitalt") {
      status should equal(301)
      header("Location") should equal(s"https://localhost:7000/#!/item/${ideaDbItem.get._id.toHexString}")
    }
  }

  test("should return 404 for unknown legacy urls") {
    get("/legacy/pages/1-sak_med_ugyldig_id") {
      status should equal (404)
    }
  }

  test("should return 404 when legacy news url contains old id from idea") {
    get("/legacy/ideer/125-flyttemeldinger_for_pensjonsavtaler") {
      status should equal (404)
    }
  }
}
| digipost/labs | backend/src/test/scala/no/digipost/labs/legacy/LegacyRedirectResourceTest.scala | Scala | apache-2.0 | 3,224 |
package eu.shiftforward.icfpc2015.model
import eu.shiftforward.icfpc2015.model.{ Command, PowerPhrase }
import org.specs2.mutable.Specification
class PowerPhraseSpec extends SpecificationWithJUnit {
  "The PowerPhrase" should {
    // Fixture phrases of varying length; some overlap each other by design
    // (e.g. "lae" contains "la", "aeaa" overlaps runs of "aa").
    val powerphrase1 = PowerPhrase("ei!".toList)
    val powerphrase2 = PowerPhrase("lae".toList)
    val powerphrase3 = PowerPhrase("aa".toList)
    val powerphrase4 = PowerPhrase("aeaa".toList)
    val powerphrase5 = PowerPhrase("la".toList)

    "match power phrases in commands" in {
      // Single phrase, single occurrence: match index is the start offset (3).
      val command = Command.string("olaei!ie")
      PowerPhrase.getMatchings(command, List(powerphrase1)) must be_==(
        Map(powerphrase1 -> List(3))
      )
    }

    "match powerphrases n times in command sequentially" in {
      // Back-to-back repeats yield one offset per occurrence.
      val command = Command.string("laelaelae")
      PowerPhrase.getMatchings(command, List(powerphrase2)) must be_==(
        Map(
          powerphrase2 -> List(0, 3, 6)
        )
      )
    }

    "match powerphrases n times in command with overlaps" in {
      // Overlapping occurrences are all reported ("aa" at 0, 1 and 2 in "aaaa").
      val command = Command.string("aaaa")
      PowerPhrase.getMatchings(command, List(powerphrase3)) must be_==(
        Map(
          powerphrase3 -> List(0, 1, 2)
        ))
    }

    "match several powerphrases" in {
      // Multiple phrases over the same command; only phrases that occur appear as keys.
      val command = Command.string("laeaaaa")
      PowerPhrase.getMatchings(command,
        List(powerphrase1, powerphrase3, powerphrase2, powerphrase4, powerphrase5)) must be_==(
        Map(
          powerphrase2 -> List(0),
          powerphrase5 -> List(0),
          powerphrase4 -> List(1),
          powerphrase3 -> List(3, 4, 5)
        ))
    }

    "flatten in case of overlaps" in {
      // flatten resolves overlapping matches into one non-overlapping assignment
      // of offsets to phrases.
      // 1. 1 power phrase with inner overlaps
      val command = Command.string("aaaa")
      val matchings = PowerPhrase.getMatchings(command, List(powerphrase3))
      PowerPhrase.flatten(command.length, matchings) must beEqualTo {
        Map(0 -> powerphrase3, 2 -> powerphrase3)
      }
      // 2. Several power phrases with inner overlaps
      val command2 = Command.string("laeaaaa")
      val matchings2 = PowerPhrase.getMatchings(command2,
        List(powerphrase1, powerphrase3, powerphrase2, powerphrase4, powerphrase5))
      PowerPhrase.flatten(command2.length, matchings2) must beEqualTo {
        Map(
          0 -> powerphrase2,
          3 -> powerphrase3,
          5 -> powerphrase3
        )
      }
    }

    "convert to a string containing the most power of phrases" in {
      // getBestString rewrites equivalent command characters so that as many
      // power phrases as possible appear in the output string.
      PowerPhrase.getBestString(Command.string("l4ea4aa"),
        List(powerphrase1, powerphrase3, powerphrase2, powerphrase4, powerphrase5)) must beEqualTo("laeaaaa")
      PowerPhrase.getBestString(Command.string("aeaaaeaa"),
        List(powerphrase1, powerphrase3, powerphrase2, powerphrase4, powerphrase5)) must beEqualTo("aeaaaeaa")
      PowerPhrase.getBestString(Command.string("axexaxaxaxexaxa"), // preserve other chars if no power phrase found
        List(powerphrase1, powerphrase3, powerphrase2, powerphrase4, powerphrase5)) must beEqualTo("axexaxaxaxexaxa")
    }
  }
}
| ShiftForward/icfpc2015 | src/test/scala/eu/shiftforward/icfpc2015/model/PowerPhraseSpec.scala | Scala | mit | 3,034 |
package megaforms.mapping
import megaforms.FormError
import megaforms.validation.Constraint
import scala.collection.immutable.Seq
/**
* Allows for optional mappings, turning a ```Mapping[T]``` into a ```Mapping[Option[T]]```
* where the value might be missing. Will not introduce
* a new level in the path, just handle that the value might be missing when mapping and unmapping
*/
private[megaforms] case class OptionalMapping[T](wrapped: Mapping[T], constraints: Seq[Constraint[Option[T]]] = Seq.empty) extends Mapping[Option[T]] {

  /** Transparent path-wise: the optional wrapper reuses the wrapped mapping's path. */
  override def path = wrapped.path

  /** Some(value) maps through the wrapped mapping; None produces no data at all. */
  override def map(t: Option[T]): Map[String, Any] = t match {
    case Some(value) => wrapped.map(value)
    case None => Map.empty
  }

  /** Unmaps to Right(None) when the key is absent or holds an empty string;
    * otherwise delegates to the wrapped mapping and wraps the result in Some.
    */
  override def unmap(data: Map[String, Any]): Either[Seq[FormError], Option[T]] =
    data.get(path) match {
      case None => Right(None)
      case Some(v: String) if v.isEmpty => Right(None)
      case _ => wrapped.unmap(data).right.map(Option(_))
    }

  /** No validation applies to None. For Some, the option-level constraints run
    * first (errors reported at this path), followed by the wrapped mapping's
    * own validation of the inner value.
    */
  override def validate(t: Option[T]): Seq[FormError] = t match {
    case None => Seq.empty
    case Some(value) =>
      val ownErrors = constraints.flatMap(c => c.validate(t).map(FormError(path, _)))
      ownErrors ++ wrapped.validate(value)
  }

  override def verifying(additionalConstraints: Constraint[Option[T]]*): Mapping[Option[T]] =
    copy(constraints = constraints ++ additionalConstraints)

  override def withPrefix(prefix: String): Mapping[Option[T]] = copy(wrapped = wrapped.withPrefix(prefix))
}
| klasekwall/megaforms | megaforms/shared/src/main/scala/megaforms/mapping/OptionalMapping.scala | Scala | apache-2.0 | 1,635 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.orc
import org.apache.hadoop.io._
import org.apache.orc.mapred.{OrcList, OrcMap, OrcStruct, OrcTimestamp}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{SpecificInternalRow, UnsafeArrayData}
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
 * A deserializer to deserialize ORC structs to Spark rows.
 *
 * `requestedColIds(i)` gives, for field `i` of `requiredSchema`, the index of
 * the corresponding column in the ORC file, or -1 when the column is missing
 * from the file (the output column then stays null).
 */
class OrcDeserializer(
    requiredSchema: StructType,
    requestedColIds: Array[Int]) {

  // Single mutable row reused across calls; each deserialize() overwrites it in
  // place, so callers that retain rows must copy them.
  private val resultRow = new SpecificInternalRow(requiredSchema.map(_.dataType))

  // `fieldWriters(index)` is
  // - null if the respective source column is missing, since the output value
  // is always null in this case
  // - a function that updates target column `index` otherwise.
  private val fieldWriters: Array[WritableComparable[_] => Unit] = {
    requiredSchema.zipWithIndex
      .map { case (f, index) =>
        if (requestedColIds(index) == -1) {
          null
        } else {
          val writer = newWriter(f.dataType, new RowUpdater(resultRow))
          (value: WritableComparable[_]) => writer(index, value)
        }
      }.toArray
  }

  // Converts one ORC struct into the shared `resultRow` (returned, not copied).
  // Uses a while loop rather than a combinator for per-row hot-path performance.
  def deserialize(orcStruct: OrcStruct): InternalRow = {
    var targetColumnIndex = 0
    while (targetColumnIndex < fieldWriters.length) {
      if (fieldWriters(targetColumnIndex) != null) {
        val value = orcStruct.getFieldValue(requestedColIds(targetColumnIndex))
        if (value == null) {
          resultRow.setNullAt(targetColumnIndex)
        } else {
          fieldWriters(targetColumnIndex)(value)
        }
      }
      targetColumnIndex += 1
    }
    resultRow
  }

  // Same as deserialize, but reads field values from a pre-extracted sequence
  // (indexed by the ORC column ids) instead of an OrcStruct.
  def deserializeFromValues(orcValues: Seq[WritableComparable[_]]): InternalRow = {
    var targetColumnIndex = 0
    while (targetColumnIndex < fieldWriters.length) {
      if (fieldWriters(targetColumnIndex) != null) {
        val value = orcValues(requestedColIds(targetColumnIndex))
        if (value == null) {
          resultRow.setNullAt(targetColumnIndex)
        } else {
          fieldWriters(targetColumnIndex)(value)
        }
      }
      targetColumnIndex += 1
    }
    resultRow
  }

  /**
   * Creates a writer to write ORC values to Catalyst data structure at the given ordinal.
   * Recursively builds writers for nested struct/array/map element types.
   */
  private def newWriter(
      dataType: DataType, updater: CatalystDataUpdater): (Int, WritableComparable[_]) => Unit =
    dataType match {
      case NullType => (ordinal, _) =>
        updater.setNullAt(ordinal)

      case BooleanType => (ordinal, value) =>
        updater.setBoolean(ordinal, value.asInstanceOf[BooleanWritable].get)

      case ByteType => (ordinal, value) =>
        updater.setByte(ordinal, value.asInstanceOf[ByteWritable].get)

      case ShortType => (ordinal, value) =>
        updater.setShort(ordinal, value.asInstanceOf[ShortWritable].get)

      case IntegerType | _: YearMonthIntervalType => (ordinal, value) =>
        updater.setInt(ordinal, value.asInstanceOf[IntWritable].get)

      case LongType | _: DayTimeIntervalType => (ordinal, value) =>
        updater.setLong(ordinal, value.asInstanceOf[LongWritable].get)

      case FloatType => (ordinal, value) =>
        updater.setFloat(ordinal, value.asInstanceOf[FloatWritable].get)

      case DoubleType => (ordinal, value) =>
        updater.setDouble(ordinal, value.asInstanceOf[DoubleWritable].get)

      case StringType => (ordinal, value) =>
        // copyBytes: the Text buffer is reused by the reader, so take a copy.
        updater.set(ordinal, UTF8String.fromBytes(value.asInstanceOf[Text].copyBytes))

      case BinaryType => (ordinal, value) =>
        // BytesWritable's backing array may be larger than getLength; copy the
        // valid prefix only.
        val binary = value.asInstanceOf[BytesWritable]
        val bytes = new Array[Byte](binary.getLength)
        System.arraycopy(binary.getBytes, 0, bytes, 0, binary.getLength)
        updater.set(ordinal, bytes)

      case DateType => (ordinal, value) =>
        updater.setInt(ordinal, OrcShimUtils.getGregorianDays(value))

      case TimestampType => (ordinal, value) =>
        updater.setLong(ordinal, DateTimeUtils.fromJavaTimestamp(value.asInstanceOf[OrcTimestamp]))

      case TimestampNTZType => (ordinal, value) =>
        updater.setLong(ordinal, OrcUtils.fromOrcNTZ(value.asInstanceOf[OrcTimestamp]))

      case DecimalType.Fixed(precision, scale) => (ordinal, value) =>
        val v = OrcShimUtils.getDecimal(value)
        // NOTE(review): changePrecision returns a Boolean success flag that is
        // ignored here — presumably overflow cannot occur for file-sourced
        // decimals matching the schema; confirm.
        v.changePrecision(precision, scale)
        updater.set(ordinal, v)

      case st: StructType => (ordinal, value) =>
        // Nested struct: build a fresh row plus one converter per field.
        val result = new SpecificInternalRow(st)
        val fieldUpdater = new RowUpdater(result)
        val fieldConverters = st.map(_.dataType).map { dt =>
          newWriter(dt, fieldUpdater)
        }.toArray
        val orcStruct = value.asInstanceOf[OrcStruct]

        var i = 0
        while (i < st.length) {
          val value = orcStruct.getFieldValue(i)
          if (value == null) {
            result.setNullAt(i)
          } else {
            fieldConverters(i)(i, value)
          }
          i += 1
        }

        updater.set(ordinal, result)

      case ArrayType(elementType, _) => (ordinal, value) =>
        val orcArray = value.asInstanceOf[OrcList[WritableComparable[_]]]
        val length = orcArray.size()
        val result = createArrayData(elementType, length)
        val elementUpdater = new ArrayDataUpdater(result)
        val elementConverter = newWriter(elementType, elementUpdater)

        var i = 0
        while (i < length) {
          val value = orcArray.get(i)
          if (value == null) {
            result.setNullAt(i)
          } else {
            elementConverter(i, value)
          }
          i += 1
        }

        updater.set(ordinal, result)

      case MapType(keyType, valueType, _) => (ordinal, value) =>
        // Maps are materialised as two parallel arrays (keys, values).
        val orcMap = value.asInstanceOf[OrcMap[WritableComparable[_], WritableComparable[_]]]
        val length = orcMap.size()
        val keyArray = createArrayData(keyType, length)
        val keyUpdater = new ArrayDataUpdater(keyArray)
        val keyConverter = newWriter(keyType, keyUpdater)
        val valueArray = createArrayData(valueType, length)
        val valueUpdater = new ArrayDataUpdater(valueArray)
        val valueConverter = newWriter(valueType, valueUpdater)

        var i = 0
        val it = orcMap.entrySet().iterator()
        while (it.hasNext) {
          val entry = it.next()
          keyConverter(i, entry.getKey)
          val value = entry.getValue
          if (value == null) {
            valueArray.setNullAt(i)
          } else {
            valueConverter(i, value)
          }
          i += 1
        }

        // The ORC map will never have null or duplicated map keys, it's safe to create a
        // ArrayBasedMapData directly here.
        updater.set(ordinal, new ArrayBasedMapData(keyArray, valueArray))

      case udt: UserDefinedType[_] => newWriter(udt.sqlType, updater)

      case _ =>
        throw QueryExecutionErrors.dataTypeUnsupportedYetError(dataType)
    }

  // Pre-sized array allocation; primitive element types get unboxed
  // UnsafeArrayData backing, everything else a generic object array.
  private def createArrayData(elementType: DataType, length: Int): ArrayData = elementType match {
    case BooleanType => UnsafeArrayData.fromPrimitiveArray(new Array[Boolean](length))
    case ByteType => UnsafeArrayData.fromPrimitiveArray(new Array[Byte](length))
    case ShortType => UnsafeArrayData.fromPrimitiveArray(new Array[Short](length))
    case IntegerType | _: YearMonthIntervalType =>
      UnsafeArrayData.fromPrimitiveArray(new Array[Int](length))
    case LongType | _: DayTimeIntervalType =>
      UnsafeArrayData.fromPrimitiveArray(new Array[Long](length))
    case FloatType => UnsafeArrayData.fromPrimitiveArray(new Array[Float](length))
    case DoubleType => UnsafeArrayData.fromPrimitiveArray(new Array[Double](length))
    case _ => new GenericArrayData(new Array[Any](length))
  }

  /**
   * A base interface for updating values inside catalyst data structure like `InternalRow` and
   * `ArrayData`.
   */
  sealed trait CatalystDataUpdater {
    def set(ordinal: Int, value: Any): Unit

    def setNullAt(ordinal: Int): Unit = set(ordinal, null)
    def setBoolean(ordinal: Int, value: Boolean): Unit = set(ordinal, value)
    def setByte(ordinal: Int, value: Byte): Unit = set(ordinal, value)
    def setShort(ordinal: Int, value: Short): Unit = set(ordinal, value)
    def setInt(ordinal: Int, value: Int): Unit = set(ordinal, value)
    def setLong(ordinal: Int, value: Long): Unit = set(ordinal, value)
    def setDouble(ordinal: Int, value: Double): Unit = set(ordinal, value)
    def setFloat(ordinal: Int, value: Float): Unit = set(ordinal, value)
  }

  // Updater backed by an InternalRow; overrides primitives to avoid boxing.
  final class RowUpdater(row: InternalRow) extends CatalystDataUpdater {
    override def setNullAt(ordinal: Int): Unit = row.setNullAt(ordinal)
    override def set(ordinal: Int, value: Any): Unit = row.update(ordinal, value)

    override def setBoolean(ordinal: Int, value: Boolean): Unit = row.setBoolean(ordinal, value)
    override def setByte(ordinal: Int, value: Byte): Unit = row.setByte(ordinal, value)
    override def setShort(ordinal: Int, value: Short): Unit = row.setShort(ordinal, value)
    override def setInt(ordinal: Int, value: Int): Unit = row.setInt(ordinal, value)
    override def setLong(ordinal: Int, value: Long): Unit = row.setLong(ordinal, value)
    override def setDouble(ordinal: Int, value: Double): Unit = row.setDouble(ordinal, value)
    override def setFloat(ordinal: Int, value: Float): Unit = row.setFloat(ordinal, value)
  }

  // Updater backed by an ArrayData; overrides primitives to avoid boxing.
  final class ArrayDataUpdater(array: ArrayData) extends CatalystDataUpdater {
    override def setNullAt(ordinal: Int): Unit = array.setNullAt(ordinal)
    override def set(ordinal: Int, value: Any): Unit = array.update(ordinal, value)

    override def setBoolean(ordinal: Int, value: Boolean): Unit = array.setBoolean(ordinal, value)
    override def setByte(ordinal: Int, value: Byte): Unit = array.setByte(ordinal, value)
    override def setShort(ordinal: Int, value: Short): Unit = array.setShort(ordinal, value)
    override def setInt(ordinal: Int, value: Int): Unit = array.setInt(ordinal, value)
    override def setLong(ordinal: Int, value: Long): Unit = array.setLong(ordinal, value)
    override def setDouble(ordinal: Int, value: Double): Unit = array.setDouble(ordinal, value)
    override def setFloat(ordinal: Int, value: Float): Unit = array.setFloat(ordinal, value)
  }
}
| nchammas/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcDeserializer.scala | Scala | apache-2.0 | 11,198 |
package finloader
import org.joda.time.format.ISODateTimeFormat
/**
* @author Paul Lysak
* Date: 15.08.13
* Time: 22:44
*/
object FinloaderUtils {

  /** Parses an amount string like "12.34 UAH" into (minor units, currency).
    *
    * Fix: the previous `(parts.head.toDouble * 100).toLong` truncated after a
    * lossy binary multiplication, so "4.36" became 435 (4.36 * 100 ==
    * 435.99999... as a Double). BigDecimal keeps the decimal value exact;
    * fractional sub-cent digits are still truncated as before.
    * Defaults the currency to "UAH" when none is given.
    */
  def parseAmount(amt: String): (Long, String) = {
    val parts = amt.split(" ", 2).toList
    val amount = (BigDecimal(parts.head) * 100).toLong
    val currency = parts.tail.headOption.getOrElse("UAH")
    (amount, currency)
  }

  /** Parses an ISO-8601 date (yyyy-MM-dd) into a Joda LocalDate.
    *
    * NOTE(review): returns null for empty input — kept for compatibility with
    * existing callers; consider migrating the signature to Option[LocalDate].
    */
  def parseDate(dateStr: String) =
    if(dateStr.isEmpty)
      null
    else
      ISODateTimeFormat.date().parseLocalDate(dateStr)
}
| paul-lysak/finloader | src/main/scala/finloader/FinloaderUtils.scala | Scala | apache-2.0 | 536 |
/*
* Copyright (c) 2012 Michael Rose
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this work except in compliance with the License.
* You may obtain a copy of the License in the LICENSE file, or at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.xorlev.simon.handlers
import com.xorlev.simon.util.SimonException
/**
* 2012-12-13
* @author Michael Rose <elementation@gmail.com>
*/
case class HaltedHandlerException(code: Int, haltMessage: String = "") extends SimonException | Xorlev/Simon | simon-sinatra-handler/src/main/scala/com/xorlev/simon/handlers/HaltedHandlerException.scala | Scala | apache-2.0 | 871 |
package doobie.util
import shapeless._, shapeless.test._
import doobie.imports._
import doobie.contrib.postgresql.syntax._
import org.specs2.mutable.Specification
import scalaz.{ Free, Coyoneda, Monad }
import scalaz.syntax.monad._
object unapplyspec extends Specification {

  "Unapply" should {
    "allow use of sqlstate syntax" in {
      // Compilation is the real assertion here: these expressions only
      // type-check if scalaz's Unapply machinery resolves the Monad instance
      // for ConnectionIO, enabling map/void and the sqlstate combinators.
      1.point[ConnectionIO].map(_ + 1).void
      1.point[ConnectionIO].map(_ + 1).onPrivilegeNotRevoked(2.point[ConnectionIO])
      true
    }
  }
}
| jamescway/doobie | contrib/postgresql/src/test/scala/doobie/contrib/postgresql/unapply.scala | Scala | mit | 495 |
package lp.template.wizard
object Ascii {

  /** Converts CamelCase to snake_case, e.g. "MyHTTPServer" -> "my_http_server".
    *
    * Fix: the second regex previously used "\\\\d", which puts a literal
    * backslash (and a redundant 'd') inside the character class instead of the
    * digit class \d — so a digit followed by an uppercase letter never
    * received an underscore ("Abc2Def" -> "abc2def" instead of "abc2_def").
    */
  def classToSnakeCase(name: String) = {
    name
      .replaceAll("([A-Z]+)([A-Z][a-z])", "$1_$2")
      .replaceAll("([a-z\\d])([A-Z])", "$1_$2")
      .toLowerCase
  }

  /** Same as classToSnakeCase but with '-' separators ("FooBar" -> "foo-bar"). */
  def classToMinusSnakeCase(name: String) = classToSnakeCase(name).replaceAll("_", "-")

  /** Converts CamelCase to SNAKE_UPPER_CASE; same \d escaping fix as classToSnakeCase. */
  def classToSnakeUpperCase(name: String) = {
    name
      .replaceAll("([A-Z]+)([A-Z][a-z])", "$1_$2")
      .replaceAll("([a-z\\d])([A-Z])", "$1_$2")
      .toUpperCase()
  }

  /** Lower-cases the first character ("FooBar" -> "fooBar"); empty input is returned unchanged. */
  def classToMethodCase(name: String): String = {
    name match {
      case "" => name
      case _ => name.substring(0, 1).toLowerCase + name.substring(1)
    }
  }
}
| leigh-perry/template-wizard | src/main/scala/lp/template/wizard/Ascii.scala | Scala | apache-2.0 | 655 |
package org.broadinstitute.dsde.workbench.sam.model
import monocle.macros.Lenses
import org.broadinstitute.dsde.workbench.model._
import org.broadinstitute.dsde.workbench.sam.service.ManagedGroupService.MangedGroupRoleName
import spray.json.{DefaultJsonProtocol, JsValue, RootJsonFormat}
/**
* Created by dvoet on 5/26/17.
*/
/** spray-json formats for Sam's API model types. Most are mechanical
  * jsonFormatN derivations; value-object wrappers use ValueObjectFormat.
  */
object SamJsonSupport {
  import DefaultJsonProtocol._
  import org.broadinstitute.dsde.workbench.model.WorkbenchIdentityJsonSupport._

  // Resource-type configuration.
  implicit val ResourceActionPatternFormat = jsonFormat3(ResourceActionPattern.apply)
  implicit val ResourceActionFormat = ValueObjectFormat(ResourceAction.apply)
  implicit val ResourceRoleNameFormat = ValueObjectFormat(ResourceRoleName.apply)
  implicit val ResourceTypeNameFormat = ValueObjectFormat(ResourceTypeName.apply)
  implicit val ResourceRoleFormat = jsonFormat4(ResourceRole.apply)
  implicit val ResourceTypeFormat = jsonFormat5(ResourceType.apply)

  // User status / identity payloads.
  implicit val UserStatusDetailsFormat = jsonFormat2(UserStatusDetails.apply)
  implicit val UserStatusFormat = jsonFormat2(UserStatus.apply)
  implicit val UserStatusInfoFormat = jsonFormat4(UserStatusInfo.apply)
  implicit val UserIdInfoFormat = jsonFormat3(UserIdInfo.apply)
  implicit val TermsOfServiceAcceptanceFormat = ValueObjectFormat(TermsOfServiceAcceptance.apply)
  implicit val UserStatusDiagnosticsFormat = jsonFormat5(UserStatusDiagnostics.apply)

  // Policies and resources.
  implicit val AccessPolicyNameFormat = ValueObjectFormat(AccessPolicyName.apply)
  implicit val ResourceIdFormat = ValueObjectFormat(ResourceId.apply)
  implicit val FullyQualifiedResourceIdFormat = jsonFormat2(FullyQualifiedResourceId.apply)
  implicit val AccessPolicyDescendantPermissionsFormat = jsonFormat3(AccessPolicyDescendantPermissions.apply)
  implicit val AccessPolicyMembershipFormat = jsonFormat4(AccessPolicyMembership.apply)
  implicit val AccessPolicyResponseEntryFormat = jsonFormat3(AccessPolicyResponseEntry.apply)
  implicit val UserPolicyResponseFormat = jsonFormat5(UserPolicyResponse.apply)
  implicit val RolesAndActionsFormat = jsonFormat2(RolesAndActions.apply)
  implicit val UserResourcesResponseFormat = jsonFormat6(UserResourcesResponse.apply)
  implicit val PolicyIdentityFormat = jsonFormat2(FullyQualifiedPolicyId.apply)

  // Managed groups.
  implicit val ManagedGroupMembershipEntryFormat = jsonFormat3(ManagedGroupMembershipEntry.apply)
  implicit val ManagedGroupAccessInstructionsFormat = ValueObjectFormat(ManagedGroupAccessInstructions.apply)
  implicit val GroupSyncResponseFormat = jsonFormat2(GroupSyncResponse.apply)

  // Resource creation requests/responses.
  implicit val CreateResourceRequestFormat = jsonFormat5(CreateResourceRequest.apply)
  implicit val CreateResourcePolicyResponseFormat = jsonFormat2(CreateResourcePolicyResponse.apply)
  implicit val CreateResourceResponseFormat = jsonFormat4(CreateResourceResponse.apply)
}
/** Exposes the standard BooleanJsonFormat as a RootJsonFormat so a bare
  * Boolean can be (de)serialised where a root-level format is required.
  */
object RootPrimitiveJsonSupport {
  implicit val rootBooleanJsonFormat: RootJsonFormat[Boolean] = new RootJsonFormat[Boolean] {
    import DefaultJsonProtocol.BooleanJsonFormat
    // Pure delegation: behavior is identical to the non-root format.
    override def write(obj: Boolean): JsValue = BooleanJsonFormat.write(obj)
    override def read(json: JsValue): Boolean = BooleanJsonFormat.read(json)
  }
}
/** Well-known resource action names used throughout Sam.
  *
  * The string values are part of the persisted/remote contract — never change
  * them. The parameterized builders below construct policy- and action-scoped
  * action names of the form `prefix::suffix`.
  */
object SamResourceActions {
  val readPolicies = ResourceAction("read_policies")
  val alterPolicies = ResourceAction("alter_policies")
  val delete = ResourceAction("delete")
  val notifyAdmins = ResourceAction("notify_admins")
  val setAccessInstructions = ResourceAction("set_access_instructions")
  val setPublic = ResourceAction("set_public")
  val readAuthDomain = ResourceAction("read_auth_domain")
  val testAnyActionAccess = ResourceAction("test_any_action_access")
  val getParent = ResourceAction("get_parent")
  val setParent = ResourceAction("set_parent")
  val addChild = ResourceAction("add_child")
  val removeChild = ResourceAction("remove_child")
  val listChildren = ResourceAction("list_children")
  // NOTE(review): hyphenated, unlike the snake_case names above; the value is
  // relied upon by callers/storage, so it must stay as-is.
  val createPet = ResourceAction("create-pet")

  def sharePolicy(policy: AccessPolicyName) = ResourceAction("share_policy::" + policy.value)
  def readPolicy(policy: AccessPolicyName) = ResourceAction("read_policy::" + policy.value)
  def setPublicPolicy(policy: AccessPolicyName) = ResourceAction("set_public::" + policy.value)
  def testActionAccess(action: ResourceAction) = ResourceAction("test_action_access::" + action.value)
  def deletePolicy(policy: AccessPolicyName) = ResourceAction("delete_policy::" + policy.value)
}
/** Well-known resource type names. The string values are part of the
  * persisted/remote contract and must not change.
  */
object SamResourceTypes {
  val resourceTypeAdminName = ResourceTypeName("resource_type_admin")
  val workspaceName = ResourceTypeName("workspace")
  val googleProjectName = ResourceTypeName("google-project")
}
@Lenses final case class UserStatusDetails(userSubjectId: WorkbenchUserId, userEmail: WorkbenchEmail) //for backwards compatibility to old API
@Lenses final case class UserIdInfo(userSubjectId: WorkbenchUserId, userEmail: WorkbenchEmail, googleSubjectId: Option[GoogleSubjectId])
// `enabled` maps a component name to whether the user is enabled in it —
// assumed; confirm the key set against the service that populates it.
@Lenses final case class UserStatus(userInfo: UserStatusDetails, enabled: Map[String, Boolean])
// Flattened, stringly-typed variant of user status (API response shape).
@Lenses final case class UserStatusInfo(userSubjectId: String,
                                        userEmail: String,
                                        enabled: Boolean,
                                        adminEnabled: Boolean)
// Per-subsystem diagnostic flags for a user; tosAccepted is None when the
// terms-of-service state is unknown/not applicable.
@Lenses final case class UserStatusDiagnostics(enabled: Boolean,
                                               inAllUsersGroup: Boolean,
                                               inGoogleProxyGroup: Boolean,
                                               tosAccepted: Option[Boolean],
                                               adminEnabled: Boolean)
// Single-value wrapper recording a terms-of-service acceptance payload.
@Lenses final case class TermsOfServiceAcceptance(value: String) extends ValueObject
// A pattern (stored as a regex string) describing a family of actions a
// resource type supports; `authDomainConstrainable` marks actions that may be
// restricted by an auth domain.
@Lenses final case class ResourceActionPattern(value: String, description: String, authDomainConstrainable: Boolean) {
  // Full-string regex match of `other` against this pattern.
  // NOTE(review): the regex is recompiled on every call (`value.r.pattern`);
  // caching it in a lazy val might upset the spray/akka serializers (see the
  // comment on ResourceType), so it is deliberately left as a def.
  def matches(other: ResourceAction) = value.r.pattern.matcher(other.value).matches()
}
@Lenses final case class ResourceAction(value: String) extends ValueObject
// NOTE(review): not `final`, unlike the sibling value objects — likely an
// oversight, but adding `final` could break an external subclass; verify.
@Lenses case class ResourceRoleName(value: String) extends ValueObject
// A named role: the actions it grants directly, roles it subsumes, and roles
// it grants on descendant resources keyed by the descendant's type.
@Lenses final case class ResourceRole(roleName: ResourceRoleName,
                                      actions: Set[ResourceAction],
                                      includedRoles: Set[ResourceRoleName] = Set.empty,
                                      descendantRoles: Map[ResourceTypeName, Set[ResourceRoleName]] = Map.empty)
@Lenses final case class ResourceTypeName(value: String) extends ValueObject
// A resource id qualified by its type — the globally unique resource key.
@Lenses final case class FullyQualifiedResourceId(resourceTypeName: ResourceTypeName, resourceId: ResourceId)
@Lenses final case class Resource(resourceTypeName: ResourceTypeName, resourceId: ResourceId, authDomain: Set[WorkbenchGroupName], accessPolicies: Set[AccessPolicy] = Set.empty, parent: Option[FullyQualifiedResourceId] = None) {
  // Plain val (not lazy): see the serializer caveat on ResourceType.
  val fullyQualifiedId = FullyQualifiedResourceId(resourceTypeName, resourceId)
}
@Lenses final case class CreateResourceResponse(resourceTypeName: ResourceTypeName, resourceId: ResourceId, authDomain: Set[WorkbenchGroupName], accessPolicies: Set[CreateResourcePolicyResponse])
@Lenses final case class CreateResourcePolicyResponse(id: FullyQualifiedPolicyId, email: WorkbenchEmail)
// Definition of a resource type: the action patterns it supports, the roles
// it defines, which role owns new resources, and whether ids may be reused.
@Lenses final case class ResourceType(
    name: ResourceTypeName,
    actionPatterns: Set[ResourceActionPattern],
    roles: Set[ResourceRole],
    ownerRoleName: ResourceRoleName,
    reuseIds: Boolean = false) {
  // Ideally we'd just store this boolean in a lazy val, but this will upset the spray/akka json serializers
  // I can't imagine a scenario where we have enough action patterns that would make this def discernibly slow though
  def isAuthDomainConstrainable: Boolean = actionPatterns.exists(_.authDomainConstrainable)
}
@Lenses final case class ResourceId(value: String) extends ValueObject
// Pairing of a resource with one of its policy names (no type qualification).
@Lenses final case class ResourceIdAndPolicyName(resourceId: ResourceId, accessPolicyName: AccessPolicyName)
/**
 * Response from AccessPolicyDAO.listUserResourcesWithRolesAndActions.
 * @param resourceId id of resource accessible to user
 * @param direct RolesAndActions assigned to the resource via policy directly on the resource
 * @param inherited RolesAndActions assigned to the resource via policy on resource's ancestor
 * @param public RolesAndActions assigned to the resource via public policy, could be direct or inherited
 */
@Lenses final case class ResourceIdWithRolesAndActions(resourceId: ResourceId, direct: RolesAndActions, inherited: RolesAndActions, public: RolesAndActions) {
  lazy val allRolesAndActions = direct ++ inherited // not necessary to include public as they should be included in both direct and inherited
}
/** A bundle of role names and action names granted on some resource. */
@Lenses final case class RolesAndActions(roles: Set[ResourceRoleName], actions: Set[ResourceAction]) {
  /** Pointwise union: combines both role sets and both action sets. */
  def ++ (other: RolesAndActions): RolesAndActions =
    RolesAndActions(roles ++ other.roles, actions ++ other.actions)
}
/** Constructors for [[RolesAndActions]]. */
object RolesAndActions {
  /** Identity element for `++`: no roles, no actions. */
  val empty = RolesAndActions(Set.empty, Set.empty)

  // One-sided constructors, expressed as copies of `empty`.
  def fromRoles(roles: Set[ResourceRoleName]) = empty.copy(roles = roles)
  def fromActions(actions: Set[ResourceAction]) = empty.copy(actions = actions)

  // Project only the grants out of a policy, discarding membership details.
  def fromPolicy(accessPolicy: AccessPolicy) = RolesAndActions(accessPolicy.roles, accessPolicy.actions)
  def fromPolicyMembership(accessPolicy: AccessPolicyMembership) = RolesAndActions(accessPolicy.roles, accessPolicy.actions)
}
// A policy visible to a user, with the auth-domain groups guarding the
// resource and which of those groups the user is missing.
@Lenses final case class UserPolicyResponse(
    resourceId: ResourceId,
    accessPolicyName: AccessPolicyName,
    authDomainGroups: Set[WorkbenchGroupName],
    missingAuthDomainGroups: Set[WorkbenchGroupName],
    public: Boolean)
// Per-resource summary of a user's grants, split by how they were obtained
// (directly, via an ancestor, or via a public policy).
@Lenses final case class UserResourcesResponse(
    resourceId: ResourceId,
    direct: RolesAndActions,
    inherited: RolesAndActions,
    public: RolesAndActions,
    authDomainGroups: Set[WorkbenchGroupName],
    missingAuthDomainGroups: Set[WorkbenchGroupName])
/** A policy name qualified by its resource; usable as a group identity. */
@Lenses final case class FullyQualifiedPolicyId(resource: FullyQualifiedResourceId, accessPolicyName: AccessPolicyName) extends WorkbenchGroupIdentity {
  /** Legacy dotted form: `policyName.resourceId.resourceTypeName`. */
  override def toString: String =
    List(accessPolicyName.value, resource.resourceId.value, resource.resourceTypeName.value).mkString(".")
}
// NOTE(review): not `final`, unlike most sibling value objects — likely an
// oversight; adding `final` could break an external subclass, so verify first.
@Lenses case class AccessPolicyName(value: String) extends ValueObject
// Request payload for creating a resource with its initial policies.
// `returnResource` defaults to Some(false): callers must opt in to receiving
// the full created resource back in the response.
@Lenses final case class CreateResourceRequest(
    resourceId: ResourceId,
    policies: Map[AccessPolicyName, AccessPolicyMembership],
    authDomain: Set[WorkbenchGroupName],
    returnResource: Option[Boolean] = Some(false),
    parent: Option[FullyQualifiedResourceId] = None)
/*
Note that AccessPolicy IS A group because it was easier and more efficient to work with in ldap. In Postgres, it is
modeled with a "has a" relationship, but it retains the legacy "is a" relationship in code. Refactoring this into a
consistent "has a" relationship is tracked by this ticket: https://broadworkbench.atlassian.net/browse/CA-778
*/
// A policy on a resource: who (`members`) is granted which `roles`/`actions`,
// plus grants on descendant resources, and whether the policy is public.
@Lenses final case class AccessPolicy(
    id: FullyQualifiedPolicyId,
    members: Set[WorkbenchSubject],
    email: WorkbenchEmail,
    roles: Set[ResourceRoleName],
    actions: Set[ResourceAction],
    descendantPermissions: Set[AccessPolicyDescendantPermissions],
    public: Boolean)
  extends WorkbenchGroup
@Lenses final case class AccessPolicyDescendantPermissions(resourceType: ResourceTypeName, actions: Set[ResourceAction], roles: Set[ResourceRoleName])
// API-facing shape of a policy: member emails plus grants. The optional
// `descendantPermissions` defaults to Some(empty) so older clients that omit
// the field are treated as granting nothing on descendants.
@Lenses final case class AccessPolicyMembership(memberEmails: Set[WorkbenchEmail],
                                                actions: Set[ResourceAction],
                                                roles: Set[ResourceRoleName],
                                                descendantPermissions: Option[Set[AccessPolicyDescendantPermissions]] = Option(Set.empty)) {
  // None and Some(empty) are equivalent: both mean "no descendant grants".
  def getDescendantPermissions: Set[AccessPolicyDescendantPermissions] = descendantPermissions.getOrElse(Set.empty)
}
@Lenses final case class AccessPolicyResponseEntry(policyName: AccessPolicyName, policy: AccessPolicyMembership, email: WorkbenchEmail)
// Access Policy with no membership info to improve efficiency for calls that care about only the roles and actions of a policy, not the membership
@Lenses final case class AccessPolicyWithoutMembers(id: FullyQualifiedPolicyId, email: WorkbenchEmail, roles: Set[ResourceRoleName], actions: Set[ResourceAction], public: Boolean)
@Lenses final case class BasicWorkbenchGroup(id: WorkbenchGroupName, members: Set[WorkbenchSubject], email: WorkbenchEmail) extends WorkbenchGroup
// NOTE(review): "MangedGroupRoleName" (sic) is declared elsewhere; the typo
// ("Manged" for "Managed") can only be fixed at its declaration site.
@Lenses final case class ManagedGroupAndRole(groupName: WorkbenchGroupName, role: MangedGroupRoleName)
@Lenses final case class ManagedGroupMembershipEntry(groupName: ResourceId, role: ResourceRoleName, groupEmail: WorkbenchEmail)
@Lenses final case class ManagedGroupAccessInstructions(value: String) extends ValueObject
// `lastSyncDate` is a pre-formatted string — format not visible here; verify
// against the producer before parsing it.
@Lenses final case class GroupSyncResponse(lastSyncDate: String, email: WorkbenchEmail)
/** Composed Monocle lenses for reaching into nested policy identifiers. */
object SamLenses {
  // AccessPolicy -> its resource's fully qualified id.
  val resourceIdentityAccessPolicy = AccessPolicy.id composeLens FullyQualifiedPolicyId.resource
  // AccessPolicy -> its resource's type name.
  val resourceTypeNameInAccessPolicy = resourceIdentityAccessPolicy composeLens FullyQualifiedResourceId.resourceTypeName
}
| broadinstitute/sam | src/main/scala/org/broadinstitute/dsde/workbench/sam/model/SamModel.scala | Scala | bsd-3-clause | 13,316 |
package com.mildlyskilled
import akka.actor.{ActorSystem, Props}
import com.mildlyskilled.actors.Client
import com.mildlyskilled.core.{ConsoleAction, Network}
import com.mildlyskilled.protocol.Message._
import com.typesafe.config.ConfigFactory
import java.util.logging.Logger
import scala.tools.jline.console.ConsoleReader
/** Console entry point for the sIRC chat client.
  *
  * Prompts for identity and server location, starts a local actor system bound
  * to the chosen interface, resolves the remote server actor, logs in, and then
  * translates console commands into protocol messages until "/exit".
  */
object ClientApplication extends App {
  implicit val logger = Logger.getLogger("Client Logger")

  logger.info(Console.GREEN_B + "Starting client" + Console.RESET)

  // Interactive setup: who we are and where the server lives.
  val username = ConsoleAction.promptInput("Username")
  val serverName = ConsoleAction.promptInput("Server Name")
  val ipSelection = ConsoleAction.promptSelection(Network.addressMap, "Select an IP address", Some("127.0.0.1"))

  // Bind the local system to the selected interface; everything else comes
  // from the packaged "client" configuration block.
  val clientConfig = ConfigFactory.parseString(s"""akka.remote.netty.tcp.hostname="$ipSelection" """)
  val defaultConfig = ConfigFactory.load.getConfig("client")
  val completeConfig = clientConfig.withFallback(defaultConfig)

  val system = ActorSystem("sIRC", completeConfig)
  val client = system.actorOf(Props[Client], username)

  // The server's port comes from the bundled "server" config; its address and
  // actor name were prompted above.
  val serverconfig = ConfigFactory.load.getConfig("server")
  val serverAddress = ConsoleAction.promptInput(s"Server IP Address or hostname [$ipSelection]", s"$ipSelection")
  val serverPort = serverconfig.getString("akka.remote.netty.tcp.port")
  val serverPath = s"akka.tcp://sIRC@$serverAddress:$serverPort/user/$serverName"

  val server = system.actorSelection(serverPath) // <-- this is where we get the server reference

  // Fix: log in as the prompted user instead of the literal string "username".
  // TODO(review): the password is still hard-coded — prompt for it once the
  // protocol actually authenticates.
  server.tell(Login(username, "password"), client)

  val cReader = new ConsoleReader

  // Read commands until "/exit". The "/join <channel>" extractor must appear
  // BEFORE the catch-all `msg` case; in the original it came after, which made
  // the join case unreachable (joins were silently sent as plain Info text).
  Iterator.continually(cReader.readLine("> ")).takeWhile(_ != "/exit").foreach {
    case "/start" =>
      server ! Start
    case "/users" =>
      server.tell(RegisteredUsers, client)
    case "/channels" =>
      server.tell(ListChannels, client)
    case "/leave" =>
      server.tell(Leave, client)
    case userCommandMessageRegex("join", c) =>
      server.tell(JoinChannel(ConsoleAction.clean(c)), client)
    case msg =>
      server ! Info(msg)
  }

  system.terminate()
}
| mildlyskilled/sIRC | src/main/scala/com/mildlyskilled/ClientApplication.scala | Scala | mit | 2,056 |
package controllers.c1
import play.api.Logger
import play.api.mvc._
import play.api.libs.json._
import scala.concurrent.Future
/**
 * Demo controller for Play 2.4 content negotiation (`render` + `Accepts`).
 *
 * Created by trydofor on 7/2/15.
 * @see https://playframework.com/documentation/2.4.x/ScalaContentNegotiation
 * @see https://github.com/playframework/playframework/blob/2.4.x/documentation/manual/working/scalaGuide/main/http/code/ScalaContentNegotiation.scala
 */
class S7ScalaContentNegotiation extends Controller {

  // Renders the demo page and stores a sample username in the session cookie.
  def a0 = Action {
    Ok(views.html.c1.s7()).withSession("username" -> "trydofor")
  }

  // Stand-in repository used by the negotiation examples below.
  object Item {
    def findAll = List(1, 2, 3)
  }

  // Chooses the response representation from the request's Accept header.
  // NOTE(review): `<b>items</b>` is an XML literal containing the literal text
  // "items" — it does NOT interpolate the `items` value (that would require
  // `<b>{items}</b>`). Confirm whether the literal text is intended.
  val a1 = Action { implicit request =>
    val items = Item.findAll
    render {
      case Accepts.Html() => Ok(<b>items</b>).as(HTML)
      case Accepts.Json() => Ok(Json.toJson(items))
    }
  }

  // Demonstrates a custom Accepting extractor for a non-standard MIME type.
  val a2 = Action { implicit request =>
    // Local `???` shadows Predef.??? so the doc snippet below compiles and
    // returns a real response instead of throwing NotImplementedError.
    def ??? = Ok("ok")
    //#extract_custom_accept_type
    val AcceptsMp3 = Accepting("audio/mp3")
    render {
      case AcceptsMp3() => ???
    }
  }
}
| moilioncircle/playframework-2.4.x-scala | app/controllers/c1/S7ScalaContentNegotiation.scala | Scala | apache-2.0 | 997 |
package com.tresata.spark.scalding.demo
/**
* Created by Ivan Nikolov <nikolovivann@gmail.com> on 30/01/15.
*/
case class FruitLine(name: String, color: String, quantity: Int)
| tresata/spark-scalding | demo/src/main/scala/com/tresata/spark/scalding/demo/FruitLine.scala | Scala | apache-2.0 | 179 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.compile
import slamdata.Predef._
import quasar.common.{CIName, JoinType, SortDir}
import quasar.common.data.Data
import quasar.frontend.logicalplan.{JoinCondition, JoinDir, LogicalPlan => LP}
import quasar.sql._
import quasar.std._, StdLib._, agg._, array._, date._, identity._, math._
import pathy.Path._
import matryoshka.data.Fix
import matryoshka.implicits._
import scalaz.{Divide => _, Select => _, _}, Scalaz._
class CompilerSpec extends quasar.Qspec with CompilerHelpers {
// NB: imports are here to shadow duplicated names in [[quasar.sql]]. We
// need to do a better job of handling this.
import quasar.std.StdLib._, relations._, StdLib.set._, string._, structural._
import SemanticError._
"compiler" should {
"compile simple constant example 1" in {
testLogicalPlanCompile(
sqlE"select 1",
lpf.constant(Data.Int(1)))
}
"compile simple constant example 1 with field name" in {
testLogicalPlanCompile(
sqlE"select 1 as one",
makeObj("one" -> lpf.constant(Data.Int(1))))
}
"compile simple boolean literal (true)" in {
testLogicalPlanCompile(
sqlE"select true",
lpf.constant(Data.Bool(true)))
}
"compile simple boolean literal (false)" in {
testLogicalPlanCompile(
sqlE"select false",
lpf.constant(Data.Bool(false)))
}
"compile simple constant with multiple named projections" in {
testLogicalPlanCompile(
sqlE"""select 1.0 as a, "abc" as b""",
makeObj(
"a" -> lpf.constant(Data.Dec(1.0)),
"b" -> lpf.constant(Data.Str("abc"))))
}
"compile expression with datetime, date, time, and interval" in {
testFullLogicalPlanCompile(
sqlE"""select timestamp("2014-11-17T22:00:00Z") + interval("PT43M40S"), date("2015-01-19"), time("14:21")""",
lpf.invoke2(MapConcat,
lpf.invoke2(MapConcat,
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("0")),
lpf.invoke2(Add,
lpf.invoke1(OffsetDateTime, lpf.constant(Data.Str("2014-11-17T22:00:00Z"))),
lpf.invoke1(Interval, lpf.constant(Data.Str("PT43M40S"))))),
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("1")),
lpf.invoke1(LocalDate, lpf.constant(Data.Str("2015-01-19"))))),
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("2")),
lpf.invoke1(LocalTime, lpf.constant(Data.Str("14:21"))))))
}
"compile simple constant from collection" in {
testFullLogicalPlanCompile(sqlE"select 1 from zips",
lpf.invoke1(Squash, lpf.constant(Data.Int(1))))
}
"compile query from Q#2755" in {
val query = sqlE"""select substring("abcdefg", 0, trunc(pop / 10000)) from zips"""
testFullLogicalPlanCompile(query,
lpf.invoke1(Squash,
lpf.invoke3(Substring,
lpf.constant(Data.Str("abcdefg")),
lpf.constant(Data.Int(0)),
lpf.invoke1(Trunc,
lpf.invoke2(Divide,
lpf.invoke2(MapProject, read("zips"), lpf.constant(Data.Str("pop"))),
lpf.constant(Data.Int(10000)))))))
}
"compile with typecheck in join condition" in {
val query = sqlE"select * from zips join smallZips on zips.x = smallZips.foo.bar"
testFullLogicalPlanCompile(query,
lpf.let('__tmp0,
lpf.join(
read("zips"),
read("smallZips"),
JoinType.Inner,
JoinCondition('__leftJoin9, '__rightJoin10,
lpf.invoke2(Eq,
lpf.invoke2(MapProject, lpf.joinSideName('__leftJoin9), lpf.constant(Data.Str("x"))),
lpf.invoke2(MapProject,
lpf.invoke2(MapProject, lpf.joinSideName('__rightJoin10), lpf.constant(Data.Str("foo"))),
lpf.constant(Data.Str("bar")))))),
lpf.invoke1(Squash,
lpf.invoke2(MapConcat,
JoinDir.Left.projectFrom(lpf.free('__tmp0)),
JoinDir.Right.projectFrom(lpf.free('__tmp0))))))
}
"compile with typecheck in multiple join condition" in {
val query =
sqlE"""select l.sha as child,
l.author.login as c_auth,
r.sha as parent,
r.author.login as p_auth
from slamengine_commits as l join slamengine_commits_dup as r
on r.sha = l.parents[0].sha and l.author.login = r.author.login"""
testFullLogicalPlanCompile(query,
lpf.let('__tmp0,
lpf.join(
read("slamengine_commits"),
read("slamengine_commits_dup"),
JoinType.Inner,
JoinCondition('__leftJoin9, '__rightJoin10,
lpf.invoke2(And,
lpf.invoke2(Eq,
lpf.invoke2(MapProject, lpf.joinSideName('__rightJoin10), lpf.constant(Data.Str("sha"))),
lpf.invoke2(MapProject,
lpf.invoke2(ArrayProject,
lpf.invoke2(MapProject, lpf.joinSideName('__leftJoin9), lpf.constant(Data.Str("parents"))),
lpf.constant(Data.Int(0))),
lpf.constant(Data.Str("sha")))),
lpf.invoke2(Eq,
lpf.invoke2(MapProject,
lpf.invoke2(MapProject, lpf.joinSideName('__leftJoin9), lpf.constant(Data.Str("author"))),
lpf.constant(Data.Str("login"))),
lpf.invoke2(MapProject,
lpf.invoke2(MapProject, lpf.joinSideName('__rightJoin10), lpf.constant(Data.Str("author"))),
lpf.constant(Data.Str("login"))))))),
lpf.invoke1(Squash,
lpf.invoke2(MapConcat,
lpf.invoke2(MapConcat,
lpf.invoke2(MapConcat,
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("child")),
lpf.invoke2(MapProject,
JoinDir.Left.projectFrom(lpf.free('__tmp0)),
lpf.constant(Data.Str("sha")))),
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("c_auth")),
lpf.invoke2(MapProject,
lpf.invoke2(MapProject,
JoinDir.Left.projectFrom(lpf.free('__tmp0)),
lpf.constant(Data.Str("author"))),
lpf.constant(Data.Str("login"))))),
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("parent")),
lpf.invoke2(MapProject,
JoinDir.Right.projectFrom(lpf.free('__tmp0)),
lpf.constant(Data.Str("sha"))))),
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("p_auth")),
lpf.invoke2(MapProject,
lpf.invoke2(MapProject,
JoinDir.Right.projectFrom(lpf.free('__tmp0)),
lpf.constant(Data.Str("author"))),
lpf.constant(Data.Str("login"))))))))
}
"compile with typecheck in multiple join condition followed by filter" in {
val query =
sqlE"""select l.sha as child,
l.author.login as c_auth,
r.sha as parent,
r.author.login as p_auth
from slamengine_commits as l join slamengine_commits_dup as r
on r.sha = l.parents[0].sha and l.author.login = r.author.login
where r.author.login || "," || l.author.login = "jdegoes,jdegoes" """
testFullLogicalPlanCompile(query,
lpf.let('__tmp0,
lpf.let('__tmp1,
lpf.join(
read("slamengine_commits"),
read("slamengine_commits_dup"),
JoinType.Inner,
JoinCondition('__leftJoin23, '__rightJoin24,
lpf.invoke2(And,
lpf.invoke2(Eq,
lpf.invoke2(MapProject, lpf.joinSideName('__rightJoin24), lpf.constant(Data.Str("sha"))),
lpf.invoke2(MapProject,
lpf.invoke2(ArrayProject,
lpf.invoke2(MapProject, lpf.joinSideName('__leftJoin23), lpf.constant(Data.Str("parents"))),
lpf.constant(Data.Int(0))),
lpf.constant(Data.Str("sha")))),
lpf.invoke2(Eq,
lpf.invoke2(MapProject,
lpf.invoke2(MapProject, lpf.joinSideName('__leftJoin23), lpf.constant(Data.Str("author"))),
lpf.constant(Data.Str("login"))),
lpf.invoke2(MapProject,
lpf.invoke2(MapProject, lpf.joinSideName('__rightJoin24), lpf.constant(Data.Str("author"))),
lpf.constant(Data.Str("login"))))))),
lpf.invoke2(Filter,
lpf.free('__tmp1),
lpf.invoke2(Eq,
lpf.invoke2(Concat,
lpf.invoke2(Concat,
lpf.invoke2(MapProject,
lpf.invoke2(MapProject,
JoinDir.Right.projectFrom(lpf.free('__tmp1)),
lpf.constant(Data.Str("author"))),
lpf.constant(Data.Str("login"))),
lpf.constant(Data.Str(","))),
lpf.invoke2(MapProject,
lpf.invoke2(MapProject,
JoinDir.Left.projectFrom(lpf.free('__tmp1)),
lpf.constant(Data.Str("author"))),
lpf.constant(Data.Str("login")))),
lpf.constant(Data.Str("jdegoes,jdegoes"))))),
lpf.invoke1(Squash,
lpf.invoke2(MapConcat,
lpf.invoke2(MapConcat,
lpf.invoke2(MapConcat,
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("child")),
lpf.invoke2(MapProject,
JoinDir.Left.projectFrom(lpf.free('__tmp0)),
lpf.constant(Data.Str("sha")))),
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("c_auth")),
lpf.invoke2(MapProject,
lpf.invoke2(MapProject,
JoinDir.Left.projectFrom(lpf.free('__tmp0)),
lpf.constant(Data.Str("author"))),
lpf.constant(Data.Str("login"))))),
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("parent")),
lpf.invoke2(MapProject,
JoinDir.Right.projectFrom(lpf.free('__tmp0)),
lpf.constant(Data.Str("sha"))))),
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("p_auth")),
lpf.invoke2(MapProject,
lpf.invoke2(MapProject,
JoinDir.Right.projectFrom(lpf.free('__tmp0)),
lpf.constant(Data.Str("author"))),
lpf.constant(Data.Str("login"))))))))
}
"compile select substring" in {
testLogicalPlanCompile(
sqlE"select substring(bar, 2, 3) from foo",
lpf.invoke1(Squash,
lpf.invoke3(Substring,
lpf.invoke2(MapProject, read("foo"), lpf.constant(Data.Str("bar"))),
lpf.constant(Data.Int(2)),
lpf.constant(Data.Int(3)))))
}
"compile select length" in {
testLogicalPlanCompile(
sqlE"select length(bar) from foo",
lpf.invoke1(Squash,
lpf.invoke1(Length, lpf.invoke2(MapProject, read("foo"), lpf.constant(Data.Str("bar"))))))
}
"compile simple select *" in {
testLogicalPlanCompile(sqlE"select * from foo", lpf.invoke1(Squash, read("foo")))
}
"compile qualified select *" in {
testLogicalPlanCompile(sqlE"select foo.* from foo", lpf.invoke1(Squash, read("foo")))
}
"compile qualified select * with additional fields" in {
testLogicalPlanCompile(
sqlE"select foo.*, bar.address from foo, bar",
lpf.let('__tmp0,
lpf.join(
read("foo"),
read("bar"),
JoinType.Inner,
JoinCondition('__leftJoin9, '__rightJoin10, lpf.constant(Data.Bool(true)))),
lpf.invoke1(Squash,
lpf.invoke2(MapConcat,
JoinDir.Left.projectFrom(lpf.free('__tmp0)),
makeObj(
"address" ->
lpf.invoke2(MapProject,
JoinDir.Right.projectFrom(lpf.free('__tmp0)),
lpf.constant(Data.Str("address"))))))))
}
"compile deeply-nested qualified select *" in {
testLogicalPlanCompile(
sqlE"select foo.bar.baz.*, bar.address from foo, bar",
lpf.let('__tmp0,
lpf.join(
read("foo"),
read("bar"),
JoinType.Inner,
JoinCondition('__leftJoin9, '__rightJoin10, lpf.constant(Data.Bool(true)))),
lpf.invoke1(Squash,
lpf.invoke2(MapConcat,
lpf.invoke2(MapProject,
lpf.invoke2(MapProject,
JoinDir.Left.projectFrom(lpf.free('__tmp0)),
lpf.constant(Data.Str("bar"))),
lpf.constant(Data.Str("baz"))),
makeObj(
"address" ->
lpf.invoke2(MapProject,
JoinDir.Right.projectFrom(lpf.free('__tmp0)),
lpf.constant(Data.Str("address"))))))))
}
"compile simple select with unnamed projection which is just an identifier" in {
testLogicalPlanCompile(
sqlE"select name, place from city",
lpf.let('__tmp0, read("city"),
lpf.invoke1(Squash,
makeObj(
"name" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("name"))),
"place" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("place")))))))
}
"compile basic let" in {
testLogicalPlanCompile(
sqlE"foo := 5; foo",
lpf.constant(Data.Int(5)))
}
"compile basic let, ignoring the form" in {
testLogicalPlanCompile(
sqlE"bar := 5; 7",
lpf.constant(Data.Int(7)))
}
"compile nested lets" in {
testLogicalPlanCompile(
sqlE"foo := 5; bar := 7; bar + foo",
lpf.invoke2(Add, lpf.constant(Data.Int(7)), lpf.constant(Data.Int(5))))
}
"compile let with select in body from let binding ident" in {
val query = sqlE"foo := (1,2,3); select * from foo"
val expectation =
lpf.invoke1(Squash,
lpf.invoke1(ShiftArray,
lpf.invoke2(ArrayConcat,
lpf.invoke2(ArrayConcat,
MakeArrayN[Fix[LP]](lpf.constant(Data.Int(1))).embed,
MakeArrayN[Fix[LP]](lpf.constant(Data.Int(2))).embed),
MakeArrayN[Fix[LP]](lpf.constant(Data.Int(3))).embed)))
testLogicalPlanCompile(query, expectation)
}
"compile let with select in body selecting let binding ident" in {
val query = sqlE"foo := 12; select foo from bar"
val expectation =
lpf.invoke1(Squash,
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("foo")),
lpf.constant(Data.Int(12))))
testLogicalPlanCompile(query, expectation)
}
"fail to compile let inside select with ambigious reference" in {
// TODO: Investigate why this is not producing an ambigious reference
compile(sqlE"select foo from (bar := 12; baz) as quag") must_===
compiledSubtableMissing("quag").wrapNel.left
}
"compile let inside select with table reference" in {
val query = sqlE"select foo from (bar := 12; select * from baz) as quag"
val expectation =
lpf.invoke1(Squash,
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("foo")),
lpf.invoke2(MapProject,
lpf.invoke1(Squash, read("baz")),
lpf.constant(Data.Str("foo")))))
testLogicalPlanCompile(query, expectation)
}
"compile let inside select with ident reference" in {
val query = sqlE"select foo from (bar := 12; select * from bar) as quag"
val expectation =
lpf.invoke1(Squash,
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("foo")),
lpf.invoke2(MapProject,
lpf.invoke1(Squash, lpf.constant(Data.Int(12))),
lpf.constant(Data.Str("foo")))))
testLogicalPlanCompile(query, expectation)
}
"compile selection with same ident as nested let" in {
val query = sqlE"select bar from (bar := 12; select * from bar) as quag"
val expectation =
lpf.invoke1(Squash,
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("bar")),
lpf.invoke2(MapProject,
lpf.invoke1(Squash, lpf.constant(Data.Int(12))),
lpf.constant(Data.Str("bar")))))
testLogicalPlanCompile(query, expectation)
}
"compile selection with same ident as nested let and alias" in {
val query = sqlE"select bar from (bar := 12; select * from bar) as bar"
val expectation =
lpf.invoke1(Squash,
lpf.invoke2(MakeMap,
lpf.constant(Data.Str("bar")),
lpf.invoke1(Squash, lpf.constant(Data.Int(12)))))
testLogicalPlanCompile(query, expectation)
}
    // Binding (`:=`) semantics: a `foo := …` binding can be shadowed by table
    // aliases and table references depending on scope, so most of these
    // compile down to a plain read of the underlying table.
    "compile let with select in form and body" in {
      val query = sqlE"foo := select * from bar; select * from foo"
      val expectation = lpf.invoke1(Squash, read("bar"))
      testLogicalPlanCompile(query, expectation)
    }
    "compile let with inner context that shares a table reference" in {
      val query = sqlE"select (foo := select * from bar; select * from foo) from foo"
      val expectation =
        lpf.invoke1(Squash, read("bar"))
      testLogicalPlanCompile(query, expectation)
    }
    "compile let with an inner context of as that shares a binding name" in {
      // The alias `foo` in table position shadows the outer `foo := 4` binding.
      val query = sqlE"foo := 4; select * from bar as foo"
      val expectation = lpf.invoke1(Squash, read("bar"))
      testLogicalPlanCompile(query, expectation)
    }
    "fail to compile let with an inner context of let that shares a binding name in expression context" in {
      // TODO: Investigate why this is not producing an ambiguous reference
      val query = sqlE"foo := 4; select * from (foo := bar; foo) as quag"
      compile(query) must_=== compiledSubtableMissing("quag").wrapNel.left
    }
    "compile let with an inner context of as that shares a binding name in table context" in {
      val query = sqlE"foo := 4; select * from (foo := select * from bar; foo) as quag"
      val expectation = lpf.invoke1(Squash, read("bar"))
      testLogicalPlanCompile(query, expectation)
    }
    "compile simple 1-table projection when root identifier is also a projection" in {
      // 'foo' must be interpreted as a projection because only this interpretation is possible
      testLogicalPlanCompile(
        sqlE"select foo.bar from baz",
        lpf.invoke1(Squash,
          lpf.invoke2(MakeMap,
            lpf.constant(Data.Str("bar")),
            lpf.invoke2(MapProject,
              lpf.invoke2(MapProject, read("baz"), lpf.constant(Data.Str("foo"))),
              lpf.constant(Data.Str("bar"))))))
    }
    "compile simple 1-table projection when root identifier is also a table ref" in {
      // 'foo' must be interpreted as a table reference because this
      // interpretation is possible and consistent with ANSI SQL.
      testLogicalPlanCompile(
        sqlE"select foo.bar from foo",
        lpf.invoke1(Squash,
          lpf.invoke2(MakeMap,
            lpf.constant(Data.Str("bar")),
            lpf.invoke2(MapProject, read("foo"), lpf.constant(Data.Str("bar"))))))
    }
    // Arithmetic, conditional, and alias-operator compilation. Queries that
    // reference the source more than once are wrapped in a Let ('__tmp0) so
    // the read is shared rather than duplicated in the expected plan.
    "compile two term addition from one table" in {
      testLogicalPlanCompile(
        sqlE"select foo + bar from baz",
        lpf.let('__tmp0, read("baz"),
          lpf.invoke1(Squash,
            lpf.invoke2(Add,
              lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("foo"))),
              lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("bar")))))))
    }
    "compile negate" in {
      testLogicalPlanCompile(
        sqlE"select -foo from bar",
        lpf.invoke1(Squash,
          lpf.invoke1(Negate, lpf.invoke2(MapProject, read("bar"), lpf.constant(Data.Str("foo"))))))
    }
    "compile modulo" in {
      testLogicalPlanCompile(
        sqlE"select foo % baz from bar",
        lpf.let('__tmp0, read("bar"),
          lpf.invoke1(Squash,
            lpf.invoke2(Modulo,
              lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("foo"))),
              lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("baz")))))))
    }
    "compile coalesce" in {
      // COALESCE(a, b) desugars to Cond(a = null, b, a) with `a` bound once.
      testLogicalPlanCompile(
        sqlE"select coalesce(bar, baz) from foo",
        lpf.let('__tmp0, read("foo"),
          lpf.let('__tmp1, lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("bar"))),
            lpf.invoke1(Squash,
              lpf.invoke3(Cond,
                lpf.invoke2(Eq, lpf.free('__tmp1), lpf.constant(Data.Null)),
                lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("baz"))),
                lpf.free('__tmp1))))))
    }
    "compile date field extraction" in {
      testLogicalPlanCompile(
        sqlE"""select date_part("day", baz) from foo""",
        lpf.invoke1(Squash,
          lpf.invoke1(ExtractDayOfMonth,
            lpf.invoke2(MapProject, read("foo"), lpf.constant(Data.Str("baz"))))))
    }
    "compile conditional" in {
      testLogicalPlanCompile(
        sqlE"select case when pop < 10000 then city else loc end from zips",
        lpf.let('__tmp0, read("zips"),
          lpf.invoke1(Squash,
            lpf.invoke3(Cond,
              lpf.invoke2(Lt,
                lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("pop"))),
                lpf.constant(Data.Int(10000))),
              lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("city"))),
              lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("loc")))))))
    }
    "compile conditional (match) without else" in {
      // A missing ELSE branch is equivalent to `else null`.
      testLogicalPlanCompile(
        sqlE"""select case when pop = 0 then "nobody" end from zips""",
        compileExp(sqlE"""select case when pop = 0 then "nobody" else null end from zips"""))
    }
    "compile conditional (switch) without else" in {
      testLogicalPlanCompile(
        sqlE"""select case pop when 0 then "nobody" end from zips""",
        compileExp(sqlE"""select case pop when 0 then "nobody" else null end from zips"""))
    }
    "have ~~ as alias for LIKE" in {
      testLogicalPlanCompile(
        sqlE"""select pop from zips where city ~~ "%BOU%" """,
        compileExp(sqlE"""select pop from zips where city LIKE "%BOU%" """))
    }
    "have !~~ as alias for NOT LIKE" in {
      testLogicalPlanCompile(
        sqlE"""select pop from zips where city !~~ "%BOU%" """,
        compileExp(sqlE"""select pop from zips where city NOT LIKE "%BOU%" """))
    }
    "compile array length" in {
      testLogicalPlanCompile(
        sqlE"select array_length(bar) from foo",
        lpf.invoke1(Squash,
          lpf.invoke1(ArrayLength,
            lpf.invoke2(MapProject, read("foo"), lpf.constant(Data.Str("bar"))))))
    }
    // String predicates: LIKE / NOT LIKE compile to the ternary Like with the
    // default escape string "\\" unless an explicit ESCAPE clause is given;
    // `~` compiles to Search with the case-insensitivity flag set to false.
    "compile concat" in {
      testLogicalPlanCompile(
        sqlE"""select concat(foo, concat(" ", bar)) from baz""",
        lpf.let('__tmp0, read("baz"),
          lpf.invoke1(Squash,
            lpf.invoke2(Concat,
              lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("foo"))),
              lpf.invoke2(Concat,
                lpf.constant(Data.Str(" ")),
                lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("bar"))))))))
    }
    "compile between" in {
      testLogicalPlanCompile(
        sqlE"select * from foo where bar between 1 and 10",
        lpf.let('__tmp0, read("foo"),
          lpf.invoke1(Squash,
            lpf.invoke2(Filter,
              lpf.free('__tmp0),
              lpf.invoke3(Between,
                lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("bar"))),
                lpf.constant(Data.Int(1)),
                lpf.constant(Data.Int(10)))))))
    }
    "compile not between" in {
      testLogicalPlanCompile(
        sqlE"select * from foo where bar not between 1 and 10",
        lpf.let('__tmp0, read("foo"),
          lpf.invoke1(Squash,
            lpf.invoke2(Filter,
              lpf.free('__tmp0),
              lpf.invoke1(Not,
                lpf.invoke3(Between,
                  lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("bar"))),
                  lpf.constant(Data.Int(1)),
                  lpf.constant(Data.Int(10))))))))
    }
    "compile like" in {
      testLogicalPlanCompile(
        sqlE"""select bar from foo where bar like "a%" """,
        lpf.let('__tmp0, read("foo"),
          lpf.invoke1(Squash,
            lpf.invoke2(MakeMap,
              lpf.constant(Data.Str("bar")),
              lpf.invoke2(MapProject,
                lpf.invoke2(Filter,
                  lpf.free('__tmp0),
                  lpf.invoke3(Like,
                    lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("bar"))),
                    lpf.constant(Data.Str("a%")),
                    lpf.constant(Data.Str("\\\\")))),
                lpf.constant(Data.Str("bar")))))))
    }
    "compile like with escape char" in {
      testLogicalPlanCompile(
        sqlE"""select bar from foo where bar like "a=%" escape "=" """,
        lpf.let('__tmp0, read("foo"),
          lpf.invoke1(Squash,
            lpf.invoke2(MakeMap,
              lpf.constant(Data.Str("bar")),
              lpf.invoke2(MapProject,
                lpf.invoke2(Filter,
                  lpf.free('__tmp0),
                  lpf.invoke3(Like,
                    lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("bar"))),
                    lpf.constant(Data.Str("a=%")),
                    lpf.constant(Data.Str("=")))),
                lpf.constant(Data.Str("bar")))))))
    }
    "compile not like" in {
      testLogicalPlanCompile(
        sqlE"""select bar from foo where bar not like "a%" """,
        lpf.let('__tmp0, read("foo"),
          lpf.invoke1(Squash,
            lpf.invoke2(MakeMap,
              lpf.constant(Data.Str("bar")),
              lpf.invoke2(MapProject, lpf.invoke2(Filter,
                lpf.free('__tmp0),
                lpf.invoke1(Not,
                  lpf.invoke3(Like,
                    lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("bar"))),
                    lpf.constant(Data.Str("a%")),
                    lpf.constant(Data.Str("\\\\"))))),
                lpf.constant(Data.Str("bar")))))))
    }
    "compile ~" in {
      // Note: `$$` in the sqlE interpolator is an escaped literal `$`.
      testLogicalPlanCompile(
        sqlE"""select bar from foo where bar ~ "a.$$" """,
        lpf.let('__tmp0, read("foo"),
          lpf.invoke1(Squash,
            lpf.invoke2(MakeMap,
              lpf.constant(Data.Str("bar")),
              lpf.invoke2(MapProject,
                lpf.invoke2(Filter,
                  lpf.free('__tmp0),
                  lpf.invoke3(Search,
                    lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("bar"))),
                    lpf.constant(Data.Str("a.$")),
                    lpf.constant(Data.Bool(false)))),
                lpf.constant(Data.Str("bar")))))))
    }
    "compile complex expression" in {
      // Checks operator precedence: parsed as ((avgTemp * 9) / 5) + 32.
      testLogicalPlanCompile(
        sqlE"select avgTemp*9/5 + 32 from cities",
        lpf.invoke1(Squash,
          lpf.invoke2(Add,
            lpf.invoke2(Divide,
              lpf.invoke2(Multiply,
                lpf.invoke2(MapProject, read("cities"), lpf.constant(Data.Str("avgTemp"))),
                lpf.constant(Data.Int(9))),
              lpf.constant(Data.Int(5))),
            lpf.constant(Data.Int(32)))))
    }
"compile parenthesized expression" in {
testLogicalPlanCompile(
sqlE"select (avgTemp + 32)/5 from cities",
lpf.invoke1(Squash,
lpf.invoke2(Divide,
lpf.invoke2(Add,
lpf.invoke2(MapProject, read("cities"), lpf.constant(Data.Str("avgTemp"))),
lpf.constant(Data.Int(32))),
lpf.constant(Data.Int(5)))))
}
    // Cross joins compile to an inner join on `true`; the join-side names
    // ('__leftJoin9 / '__rightJoin10) come from the compiler's fresh-name
    // counter and are asserted exactly.
    "compile cross select *" in {
      testLogicalPlanCompile(
        sqlE"select * from person, car",
        lpf.let('__tmp0,
          lpf.join(
            read("person"),
            read("car"),
            JoinType.Inner,
            JoinCondition('__leftJoin9, '__rightJoin10, lpf.constant(Data.Bool(true)))),
          lpf.invoke1(Squash,
            lpf.invoke2(MapConcat,
              JoinDir.Left.projectFrom(lpf.free('__tmp0)),
              JoinDir.Right.projectFrom(lpf.free('__tmp0))))))
    }
    "compile two term multiplication from two tables" in {
      testLogicalPlanCompile(
        sqlE"select person.age * car.modelYear from person, car",
        lpf.let('__tmp0,
          lpf.join(
            read("person"),
            read("car"),
            JoinType.Inner,
            JoinCondition('__leftJoin9, '__rightJoin10, lpf.constant(Data.Bool(true)))),
          lpf.invoke1(Squash,
            lpf.invoke2(Multiply,
              lpf.invoke2(MapProject,
                JoinDir.Left.projectFrom(lpf.free('__tmp0)),
                lpf.constant(Data.Str("age"))),
              lpf.invoke2(MapProject,
                JoinDir.Right.projectFrom(lpf.free('__tmp0)),
                lpf.constant(Data.Str("modelYear")))))))
    }
    "compile simple where (with just a constant)" in {
      testLogicalPlanCompile(
        sqlE"select name from person where 1",
        lpf.invoke1(Squash,
          lpf.invoke2(MakeMap,
            lpf.constant(Data.Str("name")),
            lpf.invoke2(MapProject,
              lpf.invoke2(Filter, read("person"), lpf.constant(Data.Int(1))),
              lpf.constant(Data.Str("name"))))))
    }
    "compile simple where" in {
      testLogicalPlanCompile(
        sqlE"select name from person where age > 18",
        lpf.let('__tmp0, read("person"),
          lpf.invoke1(Squash,
            lpf.invoke2(MakeMap,
              lpf.constant(Data.Str("name")),
              lpf.invoke2(MapProject,
                lpf.invoke2(Filter,
                  lpf.free('__tmp0),
                  lpf.invoke2(Gt,
                    lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("age"))),
                    lpf.constant(Data.Int(18)))),
                lpf.constant(Data.Str("name")))))))
    }
    "compile simple group by" in {
      testLogicalPlanCompile(
        sqlE"select count(*) from person group by name",
        lpf.let('__tmp0, read("person"),
          lpf.invoke1(Squash,
            lpf.invoke1(Count,
              lpf.invoke2(GroupBy,
                lpf.free('__tmp0),
                MakeArrayN[Fix[LP]](lpf.invoke2(MapProject,
                  lpf.free('__tmp0),
                  lpf.constant(Data.Str("name")))).embed)))))
    }
    "compile group by with projected keys" in {
      // Non-key projections get positional names ("0", "2"); group keys keep
      // their field name and are reduced with Arbitrary.
      testLogicalPlanCompile(
        sqlE"select lower(name), person.gender, avg(age) from person group by lower(person.name), gender",
        lpf.let('__tmp0, read("person"),
          lpf.let('__tmp1,
            lpf.invoke2(GroupBy,
              lpf.free('__tmp0),
              MakeArrayN[Fix[LP]](
                lpf.invoke1(Lower,
                  lpf.invoke2(MapProject,
                    lpf.free('__tmp0),
                    lpf.constant(Data.Str("name")))),
                lpf.invoke2(MapProject,
                  lpf.free('__tmp0),
                  lpf.constant(Data.Str("gender")))).embed),
            lpf.invoke1(Squash,
              makeObj(
                "0" ->
                  lpf.invoke1(Arbitrary,
                    lpf.invoke1(Lower,
                      lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("name"))))),
                "gender" ->
                  lpf.invoke1(Arbitrary,
                    lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("gender")))),
                "2" ->
                  lpf.invoke1(Avg,
                    lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("age")))))))))
    }
    "compile group by with perverse aggregated expression" in {
      testLogicalPlanCompile(
        sqlE"select count(name) from person group by name",
        lpf.let('__tmp0, read("person"),
          lpf.invoke1(Squash,
            lpf.invoke1(Count,
              lpf.invoke2(MapProject,
                lpf.invoke2(GroupBy,
                  lpf.free('__tmp0),
                  MakeArrayN[Fix[LP]](lpf.invoke2(MapProject,
                    lpf.free('__tmp0),
                    lpf.constant(Data.Str("name")))).embed),
                lpf.constant(Data.Str("name")))))))
    }
    "compile sum in expression" in {
      testLogicalPlanCompile(
        sqlE"select sum(pop) * 100 from zips",
        lpf.invoke1(Squash,
          lpf.invoke2(Multiply,
            lpf.invoke1(Sum, lpf.invoke2(MapProject, read("zips"), lpf.constant(Data.Str("pop")))),
            lpf.constant(Data.Int(100)))))
    }
    // Shared expected plans for the set-operation tests below:
    // `setA` is the two-field projection (`select loc, pop from zips`),
    // `setB` the single-field projection (`select city from zips`).
    val setA =
      lpf.let('__tmp0, read("zips"),
        lpf.invoke1(Squash, makeObj(
          "loc" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("loc"))),
          "pop" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("pop"))))))
    val setB =
      lpf.invoke1(Squash,
        lpf.invoke2(MakeMap,
          lpf.constant(Data.Str("city")),
          lpf.invoke2(MapProject,
            read("zips"),
            lpf.constant(Data.Str("city")))))
    // UNION / INTERSECT deduplicate via GroupBy + Arbitrary; the ALL variants
    // skip that step. Both plans are normalized before comparison because the
    // shared setA/setB fixtures reuse the '__tmp0 binder.
    "compile union" in {
      testLogicalPlanCompile(
        sqlE"select loc, pop from zips union select city from zips",
        lpf.normalizeLets(
          lpf.normalizeLets(
            lpf.let('__tmp1, lpf.invoke2(Union, setA, setB),
              lpf.invoke1(Arbitrary,
                lpf.invoke2(GroupBy, lpf.free('__tmp1), lpf.free('__tmp1)))))))
    }
    "compile union all" in {
      testLogicalPlanCompile(
        sqlE"select loc, pop from zips union all select city from zips",
        lpf.normalizeLets(lpf.normalizeLets(
          lpf.invoke2(Union, setA, setB))))
    }
    "compile intersect" in {
      testLogicalPlanCompile(
        sqlE"select loc, pop from zips intersect select city from zips",
        lpf.normalizeLets(lpf.normalizeLets(
          lpf.let('__tmp1, lpf.invoke2(Intersect, setA, setB),
            lpf.invoke1(Arbitrary,
              lpf.invoke2(GroupBy, lpf.free('__tmp1), lpf.free('__tmp1)))))))
    }
    "compile intersect all" in {
      testLogicalPlanCompile(
        sqlE"select loc, pop from zips intersect all select city from zips",
        lpf.normalizeLets(lpf.normalizeLets(
          lpf.invoke2(Intersect, setA, setB))))
    }
    "compile except" in {
      testLogicalPlanCompile(
        sqlE"select loc, pop from zips except select city from zips",
        lpf.normalizeLets(lpf.normalizeLets(
          lpf.invoke2(Except, setA, setB))))
    }
    // `{*}` / `[*]` are surface-syntax aliases for `{:*}` / `[:*]`, which in
    // turn desugar to the Flatten_Map / Flatten_Array functions; field-level
    // flattens are resolved against the single table in scope.
    "have {*} as alias for {:*}" in {
      testLogicalPlanCompile(
        sqlE"SELECT bar{*} FROM foo",
        compileExp(sqlE"SELECT bar{:*} FROM foo"))
    }
    "have [*] as alias for [:*]" in {
      testLogicalPlanCompile(
        sqlE"SELECT foo[*] FROM foo",
        compileExp(sqlE"SELECT foo[:*] FROM foo"))
    }
    "expand top-level map flatten" in {
      testLogicalPlanCompile(
        sqlE"SELECT foo{:*} FROM foo",
        compileExp(sqlE"SELECT Flatten_Map(foo) FROM foo"))
    }
    "expand nested map flatten" in {
      testLogicalPlanCompile(
        sqlE"SELECT foo.bar{:*} FROM foo",
        compileExp(sqlE"SELECT Flatten_Map(foo.bar) FROM foo"))
    }
    "expand field map flatten" in {
      testLogicalPlanCompile(
        sqlE"SELECT bar{:*} FROM foo",
        compileExp(sqlE"SELECT Flatten_Map(foo.bar) FROM foo"))
    }
    "expand top-level array flatten" in {
      testLogicalPlanCompile(
        sqlE"SELECT foo[:*] FROM foo",
        compileExp(sqlE"SELECT Flatten_Array(foo) FROM foo"))
    }
    "expand nested array flatten" in {
      testLogicalPlanCompile(
        sqlE"SELECT foo.bar[:*] FROM foo",
        compileExp(sqlE"SELECT Flatten_Array(foo.bar) FROM foo"))
    }
    "expand field array flatten" in {
      testLogicalPlanCompile(
        sqlE"SELECT bar[:*] FROM foo",
        compileExp(sqlE"SELECT Flatten_Array(foo.bar) FROM foo"))
    }
    // Flatten/shift semantics: `{:_}`/`[:_]` shift values, `{_:}`/`[_:]` shift
    // keys/indices, and `{...}`/`[...]` unshift the results back into a
    // map/array.
    "compile top-level map flatten" in {
      testLogicalPlanCompile(
        sqlE"select zips{:*} from zips",
        lpf.invoke1(Squash,
          lpf.invoke2(MakeMap,
            lpf.constant(Data.Str("zips")),
            lpf.invoke1(FlattenMap, read("zips")))))
    }
    "have {_} as alias for {:_}" in {
      testLogicalPlanCompile(
        sqlE"select length(commit.author{_}) from slamengine_commits",
        compileExp(sqlE"select length(commit.author{:_}) from slamengine_commits"))
    }
    "have [_] as alias for [:_]" in {
      testLogicalPlanCompile(
        sqlE"select loc[_] / 10 from zips",
        compileExp(sqlE"select loc[:_] / 10 from zips"))
    }
    "compile map shift / unshift" in {
      // The shifted expression appears in both key and value position, so the
      // same subtree (`inner`) is used twice in the expected plan.
      val inner = lpf.invoke1(ShiftMap, lpf.invoke2(MapProject, lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("commit"))), lpf.constant(Data.Str("author"))))
      testLogicalPlanCompile(
        sqlE"select {commit.author{:_}: length(commit.author{:_}) ...} from slamengine_commits",
        lpf.let('__tmp0, read("slamengine_commits"),
          lpf.invoke1(Squash, lpf.invoke2(UnshiftMap, inner, lpf.invoke1(Length, inner)))))
    }
    "compile map shift / unshift keys" in {
      val inner = lpf.invoke1(ShiftMapKeys, lpf.invoke2(MapProject, lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("commit"))), lpf.constant(Data.Str("author"))))
      testLogicalPlanCompile(
        sqlE"select {commit.author{_:}: length(commit.author{_:})...} from slamengine_commits",
        lpf.let('__tmp0, read("slamengine_commits"),
          lpf.invoke1(Squash, lpf.invoke2(UnshiftMap, inner, lpf.invoke1(Length, inner)))))
    }
    "compile array shift / unshift" in {
      testLogicalPlanCompile(
        sqlE"select [loc[:_] / 10 ...] from zips",
        lpf.invoke1(Squash,
          lpf.invoke1(UnshiftArray,
            lpf.invoke2(Divide,
              lpf.invoke1(ShiftArray, lpf.invoke2(MapProject, read("zips"), lpf.constant(Data.Str("loc")))),
              lpf.constant(Data.Int(10))))))
    }
    "compile array shift / unshift indices" in {
      testLogicalPlanCompile(
        sqlE"select [loc[_:] * 10 ...] from zips",
        lpf.invoke1(Squash,
          lpf.invoke1(UnshiftArray,
            lpf.invoke2(Multiply,
              lpf.invoke1(ShiftArrayIndices, lpf.invoke2(MapProject, read("zips"), lpf.constant(Data.Str("loc")))),
              lpf.constant(Data.Int(10))))))
    }
    "compile array flatten" in {
      testLogicalPlanCompile(
        sqlE"select loc[:*] from zips",
        lpf.invoke1(Squash,
          lpf.invoke2(MakeMap,
            lpf.constant(Data.Str("loc")),
            lpf.invoke1(FlattenArray,
              lpf.invoke2(MapProject,
                read("zips"),
                lpf.constant(Data.Str("loc")))))))
    }
    // ORDER BY on a field not in the projection: the compiler adds a synthetic
    // "__sd__0" key to sort on, then strips it with DeleteKey after sorting.
    "compile simple order by" in {
      testLogicalPlanCompile(
        sqlE"select name from person order by height",
        lpf.let('__tmp0, read("person"),
          lpf.let('__tmp1,
            lpf.invoke1(Squash,
              makeObj(
                "name" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("name"))),
                "__sd__0" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("height"))))),
            lpf.invoke2(DeleteKey,
              lpf.sort(
                lpf.free('__tmp1),
                (lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("__sd__0"))), SortDir.asc).wrapNel),
              lpf.constant(Data.Str("__sd__0"))))))
    }
    "compile order by reusing selected field" in {
      // No synthetic key needed: the sort key is already projected.
      testLogicalPlanCompile(
        sqlE"select name from person order by name",
        lpf.let('__tmp0,
          lpf.invoke1(Squash,
            lpf.invoke2(MakeMap,
              lpf.constant(Data.Str("name")),
              lpf.invoke2(
                MapProject,
                read("person"),
                lpf.constant(Data.Str("name"))))),
          lpf.sort(
            lpf.free('__tmp0),
            (lpf.invoke2(MapProject,
              lpf.free('__tmp0),
              lpf.constant(Data.Str("name"))), SortDir.asc).wrapNel)))
    }
    "compile order by reusing selected flattened field" in {
      testLogicalPlanCompile(
        sqlE"select quux[*] from foo order by quux[*]",
        lpf.let('__tmp0,
          lpf.invoke1(Squash,
            lpf.invoke2(MakeMap,
              lpf.constant(Data.Str("quux")),
              lpf.invoke1(
                FlattenArray,
                lpf.invoke2(MapProject,
                  read("foo"),
                  lpf.constant(Data.Str("quux")))))),
          lpf.sort(
            lpf.free('__tmp0),
            (lpf.invoke2(
              MapProject,
              lpf.free('__tmp0),
              lpf.constant(Data.Str("quux"))), SortDir.asc).wrapNel)))
    }
    "compile simple order by with filter" in {
      // Filter is applied before projection; only the non-projected sort key
      // ("height") needs the synthetic "__sd__0" field.
      testLogicalPlanCompile(
        sqlE"""select name from person where gender = "male" order by name, height""",
        lpf.let('__tmp0, read("person"),
          lpf.let('__tmp1,
            lpf.invoke2(Filter,
              lpf.free('__tmp0),
              lpf.invoke2(Eq,
                lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("gender"))),
                lpf.constant(Data.Str("male")))),
            lpf.let('__tmp2,
              lpf.invoke1(Squash,
                makeObj(
                  "name" -> lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("name"))),
                  "__sd__0" -> lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("height"))))),
              lpf.invoke2(DeleteKey,
                lpf.sort(
                  lpf.free('__tmp2),
                  NonEmptyList(
                    (lpf.invoke2(MapProject, lpf.free('__tmp2), lpf.constant(Data.Str("name"))), SortDir.asc),
                    (lpf.invoke2(MapProject, lpf.free('__tmp2), lpf.constant(Data.Str("__sd__0"))), SortDir.asc))),
                lpf.constant(Data.Str("__sd__0")))))))
    }
    "compile simple order by with wildcard" in {
      testLogicalPlanCompile(
        sqlE"select * from person order by height",
        lpf.let('__tmp0, lpf.invoke1(Squash, read("person")),
          lpf.sort(
            lpf.free('__tmp0),
            (lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("height"))), SortDir.asc).wrapNel)))
    }
    "compile simple order by with ascending and descending" in {
      testLogicalPlanCompile(
        sqlE"select * from person order by height desc, name",
        lpf.let('__tmp0, lpf.invoke1(Squash, read("person")),
          lpf.sort(
            lpf.free('__tmp0),
            NonEmptyList(
              (lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("height"))), SortDir.desc),
              (lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("name"))), SortDir.asc)))))
    }
    "compile simple order by with expression" in {
      testLogicalPlanCompile(
        sqlE"select * from person order by height*2.54",
        lpf.let('__tmp0, read("person"),
          lpf.let('__tmp1,
            lpf.invoke1(Squash,
              lpf.invoke2(MapConcat,
                lpf.free('__tmp0),
                makeObj(
                  "__sd__0" -> lpf.invoke2(Multiply,
                    lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("height"))),
                    lpf.constant(Data.Dec(2.54)))))),
            lpf.invoke2(DeleteKey,
              lpf.sort(
                lpf.free('__tmp1),
                (lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("__sd__0"))), SortDir.asc).wrapNel),
              lpf.constant(Data.Str("__sd__0"))))))
    }
    "compile order by with alias" in {
      testLogicalPlanCompile(
        sqlE"select firstName as name from person order by name",
        lpf.let('__tmp0,
          lpf.invoke1(Squash,
            makeObj(
              "name" -> lpf.invoke2(MapProject, read("person"), lpf.constant(Data.Str("firstName"))))),
          lpf.sort(
            lpf.free('__tmp0),
            (lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("name"))), SortDir.asc).wrapNel)))
    }
    "compile simple order by with expression in synthetic field" in {
      testLogicalPlanCompile(
        sqlE"select name from person order by height*2.54",
        lpf.let('__tmp0, read("person"),
          lpf.let('__tmp1,
            lpf.invoke1(Squash,
              makeObj(
                "name" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("name"))),
                "__sd__0" ->
                  lpf.invoke2(Multiply,
                    lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("height"))),
                    lpf.constant(Data.Dec(2.54))))),
            lpf.invoke2(DeleteKey,
              lpf.sort(
                lpf.free('__tmp1),
                (lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("__sd__0"))), SortDir.asc).wrapNel),
              lpf.constant(Data.Str("__sd__0"))))))
    }
    "compile order by with nested projection" in {
      testLogicalPlanCompile(
        sqlE"select bar from foo order by foo.bar.baz.quux/3",
        lpf.let('__tmp0, read("foo"),
          lpf.let('__tmp1,
            lpf.invoke1(Squash,
              makeObj(
                "bar" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("bar"))),
                "__sd__0" -> lpf.invoke2(Divide,
                  lpf.invoke2(MapProject,
                    lpf.invoke2(MapProject,
                      lpf.invoke2(MapProject, lpf.free('__tmp0),
                        lpf.constant(Data.Str("bar"))),
                      lpf.constant(Data.Str("baz"))),
                    lpf.constant(Data.Str("quux"))),
                  lpf.constant(Data.Int(3))))),
            lpf.invoke2(DeleteKey,
              lpf.sort(
                lpf.free('__tmp1),
                (lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("__sd__0"))), SortDir.asc).wrapNel),
              lpf.constant(Data.Str("__sd__0"))))))
    }
    // A leading table-name (or alias) qualifier in an ORDER BY expression is
    // resolved to the relation itself, so these pairs must compile identically.
    "compile order by with root projection a table ref" in {
      // Note: not using wildcard here because the simple case is optimized
      // differently
      testLogicalPlanCompile(
        sqlE"select foo from bar order by bar.baz",
        compileExp(sqlE"select foo from bar order by baz"))
    }
    "compile order by with root projection a table ref with alias" in {
      // Note: not using wildcard here because the simple case is optimized
      // differently
      testLogicalPlanCompile(
        sqlE"select foo from bar as b order by b.baz",
        compileExp(sqlE"select foo from bar as b order by baz"))
    }
    "compile order by with root projection a table ref with alias, mismatched" in {
      testLogicalPlanCompile(
        sqlE"select * from bar as b order by bar.baz",
        compileExp(sqlE"select * from bar as b order by b.bar.baz"))
    }
    "compile order by with root projection a table ref, embedded in expr" in {
      testLogicalPlanCompile(
        sqlE"select * from bar order by bar.baz/10",
        compileExp(sqlE"select * from bar order by baz/10"))
    }
    "compile order by with root projection a table ref, embedded in complex expr" in {
      testLogicalPlanCompile(
        sqlE"select * from bar order by bar.baz/10 - 3*bar.quux",
        compileExp(sqlE"select * from bar order by baz/10 - 3*quux"))
    }
    // Pending end-to-end test exercising every clause of a SELECT at once
    // (where / group by / having / order by / offset / limit). The real query
    // is kept commented out because the framework requires valid SQL even for
    // pending tests; the expected plan below documents the intended output.
    "compile multiple stages" in {
      // val sql =
      //   sqlE"""select height*2.54 as cm
      //            from person
      //            where height > 60
      //            group by gender, height
      //            having count(*) > 10
      //            order by cm
      //            offset 10
      //            limit 5"""
      // this is required while the test is pending because we require valid sql to be passed
      val sql = sqlE"select * from zips"
      testLogicalPlanCompile(sql,
        lpf.let('__tmp0, read("person"), // from person
          lpf.let('__tmp1, // where height > 60
            lpf.invoke2(Filter,
              lpf.free('__tmp0),
              lpf.invoke2(Gt,
                lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("height"))),
                lpf.constant(Data.Int(60)))),
            lpf.let('__tmp2, // group by gender, height
              lpf.invoke2(GroupBy,
                lpf.free('__tmp1),
                MakeArrayN[Fix[LP]](
                  lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("gender"))),
                  lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("height")))).embed),
              lpf.let('__tmp3,
                lpf.invoke1(Squash, // select height*2.54 as cm
                  makeObj(
                    "cm" ->
                      lpf.invoke2(Multiply,
                        lpf.invoke1(Arbitrary,
                          lpf.invoke2(MapProject,
                            lpf.invoke2(Filter, // having count(*) > 10
                              lpf.free('__tmp2),
                              lpf.invoke2(Gt, lpf.invoke1(Count, lpf.free('__tmp2)), lpf.constant(Data.Int(10)))),
                            lpf.constant(Data.Str("height")))),
                        lpf.constant(Data.Dec(2.54))))),
                lpf.invoke2(Take,
                  lpf.invoke2(Drop,
                    lpf.sort( // order by cm
                      lpf.free('__tmp3),
                      (lpf.invoke2(MapProject, lpf.free('__tmp3), lpf.constant(Data.Str("cm"))), SortDir.asc).wrapNel),
                    lpf.constant(Data.Int(10))), // offset 10
                  lpf.constant(Data.Int(5)))))))) // limit 5
    }.pendingUntilFixed("reported in issue qz-3686")
"compile simple sum" in {
testLogicalPlanCompile(
sqlE"select sum(height) from person",
lpf.invoke1(Squash,
lpf.invoke1(Sum, lpf.invoke2(MapProject, read("person"), lpf.constant(Data.Str("height"))))))
}
    "compile simple inner equi-join" in {
      val query =
        sqlE"select foo.name, bar.address from foo join bar on foo.id = bar.foo_id"
      testLogicalPlanCompile(query,
        lpf.let('__tmp0,
          lpf.join(
            read("foo"),
            read("bar"),
            JoinType.Inner,
            JoinCondition('__leftJoin9, '__rightJoin10,
              lpf.invoke2(Eq,
                lpf.invoke2(MapProject, lpf.joinSideName('__leftJoin9), lpf.constant(Data.Str("id"))),
                lpf.invoke2(MapProject, lpf.joinSideName('__rightJoin10), lpf.constant(Data.Str("foo_id")))))),
          lpf.invoke1(Squash,
            makeObj(
              "name" ->
                lpf.invoke2(MapProject,
                  JoinDir.Left.projectFrom(lpf.free('__tmp0)),
                  lpf.constant(Data.Str("name"))),
              "address" ->
                lpf.invoke2(MapProject,
                  JoinDir.Right.projectFrom(lpf.free('__tmp0)),
                  lpf.constant(Data.Str("address")))))))
    }
    // The equivalence tests below compare two compilations of "the same" join;
    // `renameJoinSides` aligns the fresh join-side names, which differ because
    // the two queries consume the name counter differently.
    "compile cross join to the equivalent inner equi-join" in {
      val query = sqlE"select foo.name, bar.address from foo, bar where foo.id = bar.foo_id"
      val equiv = sqlE"select foo.name, bar.address from foo join bar on foo.id = bar.foo_id"
      val expected = renameJoinSides(compileExp(equiv))(
        '__leftJoin9, '__leftJoin23, '__rightJoin10, '__rightJoin24)
      testLogicalPlanCompile(query, expected)
    }
    "compile inner join with additional equi-condition to the equivalent inner equi-join" in {
      val query = sqlE"select foo.name, bar.address from foo join bar on foo.id = bar.foo_id where foo.x = bar.y"
      val equiv = sqlE"select foo.name, bar.address from foo join bar on foo.id = bar.foo_id and foo.x = bar.y"
      val expected = renameJoinSides(compileExp(equiv))(
        '__leftJoin9, '__leftJoin23, '__rightJoin10, '__rightJoin24)
      testLogicalPlanCompile(query, expected)
    }
    "compile inner non-equi join to the equivalent cross join" in {
      val query = sqlE"select foo.name, bar.address from foo join bar on foo.x < bar.y"
      val equiv = sqlE"select foo.name, bar.address from foo, bar where foo.x < bar.y"
      val expected = renameJoinSides(compileExp(equiv))(
        '__leftJoin23, '__leftJoin9, '__rightJoin24, '__rightJoin10)
      testLogicalPlanCompile(query, expected)
    }
    "compile nested cross join to the equivalent inner equi-join" in {
      val query = sqlE"select a.x, b.y, c.z from a, b, c where a.id = b.a_id and b.`_id` = c.b_id"
      val equiv = sqlE"select a.x, b.y, c.z from (a join b on a.id = b.a_id) join c on b.`_id` = c.b_id"
      testLogicalPlanCompile(query, compileExp(equiv))
    }.pendingUntilFixed("SD-1190 (should these really be identical as of #1943?)")
    "compile filtered cross join with one-sided conditions" in {
      val query =
        sqlE"select foo.name, bar.address from foo, bar where foo.id = bar.foo_id and foo.x < 10 and bar.y = 20"
      val equiv =
        sqlE"select foo.name, bar.address from foo join bar on foo.id = bar.foo_id where foo.x < 10 and bar.y = 20"
      testLogicalPlanCompile(query, compileExp(equiv))
    }
    "compile filtered join with one-sided conditions" in {
      // One-sided WHERE conditions are pushed below the join, each filtering
      // its own side before the equi-join condition is applied.
      val query =
        sqlE"select foo.name, bar.address from foo join bar on foo.id = bar.foo_id where foo.x < 10 and bar.y = 20"
      testLogicalPlanCompile(query,
        lpf.let('__tmp0, read("foo"),
          lpf.let('__tmp1, read("bar"),
            lpf.let('__tmp2,
              lpf.join(
                lpf.invoke2(Filter,
                  lpf.free('__tmp0),
                  lpf.invoke2(Lt,
                    lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("x"))),
                    lpf.constant(Data.Int(10)))),
                lpf.invoke2(Filter,
                  lpf.free('__tmp1),
                  lpf.invoke2(Eq,
                    lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("y"))),
                    lpf.constant(Data.Int(20)))),
                JoinType.Inner,
                JoinCondition('__leftJoin23, '__rightJoin24,
                  lpf.invoke2(Eq,
                    lpf.invoke2(MapProject, lpf.joinSideName('__leftJoin23), lpf.constant(Data.Str("id"))),
                    lpf.invoke2(MapProject, lpf.joinSideName('__rightJoin24), lpf.constant(Data.Str("foo_id")))))),
              lpf.invoke1(Squash,
                makeObj(
                  "name" -> lpf.invoke2(MapProject,
                    JoinDir.Left.projectFrom(lpf.free('__tmp2)),
                    lpf.constant(Data.Str("name"))),
                  "address" -> lpf.invoke2(MapProject,
                    JoinDir.Right.projectFrom(lpf.free('__tmp2)),
                    lpf.constant(Data.Str("address")))))))))
    }
    "compile simple left ineq-join" in {
      testLogicalPlanCompile(
        sqlE"select foo.name, bar.address from foo left join bar on foo.id < bar.foo_id",
        lpf.let('__tmp0,
          lpf.join(
            read("foo"),
            read("bar"),
            JoinType.LeftOuter,
            JoinCondition('left1, 'right2,
              lpf.invoke2(Lt,
                lpf.invoke2(MapProject, lpf.joinSideName('left1), lpf.constant(Data.Str("id"))),
                lpf.invoke2(MapProject, lpf.joinSideName('right2), lpf.constant(Data.Str("foo_id")))))),
          lpf.invoke1(Squash,
            makeObj(
              "name" ->
                lpf.invoke2(MapProject,
                  JoinDir.Left.projectFrom(lpf.free('__tmp0)),
                  lpf.constant(Data.Str("name"))),
              "address" ->
                lpf.invoke2(MapProject,
                  JoinDir.Right.projectFrom(lpf.free('__tmp0)),
                  lpf.constant(Data.Str("address")))))))
    }
    "compile complex equi-join" in {
      // Three-way join: the first join nests on the left of the second, so
      // projections into it go through Left/Left and Right/Left paths.
      testLogicalPlanCompile(
        sqlE"select foo.name, bar.address from foo join bar on foo.id = bar.foo_id join baz on baz.bar_id = bar.id",
        lpf.let('__tmp0,
          lpf.join(
            lpf.join(
              read("foo"),
              read("bar"),
              JoinType.Inner,
              JoinCondition('left3, 'right4,
                lpf.invoke2(Eq,
                  lpf.invoke2(MapProject,
                    lpf.joinSideName('left3),
                    lpf.constant(Data.Str("id"))),
                  lpf.invoke2(MapProject,
                    lpf.joinSideName('right4),
                    lpf.constant(Data.Str("foo_id")))))),
            read("baz"),
            JoinType.Inner,
            JoinCondition('__leftJoin23, '__rightJoin24,
              lpf.invoke2(Eq,
                lpf.invoke2(MapProject, lpf.joinSideName('__rightJoin24),
                  lpf.constant(Data.Str("bar_id"))),
                lpf.invoke2(MapProject,
                  JoinDir.Right.projectFrom(lpf.joinSideName('__leftJoin23)),
                  lpf.constant(Data.Str("id")))))),
          lpf.invoke1(Squash,
            makeObj(
              "name" ->
                lpf.invoke2(MapProject,
                  JoinDir.Left.projectFrom(JoinDir.Left.projectFrom(lpf.free('__tmp0))),
                  lpf.constant(Data.Str("name"))),
              "address" ->
                lpf.invoke2(MapProject,
                  JoinDir.Right.projectFrom(JoinDir.Left.projectFrom(lpf.free('__tmp0))),
                  lpf.constant(Data.Str("address")))))))
    }
    "compile sub-select in filter" in {
      // Correlated/scalar sub-selects in WHERE are not supported yet; the
      // `read("zips")` expectation is a placeholder for the pending test.
      testLogicalPlanCompile(
        sqlE"select city, pop from zips where pop > (select avg(pop) from zips)",
        read("zips"))
    }.pendingUntilFixed
    "compile simple sub-select" in {
      testLogicalPlanCompile(
        sqlE"select temp.name, temp.size from (select zips.city as name, zips.pop as size from zips) as temp",
        lpf.let('__tmp0, read("zips"),
          lpf.let('__tmp1,
            lpf.invoke1(Squash,
              makeObj(
                "name" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("city"))),
                "size" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("pop"))))),
            lpf.invoke1(Squash,
              makeObj(
                "name" -> lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("name"))),
                "size" -> lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("size"))))))))
    }
    "compile sub-select with same un-qualified names" in {
      testLogicalPlanCompile(
        sqlE"select city, pop from (select city, pop from zips) as temp",
        lpf.let('__tmp0, read("zips"),
          lpf.let('__tmp1,
            lpf.invoke1(Squash,
              makeObj(
                "city" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("city"))),
                "pop" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("pop"))))),
            lpf.invoke1(Squash,
              makeObj(
                "city" -> lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("city"))),
                "pop" -> lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("pop"))))))))
    }
    // DISTINCT compiles to GroupBy on the whole projected row followed by
    // Arbitrary to pick one representative per group.
    "compile simple distinct" in {
      testLogicalPlanCompile(
        sqlE"select distinct city from zips",
        lpf.let(
          '__tmp0,
          lpf.invoke1(Squash,
            lpf.invoke2(MakeMap,
              lpf.constant(Data.Str("city")),
              lpf.invoke2(
                MapProject,
                read("zips"),
                lpf.constant(Data.Str("city"))))),
          lpf.invoke1(Arbitrary,
            lpf.invoke2(GroupBy, lpf.free('__tmp0), lpf.free('__tmp0)))))
    }
    "compile simple distinct ordered" in {
      testLogicalPlanCompile(
        sqlE"select distinct city from zips order by city",
        lpf.let(
          '__tmp0,
          lpf.invoke1(Squash,
            lpf.invoke2(MakeMap,
              lpf.constant(Data.Str("city")),
              lpf.invoke2(MapProject, read("zips"), lpf.constant(Data.Str("city"))))),
          lpf.let(
            '__tmp1,
            lpf.sort(
              lpf.free('__tmp0),
              (lpf.invoke2(
                MapProject,
                lpf.free('__tmp0),
                lpf.constant(Data.Str("city"))), SortDir.asc).wrapNel),
            lpf.sort(
              lpf.invoke1(Arbitrary,
                lpf.invoke2(GroupBy, lpf.free('__tmp1), lpf.free('__tmp1))),
              (lpf.invoke2(
                MapProject,
                lpf.invoke1(Arbitrary,
                  lpf.invoke2(GroupBy, lpf.free('__tmp1), lpf.free('__tmp1))),
                lpf.constant(Data.Str("city"))), SortDir.asc).wrapNel))))
    }
    "compile distinct with unrelated order by" in {
      // The sort key ("pop") is not in the distinct projection, so grouping
      // uses First (not Arbitrary) on the row with the synthetic key removed,
      // and the "__sd__0" key is deleted after the final sort.
      testLogicalPlanCompile(
        sqlE"select distinct city from zips order by pop desc",
        lpf.let('__tmp0,
          read("zips"),
          lpf.let('__tmp1,
            lpf.invoke1(Squash,
              makeObj(
                "city" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("city"))),
                "__sd__0" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("pop"))))),
            lpf.let('__tmp2,
              lpf.sort(
                lpf.free('__tmp1),
                (lpf.invoke2(MapProject, lpf.free('__tmp1), lpf.constant(Data.Str("__sd__0"))), SortDir.desc).wrapNel),
              lpf.invoke2(DeleteKey,
                lpf.sort(
                  lpf.invoke1(First,
                    lpf.invoke2(GroupBy,
                      lpf.free('__tmp2),
                      lpf.invoke2(DeleteKey,
                        lpf.free('__tmp2),
                        lpf.constant(Data.Str("__sd__0"))))),
                  (lpf.invoke2(MapProject, lpf.invoke1(First,
                    lpf.invoke2(GroupBy,
                      lpf.free('__tmp2),
                      lpf.invoke2(DeleteKey,
                        lpf.free('__tmp2),
                        lpf.constant(Data.Str("__sd__0"))))), lpf.constant(Data.Str("__sd__0"))), SortDir.desc).wrapNel),
                lpf.constant(Data.Str("__sd__0")))))))
    }
    "compile count(distinct(...))" in {
      testLogicalPlanCompile(
        sqlE"select count(distinct(lower(city))) from zips",
        lpf.let(
          '__tmp0,
          lpf.invoke1(Lower,
            lpf.invoke2(MapProject, read("zips"), lpf.constant(Data.Str("city")))),
          lpf.invoke1(Squash,
            lpf.invoke1(Count,
              lpf.invoke1(Arbitrary,
                lpf.invoke2(GroupBy, lpf.free('__tmp0), lpf.free('__tmp0)))))))
    }
    "compile simple distinct with two named projections" in {
      testLogicalPlanCompile(
        sqlE"select distinct city as CTY, state as ST from zips",
        lpf.let('__tmp0, read("zips"),
          lpf.let(
            '__tmp1,
            lpf.invoke1(Squash,
              makeObj(
                "CTY" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("city"))),
                "ST" -> lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("state"))))),
            lpf.invoke1(Arbitrary,
              lpf.invoke2(GroupBy, lpf.free('__tmp1), lpf.free('__tmp1))))))
    }
    "compile count distinct with two exprs" in {
      // Multi-expression DISTINCT is unsupported; placeholder expectation.
      testLogicalPlanCompile(
        sqlE"select count(distinct city, state) from zips",
        read("zips"))
    }.pendingUntilFixed
    "compile distinct as function" in {
      testLogicalPlanCompile(
        sqlE"select distinct(city, state) from zips",
        read("zips"))
    }.pendingUntilFixed
    "fail with ambiguous reference" in {
      // An unqualified field over two relations yields AmbiguousReference
      // listing both candidate tables.
      compile(sqlE"select foo from bar, baz") must be_-\\/(
        NonEmptyList(
          SemanticError.AmbiguousReference(
            ident[Fix[Sql]]("foo").embed,
            List(
              TableRelationAST(currentDir </> file("baz"),None),
              TableRelationAST(currentDir </> file("bar"),None)))))
    }
    "fail with ambiguous reference in cond" in {
      compile(sqlE"""select (case when a = 1 then "ok" else "reject" end) from bar, baz""") must be_-\\/
    }
    "fail with ambiguous reference in else" in {
      compile(sqlE"""select (case when bar.a = 1 then "ok" else foo end) from bar, baz""") must be_-\\/
    }
    "fail with duplicate alias" in {
      compile(sqlE"select car.name as name, owner.name as name from owners as owner join cars as car on car.`_id` = owner.carId") must_===
        SemanticError.duplicateAlias("name").wrapNel.left
    }
    "translate free variable" in {
      // The named parameter `:age` becomes a free variable ('age) in the plan.
      testLogicalPlanCompile(sqlE"select name from zips where age < :age",
        lpf.let('__tmp0, read("zips"),
          lpf.invoke1(Squash,
            lpf.invoke2(MakeMap,
              lpf.constant(Data.Str("name")),
              lpf.invoke2(MapProject,
                lpf.invoke2(Filter,
                  lpf.free('__tmp0),
                  lpf.invoke2(Lt,
                    lpf.invoke2(MapProject, lpf.free('__tmp0), lpf.constant(Data.Str("age"))),
                    lpf.free('age))),
                lpf.constant(Data.Str("name")))))))
    }
}
  // Arity checking happens in `fullCompile`: wrong argument counts surface as
  // `wrongArgumentCount(name, expected, actual)` semantic errors.
  "error when too few arguments passed to a function" in {
    fullCompile(sqlE"""select substring("foo") from zips""") must_===
      wrongArgumentCount(CIName("substring"), 3, 1).wrapNel.left
  }
  "error when too many arguments passed to a function" in {
    fullCompile(sqlE"select count(*, 1, 2, 4) from zips") must_===
      wrongArgumentCount(CIName("count"), 1, 4).wrapNel.left
  }
// reduceGroupKeys rewrites bare projections of group keys so that each group
// yields a single representative value via the ARBITRARY reduction.
"reduceGroupKeys" should {
  import Compiler.reduceGroupKeys

  // A bare projection of the grouping key must be wrapped in Arbitrary.
  "insert ARBITRARY" in {
    val lp =
      lpf.let('tmp0, read("zips"),
        lpf.let('tmp1,
          lpf.invoke2(GroupBy,
            lpf.free('tmp0),
            MakeArrayN[Fix[LP]](lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("city")))).embed),
          lpf.invoke2(MapProject, lpf.free('tmp1), lpf.constant(Data.Str("city")))))
    val exp =
      lpf.let('tmp0, read("zips"),
        lpf.invoke1(Arbitrary,
          lpf.invoke2(MapProject,
            lpf.invoke2(GroupBy,
              lpf.free('tmp0),
              MakeArrayN[Fix[LP]](lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("city")))).embed), lpf.constant(Data.Str("city")))))
    reduceGroupKeys(lp) must equalToPlan(exp)
  }

  // Same rewrite, but with a Filter between the GroupBy and the key projection.
  // Currently broken — see the pendingUntilFixed issue reference below.
  "insert ARBITRARY with intervening filter" in {
    val lp =
      lpf.let('tmp0, read("zips"),
        lpf.let('tmp1,
          lpf.invoke2(GroupBy,
            lpf.free('tmp0),
            MakeArrayN[Fix[LP]](lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("city")))).embed),
          lpf.let('tmp2,
            lpf.invoke2(Filter, lpf.free('tmp1), lpf.invoke2(Gt, lpf.invoke1(Count, lpf.free('tmp1)), lpf.constant(Data.Int(10)))),
            lpf.invoke2(MapProject, lpf.free('tmp2), lpf.constant(Data.Str("city"))))))
    val exp =
      lpf.let('tmp0, read("zips"),
        lpf.let('tmp1,
          lpf.invoke2(GroupBy,
            lpf.free('tmp0),
            MakeArrayN[Fix[LP]](lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("city")))).embed),
          lpf.invoke1(Arbitrary,
            lpf.invoke2(MapProject,
              lpf.invoke2(Filter,
                lpf.free('tmp1),
                lpf.invoke2(Gt, lpf.invoke1(Count, lpf.free('tmp1)), lpf.constant(Data.Int(10)))),
              lpf.constant(Data.Str("city"))))))
    reduceGroupKeys(lp) must equalToPlan(exp)
  }.pendingUntilFixed("reported in issue qz-3686")

  // A key projection already under a reduction (Count) must be left alone.
  "not insert redundant Reduction" in {
    val lp =
      lpf.let('tmp0, read("zips"),
        lpf.invoke1(Count,
          lpf.invoke2(MapProject,
            lpf.invoke2(GroupBy,
              lpf.free('tmp0),
              MakeArrayN[Fix[LP]](lpf.invoke2(MapProject, lpf.free('tmp0),
                lpf.constant(Data.Str("city")))).embed), lpf.constant(Data.Str("city")))))
    reduceGroupKeys(lp) must equalToPlan(lp)
  }

  // Only the bare projection of a group key ("city") is wrapped; projections
  // already under reductions ("1", "2") and non-key projections ("loc") are
  // left untouched.
  "insert ARBITRARY with multiple keys and mixed projections" in {
    val lp =
      lpf.let('tmp0,
        read("zips"),
        lpf.let('tmp1,
          lpf.invoke2(GroupBy,
            lpf.free('tmp0),
            MakeArrayN[Fix[LP]](
              lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("city"))),
              lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("state")))).embed),
          makeObj(
            "city" -> lpf.invoke2(MapProject, lpf.free('tmp1), lpf.constant(Data.Str("city"))),
            "1" -> lpf.invoke1(Count, lpf.invoke2(MapProject, lpf.free('tmp1), lpf.constant(Data.Str("state")))),
            "loc" -> lpf.invoke2(MapProject, lpf.free('tmp1), lpf.constant(Data.Str("loc"))),
            "2" -> lpf.invoke1(Sum, lpf.invoke2(MapProject, lpf.free('tmp1), lpf.constant(Data.Str("pop")))))))
    val exp =
      lpf.let('tmp0,
        read("zips"),
        lpf.let('tmp1,
          lpf.invoke2(GroupBy,
            lpf.free('tmp0),
            MakeArrayN[Fix[LP]](
              lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("city"))),
              lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("state")))).embed),
          makeObj(
            "city" -> lpf.invoke1(Arbitrary, lpf.invoke2(MapProject, lpf.free('tmp1), lpf.constant(Data.Str("city")))),
            "1" -> lpf.invoke1(Count, lpf.invoke2(MapProject, lpf.free('tmp1), lpf.constant(Data.Str("state")))),
            "loc" -> lpf.invoke2(MapProject, lpf.free('tmp1), lpf.constant(Data.Str("loc"))),
            "2" -> lpf.invoke1(Sum, lpf.invoke2(MapProject, lpf.free('tmp1), lpf.constant(Data.Str("pop")))))))
    reduceGroupKeys(lp) must equalToPlan(exp)
  }
}
// projectionNames must produce a distinct name for every projection, suffixing
// with 0, 1, ... to disambiguate collisions.
"inferred names" should {
  // An inferred (as opposed to user-specified) projection name, right-biased.
  def inferred(s: String): ProjectionName =
    Inferred(s).right
  // Extracts just the names from the computed name/expression pairs.
  def namesOf(projections: List[Proj[Fix[Sql]]]): List[ProjectionName] =
    projectionNames(projections, None).toOption.toList.join.firsts

  "create unique names" >> {
    "when two fields have the same name" in {
      val query = sqlE"SELECT owner.name, car.name from owners as owner join cars as car on car.`_id` = owner.carId"
      val projections = query.project.asInstanceOf[Select[Fix[Sql]]].projections
      namesOf(projections) must_=== List(inferred("name"), inferred("name0"))
    }
    "when multiple flattened fields have the same name" in {
      val query = sqlE"SELECT foo{:*}, foo{*:}, foo[:*], foo[*:] from bar"
      val projections = query.project.asInstanceOf[Select[Fix[Sql]]].projections
      namesOf(projections) must_=== List(inferred("foo"), inferred("foo0"), inferred("foo1"), inferred("foo2"))
    }
    // An explicit alias keeps its name; the colliding inferred field is suffixed.
    "when a field and an alias have the same name" in {
      val query = sqlE"SELECT owner.name, car.model as name from owners as owner join cars as car on car.`_id` = owner.carId"
      val projections = query.project.asInstanceOf[Select[Fix[Sql]]].projections
      namesOf(projections) must_=== List(inferred("name0"), inferred("name"))
    }
  }
}
}
| quasar-analytics/quasar | core/src/test/scala/quasar/compile/CompilerSpec.scala | Scala | apache-2.0 | 72,664 |
/**
* Copyright (c) 2013, The National Archives <digitalpreservation@nationalarchives.gov.uk>
* https://www.nationalarchives.gov.uk
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package uk.gov.nationalarchives.csv.validator.schema.v1_0
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import uk.gov.nationalarchives.csv.validator.metadata.{Cell, Row}
import uk.gov.nationalarchives.csv.validator.schema._
import scalaz.{Success, Failure, IList}
@RunWith(classOf[JUnitRunner])
class OrRuleSpec extends Specification {

  /** A schema containing a single named column and a TotalColumns(1) directive. */
  private def singleColumnSchema(columnName: String): Schema =
    Schema(List(TotalColumns(1)), List(ColumnDefinition(NamedColumnIdentifier(columnName))))

  /** An in(...) rule matching the given literal value. */
  private def in(value: String): InRule =
    InRule(Literal(Some(value)))

  "OrRule" should {

    "succeed when left rule validates" in {
      val orRule = OrRule(in("Germany"), in("France"))
      orRule.evaluate(0, Row(List(Cell("Germany")), 1), singleColumnSchema("Country")) mustEqual Success(true)
    }

    "succeed when right rule validates" in {
      val orRule = OrRule(in("Germany"), in("France"))
      orRule.evaluate(0, Row(List(Cell("France")), 1), singleColumnSchema("Country")) mustEqual Success(true)
    }

    "fail when left/right rules are invalid" in {
      val orRule = OrRule(in("This"), in("That"))
      orRule.evaluate(0, Row(List(Cell("SomethingElse")), 1), singleColumnSchema("ThisOrThat")) must beLike {
        case Failure(messages) => messages.list mustEqual IList("""in("This") or in("That") fails for line: 1, column: ThisOrThat, value: "SomethingElse"""")
      }
    }

    "fail when left cross reference rule is invalid and right rule is invalid" in {
      // "ConfigurableCountry" is not a column in the schema, so evaluating the
      // cross-reference throws rather than producing a Failure.
      val orRule = OrRule(InRule(ColumnReference(NamedColumnIdentifier("ConfigurableCountry"))), in("France"))
      orRule.evaluate(0, Row(List(Cell("UK")), 1), singleColumnSchema("Country")) must throwA[NoSuchElementException]
    }

    "succeed when 3 'or' rules valid for right rule" in {
      val orRule = OrRule(OrRule(in("left"), in("middle")), in("right"))
      orRule.evaluate(0, Row(List(Cell("right")), 1), singleColumnSchema("Direction")) mustEqual Success(true)
    }

    "succeed when 3 'or' rules valid for left/middle rule" in {
      val orRule = OrRule(OrRule(in("left"), in("middle")), in("right"))
      orRule.evaluate(0, Row(List(Cell("middle")), 1), singleColumnSchema("Direction")) mustEqual Success(true)
    }

    "fail when all 3 'or' rules are invalid " in {
      val orRule = OrRule(OrRule(in("left"), in("middle")), in("right"))
      orRule.evaluate(0, Row(List(Cell("up")), 1), singleColumnSchema("Direction")) must beLike {
        case Failure(messages) => messages.list mustEqual IList("""in("left") or in("middle") or in("right") fails for line: 1, column: Direction, value: "up"""")
      }
    }
  }
}
package cats.examples.typeclasses.functors
import cats._
import cats.data.Nested
import language.postfixOps
/**
* Apply extends the Functor type class with a new function, ap.
*
* ap is similar to map, in that we are transforming a value within a context
* such as Option, List, i.e. the F in F[A].
*
* ap differs from map in the type of transformation, which is F[A => B]
* instead of A => B for map.
*
* See http://typelevel.org/cats/typeclasses/apply.html
*/
object ApplyExample extends App {

  // Simple functions used throughout the examples below.
  // (A previously-declared but unused `addTwo` has been removed.)
  val intToString: Int => String = _.toString
  val double: Int => Int = _ * 2

  // An Apply instance must implement both ap and map (map comes from Functor).
  implicit val optionApply: Apply[Option] = new Apply[Option] {
    def ap[A, B](f: Option[A => B])(fa: Option[A]): Option[B] =
      fa.flatMap(a => f.map(ff => ff(a)))

    def map[A, B](fa: Option[A])(f: A => B): Option[B] = fa map f
  }

  implicit val listApply: Apply[List] = new Apply[List] {
    def ap[A, B](f: List[A => B])(fa: List[A]): List[B] =
      fa.flatMap(a => f.map(ff => ff(a)))

    def map[A, B](fa: List[A])(f: A => B): List[B] = fa map f
  }

  // Apply extends Functor, so we can map.
  assert(Apply[Option].map(Some(1))(intToString) contains "1")
  assert(Apply[Option].map(Some(1))(double) contains 2)
  assert(Apply[Option].map(None)(double) isEmpty)

  // Like Functors, Apply instances compose, via the Nested type.
  val listOption = Nested[List, Option, Int](List(Some(1), None, Some(3)))
  val plusOne = (x: Int) => x + 1
  val f = Nested[List, Option, Int => Int](List(Some(plusOne)))
  assert(Apply[Nested[List, Option, ?]].ap(f)(listOption)
    == Nested[List, Option, Int](List(Some(2), None, Some(4))))

  // In addition to map, Apply provides ap. Compare the ap invocations below
  // with the map calls above: ap transforms via F[A => B] rather than A => B,
  // hence we apply Some(function) to an Option.
  assert(Apply[Option].ap(Some(intToString))(Some(1)) contains "1")
  assert(Apply[Option].ap(Some(double))(Some(1)) contains 2)
  assert(Apply[Option].ap(Some(double))(None) isEmpty)
  assert(Apply[Option].ap(None)(Some(1)) isEmpty)
  assert(Apply[Option].ap(None)(None) isEmpty)

  // Apply also provides ap2..ap22 for functions of higher arity (plain ap
  // supports only a single argument).
  val addArity2 = (a: Int, b: Int) => a + b
  assert(Apply[Option].ap2(Some(addArity2))(Some(1), Some(2)) contains 3)

  val addArity3 = (a: Int, b: Int, c: Int) => a + b + c
  assert(Apply[Option].ap3(Some(addArity3))(Some(1), Some(2), Some(3)) contains 6)
  // ap22 left as an exercise for the reader...

  // If any argument is None, the whole computation is None: the effects of the
  // context apply to the entire computation.
  assert(Apply[Option].ap2(Some(addArity2))(Some(1), None) isEmpty)
  assert(Apply[Option].ap2(None)(Some(1), Some(2)) isEmpty)

  // Similarly map2..map22 are provided...
  assert(Apply[Option].map2(Some(1), Some(2))(addArity2) contains 3)
  assert(Apply[Option].map3(Some(1), Some(2), Some(3))(addArity3) contains 6)

  // ...along with tupleN.
  assert(Apply[Option].tuple2(Some(1), Some(2)) contains (1,2))
  assert(Apply[Option].tuple3(Some(1), Some(2), Some(3)) contains (1,2,3))
}
| carwynellis/cats-examples | src/main/scala/cats/examples/typeclasses/functors/ApplyExample.scala | Scala | mit | 3,547 |
package doobie.free
import scalaz.{ Catchable, Coyoneda, Free => F, Kleisli, Monad, ~>, \\/ }
import scalaz.concurrent.Task
import doobie.util.capture._
import java.lang.Class
import java.lang.Object
import java.lang.String
import java.sql.Blob
import java.sql.CallableStatement
import java.sql.Clob
import java.sql.Connection
import java.sql.DatabaseMetaData
import java.sql.Driver
import java.sql.NClob
import java.sql.PreparedStatement
import java.sql.Ref
import java.sql.ResultSet
import java.sql.SQLData
import java.sql.SQLInput
import java.sql.SQLOutput
import java.sql.SQLWarning
import java.sql.Statement
import nclob.NClobIO
import blob.BlobIO
import clob.ClobIO
import databasemetadata.DatabaseMetaDataIO
import driver.DriverIO
import ref.RefIO
import sqldata.SQLDataIO
import sqlinput.SQLInputIO
import sqloutput.SQLOutputIO
import connection.ConnectionIO
import statement.StatementIO
import preparedstatement.PreparedStatementIO
import callablestatement.CallableStatementIO
import resultset.ResultSetIO
/**
* Algebra and free monad for primitive operations over a `java.sql.Statement`. This is
* a low-level API that exposes lifecycle-managed JDBC objects directly and is intended mainly
* for library developers. End users will prefer a safer, higher-level API such as that provided
* in the `doobie.hi` package.
*
* `StatementIO` is a free monad that must be run via an interpreter, most commonly via
* natural transformation of its underlying algebra `StatementOp` to another monad via
* `Free.runFC`.
*
* The library provides a natural transformation to `Kleisli[M, Statement, A]` for any
* exception-trapping (`Catchable`) and effect-capturing (`Capture`) monad `M`. Such evidence is
* provided for `Task`, `IO`, and stdlib `Future`; and `transK[M]` is provided as syntax.
*
* {{{
* // An action to run
* val a: StatementIO[Foo] = ...
*
* // A JDBC object
* val s: Statement = ...
*
* // Unfolding into a Task
* val ta: Task[A] = a.transK[Task].run(s)
* }}}
*
* @group Modules
*/
object statement {
/**
* Sum type of primitive operations over a `java.sql.Statement`.
* @group Algebra
*/
sealed trait StatementOp[A] {
  // Lifts a plain `Statement => A` function into Kleisli form, capturing its
  // evaluation as an effect in M. Shared by the primitive-op interpreters below.
  protected def primitive[M[_]: Monad: Capture](f: Statement => A): Kleisli[M, Statement, A] =
    Kleisli((s: Statement) => Capture[M].apply(f(s)))
  // Default interpretation of this operation as a Kleisli arrow over Statement.
  def defaultTransK[M[_]: Monad: Catchable: Capture]: Kleisli[M, Statement, A]
}
/**
* Module of constructors for `StatementOp`. These are rarely useful outside of the implementation;
* prefer the smart constructors provided by the `statement` module.
* @group Algebra
*/
object StatementOp {
// Lifting
case class LiftBlobIO[A](s: Blob, action: BlobIO[A]) extends StatementOp[A] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = Kleisli(_ => action.transK[M].run(s))
}
case class LiftCallableStatementIO[A](s: CallableStatement, action: CallableStatementIO[A]) extends StatementOp[A] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = Kleisli(_ => action.transK[M].run(s))
}
case class LiftClobIO[A](s: Clob, action: ClobIO[A]) extends StatementOp[A] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = Kleisli(_ => action.transK[M].run(s))
}
case class LiftConnectionIO[A](s: Connection, action: ConnectionIO[A]) extends StatementOp[A] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = Kleisli(_ => action.transK[M].run(s))
}
case class LiftDatabaseMetaDataIO[A](s: DatabaseMetaData, action: DatabaseMetaDataIO[A]) extends StatementOp[A] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = Kleisli(_ => action.transK[M].run(s))
}
case class LiftDriverIO[A](s: Driver, action: DriverIO[A]) extends StatementOp[A] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = Kleisli(_ => action.transK[M].run(s))
}
case class LiftNClobIO[A](s: NClob, action: NClobIO[A]) extends StatementOp[A] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = Kleisli(_ => action.transK[M].run(s))
}
case class LiftPreparedStatementIO[A](s: PreparedStatement, action: PreparedStatementIO[A]) extends StatementOp[A] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = Kleisli(_ => action.transK[M].run(s))
}
case class LiftRefIO[A](s: Ref, action: RefIO[A]) extends StatementOp[A] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = Kleisli(_ => action.transK[M].run(s))
}
case class LiftResultSetIO[A](s: ResultSet, action: ResultSetIO[A]) extends StatementOp[A] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = Kleisli(_ => action.transK[M].run(s))
}
case class LiftSQLDataIO[A](s: SQLData, action: SQLDataIO[A]) extends StatementOp[A] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = Kleisli(_ => action.transK[M].run(s))
}
case class LiftSQLInputIO[A](s: SQLInput, action: SQLInputIO[A]) extends StatementOp[A] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = Kleisli(_ => action.transK[M].run(s))
}
case class LiftSQLOutputIO[A](s: SQLOutput, action: SQLOutputIO[A]) extends StatementOp[A] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = Kleisli(_ => action.transK[M].run(s))
}
// Combinators

// Reifies exceptions thrown by `action` into the disjunction, via the
// Catchable instance for Kleisli.
case class Attempt[A](action: StatementIO[A]) extends StatementOp[Throwable \\/ A] {
  import scalaz._, Scalaz._
  def defaultTransK[M[_]: Monad: Catchable: Capture] =
    Predef.implicitly[Catchable[Kleisli[M, Statement, ?]]].attempt(action.transK[M])
}

// A non-strict value; `a` is re-evaluated each time the program is run.
case class Pure[A](a: () => A) extends StatementOp[A] {
  def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_ => a())
}

// Escape hatch: an arbitrary computation against the underlying Statement.
case class Raw[A](f: Statement => A) extends StatementOp[A] {
  def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(f)
}
// Primitive Operations
case class AddBatch(a: String) extends StatementOp[Unit] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.addBatch(a))
}
case object Cancel extends StatementOp[Unit] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.cancel())
}
case object ClearBatch extends StatementOp[Unit] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.clearBatch())
}
case object ClearWarnings extends StatementOp[Unit] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.clearWarnings())
}
case object Close extends StatementOp[Unit] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.close())
}
case object CloseOnCompletion extends StatementOp[Unit] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.closeOnCompletion())
}
case class Execute(a: String) extends StatementOp[Boolean] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.execute(a))
}
case class Execute1(a: String, b: Array[Int]) extends StatementOp[Boolean] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.execute(a, b))
}
case class Execute2(a: String, b: Int) extends StatementOp[Boolean] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.execute(a, b))
}
case class Execute3(a: String, b: Array[String]) extends StatementOp[Boolean] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.execute(a, b))
}
case object ExecuteBatch extends StatementOp[Array[Int]] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.executeBatch())
}
case class ExecuteQuery(a: String) extends StatementOp[ResultSet] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.executeQuery(a))
}
case class ExecuteUpdate(a: String, b: Array[String]) extends StatementOp[Int] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.executeUpdate(a, b))
}
case class ExecuteUpdate1(a: String, b: Int) extends StatementOp[Int] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.executeUpdate(a, b))
}
case class ExecuteUpdate2(a: String, b: Array[Int]) extends StatementOp[Int] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.executeUpdate(a, b))
}
case class ExecuteUpdate3(a: String) extends StatementOp[Int] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.executeUpdate(a))
}
case object GetConnection extends StatementOp[Connection] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.getConnection())
}
case object GetFetchDirection extends StatementOp[Int] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.getFetchDirection())
}
case object GetFetchSize extends StatementOp[Int] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.getFetchSize())
}
case object GetGeneratedKeys extends StatementOp[ResultSet] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.getGeneratedKeys())
}
case object GetMaxFieldSize extends StatementOp[Int] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.getMaxFieldSize())
}
case object GetMaxRows extends StatementOp[Int] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.getMaxRows())
}
case object GetMoreResults extends StatementOp[Boolean] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.getMoreResults())
}
case class GetMoreResults1(a: Int) extends StatementOp[Boolean] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.getMoreResults(a))
}
case object GetQueryTimeout extends StatementOp[Int] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.getQueryTimeout())
}
case object GetResultSet extends StatementOp[ResultSet] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.getResultSet())
}
case object GetResultSetConcurrency extends StatementOp[Int] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.getResultSetConcurrency())
}
case object GetResultSetHoldability extends StatementOp[Int] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.getResultSetHoldability())
}
case object GetResultSetType extends StatementOp[Int] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.getResultSetType())
}
case object GetUpdateCount extends StatementOp[Int] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.getUpdateCount())
}
case object GetWarnings extends StatementOp[SQLWarning] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.getWarnings())
}
case object IsCloseOnCompletion extends StatementOp[Boolean] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.isCloseOnCompletion())
}
case object IsClosed extends StatementOp[Boolean] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.isClosed())
}
case object IsPoolable extends StatementOp[Boolean] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.isPoolable())
}
case class IsWrapperFor(a: Class[_]) extends StatementOp[Boolean] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.isWrapperFor(a))
}
case class SetCursorName(a: String) extends StatementOp[Unit] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.setCursorName(a))
}
case class SetEscapeProcessing(a: Boolean) extends StatementOp[Unit] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.setEscapeProcessing(a))
}
case class SetFetchDirection(a: Int) extends StatementOp[Unit] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.setFetchDirection(a))
}
case class SetFetchSize(a: Int) extends StatementOp[Unit] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.setFetchSize(a))
}
case class SetMaxFieldSize(a: Int) extends StatementOp[Unit] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.setMaxFieldSize(a))
}
case class SetMaxRows(a: Int) extends StatementOp[Unit] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.setMaxRows(a))
}
case class SetPoolable(a: Boolean) extends StatementOp[Unit] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.setPoolable(a))
}
case class SetQueryTimeout(a: Int) extends StatementOp[Unit] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.setQueryTimeout(a))
}
case class Unwrap[T](a: Class[T]) extends StatementOp[T] {
def defaultTransK[M[_]: Monad: Catchable: Capture] = primitive(_.unwrap(a))
}
}
import StatementOp._ // We use these immediately
/**
* Free monad over a free functor of [[StatementOp]]; abstractly, a computation that consumes
* a `java.sql.Statement` and produces a value of type `A`.
* @group Algebra
*/
type StatementIO[A] = F.FreeC[StatementOp, A]
/**
 * Monad instance for [[StatementIO]] (can't be inferred).
 * @group Typeclass Instances
 */
implicit val MonadStatementIO: Monad[StatementIO] =
  F.freeMonad[({type λ[α] = Coyoneda[StatementOp, α]})#λ] // type lambda names the Coyoneda'd algebra

/**
 * Catchable instance for [[StatementIO]].
 * @group Typeclass Instances
 */
implicit val CatchableStatementIO: Catchable[StatementIO] =
  new Catchable[StatementIO] {
    def attempt[A](f: StatementIO[A]): StatementIO[Throwable \\/ A] = statement.attempt(f)
    // delay defers the throw until the program is interpreted.
    def fail[A](err: Throwable): StatementIO[A] = statement.delay(throw err)
  }

/**
 * Capture instance for [[StatementIO]].
 * @group Typeclass Instances
 */
implicit val CaptureStatementIO: Capture[StatementIO] =
  new Capture[StatementIO] {
    def apply[A](a: => A): StatementIO[A] = statement.delay(a)
  }
/**
* @group Constructors (Lifting)
*/
def liftBlob[A](s: Blob, k: BlobIO[A]): StatementIO[A] =
F.liftFC(LiftBlobIO(s, k))
/**
* @group Constructors (Lifting)
*/
def liftCallableStatement[A](s: CallableStatement, k: CallableStatementIO[A]): StatementIO[A] =
F.liftFC(LiftCallableStatementIO(s, k))
/**
* @group Constructors (Lifting)
*/
def liftClob[A](s: Clob, k: ClobIO[A]): StatementIO[A] =
F.liftFC(LiftClobIO(s, k))
/**
* @group Constructors (Lifting)
*/
def liftConnection[A](s: Connection, k: ConnectionIO[A]): StatementIO[A] =
F.liftFC(LiftConnectionIO(s, k))
/**
* @group Constructors (Lifting)
*/
def liftDatabaseMetaData[A](s: DatabaseMetaData, k: DatabaseMetaDataIO[A]): StatementIO[A] =
F.liftFC(LiftDatabaseMetaDataIO(s, k))
/**
* @group Constructors (Lifting)
*/
def liftDriver[A](s: Driver, k: DriverIO[A]): StatementIO[A] =
F.liftFC(LiftDriverIO(s, k))
/**
* @group Constructors (Lifting)
*/
def liftNClob[A](s: NClob, k: NClobIO[A]): StatementIO[A] =
F.liftFC(LiftNClobIO(s, k))
/**
* @group Constructors (Lifting)
*/
def liftPreparedStatement[A](s: PreparedStatement, k: PreparedStatementIO[A]): StatementIO[A] =
F.liftFC(LiftPreparedStatementIO(s, k))
/**
* @group Constructors (Lifting)
*/
def liftRef[A](s: Ref, k: RefIO[A]): StatementIO[A] =
F.liftFC(LiftRefIO(s, k))
/**
* @group Constructors (Lifting)
*/
def liftResultSet[A](s: ResultSet, k: ResultSetIO[A]): StatementIO[A] =
F.liftFC(LiftResultSetIO(s, k))
/**
* @group Constructors (Lifting)
*/
def liftSQLData[A](s: SQLData, k: SQLDataIO[A]): StatementIO[A] =
F.liftFC(LiftSQLDataIO(s, k))
/**
* @group Constructors (Lifting)
*/
def liftSQLInput[A](s: SQLInput, k: SQLInputIO[A]): StatementIO[A] =
F.liftFC(LiftSQLInputIO(s, k))
/**
* @group Constructors (Lifting)
*/
def liftSQLOutput[A](s: SQLOutput, k: SQLOutputIO[A]): StatementIO[A] =
F.liftFC(LiftSQLOutputIO(s, k))
/**
 * Lift a StatementIO[A] into an exception-capturing StatementIO[Throwable \\/ A].
 * @group Constructors (Lifting)
 */
def attempt[A](a: StatementIO[A]): StatementIO[Throwable \\/ A] =
  F.liftFC[StatementOp, Throwable \\/ A](Attempt(a))

/**
 * Non-strict unit for capturing effects.
 * @param a the effect; evaluated each time the resulting program is run
 * @group Constructors (Lifting)
 */
def delay[A](a: => A): StatementIO[A] =
  F.liftFC(Pure(a _))

/**
 * Backdoor for arbitrary computations on the underlying Statement.
 * @group Constructors (Lifting)
 */
def raw[A](f: Statement => A): StatementIO[A] =
  F.liftFC(Raw(f))
/**
* @group Constructors (Primitives)
*/
def addBatch(a: String): StatementIO[Unit] =
F.liftFC(AddBatch(a))
/**
* @group Constructors (Primitives)
*/
val cancel: StatementIO[Unit] =
F.liftFC(Cancel)
/**
* @group Constructors (Primitives)
*/
val clearBatch: StatementIO[Unit] =
F.liftFC(ClearBatch)
/**
* @group Constructors (Primitives)
*/
val clearWarnings: StatementIO[Unit] =
F.liftFC(ClearWarnings)
/**
* @group Constructors (Primitives)
*/
val close: StatementIO[Unit] =
F.liftFC(Close)
/**
* @group Constructors (Primitives)
*/
val closeOnCompletion: StatementIO[Unit] =
F.liftFC(CloseOnCompletion)
/**
* @group Constructors (Primitives)
*/
def execute(a: String): StatementIO[Boolean] =
F.liftFC(Execute(a))
/**
* @group Constructors (Primitives)
*/
def execute(a: String, b: Array[Int]): StatementIO[Boolean] =
F.liftFC(Execute1(a, b))
/**
* @group Constructors (Primitives)
*/
def execute(a: String, b: Int): StatementIO[Boolean] =
F.liftFC(Execute2(a, b))
/**
* @group Constructors (Primitives)
*/
def execute(a: String, b: Array[String]): StatementIO[Boolean] =
F.liftFC(Execute3(a, b))
/**
* @group Constructors (Primitives)
*/
val executeBatch: StatementIO[Array[Int]] =
F.liftFC(ExecuteBatch)
/**
* @group Constructors (Primitives)
*/
def executeQuery(a: String): StatementIO[ResultSet] =
F.liftFC(ExecuteQuery(a))
/**
* @group Constructors (Primitives)
*/
def executeUpdate(a: String, b: Array[String]): StatementIO[Int] =
F.liftFC(ExecuteUpdate(a, b))
/**
* @group Constructors (Primitives)
*/
def executeUpdate(a: String, b: Int): StatementIO[Int] =
F.liftFC(ExecuteUpdate1(a, b))
/**
* @group Constructors (Primitives)
*/
def executeUpdate(a: String, b: Array[Int]): StatementIO[Int] =
F.liftFC(ExecuteUpdate2(a, b))
/**
* @group Constructors (Primitives)
*/
def executeUpdate(a: String): StatementIO[Int] =
F.liftFC(ExecuteUpdate3(a))
/**
* @group Constructors (Primitives)
*/
val getConnection: StatementIO[Connection] =
F.liftFC(GetConnection)
/**
* @group Constructors (Primitives)
*/
val getFetchDirection: StatementIO[Int] =
F.liftFC(GetFetchDirection)
// NOTE(review): the members below are mechanically uniform "primitive"
// constructors: each lifts a single StatementOp case into the free monad
// StatementIO via F.liftFC. Nullary JDBC operations are modeled as vals;
// operations taking arguments are defs wrapping the argument in the
// corresponding op case class (e.g. GetMoreResults1, SetMaxRows).
/**
 * @group Constructors (Primitives)
 */
val getFetchSize: StatementIO[Int] =
  F.liftFC(GetFetchSize)
/**
 * @group Constructors (Primitives)
 */
val getGeneratedKeys: StatementIO[ResultSet] =
  F.liftFC(GetGeneratedKeys)
/**
 * @group Constructors (Primitives)
 */
val getMaxFieldSize: StatementIO[Int] =
  F.liftFC(GetMaxFieldSize)
/**
 * @group Constructors (Primitives)
 */
val getMaxRows: StatementIO[Int] =
  F.liftFC(GetMaxRows)
/**
 * @group Constructors (Primitives)
 */
val getMoreResults: StatementIO[Boolean] =
  F.liftFC(GetMoreResults)
/**
 * @group Constructors (Primitives)
 */
def getMoreResults(a: Int): StatementIO[Boolean] =
  F.liftFC(GetMoreResults1(a))
/**
 * @group Constructors (Primitives)
 */
val getQueryTimeout: StatementIO[Int] =
  F.liftFC(GetQueryTimeout)
/**
 * @group Constructors (Primitives)
 */
val getResultSet: StatementIO[ResultSet] =
  F.liftFC(GetResultSet)
/**
 * @group Constructors (Primitives)
 */
val getResultSetConcurrency: StatementIO[Int] =
  F.liftFC(GetResultSetConcurrency)
/**
 * @group Constructors (Primitives)
 */
val getResultSetHoldability: StatementIO[Int] =
  F.liftFC(GetResultSetHoldability)
/**
 * @group Constructors (Primitives)
 */
val getResultSetType: StatementIO[Int] =
  F.liftFC(GetResultSetType)
/**
 * @group Constructors (Primitives)
 */
val getUpdateCount: StatementIO[Int] =
  F.liftFC(GetUpdateCount)
/**
 * @group Constructors (Primitives)
 */
val getWarnings: StatementIO[SQLWarning] =
  F.liftFC(GetWarnings)
/**
 * @group Constructors (Primitives)
 */
val isCloseOnCompletion: StatementIO[Boolean] =
  F.liftFC(IsCloseOnCompletion)
/**
 * @group Constructors (Primitives)
 */
val isClosed: StatementIO[Boolean] =
  F.liftFC(IsClosed)
/**
 * @group Constructors (Primitives)
 */
val isPoolable: StatementIO[Boolean] =
  F.liftFC(IsPoolable)
/**
 * @group Constructors (Primitives)
 */
def isWrapperFor(a: Class[_]): StatementIO[Boolean] =
  F.liftFC(IsWrapperFor(a))
/**
 * @group Constructors (Primitives)
 */
def setCursorName(a: String): StatementIO[Unit] =
  F.liftFC(SetCursorName(a))
/**
 * @group Constructors (Primitives)
 */
def setEscapeProcessing(a: Boolean): StatementIO[Unit] =
  F.liftFC(SetEscapeProcessing(a))
/**
 * @group Constructors (Primitives)
 */
def setFetchDirection(a: Int): StatementIO[Unit] =
  F.liftFC(SetFetchDirection(a))
/**
 * @group Constructors (Primitives)
 */
def setFetchSize(a: Int): StatementIO[Unit] =
  F.liftFC(SetFetchSize(a))
/**
 * @group Constructors (Primitives)
 */
def setMaxFieldSize(a: Int): StatementIO[Unit] =
  F.liftFC(SetMaxFieldSize(a))
/**
 * @group Constructors (Primitives)
 */
def setMaxRows(a: Int): StatementIO[Unit] =
  F.liftFC(SetMaxRows(a))
/**
 * @group Constructors (Primitives)
 */
def setPoolable(a: Boolean): StatementIO[Unit] =
  F.liftFC(SetPoolable(a))
/**
 * @group Constructors (Primitives)
 */
def setQueryTimeout(a: Int): StatementIO[Unit] =
  F.liftFC(SetQueryTimeout(a))
/**
 * @group Constructors (Primitives)
 */
def unwrap[T](a: Class[T]): StatementIO[T] =
  F.liftFC(Unwrap(a))
/**
 * Natural transformation from `StatementOp` to `Kleisli` for the given `M`, consuming a `java.sql.Statement`.
 * Each operation is interpreted via its own `defaultTransK`.
 * @group Algebra
 */
def interpK[M[_]: Monad: Catchable: Capture]: StatementOp ~> Kleisli[M, Statement, ?] =
  new (StatementOp ~> Kleisli[M, Statement, ?]) {
    def apply[A](op: StatementOp[A]): Kleisli[M, Statement, A] =
      op.defaultTransK[M]
  }
/**
 * Natural transformation from `StatementIO` to `Kleisli` for the given `M`, consuming a `java.sql.Statement`.
 * Folds the whole free program with `interpK` via `F.runFC`.
 * @group Algebra
 */
def transK[M[_]: Monad: Catchable: Capture]: StatementIO ~> Kleisli[M, Statement, ?] =
  new (StatementIO ~> Kleisli[M, Statement, ?]) {
    def apply[A](ma: StatementIO[A]): Kleisli[M, Statement, A] =
      F.runFC[StatementOp, Kleisli[M, Statement, ?], A](ma)(interpK[M])
  }
/**
 * Natural transformation from `StatementIO` to `M`, given a `java.sql.Statement`.
 * Specializes `transK` by supplying the statement `c` up front.
 * @group Algebra
 */
def trans[M[_]: Monad: Catchable: Capture](c: Statement): StatementIO ~> M =
  new (StatementIO ~> M) {
    def apply[A](ma: StatementIO[A]): M[A] =
      transK[M].apply(ma).run(c)
  }
/**
 * Syntax for `StatementIO`: enables `ma.transK[M]` as sugar for
 * `statement.transK[M].apply(ma)`.
 * @group Algebra
 */
implicit class StatementIOOps[A](ma: StatementIO[A]) {
  def transK[M[_]: Monad: Catchable: Capture]: Kleisli[M, Statement, A] =
    statement.transK[M].apply(ma)
}
}
| beni55/doobie | core/src/main/scala/doobie/free/statement.scala | Scala | mit | 23,816 |
package gitbucket.core.service
import gitbucket.core.model.{Account, GroupMember}
import org.specs2.mutable.Specification
import java.util.Date
/**
 * Service-level tests for `AccountService`, each running against a fresh
 * test database supplied by `ServiceSpecBase.withTestDB`. The fixture is
 * pre-seeded with a single "root" account (see `getAllUsers` below).
 */
class AccountServiceSpec extends Specification with ServiceSpecBase {
  "AccountService" should {
    // Capitalized so the pattern match below treats it as a stable
    // identifier (a comparison) rather than a fresh binding.
    val RootMailAddress = "root@localhost"
    "getAllUsers" in { withTestDB { implicit session =>
      // A fresh test database contains exactly the seeded root account.
      AccountService.getAllUsers() must be like{
        case List(Account("root", "root", RootMailAddress, _, true, _, _, _, None, None, false, false)) => ok
      }
    }}
    "getAccountByUserName" in { withTestDB { implicit session =>
      AccountService.getAccountByUserName("root") must beSome.like {
        case user => user.userName must_== "root"
      }
      AccountService.getAccountByUserName("invalid user name") must beNone
    }}
    "getAccountByMailAddress" in { withTestDB { implicit session =>
      AccountService.getAccountByMailAddress(RootMailAddress) must beSome
    }}
    "updateLastLoginDate" in { withTestDB { implicit session =>
      val root = "root"
      def user() =
        AccountService.getAccountByUserName(root).getOrElse(sys.error(s"user $root does not exists"))
      // No login has been recorded for the freshly seeded account.
      user().lastLoginDate must beNone
      val date1 = new Date
      // NOTE(review): the strict '>' below may be flaky if the update lands
      // in the same millisecond as date1 (no sleep before the first update,
      // unlike the second check) -- confirm whether be_>= was intended.
      AccountService.updateLastLoginDate(root)
      user().lastLoginDate must beSome.like{ case date =>
        date must be_>(date1)
      }
      val date2 = new Date
      Thread.sleep(1000)
      AccountService.updateLastLoginDate(root)
      user().lastLoginDate must beSome.like{ case date =>
        date must be_>(date2)
      }
    }}
    "updateAccount" in { withTestDB { implicit session =>
      val root = "root"
      def user() =
        AccountService.getAccountByUserName(root).getOrElse(sys.error(s"user $root does not exists"))
      val newAddress = "new mail address"
      // copy() preserves every other field; only the mail address changes.
      AccountService.updateAccount(user().copy(mailAddress = newAddress))
      user().mailAddress must_== newAddress
    }}
    "group" in { withTestDB { implicit session =>
      val group1 = "group1"
      val user1 = "root"
      // Round-trip: create group, add root as manager, then remove again.
      AccountService.createGroup(group1, None)
      AccountService.getGroupMembers(group1) must_== Nil
      AccountService.getGroupsByUserName(user1) must_== Nil
      AccountService.updateGroupMembers(group1, List((user1, true)))
      AccountService.getGroupMembers(group1) must_== List(GroupMember(group1, user1, true))
      AccountService.getGroupsByUserName(user1) must_== List(group1)
      AccountService.updateGroupMembers(group1, Nil)
      AccountService.getGroupMembers(group1) must_== Nil
      AccountService.getGroupsByUserName(user1) must_== Nil
    }}
  }
}
| intermezzo-fr/gitbucket | src/test/scala/gitbucket/core/service/AccountServiceSpec.scala | Scala | apache-2.0 | 2,626 |
package im.actor.server.group
import scala.util.control.NoStackTrace
/**
 * Domain error types raised by group operations. All mix in `NoStackTrace`
 * because these represent expected business failures, so the cost of
 * capturing a stack trace is avoided.
 */
object GroupErrors {
  // Raised when a supplied access hash does not match the group's hash.
  final object InvalidAccessHash extends IllegalArgumentException("Invalid group access hash") with NoStackTrace
  // Raised when the acting user is not a member of the group.
  final object NotAMember extends Exception("Not a group member") with NoStackTrace
  case object UserAlreadyJoined extends Exception with NoStackTrace
  case object UserAlreadyInvited extends Exception with NoStackTrace
  case object UserAlreadyAdmin extends Exception with NoStackTrace
  case object NotAdmin extends Exception with NoStackTrace
  case object AboutTooLong extends Exception with NoStackTrace
  case object TopicTooLong extends Exception with NoStackTrace
}
/** Entry point for group operations (inherits its API from [[GroupOperations]]). */
object GroupOffice extends GroupOperations {
  // Stable persistence id for a group's event-sourced actor, e.g. "Group-42".
  def persistenceIdFor(groupId: Int): String = s"Group-${groupId}"
}
| chieryw/actor-platform | actor-server/actor-peer-managers/src/main/scala/im/actor/server/group/GroupOffice.scala | Scala | mit | 805 |
// Compile-only ("pos") regression test -- presumably for dotty issue #2856,
// per the file name: extending a trait imported from a nested package
// (`io.grpc.Grpc`) inside a sibling package must typecheck.
package io.grpc {
  trait Grpc
}
package bar {
  import io.grpc.Grpc
  object a extends Grpc
}
| som-snytt/dotty | tests/pos/i2856.scala | Scala | apache-2.0 | 95 |
package models
/**
 * A postal address expressed as up to three free-form lines; every line is
 * optional and defaults to absent.
 *
 * @param lineOne   first address line, if provided
 * @param lineTwo   second address line, if provided
 * @param lineThree third address line, if provided
 */
case class MultiLineAddress(
    lineOne: Option[String] = None,
    lineTwo: Option[String] = None,
    lineThree: Option[String] = None)
| Department-for-Work-and-Pensions/ClaimCapture | c3/app/models/MultiLineAddress.scala | Scala | mit | 142 |
/**
* Copyright 2011-2012 @WalmartLabs, a division of Wal-Mart Stores, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.walmartlabs.mupd8
import java.util.concurrent.PriorityBlockingQueue
import grizzled.slf4j.Logging
import scala.collection.immutable
import scala.collection.mutable
import com.walmartlabs.mupd8.Misc._
/**
 * A pool of worker threads executing map/update jobs of type `T`.
 *
 * Keyed jobs are serialized per key: every key hashes to exactly two
 * candidate worker indices (see `getPoolIndices`), and while one of those
 * workers is processing a key (`keyInUse`), further jobs for the same key
 * are appended to that worker's FIFO `keyQueue` instead of being scheduled
 * independently. Unkeyed jobs ("mapper" jobs) go to any worker's priority
 * queue.
 */
class MapUpdatePool[T <: MapUpdateClass[T]](val poolsize: Int, appRun: AppRuntime, clusterFactory: (T => Unit) => MUCluster[T]) extends Logging {
  // Wraps a job with its (possibly null) key; ordering delegates to the
  // job's own priority so PriorityBlockingQueue can sort entries.
  case class innerCompare(job: T, key: PerformerPacketKey) extends Comparable[innerCompare] {
    override def compareTo(other: innerCompare) = job.compareTo(other.job)
  }
  val ring = appRun.ring
  // Per-worker state: the schedulable priority queue, plus the serial queue
  // (`keyQueue`) of jobs that must run after the currently-held key.
  class ThreadData(val me: Int) {
    val queue = new PriorityBlockingQueue[innerCompare]
    // Key currently being processed by this worker, or null. Guarded by
    // `keyLock` (always taken pairwise via lock()/unlock() below).
    private[MapUpdatePool] var keyInUse: Any = null
    private[MapUpdatePool] var keyQueue = new mutable.Queue[Runnable]
    private[MapUpdatePool] val keyLock = new scala.concurrent.Lock
    // flags used in ring change
    // started: a job from queue is started
    var started = false;
    // noticedCandidateRing: a candidate ring from message server is set
    // before job from queue is started
    var noticedCandidateRing = false;
    val thread = new Thread(run {
      while (true) {
        val item = queue.take()
        started = true
        noticedCandidateRing = (appRun.candidateRing != null)
        if (item.key == null) {
          item.job.run() // This is a mapper job
        } else {
          // Keyed job: this worker must be one of the key's two candidates.
          val (i1, i2) = getPoolIndices(item.key)
          assert(me == i1 || me == i2)
          lock(i1, i2)
          if (attemptQueue(item.job, item.key, i1, i2)) {
            // Another worker already owns this key; the job was appended to
            // its serial queue -- nothing more to do here.
            unlock(i1, i2)
          } else {
            // Claim the key, run the job, then drain any jobs that piled up
            // on our serial queue while we were running.
            keyInUse = item.key
            unlock(i1, i2)
            item.job.run()
            var jobCount = 0
            var currentlyHot = false
            while ({
              lock(i1, i2)
              val work = keyQueue.headOption
              // Release key ownership only once the serial queue is empty.
              if (work != None) keyQueue.dequeue() else keyInUse = null
              // Once hot, stays hot for the rest of this drain (ORs in the
              // previous value), so priority is raised at most once and
              // restored after the loop.
              val newPriority = currentlyHot || keyQueue.size > 50
              unlock(i1, i2)
              if (newPriority != currentlyHot) {
                currentlyHot = newPriority
                Thread.currentThread.setPriority(Thread.MAX_PRIORITY)
              }
              // Every 5th iteration, re-dispatch one job from our own
              // priority queue so it is not starved by a long drain.
              val otherItem = if (jobCount % 5 == 4) Option(queue.poll()) else None
              otherItem.map { it => if (it.key == null) put(it.job) else putLocal(it.key, it.job) }
              work map { w => w.run() }
              jobCount += 1
              work != None
            }) {}
            if (currentlyHot) {
              Thread.currentThread.setPriority(Thread.NORM_PRIORITY)
            }
          }
        }
        // TODO: come with a better wait/notify solution
        //if (ring2 != null && !noticedRing2) notify();
        started = false
      }
    }, "MapUpdateThread-" + me)
    thread.start()
    // NOTE(review): reads keyQueue.size without taking keyLock (the
    // acquire/release are commented out), so the value is a best-effort
    // snapshot -- confirm this is acceptable for load-balancing use.
    def getSerialQueueSize() = {
      // keyLock.acquire
      val size = keyQueue.size
      // keyLock.release
      size
    }
  }
  val threadDataPool = 0 until poolsize map { new ThreadData(_) }
  private val rand = new java.util.Random(System.currentTimeMillis)
  val cluster = clusterFactory(p => putLocal(p.getKey, p))
  def mod(i: Int) = if (i < 0) -i else i
  private val HASH_CONSTANT = 17
  // Get queues in queue for key: maps a key to its two (distinct) candidate
  // worker indices. NOTE(review): divides by (size - 1), so this assumes
  // poolsize >= 2 -- a single-thread pool would throw ArithmeticException.
  private def getPoolIndices(key: Any) = {
    val fullhash = key.hashCode()
    val hash = fullhash / HASH_CONSTANT //cluster.hosts.size
    val i1 = hash % threadDataPool.size
    val i2 = (hash / threadDataPool.size) % (threadDataPool.size - 1)
    val (m1, m2) = (mod(i1), mod(i2))
    // Shift the second index past the first so the pair is always distinct.
    (m1, if (m2 < m1) m2 else m2 + 1)
  }
  def getPreferredPoolIndex(key: Any) = {
    val fullhash = key.hashCode()
    val hash = fullhash / HASH_CONSTANT //cluster.hosts.size
    mod(hash % threadDataPool.size)
  }
  // Acquire both workers' key locks in ascending index order (deadlock
  // avoidance); a duplicate index is locked only once.
  private def lock(i1: Int, i2: Int) {
    val (k1, k2) = if (i1 < i2) (i1, i2) else (i2, i1)
    threadDataPool(k1).keyLock.acquire()
    if (k1 != k2) threadDataPool(k2).keyLock.acquire()
  }
  // Release in reverse (descending) order of lock().
  private def unlock(i1: Int, i2: Int) {
    val (k1, k2) = if (i1 < i2) (i1, i2) else (i2, i1)
    threadDataPool(k2).keyLock.release()
    if (k1 != k2) threadDataPool(k1).keyLock.release()
  }
  // This method should only be called after acquiring the (i1,i2) locks.
  // Returns true iff one of the two candidate workers currently owns `key`,
  // in which case the job was appended to that worker's serial queue.
  private def attemptQueue(job: Runnable with Comparable[T], key: Any, i1: Int, i2: Int): Boolean = {
    val (p1, p2) = (threadDataPool(i1), threadDataPool(i2))
    val b1 = if (p1.keyInUse != null) p1.keyInUse == key else false
    val b2 = if (p2.keyInUse != null) p2.keyInUse == key else false
    // At most one worker may own a given key at a time.
    assert(!b1 || !b2 || b1 == b2)
    if (b1 || b2) {
      val dest = if (b1) p1 else p2
      dest.keyQueue.enqueue(job)
      true
    } else {
      false
    }
  }
  // Schedule an unkeyed job using power-of-two-choices load balancing:
  // sample one worker, and if it is not near-empty, compare against a
  // second random worker and pick the shorter backlog.
  def put(x: T) {
    val a = rand.nextInt(threadDataPool.size) //TODO: Do we need to serialize this call?
    val sa = threadDataPool(a).keyQueue.size + threadDataPool(a).queue.size()
    val destination =
      if (sa > 1) {
        val temp = rand.nextInt(threadDataPool.size - 1)
        val b = if (temp < a) temp else temp + 1
        if (threadDataPool(b).keyQueue.size + threadDataPool(b).queue.size < sa) b else a
      } else a
    threadDataPool(destination).queue.put(innerCompare(x, null))
  }
  // Put source into queue. Applies back-pressure: spins (with a quadratic,
  // capped sleep) until some sampled worker's backlog drops to <= 50.
  def putSource(x: T) {
    var a = 0
    var sa = 0
    while ({
      a = rand.nextInt(threadDataPool.size) //TODO: Do we need to serialize this call?
      sa = threadDataPool(a).keyQueue.size + threadDataPool(a).queue.size()
      sa > 50
    }) {
      java.lang.Thread.sleep((sa - 50L) * (sa - 50L) / 25 min 1000)
    }
    threadDataPool(a).queue.put(innerCompare(x, null))
  }
  // Schedule a keyed job on this node: either append it to the serial queue
  // of the worker that owns the key, or enqueue it on whichever of the two
  // candidates has the smaller (with 30% hysteresis) backlog.
  def putLocal(key: PerformerPacketKey, x: T) { // TODO : Fix key : Any??
    val (i1, i2) = getPoolIndices(key)
    lock(i1, i2)
    if (!attemptQueue(x, key, i1, i2)) {
      // TODO: HOT conductor check not accurate, use time stamps
      val (p1, p2) = (threadDataPool(i1), threadDataPool(i2))
      val dest = if (p1.keyQueue.size + p1.queue.size > 1.3 * (p2.keyQueue.size + p2.queue.size)) p2 else p1
      dest.queue.put(innerCompare(x, key))
    }
    unlock(i1, i2)
  }
  // Route a keyed job: run locally when this node owns the key's slice (or
  // when the destination is about to leave the cluster), otherwise forward
  // it to the owning host.
  def put(key: PerformerPacketKey, x: T) {
    // at startup appRun.ring could be null
    if (appRun.ring != null) {
      val destip = appRun.ring(key)
      if (appRun.self.ip.compareTo(destip) == 0
        ||
        // during ring change process, if dest is going to be removed from cluster
        (appRun.candidateRing != null && !appRun.candidateRing.ips.contains(destip)))
        putLocal(key, x)
      else
        cluster.send(Host(destip, appRun.ring.ipHostMap(destip)), x)
    }
  }
  /*
   Since hot conductor is not used, comment it out temporarily.
  // Hot Conductor Queue Status
  val queueStatus = cluster.hosts.map(_ => 0).toArray
  var maxQueueBacklog = 0 // TODO: Make this volatile
  val queueStatusServer = new HttpServer(cluster.port + 1, cluster.hosts.length,
    s => if(s.split('/')(1) == "queuestatus")
           Some { pool.map(p => p.queue.size + p.getSerialQueueSize()).max.toString.getBytes }
         else
           None
  )
  queueStatusServer.start
  val queueStatusUpdater = new Thread(run {
    cluster.hosts.foreach { host =>
      excToOptionWithLog {
        java.lang.Thread.sleep(500)
        if (host.compareTo(cluster.self) != 0) {
          val quote = fetchURL("http://" + host + ":" + (cluster.port + 1) + "/queuestatus")
          quote map(new String(_).toInt) getOrElse(0)
        } else
          pool.map(p => p.queue.size + p.getSerialQueueSize()).max
      } map { p =>
        queueStatus(i) = p
        maxQueueBacklog = queueStatus.max
      }
    }
  }, "queueStatusUpdater")
  //TODO: Uncomment the following line
  //Do we need a thread pool here
  //queueStatusUpdater.start()
  */
}
| walmartlabs/mupd8 | src/main/scala/com/walmartlabs/mupd8/MapUpdatePool.scala | Scala | apache-2.0 | 8,393 |
package dhg.ccg.parse.scg.mcmc
import dhg.util._
import dhg.ccg.cat._
import dhg.ccg.parse._
import dhg.ccg.parse.pcfg._
import dhg.ccg.parse.pcfg.mcmc._
import dhg.ccg.prob._
import scalaz._
import Scalaz._
import dhg.ccg.tagdict.StartEndTags
/**
 * Builds Dirichlet pseudo-count tables ("alpha * prior", optionally plus
 * observed gold counts) for the six SCG distributions: root, binary
 * production, unary production, terminal production, left context, and
 * right context. The six returned maps follow that order.
 */
trait ScgAlphaPriorMaker {
  def makeAll(guideCharts: Vector[CfgGuideChart], goldLabeledSentences: Vector[CcgTree],
    priorRootDist: LogProbabilityDistribution[Cat],
    priorBinyDist: ConditionalLogProbabilityDistribution[Cat, BinaryProd],
    priorUnryDist: ConditionalLogProbabilityDistribution[Cat, UnaryProd],
    priorTermDist: ConditionalLogProbabilityDistribution[Cat, TermProd],
    priorLctxDist: ConditionalLogProbabilityDistribution[Cat, Cat],
    priorRctxDist: ConditionalLogProbabilityDistribution[Cat, Cat],
    alphaRoot: Double, alphaBiny: Double, alphaUnry: Double, alphaTerm: Double, alphaLctx: Double, alphaRctx: Double)(se: StartEndTags[Cat]): //
    (Map[Cat, LogDouble], Map[Cat, Map[BinaryProd, LogDouble]], Map[Cat, Map[UnaryProd, LogDouble]], Map[Cat, Map[TermProd, LogDouble]], Map[Cat, Map[Cat, LogDouble]], Map[Cat, Map[Cat, LogDouble]])
}
/**
 * [[ScgAlphaPriorMaker]] that combines scaled priors with supervised data:
 * for each event e, the pseudo-count is alpha * prior(e) + goldCount(e).
 * The event space is the union of events reachable in any guide chart and
 * events observed in the gold-labeled trees.
 */
class TrainDataScgAlphaPriorMaker(
  productionFinder: ScgProductionFinder,
  guideChartProdFinder: ScgGuideChartProdFinder)
    extends ScgAlphaPriorMaker {
  def makeAll(guideCharts: Vector[CfgGuideChart], goldLabeledSentences: Vector[CcgTree],
    priorRootDist: LogProbabilityDistribution[Cat],
    priorBinyDist: ConditionalLogProbabilityDistribution[Cat, BinaryProd],
    priorUnryDist: ConditionalLogProbabilityDistribution[Cat, UnaryProd],
    priorTermDist: ConditionalLogProbabilityDistribution[Cat, TermProd],
    priorLctxDist: ConditionalLogProbabilityDistribution[Cat, Cat],
    priorRctxDist: ConditionalLogProbabilityDistribution[Cat, Cat],
    alphaRoot: Double, alphaBiny: Double, alphaUnry: Double, alphaTerm: Double, alphaLctx: Double, alphaRctx: Double)(se: StartEndTags[Cat]) = {
    // Counts of each event type observed in the gold trees, summed (|+|)
    // across sentences. Context counts need start/end tags (se).
    val goldRootCounts = goldLabeledSentences.map(productionFinder.rootCounts /*       */ ).fold(Map.empty[Cat, Double])(_ |+| _)
    val goldBinyCounts = goldLabeledSentences.map(productionFinder.binyCounts /*       */ ).fold(Map.empty[Cat, Map[BinaryProd, Double]])(_ |+| _)
    val goldUnryCounts = goldLabeledSentences.map(productionFinder.unryCounts /*       */ ).fold(Map.empty[Cat, Map[UnaryProd, Double]])(_ |+| _)
    val goldTermCounts = goldLabeledSentences.map(productionFinder.termCounts /*       */ ).fold(Map.empty[Cat, Map[TermProd, Double]])(_ |+| _)
    val goldLctxCounts = goldLabeledSentences.map(productionFinder.lctxCounts(_)(se)).fold(Map.empty[Cat, Map[Cat, Double]])(_ |+| _)
    val goldRctxCounts = goldLabeledSentences.map(productionFinder.rctxCounts(_)(se)).fold(Map.empty[Cat, Map[Cat, Double]])(_ |+| _)
    // Full event space: everything a guide chart can propose, plus
    // everything the gold data actually contains.
    val allRootSet = guideCharts.map(guideChartProdFinder.roots /*       */ ).reduce(_ |+| _) |+| goldRootCounts.keySet
    val allBinySet = guideCharts.map(guideChartProdFinder.binys /*       */ ).reduce(_ |+| _) |+| goldBinyCounts.mapValues(_.keySet)
    val allUnrySet = guideCharts.map(guideChartProdFinder.unrys /*       */ ).reduce(_ |+| _) |+| goldUnryCounts.mapValues(_.keySet)
    val allTermSet = guideCharts.map(guideChartProdFinder.terms /*       */ ).reduce(_ |+| _) |+| goldTermCounts.mapValues(_.keySet)
    val allLctxSet = guideCharts.map(guideChartProdFinder.lctxs(_)(se)).reduce(_ |+| _) |+| goldLctxCounts.mapValues(_.keySet)
    val allRctxSet = guideCharts.map(guideChartProdFinder.rctxs(_)(se)).reduce(_ |+| _) |+| goldRctxCounts.mapValues(_.keySet)
    // Pseudo-count tables: alpha-scaled prior plus gold count (0 if unseen).
    val alphaPriorRootCounts = allRootSet.mapTo(root => LogDouble(alphaRoot) * priorRootDist(root) + LogDouble(goldRootCounts.getOrElse(root, 0.0))).toMap
    val alphaPriorBinyCounts = allBinySet.mapt((cat, binys) => cat -> binys.mapTo(prod => (LogDouble(alphaBiny) * priorBinyDist(prod, cat) + LogDouble(goldBinyCounts.get(cat).flatMap(_.get(prod)).getOrElse(0.0)))).toMap)
    val alphaPriorUnryCounts = allUnrySet.mapt((cat, unrys) => cat -> unrys.mapTo(prod => (LogDouble(alphaUnry) * priorUnryDist(prod, cat) + LogDouble(goldUnryCounts.get(cat).flatMap(_.get(prod)).getOrElse(0.0)))).toMap)
    val alphaPriorTermCounts = allTermSet.mapt((cat, terms) => cat -> terms.mapTo(prod => (LogDouble(alphaTerm) * priorTermDist(prod, cat) + LogDouble(goldTermCounts.get(cat).flatMap(_.get(prod)).getOrElse(0.0)))).toMap)
    val alphaPriorLctxCounts = allLctxSet.mapt((cat, lctxs) => cat -> lctxs.mapTo(lctx => (LogDouble(alphaLctx) * priorLctxDist(lctx, cat) + LogDouble(goldLctxCounts.get(cat).flatMap(_.get(lctx)).getOrElse(0.0)))).toMap)
    val alphaPriorRctxCounts = allRctxSet.mapt((cat, rctxs) => cat -> rctxs.mapTo(rctx => (LogDouble(alphaRctx) * priorRctxDist(rctx, cat) + LogDouble(goldRctxCounts.get(cat).flatMap(_.get(rctx)).getOrElse(0.0)))).toMap)
    (alphaPriorRootCounts, alphaPriorBinyCounts, alphaPriorUnryCounts, alphaPriorTermCounts, alphaPriorLctxCounts, alphaPriorRctxCounts)
  }
}
//class TrainDataNormalizingScgAlphaPriorMaker(
// productionFinder: ScgProductionFinder,
// guideChartProdFinder: ScgGuideChartProdFinder)
// extends ScgAlphaPriorMaker {
//
// def makeAll(guideCharts: Vector[CfgGuideChart], goldLabeledSentences: Vector[CcgTree],
// priorRootDist: LogProbabilityDistribution[Cat],
// priorBinyDist: ConditionalLogProbabilityDistribution[Cat, BinaryProd],
// priorUnryDist: ConditionalLogProbabilityDistribution[Cat, UnaryProd],
// priorTermDist: ConditionalLogProbabilityDistribution[Cat, TermProd],
// priorLctxDist: ConditionalLogProbabilityDistribution[Cat, Cat],
// priorRctxDist: ConditionalLogProbabilityDistribution[Cat, Cat],
// alphaRoot: Double, alphaBiny: Double, alphaUnry: Double, alphaTerm: Double, alphaLctx: Double, alphaRctx: Double)(se: StartEndTags[Cat]) = {
//
// val goldRootCounts = goldLabeledSentences.map(productionFinder.rootCounts /* */ ).fold(Map.empty[Cat, Double])(_ |+| _)
// val goldBinyCounts = goldLabeledSentences.map(productionFinder.binyCounts /* */ ).fold(Map.empty[Cat, Map[BinaryProd, Double]])(_ |+| _)
// val goldUnryCounts = goldLabeledSentences.map(productionFinder.unryCounts /* */ ).fold(Map.empty[Cat, Map[UnaryProd, Double]])(_ |+| _)
// val goldTermCounts = goldLabeledSentences.map(productionFinder.termCounts /* */ ).fold(Map.empty[Cat, Map[TermProd, Double]])(_ |+| _)
// val goldLctxCounts = goldLabeledSentences.map(productionFinder.lctxCounts(_)(se)).fold(Map.empty[Cat, Map[Cat, Double]])(_ |+| _)
// val goldRctxCounts = goldLabeledSentences.map(productionFinder.rctxCounts(_)(se)).fold(Map.empty[Cat, Map[Cat, Double]])(_ |+| _)
//
// val allRootSet = guideCharts.map(guideChartProdFinder.roots /* */ ).reduce(_ |+| _) |+| goldRootCounts.keySet
// val allBinySet = guideCharts.map(guideChartProdFinder.binys /* */ ).reduce(_ |+| _) |+| goldBinyCounts.mapValues(_.keySet)
// val allUnrySet = guideCharts.map(guideChartProdFinder.unrys /* */ ).reduce(_ |+| _) |+| goldUnryCounts.mapValues(_.keySet)
// val allTermSet = guideCharts.map(guideChartProdFinder.terms /* */ ).reduce(_ |+| _) |+| goldTermCounts.mapValues(_.keySet)
// val allLctxSet = guideCharts.map(guideChartProdFinder.lctxs(_)(se)).reduce(_ |+| _) |+| goldLctxCounts.mapValues(_.keySet)
// val allRctxSet = guideCharts.map(guideChartProdFinder.rctxs(_)(se)).reduce(_ |+| _) |+| goldRctxCounts.mapValues(_.keySet)
//
// val rootZ = allRootSet.sumBy(root => priorRootDist(root)); assert(rootZ.nonZero, f"rootZ=$rootZ; allRootSet=${allRootSet}") // normalization constants...
// val binyZ = allBinySet.map { case (cat, prods) => cat -> prods.sumBy(prod => priorBinyDist(prod, cat)) }; for ((cat, z) <- binyZ) assert(z.nonZero, f"binyZ($cat)=$z ; allBinySet($cat)=${allBinySet(cat)}")
// val unryZ = allUnrySet.map { case (cat, prods) => cat -> prods.sumBy(prod => priorUnryDist(prod, cat)) }; for ((cat, z) <- unryZ) assert(z.nonZero, f"unryZ($cat)=$z ; allUnrySet($cat)=${allUnrySet(cat)}")
// val termZ = allTermSet.map { case (cat, prods) => cat -> prods.sumBy(prod => priorTermDist(prod, cat)) }; for ((cat, z) <- termZ) assert(z.nonZero, f"termZ($cat)=$z ; allTermSet($cat)=${allTermSet(cat)}")
// val lctxZ = allLctxSet.map { case (cat, lctxs) => cat -> lctxs.sumBy(lctx => priorLctxDist(lctx, cat)) }; for ((cat, z) <- lctxZ) assert(z.nonZero, f"lctxZ($cat)=$z ; allLctxSet($cat)=${allLctxSet(cat)}")
// val rctxZ = allRctxSet.map { case (cat, rctxs) => cat -> rctxs.sumBy(rctx => priorRctxDist(rctx, cat)) }; for ((cat, z) <- rctxZ) assert(z.nonZero, f"rctxZ($cat)=$z ; allRctxSet($cat)=${allRctxSet(cat)}")
//
// val alphaPriorRootCounts = allRootSet.mapTo(root => LogDouble(alphaRoot) * priorRootDist(root) / rootZ + LogDouble(goldRootCounts.getOrElse(root, 0.0))).toMap
// val alphaPriorBinyCounts = allBinySet.mapt((cat, binys) => cat -> binys.mapTo(prod => (LogDouble(alphaBiny) * priorBinyDist(prod, cat) / binyZ(cat) + LogDouble(goldBinyCounts.get(cat).flatMap(_.get(prod)).getOrElse(0.0)))).toMap)
// val alphaPriorUnryCounts = allUnrySet.mapt((cat, unrys) => cat -> unrys.mapTo(prod => (LogDouble(alphaUnry) * priorUnryDist(prod, cat) / unryZ(cat) + LogDouble(goldUnryCounts.get(cat).flatMap(_.get(prod)).getOrElse(0.0)))).toMap)
// val alphaPriorTermCounts = allTermSet.mapt((cat, terms) => cat -> terms.mapTo(prod => (LogDouble(alphaTerm) * priorTermDist(prod, cat) / termZ(cat) + LogDouble(goldTermCounts.get(cat).flatMap(_.get(prod)).getOrElse(0.0)))).toMap)
// val alphaPriorLctxCounts = allLctxSet.mapt((cat, lctxs) => cat -> lctxs.mapTo(lctx => (LogDouble(alphaLctx) * priorLctxDist(lctx, cat) / lctxZ(cat) + LogDouble(goldLctxCounts.get(cat).flatMap(_.get(lctx)).getOrElse(0.0)))).toMap)
// val alphaPriorRctxCounts = allRctxSet.mapt((cat, rctxs) => cat -> rctxs.mapTo(rctx => (LogDouble(alphaRctx) * priorRctxDist(rctx, cat) / rctxZ(cat) + LogDouble(goldRctxCounts.get(cat).flatMap(_.get(rctx)).getOrElse(0.0)))).toMap)
//
// (alphaPriorRootCounts, alphaPriorBinyCounts, alphaPriorUnryCounts, alphaPriorTermCounts, alphaPriorLctxCounts, alphaPriorRctxCounts)
// }
//
//}
| dhgarrette/2015-ccg-parsing | src/main/scala/dhg/ccg/parse/scg/mcmc/ScgAlphaPriorMaker.scala | Scala | apache-2.0 | 10,041 |
import edu.uta.diql._
import org.apache.spark._
import org.apache.spark.rdd._
import org.apache.spark.sql._
import org.apache.log4j._
import org.apache.hadoop.fs._
import scala.util.Random
/**
 * Benchmark comparing four PageRank-style implementations (plain Spark RDD,
 * Diablo loop DSL, SQLGen loop DSL, and hand-written Spark SQL) on a
 * synthetic R-MAT graph.
 *
 * args(0) = number of benchmark repetitions,
 * args(1) = vertex count, args(2) = edge count.
 */
object PageRank {
  def main ( args: Array[String] ) {
    val repeats = args(0).toInt
    val vertices = args(1).toInt
    val edges = args(2).toLong
    // NOTE(review): app name looks copy-pasted from the LinearRegression
    // benchmark -- presumably should be "PageRank".
    val conf = new SparkConf().setAppName("LinearRegression")
    val sc = new SparkContext(conf)
    val spark = SparkSession
      .builder()
      .config(conf)
      .getOrCreate()
    import spark.implicits._
    // NOTE(review): these conf.set calls happen after the SparkContext is
    // created -- confirm they still take effect.
    conf.set("spark.logConf","false")
    conf.set("spark.eventLog.enabled","false")
    LogManager.getRootLogger().setLevel(Level.WARN)
    val rand = new Random()
    // R-MAT quadrant probabilities (a + b + c + d = 1).
    val RMATa = 0.30
    val RMATb = 0.25
    val RMATd = 0.25
    val RMATc = 0.20
    // Round the vertex count up to the next power of two, as R-MAT requires.
    val vn = math.round(Math.pow(2.0,Math.ceil(Math.log(vertices)/Math.log(2.0)))).toInt
    // Pick one of the four R-MAT quadrants with probabilities a, b, c; the
    // unused parameter d is the remaining mass covered by the final case.
    def pickQuadrant ( a: Double, b: Double, c: Double, d: Double ): Int
      = rand.nextDouble() match {
        case x if x < a => 0
        case x if (x >= a && x < a + b) => 1
        case x if (x >= a + b && x < a + b + c) => 2
        case _ => 3
      }
    // Recursively descend into quadrants of the adjacency matrix, halving
    // the extent t each step, until a single cell (edge) is reached.
    def chooseCell ( x: Int, y: Int, t: Int ): (Int,Int) = {
      if (t <= 1)
        (x,y)
      else {
        val newT = math.round(t.toFloat/2.0).toInt
        pickQuadrant(RMATa, RMATb, RMATc, RMATd) match {
          case 0 => chooseCell(x, y, newT)
          case 1 => chooseCell(x + newT, y, newT)
          case 2 => chooseCell(x, y + newT, newT)
          case 3 => chooseCell(x + newT, y + newT, newT)
        }
      }
    }
    // Sample one R-MAT edge from the vn x vn adjacency matrix.
    def addEdge ( vn: Int ): (Int,Int) = {
      val v = math.round(vn.toFloat/2.0).toInt
      chooseCell(v,v,v)
    }
    // Edge set as ((src, dst), true) pairs, generated 100 edges per task.
    val E = sc.parallelize(1L to edges/100)
              .flatMap{ i => (1 to 100).map{ j => addEdge(vn) } }
              .map{ case (i,j) => ((i.toLong,j.toLong),true) }
              .cache()
    // b = damping factor; c = per-neighbor damping contribution.
    var b: Double = 0.8;
    var c: Double = b/vertices;
    // Initial rank vector: uniform (1 - b) / N.
    val R = sc.parallelize(1L to vertices).map(v => (v,(1.0-b)/vertices))
    val Rds = R.toDS()
    Rds.createOrReplaceTempView("Rds")
    val size = sizeof(((1L,1L),true))
    println("*** %d %d %.2f GB".format(vertices,edges,edges.toDouble*size/(1024.0*1024.0*1024.0)))
    val Eds = E.toDS()
    Eds.createOrReplaceTempView("Eds")
    val n = vertices
    // Runs one PageRank step in each of the four systems, timing each.
    // Failures are caught and printed so one system's error does not abort
    // the others.
    def test () {
      var t: Long = System.currentTimeMillis()
      try {
        // Plain Spark RDD: classic contributions/reduceByKey formulation.
        val links = E.map(_._1).groupByKey().cache()
        var ranks = links.mapValues(v => 1.0/vertices)
        val contribs = links.join(ranks).values.flatMap {
          case (urls,rank)
            => val size = urls.size
               urls.map(url => (url, rank/size))
        }
        ranks = contribs.reduceByKey(_+_).mapValues((1-b)/vertices+b*_).cache();
        println(ranks.count);
        println("**** SparkRDD run time: "+(System.currentTimeMillis()-t)/1000.0+" secs")
      } catch { case x: Throwable => println(x) }
      t = System.currentTimeMillis()
      try {
        // Diablo loop DSL over the RDD views (E, R bound by name).
        v(sc,"""
          var C: vector[Int] = vector();
          var S: vector[Double] = vector();
          var I: vector[Double] = vector();
          for i = 1, n do
            for j = 1, n do
              if (E[i,j])
                C[i] += 1;
          for i = 1, n do{
            for j = 1, n do{
              if (E[j,i])
                S[i] += c/C[j];
            };
          };
          for i = 1, n do
            I[i] += S[i] + R[i];
          println(I.count);
          """)
        println("**** Diablo run time: "+(System.currentTimeMillis()-t)/1000.0+" secs")
      } catch { case x: Throwable => println(x) }
      t = System.currentTimeMillis()
      try {
        // SQLGen: same loop program, but over the Dataset views Eds/Rds.
        s(sc,"""
          var C: vector[Int] = vector();
          var S: vector[Double] = vector();
          var I: vector[Double] = vector();
          for i = 1, n do
            for j = 1, n do
              if (Eds[i,j])
                C[i] += 1;
          for i = 1, n do{
            for j = 1, n do{
              if (Eds[j,i])
                S[i] += c/C[j];
            };
          };
          for i = 1, n do
            I[i] += S[i] + Rds[i];
          println(I.count);
          """)
        println("**** SQLGen run time: "+(System.currentTimeMillis()-t)/1000.0+" secs")
      } catch { case x: Throwable => println(x) }
      t = System.currentTimeMillis()
      try{
        // Hand-written Spark SQL: out-degree (C), contributions (S), ranks (I).
        var C = spark.sql("SELECT Eds._1._1 AS _1, COUNT(Eds._2) AS _2 FROM Eds WHERE Eds._2 = true GROUP BY Eds._1._1");
        C.createOrReplaceTempView("C");
        var S = spark.sql(StringContext("SELECT Eds._1._2 AS _1, SUM(", " / C._2) AS _2 FROM C JOIN Eds ON C._1 == Eds._1._1 GROUP BY Eds._1._2").s(c));
        S.createOrReplaceTempView("S");
        var I = spark.sql("SELECT S._1 AS _1, SUM(S._2 + Rds._2) AS _2 FROM Rds JOIN S ON Rds._1 == S._1 GROUP BY S._1");
        I.createOrReplaceTempView("I");
        println(I.count);
        println("**** SparkSQL run time: "+(System.currentTimeMillis()-t)/1000.0+" secs")
      }catch { case x: Throwable => println(x) }
    }
    for ( i <- 1 to repeats )
      test()
    sc.stop()
  }
}
| fegaras/DIQL | benchmarks/sqlgen/PageRank.scala | Scala | apache-2.0 | 5,177 |
package com.twitter.finagle.redis.server.protocol
import com.twitter.finagle.redis.server.ByteArrayKey
import com.twitter.io.Buf
import org.scalatest.FreeSpec
import org.scalatest._
/**
 * Tests for `CommandParser`: parsing GET and SET (with EX/PX/NX/XX options)
 * from lists of `Buf` tokens.
 */
class CommandParserSpec extends FreeSpec with Matchers {
  "CommandParser" - {
    // "auxilary methods" - {
    //  "parts separated with space" in {
    //    val array = "SET A myValue".getBytes()
    //    val parts = CommandParser.extractParts(array)
    //    parts.size should be (3)
    //    new String(parts(0)) should be ("SET")
    //    new String(parts(1)) should be ("A")
    //    new String(parts(2)) should be ("myValue")
    //  }
    //  "parts separated with double quote" in {
    //    val array = "SET A \\"my value\\"".getBytes()
    //    val parts = CommandParser.extractParts(array)
    //    parts.size should be (3)
    //    new String(parts(0)) should be ("SET")
    //    new String(parts(1)) should be ("A")
    //    new String(parts(2)) should be ("my value")
    //  }
    // }
    // Tokenizes on single spaces only; quoted multi-word values cannot be
    // expressed -- presumably why the quote-handling tests above are
    // commented out.
    def parseCmd(cmd: String): List[Buf] = cmd.split(' ').toList.map(e => Buf.Utf8(e))
    "GET key" in {
      val buf: List[Buf] = parseCmd("GET key")
      val command = CommandParser(buf)
      command shouldBe a[Get]
      val get = command.asInstanceOf[Get]
      new String(get.key) should be ("key")
    }
    // Bare SET: no expiry and neither NX nor XX set.
    "SET key value" in {
      val buf: List[Buf] = parseCmd("SET key value")
      val command = CommandParser(buf)
      command shouldBe a[Set]
      val set = command.asInstanceOf[Set]
      new String(set.key) should be ("key")
      new String(set.value) should be ("value")
      set.ex should be (None)
      set.px should be (None)
      set.nx should be (false)
      set.xx should be (false)
    }
    "SET key value XX" in {
      val buf: List[Buf] = parseCmd("SET key value XX")
      val command = CommandParser(buf)
      command shouldBe a[Set]
      val set = command.asInstanceOf[Set]
      new String(set.key) should be ("key")
      new String(set.value) should be ("value")
      set.ex should be (None)
      set.px should be (None)
      set.nx should be (false)
      set.xx should be (true)
    }
    // Options may appear in any position after the value; PX carries a
    // millisecond TTL argument.
    "SET key value PX 12 NX" in {
      val buf: List[Buf] = parseCmd("SET key value PX 12 NX")
      val command = CommandParser(buf)
      command shouldBe a[Set]
      val set = command.asInstanceOf[Set]
      set.key should be (new ByteArrayKey("key".getBytes))
      set.value should be ("value".getBytes)
      set.xx should be (false)
      set.nx should be (true)
      set.px should be (Some(12))
    }
  }
}
| gustavoamigo/finagle-redis-server | src/test/scala/com/twitter/finagle/redis/server/protocol/CommandParserSpec.scala | Scala | apache-2.0 | 2,556 |
package com.nutomic.ensichat.views
import java.text.DateFormat
import android.content.Context
import com.mobsandgeeks.adapters.{Sectionizer, SimpleSectionAdapter}
import com.nutomic.ensichat.R
import com.nutomic.ensichat.core.messages.Message
import scala.collection.JavaConverters._
object DatesAdapter {
  // Sections messages by the locale-formatted calendar date of their
  // timestamp, so one date header is rendered per day.
  // NOTE(review): item.header.time.get assumes the timestamp Option is
  // always present for displayed messages -- confirm upstream guarantees.
  private val Sectionizer = new Sectionizer[Message]() {
    override def getSectionTitleForItem(item: Message): String = {
      DateFormat
        .getDateInstance(DateFormat.MEDIUM)
        .format(item.header.time.get.toDate)
    }
  }
}
/**
 * Wraps [[MessagesAdapter]] and shows date headers between messages,
 * using [[DatesAdapter.Sectionizer]] to group by day.
 */
class DatesAdapter(context: Context, messagesAdapter: MessagesAdapter)
  extends SimpleSectionAdapter[Message](context, messagesAdapter, R.layout.item_date, R.id.date,
    DatesAdapter.Sectionizer) {
  /** Replaces the wrapped adapter's contents with `items` and refreshes views. */
  def replaceItems(items: Seq[Message]): Unit = {
    messagesAdapter.clear()
    messagesAdapter.addAll(items.asJava)
    notifyDataSetChanged()
  }
}
| Nutomic/ensichat | android/src/main/scala/com/nutomic/ensichat/views/DatesAdapter.scala | Scala | mpl-2.0 | 979 |
object NewModifiers/*<-_empty_::NewModifiers.*/ {
inline val foo/*<-_empty_::NewModifiers.foo.*/ = "foo"
opaque type A/*<-_empty_::NewModifiers.A#*/ = Int/*->scala::Int#*/
}
| som-snytt/dotty | tests/semanticdb/expect/NewModifiers.expect.scala | Scala | apache-2.0 | 178 |
package com.github.saurfang.spark.tsne.impl
import breeze.linalg._
import breeze.stats.distributions.Rand
import com.github.saurfang.spark.tsne.tree.SPTree
import com.github.saurfang.spark.tsne.{TSNEGradient, TSNEHelper, TSNEParam, X2P}
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.storage.StorageLevel
import org.slf4j.LoggerFactory
import scala.util.Random
/**
 * Barnes-Hut t-SNE on Spark: embeds the rows of a distributed matrix into
 * `noDims` dimensions by gradient descent, approximating repulsive forces
 * with a space-partitioning tree (theta-criterion).
 */
object BHTSNE {
  private def logger = LoggerFactory.getLogger(BHTSNE.getClass)
  /**
   * Runs the optimization and returns the final n x noDims embedding.
   *
   * @param input         row matrix of input points (should be persisted)
   * @param noDims        output dimensionality
   * @param maxIterations number of gradient-descent iterations
   * @param perplexity    target perplexity used when computing P via X2P
   * @param theta         Barnes-Hut accuracy/speed trade-off
   * @param reportLoss    predicate choosing iterations at which loss is computed
   * @param callback      invoked each iteration with (iter, Y copy, optional loss)
   * @param seed          RNG seed for the initial embedding
   */
  def tsne(
    input: RowMatrix,
    noDims: Int = 2,
    maxIterations: Int = 1000,
    perplexity: Double = 30,
    theta: Double = 0.5,
    reportLoss: Int => Boolean = {i => i % 10 == 0},
    callback: (Int, DenseMatrix[Double], Option[Double]) => Unit = {case _ => },
    seed: Long = Random.nextLong()
  ): DenseMatrix[Double] = {
    if(input.rows.getStorageLevel == StorageLevel.NONE) {
      logger.warn("Input is not persisted and performance could be bad")
    }
    Rand.generator.setSeed(seed)
    val tsneParam = TSNEParam()
    import tsneParam._
    val n = input.numRows().toInt
    // Y: embedding (small gaussian init); iY: update/momentum term;
    // gains: per-coordinate adaptive learning-rate gains.
    val Y: DenseMatrix[Double] = DenseMatrix.rand(n, noDims, Rand.gaussian(0, 1)) :/ 1e4
    val iY = DenseMatrix.zeros[Double](n, noDims)
    val gains = DenseMatrix.ones[Double](n, noDims)
    // approximate p_{j|i}; P is symmetrized and scaled by the early
    // exaggeration factor (undone below at iteration == early_exaggeration).
    val p_ji = X2P(input, 1e-5, perplexity)
    val P = TSNEHelper.computeP(p_ji, n).glom()
      .map(rows => rows.map {
        case (i, data) =>
          (i, data.map(_._1).toSeq, DenseVector(data.map(_._2 * exaggeration_factor).toArray))
      })
      .cache()
    var iteration = 1
    while(iteration <= maxIterations) {
      // Broadcast the current embedding and its Barnes-Hut tree to executors.
      val bcY = P.context.broadcast(Y)
      val bcTree = P.context.broadcast(SPTree(Y))
      val initialValue = (DenseMatrix.zeros[Double](n, noDims), DenseMatrix.zeros[Double](n, noDims), 0.0)
      // Aggregate attractive (posF) and approximate repulsive (negF) forces
      // plus the normalization constant sumQ across all partitions.
      val (posF, negF, sumQ) = P.treeAggregate(initialValue)(
        seqOp = (c, v) => {
          // c: (pos, neg, sumQ), v: Array[(i, Seq(j), vec(Distance))]
          TSNEGradient.computeEdgeForces(v, bcY.value, c._1)
          val q = TSNEGradient.computeNonEdgeForces(bcTree.value, bcY.value, theta, c._2, v.map(_._1): _*)
          (c._1, c._2, c._3 + q)
        },
        combOp = (c1, c2) => {
          // c: (grad, loss)
          (c1._1 + c2._1, c1._2 + c2._2, c1._3 + c2._3)
        })
      // Gradient: attractive forces minus normalized repulsive forces.
      val dY: DenseMatrix[Double] = posF :- (negF :/ sumQ)
      // Applies momentum, gains, and learning rate in place.
      TSNEHelper.update(Y, dY, iY, gains, iteration, tsneParam)
      if(reportLoss(iteration)) {
        // KL-divergence loss requires a second pass over P.
        val loss = P.treeAggregate(0.0)(
          seqOp = (c, v) => {
            TSNEGradient.computeLoss(v, bcY.value, sumQ)
          },
          combOp = _ + _
        )
        logger.debug(s"Iteration $iteration finished with $loss")
        callback(iteration, Y.copy, Some(loss))
      } else {
        logger.debug(s"Iteration $iteration finished")
        callback(iteration, Y.copy, None)
      }
      bcY.destroy()
      bcTree.destroy()
      //undo early exaggeration
      if(iteration == early_exaggeration) {
        P.foreach {
          rows => rows.foreach {
            case (_, _, vec) => vec.foreachPair { case (i, v) => vec.update(i, v / exaggeration_factor) }
          }
        }
      }
      iteration += 1
    }
    Y
  }
}
| saurfang/spark-tsne | spark-tsne-core/src/main/scala/com/github/saurfang/spark/tsne/impl/BHTSNE.scala | Scala | apache-2.0 | 3,401 |
package mesosphere.marathon
package raml
import mesosphere.marathon.core.instance
object LocalVolumeConversion {
  /** RAML writer converting a core [[instance.LocalVolumeId]] into its API representation. */
  implicit val localVolumeIdWrites: Writes[instance.LocalVolumeId, LocalVolumeId] = Writes { volume =>
    LocalVolumeId(
      runSpecId = volume.runSpecId.toRaml,
      persistenceId = volume.idString,
      containerPath = volume.name,
      uuid = volume.uuid
    )
  }
}
| janisz/marathon | src/main/scala/mesosphere/marathon/raml/LocalVolumeConversion.scala | Scala | apache-2.0 | 427 |
package omnibus.api.streaming
import akka.actor._
import akka.actor.SupervisorStrategy.Stop
import scala.util.Failure
import omnibus.core.actors.CommonActor
import omnibus.service.streamed.{ EndOfStream, TimeOutStream }
/**
 * Base actor for streaming responses: pushes chunks of type `B` to the client
 * and reacts to end-of-stream / timeout events emitted by the service layer.
 */
trait StreamingResponse[B] extends CommonActor {

  // Measures how long this stream stays open; stopped when the actor stops.
  val timerCtx = metrics.timer("streaming").timerContext()

  override def postStop() = timerCtx.stop()

  override def receive = {
    case TimeOutStream ⇒ streamTimeout()
    case EndOfStream ⇒ endOfStream()
    case Failure(e) ⇒ handleException(e)
    case e: Exception ⇒ handleException(e)
    // TODO add generic event handling
    // case s: StreamChunk => push(toChunkFormat(s))
  }

  /** Delivers one chunk to the client. */
  def push(b: B)

  /** Reports a failure to the client; also invoked by the supervisor strategy below. */
  def handleException(e: Throwable)

  /** Renders a service event into the wire chunk type via the implicit format. */
  def toChunkFormat[A, F <: StreamingFormat[A, B]](event: A)(implicit fmt: F): B = fmt.format(event)

  /** Called when the stream times out before completing. */
  def streamTimeout()

  /** Called when the service signals a normal end of stream. */
  def endOfStream()

  // Any child failure is surfaced to the client and the failing child is stopped.
  override val supervisorStrategy =
    OneForOneStrategy() {
      case e ⇒
        handleException(e)
        Stop
    }
}
/** Type class rendering a stream event of type `A` into the chunk type `B`. */
trait StreamingFormat[A, B] {
  def format(a: A): B
}
/**
* Copyright: Copyright (C) 2016, ATS Advanced Telematic Systems GmbH
* License: MPL-2.0
*/
package org.genivi.sota.device_registry.db
import java.time.Instant
import org.genivi.sota.core.DatabaseSpec
import org.genivi.sota.data.DeviceGenerators.{genDeviceId, genDeviceT}
import org.genivi.sota.data.Namespaces
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{FunSuite, ShouldMatchers}
import scala.concurrent.ExecutionContext.Implicits.global
/** Database-backed tests for `DeviceRepository` activation bookkeeping. */
class DeviceRepositorySpec extends FunSuite
    with DatabaseSpec
    with ScalaFutures
    with ShouldMatchers {

  test("updateLastSeen sets activated_at the first time only") {
    val device = genDeviceT.sample.get.copy(deviceId = Some(genDeviceId.sample.get))
    // Call updateLastSeen twice on the same device; the first element of each
    // result reports whether that call set activated_at.
    val setTwice = for {
      uuid <- DeviceRepository.create(Namespaces.defaultNs, device)
      first <- DeviceRepository.updateLastSeen(uuid, Instant.now()).map(_._1)
      second <- DeviceRepository.updateLastSeen(uuid, Instant.now()).map(_._1)
    } yield (first, second)

    // Only the first call may report that it activated the device.
    whenReady(db.run(setTwice), Timeout(Span(10, Seconds))) {
      case (f, s) => f shouldBe(true)
        s shouldBe(false)
    }
  }

  test("activated_at can be counted") {
    val device = genDeviceT.sample.get.copy(deviceId = Some(genDeviceId.sample.get))
    // Activate one device at `now`, then count activations inside [now, now+100s).
    val createDevice = for {
      uuid <- DeviceRepository.create(Namespaces.defaultNs, device)
      now = Instant.now()
      _ <- DeviceRepository.updateLastSeen(uuid, now)
      count <- DeviceRepository.countActivatedDevices(Namespaces.defaultNs, now, now.plusSeconds(100))
    } yield count

    whenReady(db.run(createDevice), Timeout(Span(10, Seconds))) { count =>
      count shouldBe(1)
    }
  }
}
| PDXostc/rvi_sota_server | device-registry/src/test/scala/org/genivi/sota/device_registry/db/DeviceRepositorySpec.scala | Scala | mpl-2.0 | 1,805 |
package uk.co.morleydev.zander.client.data.exception
/**
 * Signals that the operation name supplied on the command line does not map
 * to any supported operation.
 *
 * @param operation the rejected operation name
 */
class InvalidOperationException(val operation: String)
  extends InvalidArgumentsException(s"Operation $operation is not valid operation")
| MorleyDev/zander.client | src/main/scala/uk/co/morleydev/zander/client/data/exception/InvalidOperationException.scala | Scala | mit | 239 |
/*
* Copyright 2020 Lenses.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.lenses.streamreactor.connect.aws.s3.source
import io.lenses.streamreactor.connect.aws.s3.config.S3ConfigSettings._
import io.lenses.streamreactor.connect.aws.s3.config.{AuthMode, Format, FormatOptions}
import io.lenses.streamreactor.connect.aws.s3.sink.utils.{S3ProxyContext, S3TestPayloadReader}
import org.jclouds.blobstore.BlobStoreContext
import org.scalatest.matchers.should.Matchers
/**
 * Test fixture helpers that pre-load the local S3 proxy bucket with the
 * payload files consumed by the source-connector tests.
 */
object BucketSetup extends Matchers {

  import S3ProxyContext._

  // Connector properties pointing at the local S3 proxy with static credentials.
  val DefaultProps = Map(
    AWS_ACCESS_KEY -> Identity,
    AWS_SECRET_KEY -> Credential,
    AUTH_MODE -> AuthMode.Credentials.toString,
    CUSTOM_ENDPOINT -> S3ProxyContext.Uri,
    ENABLE_VIRTUAL_HOST_BUCKETS -> "true"
  )

  val PrefixName = "streamReactorBackups"
  val TopicName = "myTopic"

  /**
   * Copies five numbered resource files for `format` into `bucketName` using
   * the conventional `prefix/topic/partition/offset.ext` key layout (offsets
   * 199, 399, ... = fileNum * 200 - 1) and asserts each upload succeeded.
   */
  def setUpBucketData(bucketName: String, blobStoreContext: BlobStoreContext, format: Format, formatOption: Option[FormatOptions]): Unit = {
    1 to 5 foreach {
      fileNum =>
        S3TestPayloadReader.copyResourceToBucket(
          s"/${format.entryName.toLowerCase}${generateFormatString(formatOption)}/$fileNum.${format.entryName.toLowerCase}",
          bucketName,
          s"$PrefixName/$TopicName/0/${fileNum * 200 - 1}.${format.entryName.toLowerCase}",
          blobStoreContext
        )
        S3TestPayloadReader.fileExists(
          bucketName,
          s"$PrefixName/$TopicName/0/${fileNum * 200 - 1}.${format.entryName.toLowerCase}",
          blobStoreContext
        ) should be(true)
    }
  }

  /** Resource-directory suffix for the format option (e.g. "_withheaders"); empty when absent. */
  def generateFormatString(formatOptions: Option[FormatOptions]): String = {
    formatOptions.fold("")(option => s"_${option.entryName.toLowerCase}")
  }
}
| datamountaineer/stream-reactor | kafka-connect-aws-s3/src/test/scala/io/lenses/streamreactor/connect/aws/s3/source/BucketSetup.scala | Scala | apache-2.0 | 2,248 |
package wikipedia
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
case class WikipediaArticle(title: String, text: String) {
  /**
   * @param lang Language name to look for (case sensitive, e.g. "Scala").
   * @return Whether the article text contains `lang` as a space-delimited token.
   */
  def mentionsLanguage(lang: String): Boolean =
    text.split(' ').exists(token => token == lang)

  /**
   * Helper for `makeIndex()` and `rankLangsReduceByKey()`.
   *
   * @param langs Language names to search for.
   * @return The subset of `langs` mentioned in this article, in input order.
   */
  def mentionsLangs(langs: List[String]): List[String] =
    for (lang <- langs if mentionsLanguage(lang)) yield lang
}
object WikipediaRanking {

  val langs = List(
    "JavaScript", "Java", "PHP", "Python", "C#", "C++", "Ruby", "CSS",
    "Objective-C", "Perl", "Scala", "Haskell", "MATLAB", "Clojure", "Groovy")

  // Spark runs in "local" mode on 3 cores: the whole application executes
  // in-process on this machine.
  val conf: SparkConf = new SparkConf().setMaster("local[3]")
    .setAppName("Wikipedia homework")
  val sc: SparkContext = new SparkContext(conf)
  sc.setLogLevel("ERROR") // controls log4j, other choices: "OFF", "DEBUG", "INFO", "WARN", "ALL"

  /** One parsed WikipediaArticle per input line, cached so the file is read
    * and parsed only once across the three rankings below. */
  val wikiRdd: RDD[WikipediaArticle] = sc
    .textFile(WikipediaData.filePath)
    .map(WikipediaData.parse)
    .cache()

  /** Returns the number of articles in `rdd` mentioning `lang` (case sensitive):
    * filter by `mentionsLanguage`, then count. */
  def occurrencesOfLang(lang: String, rdd: RDD[WikipediaArticle]): Int =
    rdd.filter(_.mentionsLanguage(lang))
      .count()
      .toInt

  /** (1) Naive ranking: one full pass over the RDD per language.
    *
    * BUG FIX: the sort previously used `sortWith(_._2 >= _._2)`. `sortWith`
    * requires a *strict* (irreflexive) ordering; a reflexive `>=` comparator
    * violates the underlying TimSort contract and can throw
    * "Comparison method violates its general contract!" on larger inputs.
    * Using strict `>` keeps the descending order (ties keep input order,
    * since the sort is stable).
    *
    * @return (language, occurrences) pairs in decreasing occurrence order,
    *         e.g. List(("Scala", 999999), ("JavaScript", 1278), ("Java", 42)).
    */
  def rankLangs(langs: List[String], rdd: RDD[WikipediaArticle]): List[(String, Int)] =
    langs.map(lang => (lang, occurrencesOfLang(lang, rdd)))
      .sortWith(_._2 > _._2)

  /** Inverted index mapping each language to the articles that mention it.
    *
    * Each article is expanded (flatMap) into one (lang, article) pair per
    * mentioned language; `groupByKey()` then collects all articles per language.
    */
  def makeIndex(langs: List[String], rdd: RDD[WikipediaArticle]): RDD[(String, Iterable[WikipediaArticle])] =
    rdd.flatMap(article => article.mentionsLangs(langs).map(lang => (lang, article)))
      .groupByKey()

  /** (2) Ranking via the inverted index: the occurrence count of a language is
    * simply the size of its article group. Sorted descending with a strict
    * comparator (see `rankLangs` for why `>` rather than `>=`).
    */
  def rankLangsUsingIndex(index: RDD[(String, Iterable[WikipediaArticle])]): List[(String, Int)] =
    index.mapValues(_.size)
      .collect()
      .sortWith(_._2 > _._2)
      .toList

  /** (3) Index and ranking combined in one job: emit (lang, 1) per mention and
    * sum with `reduceByKey`, avoiding materializing the article groups at all.
    * Sorted descending with a strict comparator (see `rankLangs`).
    */
  def rankLangsReduceByKey(langs: List[String], rdd: RDD[WikipediaArticle]): List[(String, Int)] =
    rdd.flatMap(_.mentionsLangs(langs).map(lang => (lang, 1))) // NOTICE THE '1'!
      .reduceByKey(_ + _)
      .collect()
      .sortWith(_._2 > _._2)
      .toList

  def main(args: Array[String]) {
    /* Languages ranked according to (1) */
    val langsRanked: List[(String, Int)] = timed("Part 1: naive ranking", rankLangs(langs, wikiRdd))

    /* An inverted index mapping languages to wikipedia pages on which they appear */
    def index: RDD[(String, Iterable[WikipediaArticle])] = makeIndex(langs, wikiRdd)

    /* Languages ranked according to (2), using the inverted index */
    val langsRanked2: List[(String, Int)] = timed("Part 2: ranking using inverted index", rankLangsUsingIndex(index))

    /* Languages ranked according to (3) */
    val langsRanked3: List[(String, Int)] = timed("Part 3: ranking using reduceByKey", rankLangsReduceByKey(langs, wikiRdd))

    /* Output the speed of each ranking */
    println(timing)
    sc.stop()
  }

  val timing = new StringBuffer

  /** Runs `code`, appends its wall-clock duration to `timing`, returns its result. */
  def timed[T](label: String, code: => T): T = {
    val start = System.currentTimeMillis()
    val result = code
    val stop = System.currentTimeMillis()
    timing.append(s"Processing $label took ${stop - start} ms.\n")
    result
  }
}
| jeffreylloydbrown/classwork | BigDataAndSpark/wikipedia/src/main/scala/wikipedia/WikipediaRanking.scala | Scala | unlicense | 10,556 |
/*
* Copyright (C) 2015 The Regents of The University California.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.abm.datatypes
import java.util
import it.unimi.dsi.fastutil.ints.IntArrayList
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspector => OI, StructObjectInspector}
/**
 * Parses Hive rows laid out as (keys..., aggregates..., condition, group-by id)
 * into their typed components.
 *
 * @param _oi     object inspector for the whole row (must be a struct)
 * @param numKeys number of leading key columns before the aggregate columns
 */
class ContinuousOutputParserSimple(_oi: OI, val numKeys: Int) {
  private val oi = _oi.asInstanceOf[StructObjectInspector]
  private val fields = oi.getAllStructFieldRefs

  // simple: keys, aggregates, condition, gby-id
  // Aggregates span everything between the keys and the last two fields.
  private val aggrParser = new ContinuousSrvParser(_oi, numKeys, fields.size() - 2)
  // Condition is the second-to-last field; group-by id is the last field.
  private val conditionField = fields.get(fields.size() - 2)
  private val conditionParser = new ConditionsParser(conditionField.getFieldObjectInspector)
  private val gbyIdField = fields.get(fields.size() - 1)
  private val gbyIdParser = new IdParser(gbyIdField.getFieldObjectInspector)

  /** Extracts (aggregate values, condition keys, condition ranges, group-by id) from one row. */
  def parse(o: Any): (Array[Double], IntArrayList, util.List[RangeList], Int) = {
    (aggrParser.parse(o),
      conditionParser.parseKey(oi.getStructFieldData(o, conditionField)),
      conditionParser.parseRange(oi.getStructFieldData(o, conditionField)),
      gbyIdParser.parse(oi.getStructFieldData(o, gbyIdField)))
  }
}
| uclaabs/abs | src/main/scala/shark/execution/abm/ContinuousOutputParserSimple.scala | Scala | apache-2.0 | 1,831 |
// FizzBuzz over 1..20, deciding each word by pattern matching on the pair of
// divisibility remainders.
object FizzBuzz05 {
  def main(args: Array[String]): Unit = {
    for (x <- 1 to 20) {
      val word = (x % 3, x % 5) match {
        case (0, 0) => "FizzBuzz"
        case (0, _) => "Fizz"
        case (_, 0) => "Buzz"
        case _      => x.toString
      }
      println(word)
    }
  }
}
package gitbucket.core.ssh
import gitbucket.core.model.Profile.profile.blockingApi._
import gitbucket.core.plugin.{GitRepositoryRouting, PluginRegistry}
import gitbucket.core.service.{AccountService, DeployKeyService, RepositoryService, SystemSettingsService}
import gitbucket.core.servlet.{CommitLogHook, Database}
import gitbucket.core.util.Directory
import org.apache.sshd.server.{Environment, ExitCallback, SessionAware}
import org.apache.sshd.server.command.{Command, CommandFactory}
import org.apache.sshd.server.session.ServerSession
import org.slf4j.LoggerFactory
import java.io.{File, InputStream, OutputStream}
import org.eclipse.jgit.api.Git
import Directory._
import gitbucket.core.ssh.PublicKeyAuthenticator.AuthType
import org.eclipse.jgit.transport.{ReceivePack, UploadPack}
import org.apache.sshd.server.shell.UnknownCommand
import org.eclipse.jgit.errors.RepositoryNotFoundException
import scala.util.Using
object GitCommand {
  // Matches e.g. `git-upload-pack '/owner/repo.git'`; capture groups: command
  // kind (upload|receive), owner, repository name.
  val DefaultCommandRegex = """\\Agit-(upload|receive)-pack '/([a-zA-Z0-9\\-_.]+)/([a-zA-Z0-9\\-\\+_.]+).git'\\Z""".r
  // Matches a bare repository path (no owner segment); used for plugin-routed repositories.
  val SimpleCommandRegex = """\\Agit-(upload|receive)-pack '/(.+\\.git)'\\Z""".r
}
/**
 * Base class for git commands executed over SSH. Receives streams, the exit
 * callback and the server session from Apache SSHD, runs the actual git work
 * on a dedicated thread, and reports success/failure via the exit callback.
 */
abstract class GitCommand extends Command with SessionAware {
  private val logger = LoggerFactory.getLogger(classOf[GitCommand])

  // Assigned by SSHD from another thread before start(); hence @volatile.
  @volatile protected var err: OutputStream = null
  @volatile protected var in: InputStream = null
  @volatile protected var out: OutputStream = null
  @volatile protected var callback: ExitCallback = null
  // Authentication result captured from the server session, if authentication happened.
  @volatile private var authType: Option[AuthType] = None

  /** Performs the actual git operation for an authenticated peer. */
  protected def runTask(authType: AuthType): Unit

  private def newTask(): Runnable = () => {
    authType match {
      case Some(authType) =>
        try {
          runTask(authType)
          callback.onExit(0)
        } catch {
          // Expected case: repository does not exist — report without a stack trace.
          case e: RepositoryNotFoundException =>
            logger.info(e.getMessage)
            callback.onExit(1, "Repository Not Found")
          case e: Throwable =>
            logger.error(e.getMessage, e)
            callback.onExit(1)
        }
      case None =>
        // The session was never authenticated; refuse to run anything.
        val message = "User not authenticated"
        logger.error(message)
        callback.onExit(1, message)
    }
  }

  // Runs the task on its own thread so SSHD's I/O threads are not blocked.
  final override def start(env: Environment): Unit = {
    val thread = new Thread(newTask())
    thread.start()
  }

  override def destroy(): Unit = {}

  override def setExitCallback(callback: ExitCallback): Unit = {
    this.callback = callback
  }

  override def setErrorStream(err: OutputStream): Unit = {
    this.err = err
  }

  override def setOutputStream(out: OutputStream): Unit = {
    this.out = out
  }

  override def setInputStream(in: InputStream): Unit = {
    this.in = in
  }

  // Captures how the peer authenticated (user key vs deploy key) for runTask.
  override def setSession(serverSession: ServerSession): Unit = {
    this.authType = PublicKeyAuthenticator.getAuthType(serverSession)
  }
}
/**
 * Base for git commands targeting a regular GitBucket repository
 * (`owner/repoName`). Supplies permission checks covering both user-key and
 * deploy-key authentication.
 */
abstract class DefaultGitCommand(val owner: String, val repoName: String) extends GitCommand {
  self: RepositoryService with AccountService with DeployKeyService =>

  /** Effective user name: the authenticated user, or the repository owner for deploy keys. */
  protected def userName(authType: AuthType): String = {
    authType match {
      case AuthType.UserAuthType(userName) => userName
      case AuthType.DeployKeyType(_) => owner
    }
  }

  /** Whether the authenticated peer may read (clone/fetch) the repository. */
  protected def isReadableUser(authType: AuthType, repositoryInfo: RepositoryService.RepositoryInfo)(
    implicit session: Session
  ): Boolean = {
    authType match {
      case AuthType.UserAuthType(username) => {
        getAccountByUserName(username) match {
          case Some(account) => hasGuestRole(owner, repoName, Some(account))
          case None => false
        }
      }
      case AuthType.DeployKeyType(key) => {
        // A deploy key grants read access when exactly one registered key matches it.
        getDeployKeys(owner, repoName).filter(sshKey => SshUtil.str2PublicKey(sshKey.publicKey).contains(key)) match {
          case List(_) => true
          case _ => false
        }
      }
    }
  }

  /** Whether the authenticated peer may write (push to) the repository. */
  protected def isWritableUser(authType: AuthType, repositoryInfo: RepositoryService.RepositoryInfo)(
    implicit session: Session
  ): Boolean = {
    authType match {
      case AuthType.UserAuthType(username) => {
        getAccountByUserName(username) match {
          case Some(account) => hasDeveloperRole(owner, repoName, Some(account))
          case None => false
        }
      }
      case AuthType.DeployKeyType(key) => {
        // A deploy key grants write access only when the single matching key has allowWrite set.
        getDeployKeys(owner, repoName).filter(sshKey => SshUtil.str2PublicKey(sshKey.publicKey).contains(key)) match {
          case List(x) if x.allowWrite => true
          case _ => false
        }
      }
    }
  }
}
/** Serves `git-upload-pack` (clone/fetch) for a regular repository. */
class DefaultGitUploadPack(owner: String, repoName: String)
    extends DefaultGitCommand(owner, repoName)
    with RepositoryService
    with AccountService
    with DeployKeyService {

  override protected def runTask(authType: AuthType): Unit = {
    // Public repositories are readable by anyone; private ones require read permission.
    // The ".wiki" suffix is stripped so a wiki repo is checked against its parent repository.
    val execute = Database() withSession { implicit session =>
      getRepository(owner, repoName.replaceFirst("\\\\.wiki\\\\Z", ""))
        .map { repositoryInfo =>
          !repositoryInfo.repository.isPrivate || isReadableUser(authType, repositoryInfo)
        }
        .getOrElse(false)
    }
    if (execute) {
      Using.resource(Git.open(getRepositoryDir(owner, repoName))) { git =>
        val repository = git.getRepository
        val upload = new UploadPack(repository)
        upload.upload(in, out, err)
      }
    }
  }
}
/** Serves `git-receive-pack` (push) for a regular repository, registering commit-log hooks. */
class DefaultGitReceivePack(owner: String, repoName: String, baseUrl: String, sshUrl: Option[String])
    extends DefaultGitCommand(owner, repoName)
    with RepositoryService
    with AccountService
    with DeployKeyService {

  override protected def runTask(authType: AuthType): Unit = {
    // Pushing always requires write permission; wiki repos are checked against the parent repository.
    val execute = Database() withSession { implicit session =>
      getRepository(owner, repoName.replaceFirst("\\\\.wiki\\\\Z", ""))
        .map { repositoryInfo =>
          isWritableUser(authType, repositoryInfo)
        }
        .getOrElse(false)
    }
    if (execute) {
      Using.resource(Git.open(getRepositoryDir(owner, repoName))) { git =>
        val repository = git.getRepository
        val receive = new ReceivePack(repository)
        // The commit-log hook is only registered for non-wiki repositories.
        if (!repoName.endsWith(".wiki")) {
          val hook = new CommitLogHook(owner, repoName, userName(authType), baseUrl, sshUrl)
          receive.setPreReceiveHook(hook)
          receive.setPostReceiveHook(hook)
        }
        receive.receive(in, out, err)
      }
    }
  }
}
/** Serves `git-upload-pack` for a repository whose path is routed by a plugin. */
class PluginGitUploadPack(repoName: String, routing: GitRepositoryRouting)
    extends GitCommand
    with SystemSettingsService {

  override protected def runTask(authType: AuthType): Unit = {
    // Delegate the read-permission decision to the plugin's filter (last arg false = read).
    val execute = Database() withSession { implicit session =>
      routing.filter.filter("/" + repoName, AuthType.userName(authType), loadSystemSettings(), false)
    }
    if (execute) {
      // Map the requested URL to the plugin's local repository directory.
      val path = routing.urlPattern.r.replaceFirstIn(repoName, routing.localPath)
      Using.resource(Git.open(new File(Directory.GitBucketHome, path))) { git =>
        val repository = git.getRepository
        val upload = new UploadPack(repository)
        upload.upload(in, out, err)
      }
    }
  }
}
/** Serves `git-receive-pack` for a repository whose path is routed by a plugin. */
class PluginGitReceivePack(repoName: String, routing: GitRepositoryRouting)
    extends GitCommand
    with SystemSettingsService {

  override protected def runTask(authType: AuthType): Unit = {
    // Delegate the write-permission decision to the plugin's filter (last arg true = write).
    val execute = Database() withSession { implicit session =>
      routing.filter.filter("/" + repoName, AuthType.userName(authType), loadSystemSettings(), true)
    }
    if (execute) {
      // Map the requested URL to the plugin's local repository directory.
      val path = routing.urlPattern.r.replaceFirstIn(repoName, routing.localPath)
      Using.resource(Git.open(new File(Directory.GitBucketHome, path))) { git =>
        val repository = git.getRepository
        val receive = new ReceivePack(repository)
        receive.receive(in, out, err)
      }
    }
  }
}
/**
 * Creates the [[Command]] matching an incoming SSH exec request. Resolution
 * order: plugin-provided command providers first, then plugin-routed
 * repository paths, then the standard `owner/name` patterns; anything else
 * becomes an [[UnknownCommand]].
 */
class GitCommandFactory(baseUrl: String, sshUrl: Option[String]) extends CommandFactory {
  private val logger = LoggerFactory.getLogger(classOf[GitCommandFactory])

  override def createCommand(command: String): Command = {
    import GitCommand._
    logger.debug(s"command: $command")

    // Give plugins the first chance to handle the raw command string.
    val pluginCommand = PluginRegistry().getSshCommandProviders.collectFirst {
      case f if f.isDefinedAt(command) => f(command)
    }

    pluginCommand match {
      case Some(x) => x
      case None =>
        command match {
          case SimpleCommandRegex("upload", repoName) if (pluginRepository(repoName)) =>
            new PluginGitUploadPack(repoName, routing(repoName))
          case SimpleCommandRegex("receive", repoName) if (pluginRepository(repoName)) =>
            new PluginGitReceivePack(repoName, routing(repoName))
          case DefaultCommandRegex("upload", owner, repoName) => new DefaultGitUploadPack(owner, repoName)
          case DefaultCommandRegex("receive", owner, repoName) =>
            new DefaultGitReceivePack(owner, repoName, baseUrl, sshUrl)
          case _ => new UnknownCommand(command)
        }
    }
  }

  /** True when a plugin has registered routing for this repository path. */
  private def pluginRepository(repoName: String): Boolean =
    PluginRegistry().getRepositoryRouting("/" + repoName).isDefined

  /** Routing entry for a plugin-handled path; only valid after `pluginRepository` returned true. */
  private def routing(repoName: String): GitRepositoryRouting =
    PluginRegistry().getRepositoryRouting("/" + repoName).get
}
| xuwei-k/gitbucket | src/main/scala/gitbucket/core/ssh/GitCommand.scala | Scala | apache-2.0 | 9,141 |
/*
* Copyright (C) 2017 Vincibean <Andrea Bessi>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.vincibean.scala.impatient.chapter13
/**
 * Write a function that turns an array of Int values into a two-
 * dimensional array. Pass the number of columns as a parameter. For example,
 * with Array(1, 2, 3, 4, 5, 6) and three columns, return Array(Array(1, 2, 3),
 * Array(4, 5, 6)). Use the grouped method.
 */
package object exercise8 {
  /**
   * Splits `is` into rows of `cols` elements each; the last row may be
   * shorter when `is.length` is not a multiple of `cols`.
   *
   * @param is   flat input array
   * @param cols number of columns per row
   * @return the two-dimensional grouping, or an empty array when the input is
   *         empty or `cols` is not positive. (The previous guard used `&&`,
   *         so a non-positive `cols` with a non-empty array reached
   *         `grouped(cols)` and threw an IllegalArgumentException.)
   */
  def nDimArray(is: Array[Int], cols: Int): Array[Array[Int]] =
    if (is.isEmpty || cols <= 0) Array.empty[Array[Int]]
    else is.grouped(cols).toArray
}
| Vincibean/ScalaForTheImpatient-Solutions | src/main/scala/org/vincibean/scala/impatient/chapter13/exercise8/package.scala | Scala | gpl-3.0 | 1,246 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.execution.datasources.ResolvedDataSource
// Test suite verifying that data-source short names and fully-qualified class
// names resolve to the expected DefaultSource implementations.
class ResolvedDataSourceSuite extends SparkFunSuite {

  test("jdbc") {
    assert(
      // the short name "jdbc" resolves...
      ResolvedDataSource.lookupDataSource("jdbc") ===
      // ...to the built-in jdbc.DefaultSource by default
      classOf[org.apache.spark.sql.execution.datasources.jdbc.DefaultSource])
    assert(
      ResolvedDataSource.lookupDataSource("org.apache.spark.sql.execution.datasources.jdbc") ===
      classOf[org.apache.spark.sql.execution.datasources.jdbc.DefaultSource])
    assert(
      ResolvedDataSource.lookupDataSource("org.apache.spark.sql.jdbc") ===
      classOf[org.apache.spark.sql.execution.datasources.jdbc.DefaultSource])
  }

  test("json") {
    assert(
      // the short name "json" resolves...
      ResolvedDataSource.lookupDataSource("json") ===
      // ...to the built-in json.DefaultSource by default
      classOf[org.apache.spark.sql.execution.datasources.json.DefaultSource])
    assert(
      ResolvedDataSource.lookupDataSource("org.apache.spark.sql.execution.datasources.json") ===
      classOf[org.apache.spark.sql.execution.datasources.json.DefaultSource])
    assert(
      ResolvedDataSource.lookupDataSource("org.apache.spark.sql.json") ===
      classOf[org.apache.spark.sql.execution.datasources.json.DefaultSource])
  }

  test("parquet") {
    assert(
      // the short name "parquet" resolves...
      ResolvedDataSource.lookupDataSource("parquet") ===
      // ...to the built-in parquet.DefaultSource by default
      classOf[org.apache.spark.sql.execution.datasources.parquet.DefaultSource])
    assert(
      ResolvedDataSource.lookupDataSource("org.apache.spark.sql.execution.datasources.parquet") ===
      classOf[org.apache.spark.sql.execution.datasources.parquet.DefaultSource])
    assert(
      ResolvedDataSource.lookupDataSource("org.apache.spark.sql.parquet") ===
      classOf[org.apache.spark.sql.execution.datasources.parquet.DefaultSource])
  }
}
| tophua/spark1.52 | sql/core/src/test/scala/org/apache/spark/sql/sources/ResolvedDataSourceSuite.scala | Scala | apache-2.0 | 2,843 |
package com.airbnb.aerosolve.demo.IncomePrediction;
import com.typesafe.config.ConfigFactory
import org.apache.spark.{SparkContext, SparkConf}
import org.slf4j.{LoggerFactory, Logger}
/*
 * Runs an arbitrary job given a config resource name.
 * The config must contain
 *   job_name : name_of_job
 *   jobs = [ list of jobs ]
 *   ... other job specific configs.
 * Example command line:
 * bin/spark-submit --executor-memory 8G
 *   --class com.airbnb.aerosolve.demo.IncomePrediction.JobRunner
 *   income_prediction-0.1.2-all.jar
 *   income_prediction.conf
 */
object JobRunner {
  def main(args: Array[String]): Unit = {
    val log: Logger = LoggerFactory.getLogger("Job.Runner")
    if (args.length < 2) {
      log.error("Usage: Job.Runner config_name job1,job2...")
      System.exit(-1)
    }
    log.info("Loading config from " + args(0))
    val config = ConfigFactory.load(args(0))
    val jobs : Seq[String] = args(1).split(',')
    // Fix: the app name previously said "ImageImpressionism", a copy-paste
    // leftover from the demo this runner was derived from.
    val conf = new SparkConf().setAppName("IncomePrediction")
    val sc = new SparkContext(conf)
    try {
      for (job <- jobs) {
        log.info("Running " + job)
        try {
          job match {
            case "MakeTraining" => IncomePredictionPipeline
              .makeExampleRun(sc, config.getConfig("make_training"))
            case "MakeTesting" => IncomePredictionPipeline
              .makeExampleRun(sc, config.getConfig("make_testing"))
            case "TrainModel" => IncomePredictionPipeline
              .trainModel(sc, config)
            case "EvalTesting" => IncomePredictionPipeline
              .evalModel(sc, config, "eval_testing")
            case "EvalTraining" => IncomePredictionPipeline
              .evalModel(sc, config, "eval_training")
            case _ => log.error("Unknown job " + job)
          }
        } catch {
          // Abort on the first failing job. Note that System.exit terminates
          // the JVM immediately, so the finally below only runs on success.
          case e : Exception =>
            log.error("Exception on job %s : %s".format(job, e.toString))
            System.exit(-1)
        }
      }
    } finally {
      // Release cluster resources once all jobs have completed.
      sc.stop()
    }
  }
}
| sagivo/aerosolve | demo/income_prediction/src/main/scala/JobRunner.scala | Scala | apache-2.0 | 1,901 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools
import org.scalatest._
import scala.collection.mutable
import java.io.File
import java.util.regex.Pattern
import SuiteDiscoveryHelper.discoverTests
import org.scalatest.funspec.AnyFunSpec
/**
 * Test-only facade that reaches SuiteDiscoveryHelper's private methods via
 * reflection so they can be exercised from SuiteDiscoveryHelperSpec.
 *
 * On Scala 2.10/2.11 the compiler name-mangles object-private methods by
 * prefixing the owner's path, so the runtime method name to look up depends
 * on the Scala version ScalaTest was built for.
 */
class SuiteDiscoveryHelperFriend(sdt: SuiteDiscoveryHelper.type) {

  /** Resolves the (possibly name-mangled) runtime name of a private method. */
  private def mangled(name: String): String =
    if (ScalaTestVersions.BuiltForScalaVersion == "2.10" || ScalaTestVersions.BuiltForScalaVersion == "2.11")
      "org$scalatest$tools$SuiteDiscoveryHelper$$" + name
    else
      name

  /**
   * Looks up `methodName` on the SuiteDiscoveryHelper singleton class,
   * makes it accessible, and invokes it on `sdt` with `args`.
   */
  private def invokeHelper(methodName: String, paramTypes: Array[Class[_]], args: Array[Object]): Any = {
    val m = Class.forName("org.scalatest.tools.SuiteDiscoveryHelper$")
      .getDeclaredMethod(methodName, paramTypes: _*)
    m.setAccessible(true)
    m.invoke(sdt, args: _*)
  }

  def transformToClassName(fileName: String, fileSeparator: Char): Option[String] =
    invokeHelper(
      mangled("transformToClassName"),
      Array(classOf[String], classOf[Char]),
      Array[Object](fileName, java.lang.Character.valueOf(fileSeparator))
    ).asInstanceOf[Option[String]]

  def extractClassNames(fileNames: Iterator[String], fileSeparator: Char): Iterator[String] =
    invokeHelper(
      "extractClassNames",
      Array(classOf[Iterator[String]], classOf[Char]),
      Array[Object](fileNames, java.lang.Character.valueOf(fileSeparator))
    ).asInstanceOf[Iterator[String]]

  def isAccessibleSuite(clazz: java.lang.Class[_]): Boolean =
    invokeHelper(
      "isAccessibleSuite",
      Array(classOf[Class[_]]),
      Array[Object](clazz)
    ).asInstanceOf[Boolean]

  def isRunnable(clazz: java.lang.Class[_]): Boolean =
    invokeHelper(
      "isRunnable",
      Array(classOf[Class[_]]),
      Array[Object](clazz)
    ).asInstanceOf[Boolean]

  def processFileNames(fileNames: Iterator[String], fileSeparator: Char, loader: ClassLoader, suffixes: Option[Pattern]):
      Set[String] =
    invokeHelper(
      mangled("processFileNames"),
      Array(classOf[Iterator[String]], classOf[Char], classOf[ClassLoader], classOf[Option[Pattern]]),
      Array[Object](fileNames, java.lang.Character.valueOf(fileSeparator), loader, suffixes)
    ).asInstanceOf[Set[String]]

  def getFileNamesSetFromFile(file: File, fileSeparator: Char): Set[String] =
    invokeHelper(
      mangled("getFileNamesSetFromFile"),
      Array(classOf[File], classOf[Char]),
      Array[Object](file, java.lang.Character.valueOf(fileSeparator))
    ).asInstanceOf[Set[String]]

  def isDiscoverableSuite(clazz: java.lang.Class[_]): Boolean =
    invokeHelper(
      "isDiscoverableSuite",
      Array(classOf[Class[_]]),
      Array[Object](clazz)
    ).asInstanceOf[Boolean]
}
/**
 * Tests for SuiteDiscoveryHelper: the public `discoverTests` entry point,
 * and — through the reflective SuiteDiscoveryHelperFriend facade — its
 * private helpers for class-name transformation, file-name scanning, and
 * suite accessibility / discoverability / runnability checks.
 */
class SuiteDiscoveryHelperSpec extends AnyFunSpec {
  // Reflective access to SuiteDiscoveryHelper's private methods.
  val sdtf = new SuiteDiscoveryHelperFriend(SuiteDiscoveryHelper)
  val loader = getClass.getClassLoader
  // Fixture: the only suites the discovery tests below are allowed to find.
  val accessibleSuites =
    Set(
      "org.scalatest.tools.RunnerSpec",
      "org.scalatest.tools.SuiteDiscoveryHelperSpec",
      "org.scalatest.tools.SuiteDiscoveryHelperSpec2")
  //
  // Given this Suite's name and one of its test names,
  // discoverTests should return a SuiteParam object for this
  // Suite and the specified test.
  //
  it("test discover tests 1") {
    val testSpecs = List(TestSpec("test discover tests 1", false))
    val suiteParams = discoverTests(testSpecs, accessibleSuites, loader)
    assert(suiteParams.length === 1)
    val suiteParam = suiteParams(0)
    assert(suiteParam.className === "org.scalatest.tools.SuiteDiscoveryHelperSpec")
    assert(suiteParam.testNames.length === 1)
    assert(suiteParam.testNames(0) === "test discover tests 1")
    assert(suiteParam.wildcardTestNames.length === 0)
    assert(suiteParam.nestedSuites.length === 0)
  }
  //
  // Given two test names, where only one is found, discoverTests should
  // return a SuiteParam with just the one test name.
  //
  it("test discover tests 2") {
    val testSpecs =
      List(
        TestSpec("test discover tests 2", false),
        TestSpec("test discover tests X", false)
      )
    val suiteParams =
      discoverTests(testSpecs, accessibleSuites, loader)
    assert(suiteParams.length === 1)
    val suiteParam = suiteParams(0)
    assert(suiteParam.className === "org.scalatest.tools.SuiteDiscoveryHelperSpec")
    assert(suiteParam.testNames.length === 1)
    assert(suiteParam.testNames(0) === "test discover tests 2")
    assert(suiteParam.wildcardTestNames.length === 0)
    assert(suiteParam.nestedSuites.length === 0)
  }
  //
  // Given two test names, where both are found, discoverTests should
  // return a SuiteParam with both test names.
  //
  it("test discover tests 3") {
    val testSpecs =
      List(
        TestSpec("test discover tests 2", false),
        TestSpec("test discover tests 1", false)
      )
    val suiteParams =
      discoverTests(testSpecs, accessibleSuites, loader)
    assert(suiteParams.length === 1)
    val suiteParam = suiteParams(0)
    assert(suiteParam.className === "org.scalatest.tools.SuiteDiscoveryHelperSpec")
    assert(suiteParam.testNames.length === 2)
    assert(suiteParam.testNames(0) === "test discover tests 1")
    assert(suiteParam.testNames(1) === "test discover tests 2")
    assert(suiteParam.wildcardTestNames.length === 0)
    assert(suiteParam.nestedSuites.length === 0)
  }
  //
  // Two test names, where both are in one Suite and one is in
  // two Suites.
  //
  // "test discover tests 4" also exists in SuiteDiscoveryHelperSpec2 below,
  // so two SuiteParams are expected.
  it("test discover tests 4") {
    val testSpecs =
      List(
        TestSpec("test discover tests 4", false),
        TestSpec("test discover tests 1", false)
      )
    val suiteParams =
      discoverTests(testSpecs, accessibleSuites, loader)
    assert(suiteParams.length === 2)
    val suiteParam0 = suiteParams(0)
    assert(suiteParam0.className === "org.scalatest.tools.SuiteDiscoveryHelperSpec")
    assert(suiteParam0.testNames.length === 2)
    assert(suiteParam0.testNames(0) === "test discover tests 1")
    assert(suiteParam0.testNames(1) === "test discover tests 4")
    assert(suiteParam0.wildcardTestNames.length === 0)
    assert(suiteParam0.nestedSuites.length === 0)
    val suiteParam1 = suiteParams(1)
    assert(suiteParam1.className === "org.scalatest.tools.SuiteDiscoveryHelperSpec2")
    assert(suiteParam1.testNames.length === 1)
    assert(suiteParam1.testNames(0) === "test discover tests 4")
    assert(suiteParam1.wildcardTestNames.length === 0)
    assert(suiteParam1.nestedSuites.length === 0)
  }
  //
  // Discover tests using a substring. This should discover tests in
  // two Suites.
  //
  it("test discover tests A1") {
    val testSpecs =
      List(
        TestSpec("test discover tests A", true)
      )
    val suiteParams =
      discoverTests(testSpecs, accessibleSuites, loader)
    assert(suiteParams.length === 2)
    val suiteParam0 = suiteParams(0)
    assert(suiteParam0.className ===
      "org.scalatest.tools.SuiteDiscoveryHelperSpec")
    assert(suiteParam0.testNames.length === 0)
    assert(suiteParam0.wildcardTestNames.length === 1)
    assert(suiteParam0.wildcardTestNames(0) === "test discover tests A")
    assert(suiteParam0.nestedSuites.length === 0)
    val suiteParam1 = suiteParams(1)
    assert(suiteParam1.className ===
      "org.scalatest.tools.SuiteDiscoveryHelperSpec2")
    assert(suiteParam1.testNames.length === 0)
    assert(suiteParam1.wildcardTestNames.length === 1)
    assert(suiteParam1.wildcardTestNames(0) === "test discover tests A")
    assert(suiteParam1.nestedSuites.length === 0)
  }
  // Maps ".class" file names (with the given separator) to dotted class
  // names; non-class files yield None.
  it("test transform to class name") {
    assert(sdtf.transformToClassName("bob.class", '/') === Some("bob"))
    assert(sdtf.transformToClassName("a.b.c.bob.class", '/') === Some("a.b.c.bob"))
    assert(sdtf.transformToClassName("a.b.c.bob", '/') === None)
    assert(sdtf.transformToClassName("", '/') === None)
    assert(sdtf.transformToClassName("notdotclass", '/') === None)
    assert(sdtf.transformToClassName(".class", '/') === None)
    assert(sdtf.transformToClassName("a/b/c/bob.class", '/') === Some("a.b.c.bob"))
    assert(sdtf.transformToClassName("a/b/c/bob", '/') === None)
    assert(sdtf.transformToClassName("/.class", '/') === None)
    assert(sdtf.transformToClassName("..class", '/') === Some("."))
    assert(sdtf.transformToClassName("a\\\\b\\\\c\\\\bob.class", '\\\\') === Some("a.b.c.bob"))
    assert(sdtf.transformToClassName("a\\\\b\\\\c\\\\bob", '\\\\') === None)
    assert(sdtf.transformToClassName("\\\\.class", '\\\\') === None)
  }
  // A suite is "accessible" only if it is a public Suite subclass with a
  // public no-arg constructor.
  it("test is accessible suite") {
    assert(sdtf.isAccessibleSuite(classOf[SuiteDiscoveryHelperSpec]))
    assert(!sdtf.isAccessibleSuite(classOf[PackageAccessSuite]))
    assert(!sdtf.isAccessibleSuite(classOf[PackageAccessConstructorSuite]))
    assert(!sdtf.isAccessibleSuite(classOf[Suite]))
    assert(!sdtf.isAccessibleSuite(classOf[Object]))
  }
  it("test extract class names") {
    assert(sdtf.extractClassNames(List("bob.class").iterator, '/').toList === List("bob"))
    assert(sdtf.extractClassNames(List("bob.class", "manifest.txt", "a/b/c/bob.class").iterator, '/').toList === List("bob", "a.b.c.bob"))
    assert(sdtf.extractClassNames(List("bob.class", "manifest.txt", "a\\\\b\\\\c\\\\bob.class").iterator, '\\\\').toList === List("bob", "a.b.c.bob"))
    assert(sdtf.extractClassNames(List("bob.class", "manifest.txt", "/a/b/c/bob.class").iterator, '/').toList === List("bob", "a.b.c.bob"))
  }
  // processFileNames should keep only names that load as accessible suites,
  // optionally restricted by a class-name-suffix pattern.
  it("test process file names") {
    val loader = getClass.getClassLoader
    val discoveredSet1 = sdtf.processFileNames(List("doesNotExist.txt", "noSuchfile.class").iterator, '/', loader, None)
    assert(discoveredSet1.isEmpty)
    val discoveredSet2 = sdtf.processFileNames(List("org/scalatest/EasySuite.class", "noSuchfile.class", "org/scalatest/FastAsLight.class").iterator, '/', loader, None)
    assert(discoveredSet2 === Set("org.scalatest.EasySuite"))
    val fileNames3 =
      List(
        "org/scalatest/EasySuite.class",
        "org/scalatest/RunnerSuite.class",
        "org/scalatest/SlowAsMolasses.class",
        "org/scalatest/SuiteSuite.class",
        "noSuchfile.class",
        "org/scalatest/FastAsLight.class"
      )
    val classNames3 =
      Set(
        "org.scalatest.EasySuite",
        // "org.scalatest.RunnerSuite", dropped this when moved RunnerSuite to tools
        "org.scalatest.SuiteSuite"
      )
    val discoveredSet3 = sdtf.processFileNames(fileNames3.iterator, '/', loader, None)
    assert(discoveredSet3 === classNames3)
    // Test with backslashes
    val fileNames4 =
      List(
        "org\\\\scalatest\\\\EasySuite.class",
        "org\\\\scalatest\\\\RunnerSuite.class",
        "org\\\\scalatest\\\\SlowAsMolasses.class",
        "org\\\\scalatest\\\\SuiteSuite.class",
        "noSuchfile.class",
        "org\\\\scalatest\\\\FastAsLight.class"
      )
    val discoveredSet4 = sdtf.processFileNames(fileNames4.iterator, '\\\\', loader, None)
    assert(discoveredSet4 === classNames3)
    // Test with leading slashes
    val fileNames5 =
      List(
        "/org/scalatest/EasySuite.class",
        "/org/scalatest/RunnerSuite.class",
        "/org/scalatest/SlowAsMolasses.class",
        "/org/scalatest/SuiteSuite.class",
        "/noSuchfile.class",
        "/org/scalatest/FastAsLight.class"
      )
    val discoveredSet5 = sdtf.processFileNames(fileNames5.iterator, '/', loader, None)
    assert(discoveredSet5 === classNames3)
    // Test for specified suffixes only
    val fileNames6 =
      List(
        "/org/scalatest/EasySuite.class",
        "/org/scalatest/RunnerSuite.class",
        "/org/scalatest/SlowAsMolasses.class",
        "/org/scalatest/SuiteSuite.class",
        "/org/scalatest/FilterSpec.class",
        "/noSuchfile.class",
        "/org/scalatest/FastAsLight.class"
      )
    val classNames4 =
      Set(
        "org.scalatest.EasySuite",
        "org.scalatest.SuiteSuite",
        "org.scalatest.FilterSpec"
      )
    val discoveredSet6 = sdtf.processFileNames(fileNames6.iterator, '/', loader, Some(Pattern.compile(".*(Suite)$")))
    assert(discoveredSet6 === classNames3)
    val discoveredSet7 = sdtf.processFileNames(fileNames6.iterator, '/', loader, Some(Pattern.compile(".*(Spec|Suite)$")))
    assert(discoveredSet7 === classNames4)
  }
  it("test get file names set from file") {
    assert(sdtf.getFileNamesSetFromFile(new File("harness/fnIteratorTest/empty.txt"), '/') === Set("empty.txt"))
    /*
    This one doesn't work now that I've checked the harness into subversion, because it finds the svn files.
    So I need to first copy just the files I want somewhere, then run this.
    assert(sdtf.getFileNamesSetFromFile(new File("harness/fnIteratorTest"), '/') === Set("subDir2/inSubDir2.class",
    "subDir2/subSubDir/inSubSubDir.class", "empty.txt", "empty.class", "subDir1/inSubDir1.class"))
    */
  }
  // @DoNotDiscover must exclude a class from discovery.
  it("test is discoverable suite") {
    assert(sdtf.isDiscoverableSuite(classOf[SuiteDiscoveryHelperSpec]))
    @DoNotDiscover class NotDiscoverable {}
    assert(!sdtf.isDiscoverableSuite(classOf[NotDiscoverable]))
  }
  // Only classes wrapped via @WrapWith with a suitable single-arg runner
  // constructor are "runnable"; plain classes and Suites are not.
  it("test is runnable") {
    class NormalClass {}
    class SuiteClass extends Suite
    @WrapWith(classOf[SuiteClass])
    class AnnotateDefaultConstructor
    class WrongSuiteClass(testValue: String) extends Suite
    @WrapWith(classOf[WrongSuiteClass])
    class AnnotateWrongConstructor
    assert(!sdtf.isRunnable(classOf[NormalClass]))
    assert(!sdtf.isRunnable(classOf[SuiteClass]))
    assert(!sdtf.isRunnable(classOf[AnnotateDefaultConstructor]))
    assert(!sdtf.isRunnable(classOf[AnnotateWrongConstructor]))
    assert(sdtf.isRunnable(classOf[SomeApiClass]))
    assert(sdtf.isRunnable(classOf[SomeApiSubClass]))
  }
}
//
// This class is just used by tests in SuiteDiscoveryHelperSpec
// for testing Suite discovery by test name.
//
class SuiteDiscoveryHelperSpec2 extends AnyFunSpec {
  // Bodies are intentionally empty: these tests exist only so that the
  // discovery tests above can find them by exact name ("test discover
  // tests 4") or by substring ("test discover tests A").
  it("test discover tests 4") {
  }
  it("test discover tests A2") {
  }
  it("test discover tests A3") {
  }
}
| scalatest/scalatest | jvm/scalatest-test/src/test/scala/org/scalatest/tools/SuiteDiscoveryHelperSuite.scala | Scala | apache-2.0 | 15,712 |
// scalac: -Xsource:2.13 -deprecation -Xfatal-warnings
//
// bug 989
'42'
// SI-10133
'''
'';
// SI-10120
'abc'
'utf_8'
'utf-8'
'
| martijnhoekstra/scala | test/files/neg/badtok-1.scala | Scala | apache-2.0 | 137 |
/*
* Copyright 2016 Scalalaz Podcast Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ru.scalalaz.gen.writers
import ru.scalalaz.gen.{Episode, EpisodeFile}
import knockoff.DefaultDiscounter.{knockoff, toXHTML}
import scalatags.Text.TypedTag
import scalatags.Text.all.*
import java.nio.file.{Files, Paths}
/**
 * Channel-level metadata for the generated RSS/iTunes podcast feed.
 *
 * @param title       feed title
 * @param link        website URL of the podcast
 * @param description human-readable channel description
 * @param language    language tag published in the feed (e.g. "ru-RU")
 * @param atomLink    self-referencing URL of the feed itself
 * @param imageLink   URL of the podcast artwork
 * @param ownerName   iTunes owner/author name
 * @param email       contact e-mail published in the feed
 * @param category    iTunes category text
 */
case class ITunesInfo(
    title: String,
    link: String,
    description: String,
    language: String,
    atomLink: String,
    imageLink: String,
    ownerName: String,
    email: String,
    category: String
)
object ITunesInfo {

  /** Base website URL; every absolute feed link is derived from it. */
  private val BaseUrl = "https://scalalaz.ru"

  /** Channel metadata for the Scalalaz podcast feed. */
  val Scalalaz: ITunesInfo =
    ITunesInfo(
      title = "Scalalaz Podcast",
      link = BaseUrl,
      description = "Подкаст о программировании на языке Scala (16+)",
      language = "ru-RU",
      atomLink = BaseUrl + "/rss/feed.xml",
      imageLink = BaseUrl + "/files/scalalaz.jpg",
      ownerName = "Scalalaz Podcast",
      email = "info@scalalaz.ru",
      category = "Technology"
    )
}
/**
 * Renders podcast episodes as an RSS 2.0 feed with iTunes extensions and
 * writes the result to `<dir>/feed.xml`.
 *
 * @param dir        output directory for the generated feed file
 * @param iTunesInfo channel-level metadata for the feed header
 */
class RSSWriter(dir: String, iTunesInfo: ITunesInfo) {

  /** Generates the feed for `episodes` (sorted by title) and writes it. */
  def write(episodes: Seq[EpisodeFile]): Unit = {
    val xml = toXML(episodes.sortBy(_.episode.settings.title))
    val path = Paths.get(dir, "feed.xml")
    // The XML prolog declares UTF-8, so encode explicitly instead of relying
    // on the platform default charset (fix: previously plain getBytes).
    Files.write(path, xml.getBytes(java.nio.charset.StandardCharsets.UTF_8))
  }

  /** Builds the full feed document, skipping episodes that have no audio. */
  private def toXML(episodes: Seq[EpisodeFile]): String = {
    import iTunesInfo._
    val head = "<?xml version=\\"1.0\\" encoding=\\"UTF-8\\"?>\\n"
    // An <item> without an audio enclosure is useless, so such episodes are
    // dropped here. Replaces the previous Either + filter(_.isRight) +
    // getOrElse(throw ...) construction, whose throwing branch was unreachable.
    val items: Seq[TypedTag[String]] =
      episodes.collect {
        case e if e.episode.settings.audio.url != "" => toItem(e.episode)
      }
    val xml = tag("rss")(
      attr("version") := "2.0",
      attr("xmlns:itunes") := "http://www.itunes.com/dtds/podcast-1.0.dtd",
      attr("xmlns:atom") := "http://www.w3.org/2005/Atom"
    )(
      tag("channel")(
        tag("title")(title),
        tag("link")(link),
        tag("description")(description),
        tag("language")(language),
        raw(
          s"""<atom:link href="$atomLink" rel="self" type="application/rss+xml" />"""
        ),
        raw(s"""<itunes:author>$ownerName</itunes:author>"""),
        raw(s"""<itunes:image href="$imageLink" />"""),
        raw(s"""<itunes:owner>
               | <itunes:name>$ownerName</itunes:name>
               | <itunes:email>$email</itunes:email>
               |</itunes:owner>
             """.stripMargin),
        raw(s"""<itunes:category text="$category" />"""),
        raw(s"""<itunes:explicit>no</itunes:explicit>"""),
        tag("managingEditor")(s"$email ($ownerName)"),
        items
      )
    )
    head + xml.toString()
  }

  /** Renders a single episode as an RSS `<item>` element. */
  private def toItem(e: Episode): TypedTag[String] = {
    import e.settings._
    tag("item")(
      tag("title")(title),
      raw(s"""<description>
             |<![CDATA[<pre>
             |${toXHTML(knockoff(e.content)).mkString}
             |</pre>]]>
             |</description>""".stripMargin),
      tag("enclosure")(attr("url") := audio.url, attr("type") := audio.`type`, attr("length") := audio.length),
      tag("guid")(attr("isPermaLink") := "false", page),
      tag("pubDate")(RFCDate),
      tag("link")(page)
    )
  }
}
| scalalaz-podcast/scalalaz-gen | src/main/scala/ru/scalalaz/gen/writers/RSSWriter.scala | Scala | apache-2.0 | 3,822 |
package di
import javax.inject.{Provider, Singleton}
import slick.jdbc.JdbcProfile
@Singleton
class JdbcProfileProvider extends Provider[JdbcProfile] {
  // DI provider pinning the application to the Postgres Slick profile.
  // `lazy` defers profile initialisation until the first get() call.
  override lazy val get = slick.jdbc.PostgresProfile
}
| THK-ADV/lwm-reloaded | app/di/JdbcProfileProvider.scala | Scala | mit | 209 |
package models.Services
import java.util.UUID
import javax.inject.Inject
import akka.actor.Status.Success
import models.DAOs.{ProductDAO, UserDAO}
import models.User
import utils.Base64
import scala.concurrent.ExecutionContext.Implicits._
import scala.concurrent.Future
/**
* Created by carlos on 16/10/15.
*/
/**
 * Application-level user operations, delegating persistence to [[UserDAO]].
 *
 * NOTE(review): passwords are Base64-*encoded* via the project's Base64
 * helper, not hashed — presumably standard Base64, which is trivially
 * reversible. Consider a real password hash (bcrypt/scrypt); confirm no
 * stored data depends on the current encoding first.
 */
class UserService @Inject()(userDAO: UserDAO) {

  /** All users in the store. */
  def findListUser(): Future[Seq[User]] = userDAO.list

  /** Persists `user` (password encoded) and returns it with its new id. */
  def addUser(user: User): Future[Option[User]] = {
    val encoded = user.copy(password = Base64.encodeString(user.password))
    userDAO.insert(encoded).map { generatedId =>
      Option(encoded.copy(id = Option(generatedId)))
    }
  }

  /** Looks a user up by primary key. */
  def findUser(id: UUID): Future[Option[User]] = userDAO.findByID(id)

  /**
   * Re-encodes the password and updates the row.
   *
   * NOTE(review): the name's "USer" typo is kept for source compatibility.
   * `user.id.get` throws NoSuchElementException when `id` is None.
   */
  def updateUSer(user: User): Future[Int] = {
    val encoded = user.copy(password = Base64.encodeString(user.password))
    userDAO.update(user.id.get, encoded)
  }

  /** Deletes the user, returning the affected row count. */
  def removeUser(id: UUID): Future[Int] = userDAO.delete(id)

  /** Looks a user up by e-mail address. */
  def findUserByEmail(email: String): Future[Option[User]] = userDAO.findByEmail(email)

  /** Returns the user only when the stored (encoded) password matches. */
  def validateUser(email: String, password: String): Future[Option[User]] = {
    val encoded = Base64.encodeString(password)
    userDAO.findByEmail(email).map(_.filter(_.password == encoded))
  }

  // NOTE(review): duplicates removeUser (and drops the explicit return
  // type); kept because both names are public API.
  def deleteUser(id: UUID) = userDAO.delete(id)
}
| carlosFattor/DoceTentacaoSlick | app/models/Services/UserService.scala | Scala | apache-2.0 | 1,376 |
/**
* Copyright (C) 2012 Inria, University Lille 1.
*
* This file is part of PowerAPI.
*
* PowerAPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* PowerAPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PowerAPI. If not, see <http://www.gnu.org/licenses/>.
*
* Contact: powerapi-user-list@googlegroups.com.
*/
package fr.inria.powerapi.formula.disk.single
import scala.concurrent.duration.DurationInt
import org.junit.Test
import org.scalatest.junit.JUnitSuite
import org.scalatest.junit.ShouldMatchersForJUnit
import akka.actor.ActorSystem
import akka.testkit.TestActorRef
import fr.inria.powerapi.core.Energy
import fr.inria.powerapi.core.Process
import fr.inria.powerapi.core.Tick
import fr.inria.powerapi.core.TickSubscription
import fr.inria.powerapi.formula.disk.api.DiskFormulaMessage
import fr.inria.powerapi.sensor.disk.api.DiskSensorMessage
/**
 * Unit tests for the single-disk energy formula actor: configuration
 * accessors (read/write power and rate), the derived per-byte energy,
 * the sensor-message cache, and the power/energy computation.
 */
class DiskFormulaSuite extends JUnitSuite with ShouldMatchersForJUnit {
  implicit val system = ActorSystem("DiskFormulaSuiteSystem")
  val diskFormula = TestActorRef[DiskFormula].underlyingActor
  // Bytes per megabyte, matching the configured rate values asserted below.
  val megaByte = 1000000.0
  @Test
  def testReadPower() {
    diskFormula.readPower should equal(2.1)
  }
  @Test
  def testWritePower() {
    diskFormula.writePower should equal(2.2)
  }
  @Test
  def testReadRate() {
    diskFormula.readRate should equal(100 * megaByte)
  }
  @Test
  def testWriteRate() {
    diskFormula.writeRate should equal(90 * megaByte)
  }
  @Test
  def testRefreshCache() {
    // A cached message must be retrievable by its tick subscription.
    val old = DiskSensorMessage(rw = Map("n/a" -> (123: Long, 456: Long)), Tick(TickSubscription(Process(123), 500.milliseconds)))
    diskFormula.refreshCache(old)
    diskFormula.cache getOrElse (TickSubscription(Process(123), 500.milliseconds), null) should equal(old)
  }
  @Test
  def testReadEnergyByByte() {
    // joules per byte = watts / (bytes per second)
    diskFormula.readEnergyByByte should equal(2.1 / (100 * megaByte))
  }
  @Test
  def testWriteEnergyByByte() {
    diskFormula.writeEnergyByByte should equal(2.2 / (90 * megaByte))
  }
  @Test
  def testPower() {
    // Energy = (read-byte delta) * J/byte(read) + (write-byte delta) *
    // J/byte(write), from two consecutive sensor messages.
    val duration = 500.milliseconds
    val old = DiskSensorMessage(rw = Map("n/a" -> (100: Long, 200: Long)), Tick(TickSubscription(Process(123), duration)))
    val now = DiskSensorMessage(rw = Map("n/a" -> (500: Long, 400: Long)), Tick(TickSubscription(Process(123), duration)))
    diskFormula.power(now, old) should equal(Energy.fromJoule(((500 - 100) * diskFormula.readEnergyByByte + (400 - 200) * diskFormula.writeEnergyByByte), duration))
  }
  @Test
  def testCompute() {
    // compute() must use the previously cached message as its baseline.
    val duration = 500.milliseconds
    val tick = Tick(TickSubscription(Process(123), duration))
    val old = DiskSensorMessage(rw = Map("n/a" -> (100: Long, 200: Long)), tick)
    diskFormula.refreshCache(old)
    val now = DiskSensorMessage(rw = Map("n/a" -> (500: Long, 400: Long)), tick)
    diskFormula.compute(now) should equal(DiskFormulaMessage(Energy.fromJoule(((500 - 100) * diskFormula.readEnergyByByte + (400 - 200) * diskFormula.writeEnergyByByte), duration), tick))
  }
} | abourdon/powerapi-akka | formulae/formula-disk-single/src/test/scala/fr/inria/powerapi/formula/disk/single/DiskFormulaSuite.scala | Scala | agpl-3.0 | 3,458 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import scala.util.Random
import org.apache.hadoop.fs.Path
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.ml.linalg.{Vector, Vectors, VectorUDT}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.param.shared.HasSeed
import org.apache.spark.ml.util._
import org.apache.spark.sql.types.StructType
/**
* :: Experimental ::
*
* Model produced by [[MinHashLSH]], where multiple hash functions are stored. Each hash function
* is picked from the following family of hash functions, where a_i and b_i are randomly chosen
* integers less than prime:
* `h_i(x) = ((x \cdot a_i + b_i) \mod prime)`
*
* This hash family is approximately min-wise independent according to the reference.
*
* Reference:
* Tom Bohman, Colin Cooper, and Alan Frieze. "Min-wise independent linear permutations."
* Electronic Journal of Combinatorics 7 (2000): R26.
*
* @param randCoefficients Pairs of random coefficients. Each pair is used by one hash function.
*/
@Experimental
@Since("2.1.0")
class MinHashLSHModel private[ml](
    override val uid: String,
    private[ml] val randCoefficients: Array[(Int, Int)])
  extends LSHModel[MinHashLSHModel] {

  @Since("2.1.0")
  override protected[ml] val hashFunction: Vector => Array[Vector] = {
    elems: Vector => {
      require(elems.numNonzeros > 0, "Must have at least 1 non zero entry.")
      // The non-zero indices of the input vector are the members of the set.
      val elemsList = elems.toSparse.indices.toList
      // One min-hash per coefficient pair: min over set members of
      // ((1 + elem) * a + b) mod prime. The 1-based shift keeps index 0
      // from hashing to the constant b for every (a, b) pair.
      val hashValues = randCoefficients.map { case (a, b) =>
        elemsList.map { elem: Int =>
          ((1 + elem) * a + b) % MinHashLSH.HASH_PRIME
        }.min.toDouble
      }
      // TODO: Output vectors of dimension numHashFunctions in SPARK-18450
      // Each hash table currently emits a 1-dimensional dense vector.
      hashValues.map(Vectors.dense(_))
    }
  }

  @Since("2.1.0")
  override protected[ml] def keyDistance(x: Vector, y: Vector): Double = {
    // Jaccard distance over the sets of non-zero indices:
    // 1 - |x ∩ y| / |x ∪ y|; non-zero values are treated as binary 1s.
    val xSet = x.toSparse.indices.toSet
    val ySet = y.toSparse.indices.toSet
    val intersectionSize = xSet.intersect(ySet).size.toDouble
    val unionSize = xSet.size + ySet.size - intersectionSize
    assert(unionSize > 0, "The union of two input sets must have at least 1 elements")
    1 - intersectionSize / unionSize
  }

  @Since("2.1.0")
  override protected[ml] def hashDistance(x: Seq[Vector], y: Seq[Vector]): Double = {
    // Since it's generated by hashing, it will be a pair of dense vectors.
    // TODO: This hashDistance function requires more discussion in SPARK-18454
    // Per hash table, count the components whose hash values differ, then
    // take the minimum over tables (0 iff some table collides completely).
    x.zip(y).map(vectorPair =>
      vectorPair._1.toArray.zip(vectorPair._2.toArray).count(pair => pair._1 != pair._2)
    ).min
  }

  @Since("2.1.0")
  override def copy(extra: ParamMap): MinHashLSHModel = {
    val copied = new MinHashLSHModel(uid, randCoefficients).setParent(parent)
    copyValues(copied, extra)
  }

  @Since("2.1.0")
  override def write: MLWriter = new MinHashLSHModel.MinHashLSHModelWriter(this)
}
/**
* :: Experimental ::
*
* LSH class for Jaccard distance.
*
* The input can be dense or sparse vectors, but it is more efficient if it is sparse. For example,
* `Vectors.sparse(10, Array((2, 1.0), (3, 1.0), (5, 1.0)))`
* means there are 10 elements in the space. This set contains elements 2, 3, and 5. Also, any
* input vector must have at least 1 non-zero index, and all non-zero values are
* treated as binary "1" values.
*
* References:
* <a href="https://en.wikipedia.org/wiki/MinHash">Wikipedia on MinHash</a>
*/
@Experimental
@Since("2.1.0")
class MinHashLSH(override val uid: String) extends LSH[MinHashLSHModel] with HasSeed {

  @Since("2.1.0")
  override def setInputCol(value: String): this.type = super.setInputCol(value)

  @Since("2.1.0")
  override def setOutputCol(value: String): this.type = super.setOutputCol(value)

  @Since("2.1.0")
  override def setNumHashTables(value: Int): this.type = super.setNumHashTables(value)

  @Since("2.1.0")
  def this() = {
    this(Identifiable.randomUID("mh-lsh"))
  }

  /** @group setParam */
  @Since("2.1.0")
  def setSeed(value: Long): this.type = set(seed, value)

  @Since("2.1.0")
  override protected[ml] def createRawLSHModel(inputDim: Int): MinHashLSHModel = {
    // The model hashes 1-based element indices modulo HASH_PRIME, so the
    // input dimension must not exceed the modulus.
    require(inputDim <= MinHashLSH.HASH_PRIME,
      s"The input vector dimension $inputDim exceeds the threshold ${MinHashLSH.HASH_PRIME}.")
    val rand = new Random($(seed))
    // One (a, b) pair per hash table, drawn from the seeded RNG:
    // a in [1, HASH_PRIME - 1] (never zero), b in [0, HASH_PRIME - 2].
    val randCoefs: Array[(Int, Int)] = Array.fill($(numHashTables)) {
      (1 + rand.nextInt(MinHashLSH.HASH_PRIME - 1), rand.nextInt(MinHashLSH.HASH_PRIME - 1))
    }
    new MinHashLSHModel(uid, randCoefs)
  }

  @Since("2.1.0")
  override def transformSchema(schema: StructType): StructType = {
    // The input column must contain ML vectors.
    SchemaUtils.checkColumnType(schema, $(inputCol), new VectorUDT)
    validateAndTransformSchema(schema)
  }

  @Since("2.1.0")
  override def copy(extra: ParamMap): this.type = defaultCopy(extra)
}
@Since("2.1.0")
object MinHashLSH extends DefaultParamsReadable[MinHashLSH] {

  // A large prime smaller than sqrt(2^63 − 1)
  // Modulus of every min-hash function in MinHashLSHModel.
  // NOTE(review): the hash computes (1 + elem) * a + b in Int arithmetic,
  // where a and b can approach HASH_PRIME — confirm the possible Int
  // overflow there is intended/acceptable.
  private[ml] val HASH_PRIME = 2038074743

  @Since("2.1.0")
  override def load(path: String): MinHashLSH = super.load(path)
}
@Since("2.1.0")
object MinHashLSHModel extends MLReadable[MinHashLSHModel] {

  @Since("2.1.0")
  override def read: MLReader[MinHashLSHModel] = new MinHashLSHModelReader

  @Since("2.1.0")
  override def load(path: String): MinHashLSHModel = super.load(path)

  private[MinHashLSHModel] class MinHashLSHModelWriter(instance: MinHashLSHModel)
    extends MLWriter {

    // Persisted form: the (a, b) coefficient pairs flattened into a single
    // even-length array [a0, b0, a1, b1, ...].
    private case class Data(randCoefficients: Array[Int])

    override protected def saveImpl(path: String): Unit = {
      DefaultParamsWriter.saveMetadata(instance, path, sc)
      val data = Data(instance.randCoefficients.flatMap(tuple => Array(tuple._1, tuple._2)))
      val dataPath = new Path(path, "data").toString
      // Single-partition write so the data lands in one parquet file.
      sparkSession.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath)
    }
  }

  private class MinHashLSHModelReader extends MLReader[MinHashLSHModel] {

    /** Checked against metadata when loading model */
    private val className = classOf[MinHashLSHModel].getName

    override def load(path: String): MinHashLSHModel = {
      val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
      val dataPath = new Path(path, "data").toString
      val data = sparkSession.read.parquet(dataPath).select("randCoefficients").head()
      // Re-pair the flattened coefficients written by MinHashLSHModelWriter.
      val randCoefficients = data.getAs[Seq[Int]](0).grouped(2)
        .map(tuple => (tuple(0), tuple(1))).toArray
      val model = new MinHashLSHModel(metadata.uid, randCoefficients)
      DefaultParamsReader.getAndSetParams(model, metadata)
      model
    }
  }
}
| akopich/spark | mllib/src/main/scala/org/apache/spark/ml/feature/MinHashLSH.scala | Scala | apache-2.0 | 7,404 |
package protocol.utils
import java.util.UUID
import scala.concurrent.duration.{FiniteDuration, _}
import scala.concurrent.{Await, Future}
import akka.stream.Materializer
import akka.stream.scaladsl._
import akka.stream.testkit._
import akka.stream.testkit.scaladsl._
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.model.headers.CacheDirectives._
import akka.http.scaladsl.model.HttpMethods._
import akka.util.ByteString
import org.scalactic.source.Position
import org.scalatest._
import org.scalatest.matchers.must.Matchers
import org.scalatest.wordspec.AnyWordSpec
import play.api.libs.json._
/**
 * Shared response-assertion helpers for the SockJS protocol test suites.
 *
 * Requires the mixing suite to also provide a `TestServer` (actor system `as`)
 * and a `TestClient` (the `http` request runner).
 */
trait TestHelpers extends AnyWordSpec with Matchers with OptionValues { self: TestServer with TestClient =>

  /** Blocks the calling thread for `duration`, driven by the actor system's scheduler. */
  def sleep(duration: FiniteDuration): Unit = {
    val scheduler = as.scheduler
    val dispatcher = as.dispatcher
    val f = akka.pattern.after(duration, scheduler)(Future.successful(()))(dispatcher)
    Await.ready(f, Duration.Inf)
  }

  /** Builds a fresh SockJS session path: "/<serverId>/<random session id>". */
  def session(serverId: String = "000") = "/" + serverId + "/" + UUID.randomUUID().toString

  /** Assertion DSL over an [[HttpResponse]], used as e.g. `http(request).verify200()`. */
  implicit class Verifiers(val result: HttpResponse) {
    def verify200()(implicit pos: Position) = result.status mustEqual OK
    def verify204()(implicit pos: Position) = result.status mustEqual NoContent
    def verify304()(implicit pos: Position) = result.status mustEqual NotModified
    def verify404()(implicit pos: Position) = result.status mustEqual NotFound

    // A 405 must carry an Allow header but no content type and no body.
    def verify405()(implicit pos: Position) = {
      result.status mustEqual MethodNotAllowed
      result.header[`Content-Type`] mustBe None
      result.header[Allow] mustBe a[Some[_]]
      body mustBe empty
    }

    def verify500()(implicit pos: Position) = result.status mustEqual InternalServerError

    def verifyTextPlain()(implicit pos: Position) = verifyMediaType(MediaTypes.`text/plain`)
    def verifyTextHtml()(implicit pos: Position) = verifyMediaType(MediaTypes.`text/html`)
    def verifyApplicationJavascript()(implicit pos: Position) = verifyMediaType(MediaTypes.`application/javascript`)

    /** Asserts the media type together with the mandatory UTF-8 charset. */
    def verifyMediaType(mtype: MediaType)(implicit pos: Position) = {
      result.entity.contentType.mediaType mustBe mtype
      result.entity.contentType.charsetOption mustBe Some(HttpCharsets.`UTF-8`)
    }

    def verifyNoContentType()(implicit pos: Position) = {
      result.entity.contentType mustBe ContentTypes.NoContentType
    }

    def verifyNoCookie()(implicit pos: Position) = result.header[`Set-Cookie`] mustBe None

    /** Asserts a JSESSIONID session cookie with the given value, scoped to "/". */
    def verifyCookie(value: String)(implicit pos: Position) = {
      val Some(cookie) = result.header[`Set-Cookie`].map(_.cookie)
      cookie.name mustEqual "JSESSIONID"
      cookie.value mustEqual value
      cookie.path mustEqual Some("/")
    }

    def verifyCORS(origin: Option[String])(implicit pos: Position) = origin match {
      case Some(value) =>
        // I have to look for the header manually because origin could be an invalid
        // value and akka-http will fail to parse it
        result.headers.find(_.is("access-control-allow-origin")).map(_.value) mustBe Some(value)
        result.header[`Access-Control-Allow-Credentials`] mustBe Some(`Access-Control-Allow-Credentials`(true))
      case _ =>
        result.header[`Access-Control-Allow-Origin`].map(_.range) mustBe Some(HttpOriginRange.*)
        result.header[`Access-Control-Allow-Credentials`] mustBe None
    }

    def verifyNotCached()(implicit pos: Position) = {
      result.header[`Cache-Control`] mustBe Some(`Cache-Control`(`no-store`, `no-cache`, `no-transform`, `must-revalidate`, `max-age`(0)))
      result.header[Expires] mustBe None
      result.header[`Last-Modified`] mustBe None
    }

    /** Full check for the iframe page: 200, HTML, long-lived caching, ETag, no cookie. */
    def verifyIFrame()(implicit pos: Position) = {
      verify200()
      verifyTextHtml()
      result.header[`Cache-Control`].map(_.value).orNull must include("public")
      result.header[`Cache-Control`].map(_.value).orNull must include regex "max-age=[1-9][0-9]{6}"
      result.header[Expires] mustBe a[Some[_]]
      result.header[ETag] mustBe a[Some[_]]
      result.header[`Last-Modified`] mustBe None
      //TODO: check body
      verifyNoCookie()
      discardBody()
    }

    def verifyOpenFrame()(implicit pos: Position) = {
      verify200()
      body mustBe "o\\n"
    }

    /** Parses the complete response body as JSON. */
    def json: JsValue = Json.parse(body)

    /** Drains the entity and decodes it as UTF-8 (5 second timeout). */
    def body: String = {
      val bytes = result.entity.dataBytes.runFold(ByteString.empty)(_ ++ _)
      Await.result(bytes, 5.seconds).utf8String
    }

    def discardBody(): Unit = {
      Await.result(result.discardEntityBytes().future(), 5.seconds)
    }

    /** Exposes the chunked body as a test probe of delimiter-terminated frames. */
    def stream(delimiter: String)(implicit mat: Materializer): TestSubscriber.Probe[String] = {
      result.entity.dataBytes
        .via(Framing.delimiter(ByteString(delimiter), Int.MaxValue))
        .map(_.utf8String + delimiter)
        .runWith(TestSink.probe[String])
    }

    def cancel(): Unit = stream("\\n").cancel()
  }

  /** Checks CORS preflight behavior of `url` for both a normal and the literal "null" origin. */
  def verifyOptions(url: String, allowedMethods: String)(implicit pos: Position) = {
    for (origin <- List("test", "null")) {
      val r = http(HttpRequest(OPTIONS, uri = url, headers = List(RawHeader("Origin", origin))))
      r.status must (be(OK) or be(NoContent))
      r.header[`Cache-Control`].map(_.value).orNull must include("public")
      r.header[`Cache-Control`].map(_.value).orNull must include regex "max-age=[1-9][0-9]{6}"
      r.header[Expires] mustBe a[Some[_]]
      r.header[`Access-Control-Max-Age`].map(_.value.toInt).getOrElse(0) must be > 1000000
      r.body mustBe empty
      //TODO: test allowed methods
      r.verifyCORS(Some(origin))
    }
  }
}
| fdimuccio/play2-sockjs | src/test/scala/protocol/utils/TestHelpers.scala | Scala | apache-2.0 | 5,664 |
package org.tmoerman.lab
import com.holdenkarau.spark.testing.SharedSparkContext
import org.apache.spark.mllib.linalg.SparseMatrix
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.scalatest.{FlatSpec, Matchers}
import org.tmoerman.plongeur.test.TestResources
import org.tmoerman.plongeur.util.RDDFunctions._
/**
* @author Thomas Moerman
*/
/** Exploratory specs poking at Spark MLlib APIs with the shared test resources. */
class MLLab extends FlatSpec with Matchers with SharedSparkContext with TestResources {

  behavior of "columnSimilarities"

  it should "compute similarities between the dimensions" in {
    val featureMatrix = new RowMatrix(circle1kRDD.map(_.features))
    val sims = featureMatrix.columnSimilarities()
  }

  behavior of "distance matrix"

  it should "be computable" in {
    val comboSets = circle1kRDD.distinctComboSets
    println(comboSets.take(3).mkString("\\n"))
  }

  import org.apache.spark.mllib.linalg.BreezeConversions._

  behavior of "matrix power"

  it should "compute matrix power" ignore {
    // Diagonal entries (3, 5, 7) in COO form: (row, col, value) with row == col.
    val entries = Seq(3.0, 5.0, 7.0).zipWithIndex.map { case (value, index) => (index, index, value) }
    val m = SparseMatrix.fromCOO(3, 3, entries).toBreeze
    println(m :^ 0.5)
  }
}
// IntelliJ Scala plugin type-inference fixture (SCL-4095): the /*start*/../*end*/
// markers delimit the expression whose inferred type is checked against the
// trailing expected-type comment line. NOTE(review): keep the markers and the
// final `//Z[B]` line intact — the test harness reads them.
class A
class C
class B extends C
class Z[+T]
def goo[A, BB >: A](x: A): Z[BB] = new Z[BB]
val zzzzzz : Z[C] = /*start*/goo(new B)/*end*/
//Z[B]
package com.sksamuel.elastic4s.search.aggs
import com.sksamuel.elastic4s.testkit.ElasticSugar
import org.scalatest.{FreeSpec, Matchers}
/**
 * Base class for aggregation tests: creates an "aggregations" index with a
 * "breakingbad" mapping and seeds ten character documents before tests run.
 */
abstract class AbstractAggregationTest extends FreeSpec with Matchers with ElasticSugar {

  // Only "job" and "actor" are mapped as keywords; other fields use dynamic mapping.
  client.execute {
    createIndex("aggregations") mappings {
      mapping("breakingbad") fields(
        keywordField("job"),
        keywordField("actor")
      )
    }
  }.await

  client.execute(
    bulk(
      indexInto("aggregations/breakingbad") fields("name" -> "walter white", "job" -> "meth kingpin", "age" -> 50, "actor" -> "bryan"),
      indexInto("aggregations/breakingbad") fields("name" -> "hank schrader", "job" -> "dea agent", "age" -> 55, "actor" -> "dean"),
      indexInto("aggregations/breakingbad") fields("name" -> "jesse pinkman", "job" -> "meth sidekick", "age" -> 30),
      indexInto("aggregations/breakingbad") fields("name" -> "gus fring", "job" -> "meth kingpin", "age" -> 60),
      indexInto("aggregations/breakingbad") fields("name" -> "steven gomez", "job" -> "dea agent", "age" -> 50),
      indexInto("aggregations/breakingbad") fields("name" -> "saul goodman", "job" -> "lawyer", "age" -> 55),
      indexInto("aggregations/breakingbad") fields("name" -> "Huell Babineaux", "job" -> "heavy", "age" -> 43, "actor" -> "lavell"),
      indexInto("aggregations/breakingbad") fields("name" -> "mike ehrmantraut", "job" -> "heavy", "age" -> 45),
      indexInto("aggregations/breakingbad") fields("name" -> "lydia rodarte quayle", "job" -> "meth sidekick", "age" -> 40),
      indexInto("aggregations/breakingbad") fields("name" -> "todd alquist", "job" -> "meth sidekick", "age" -> 26)
    )
  ).await

  // Make the documents searchable and wait until all 10 are visible before any test runs.
  refresh("aggregations")
  blockUntilCount(10, "aggregations")
}
| tyth/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/search/aggs/AbstractAggregationTest.scala | Scala | apache-2.0 | 1,733 |
package p
// Minimal class with one side-effect-free no-op method (compiler test fixture).
class D {
  def ok(): Unit = ()
}
| scala/scala | test/files/pos/t7232f/q.scala | Scala | apache-2.0 | 40 |
/**
* Flexmojos is a set of maven goals to allow maven users to compile, optimize and test Flex SWF, Flex SWC, Air SWF and Air SWC.
* Copyright (C) 2008-2012 Marvin Froeder <marvin@flexmojos.net>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package net.flexmojos.oss.tests.matcher
import apparat.abc._
import apparat.swf._
import java.io.{
File => JFile
}
import org.hamcrest.{ TypeSafeMatcher, Description, Matcher }
/** Factory for [[ClassMatcher]]; import `ClassMatcher.hasClass` for assertThat-style use. */
object ClassMatcher {
  /** Matches SWF/SWC files that define a class with the given qualified name ("pkg:Name"). */
  def hasClass(classname: String): Matcher[JFile] = {
    new ClassMatcher(classname)
  }
}
/**
 * Hamcrest matcher asserting that a SWF/SWC file defines at least one of the
 * given qualified class names ("namespace:Name"). Names that did not match are
 * collected so the mismatch description can list what was actually found.
 */
class ClassMatcher(classnames: String*) extends TypeSafeMatcher[JFile] {
  // NOTE(review): assigned but never read back; kept to avoid changing observable state.
  private var fileTested: JFile = null
  private val found = new StringBuilder()

  def matchesSafely(file: JFile): Boolean = {
    fileTested = file
    // Fixed: the original used `return true` inside the for-comprehension body,
    // a nonlocal return that is implemented by throwing NonLocalReturnControl
    // through apparat's iteration code. Use a plain flag instead. (The full scan
    // means names after a match are also appended to `found`, but `found` is
    // only rendered on mismatch, where all names were appended anyway.)
    var matched = false
    for {
      tag <- Swf fromFile file
      abc <- Abc fromTag tag
      nominal <- abc.types
    } {
      val foundName = nominal.name.namespace.name.name + ":" + nominal.name.name.name
      if (classnames contains foundName) {
        matched = true
      } else {
        found append foundName
        found append ", "
      }
    }
    matched
  }

  /** Describes the expected class names and, on mismatch, what was found instead. */
  def describeTo(desc: Description): Unit = {
    desc.appendText(" contains ")
    for (classname <- classnames) {
      desc.appendValue(classname)
      desc.appendText(", ")
    }
    desc.appendValue(" instead found ")
    desc.appendText(found.toString())
  }
}
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.javalib.io
import java.io._
import scala.language.implicitConversions
import org.junit.Test
import org.junit.Assert._
import org.scalajs.testsuite.utils.AssertThrows._
/**
 * Behavioral test suite for `InputStream` implementations backed by an
 * in-memory byte sequence; concrete suites provide [[mkStream]].
 */
trait CommonStreamsTests {

  /** Builds the stream under test from a sequence of byte values. */
  def mkStream(seq: Seq[Int]): InputStream

  private val length = 50

  // A fresh stream over the byte values 1..50 for each test.
  private def newStream: InputStream = mkStream(1 to length)

  private implicit def seqToArray(seq: Seq[Int]): Array[Byte] =
    seq.toArray.map(_.toByte)

  // Fixed: this test was declared as `should_provide_read()()` with a stray
  // extra empty parameter list; every sibling test uses a single list.
  @Test def should_provide_read(): Unit = {
    val stream = newStream
    for (i <- 1 to length)
      assertEquals(i, stream.read())
    // An exhausted stream must keep returning -1.
    for (_ <- 1 to 5)
      assertEquals(-1, stream.read())
  }

  @Test def should_provide_read_from_buf(): Unit = {
    val stream = newStream
    val buf = new Array[Byte](10)
    assertEquals(10, stream.read(buf))
    assertArrayEquals(1 to 10, buf)
    assertEquals(35L, stream.skip(35))
    // Partial read near EOF: only 5 bytes available; the buffer tail is untouched.
    assertEquals(5, stream.read(buf))
    assertArrayEquals((46 to 50) ++ (6 to 10), buf)
    assertEquals(-1, stream.read(buf))
    assertEquals(-1, stream.read())
  }

  @Test def should_provide_full_argument_read(): Unit = {
    val stream = newStream
    val buf = new Array[Byte](20)
    assertEquals(5, stream.read(buf, 10, 5))
    assertArrayEquals(Seq.fill(10)(0) ++ (1 to 5) ++ Seq.fill(5)(0), buf)
    assertEquals(20, stream.read(buf, 0, 20))
    assertArrayEquals(6 to 25, buf)
    // Zero-length read is a no-op.
    assertEquals(0, stream.read(buf, 10, 0))
    assertArrayEquals(6 to 25, buf)
    // Out-of-range offset/length combinations must throw and leave the buffer intact.
    expectThrows(classOf[IndexOutOfBoundsException], stream.read(buf, -1, 0))
    expectThrows(classOf[IndexOutOfBoundsException], stream.read(buf, 0, -1))
    expectThrows(classOf[IndexOutOfBoundsException], stream.read(buf, 100, 0))
    expectThrows(classOf[IndexOutOfBoundsException], stream.read(buf, 10, 100))
    assertArrayEquals(6 to 25, buf)
    assertEquals(20L, stream.skip(20))
    assertEquals(5, stream.read(buf, 0, 10))
    assertArrayEquals((46 to 50) ++ (11 to 25), buf)
    assertEquals(-1, stream.read(buf, 0, 10))
    assertArrayEquals((46 to 50) ++ (11 to 25), buf)
  }

  @Test def should_provide_available(): Unit = {
    val stream = newStream
    def mySkip(n: Int) = for (_ <- 1 to n) assertNotEquals(stream.read(), -1)
    def check(n: Int) = assertEquals(n, stream.available)
    check(50)
    mySkip(5)
    check(45)
    assertEquals(10L, stream.skip(10))
    check(35)
    mySkip(30)
    check(5)
    // skip past EOF only consumes what is left.
    assertEquals(5L, stream.skip(20))
    check(0)
  }

  @Test def should_provide_skip(): Unit = {
    val stream = newStream
    assertEquals(7L, stream.skip(7))
    for (i <- 8 to 32)
      assertEquals(i, stream.read())
    assertEquals(0L, stream.skip(0))
    assertEquals(33, stream.read())
    // Negative skip is a no-op, not an error.
    assertEquals(0L, stream.skip(-4))
    assertEquals(34, stream.read())
    assertEquals(16L, stream.skip(30))
    assertEquals(0L, stream.skip(30))
  }

  @Test def should_return_true_from_markSupported(): Unit = {
    assertTrue(newStream.markSupported)
  }

  // close() must not invalidate the stream for these in-memory implementations.
  @Test def should_provide_no_op_close(): Unit = {
    val stream = newStream
    for (i <- 1 to length) {
      stream.close()
      assertEquals(i, stream.read())
    }
  }

  @Test def should_provide_mark_and_reset(): Unit = {
    val stream = newStream
    def read(range: Range) = for (i <- range) assertEquals(i, stream.read())
    read(1 to 10)
    stream.reset() // mark must be 0 at creation
    read(1 to 5)
    stream.mark(length)
    read(6 to 22)
    stream.reset()
    read(6 to 20)
    stream.reset()
    read(6 to 25)
    stream.reset()
    assertEquals(40L, stream.skip(40))
    stream.mark(length)
    read(46 to 50)
    stream.reset()
    read(46 to 50)
    // Marking at EOF: reset keeps returning EOF.
    stream.mark(length)
    assertEquals(-1, stream.read())
    stream.reset()
    assertEquals(-1, stream.read())
  }

  // read() returns unsigned byte values (0..255), never negative except EOF.
  @Test def should_return_positive_integers_when_calling_read(): Unit = {
    val stream = mkStream(Seq(-1, -2, -3))
    assertEquals(255, stream.read())
    assertEquals(254, stream.read())
    assertEquals(253, stream.read())
    assertEquals(-1, stream.read())
  }
}
| nicolasstucki/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/javalib/io/CommonStreamsTests.scala | Scala | apache-2.0 | 4,306 |
package edu.gemini.seqexec.server
/**
* Created by jluhrs on 5/18/15.
*/
/** Root of the closed hierarchy of failures Seqexec can report. */
sealed trait SeqexecFailure

object SeqexecFailure {

  /** Seqexec does not know how to deal with instrument in sequence. */
  case class UnrecognizedInstrument(name: String) extends SeqexecFailure

  /** Something went wrong while running a sequence. **/
  case class Execution(errMsg: String) extends SeqexecFailure

  /** Exception thrown while running a sequence. */
  case class SeqexecException(ex: Throwable) extends SeqexecFailure

  /** Invalid operation on a Sequence */
  case class InvalidOp(errMsg: String) extends SeqexecFailure

  /** Indicates an unexpected problem while performing a Seqexec operation. */
  case class Unexpected(msg: String) extends SeqexecFailure

  /** Timeout */
  case class Timeout(msg: String) extends SeqexecFailure

  /**
   * Renders a failure as a human-readable message.
   * (Consistency fix: the SeqexecException case previously used `+`
   * concatenation while every other case used string interpolation;
   * the produced text is unchanged.)
   */
  def explain(f: SeqexecFailure): String = f match {
    case UnrecognizedInstrument(name) => s"Unrecognized instrument: $name"
    case Execution(errMsg)            => s"Sequence execution failed with error $errMsg"
    case SeqexecException(ex)         => s"Application exception: ${ex.getMessage}"
    case InvalidOp(msg)               => s"Invalid operation: $msg"
    case Unexpected(msg)              => s"Unexpected error: $msg"
    case Timeout(msg)                 => s"Timeout while waiting for $msg"
  }
}
| arturog8m/ocs | bundle/edu.gemini.seqexec.server/src/main/scala/edu/gemini/seqexec/server/SeqexecFailure.scala | Scala | bsd-3-clause | 1,353 |
package org.jetbrains.plugins.scala
package debugger
import java.io.File
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicReference
import com.intellij.debugger.DebuggerManagerEx
import com.intellij.debugger.engine._
import com.intellij.debugger.engine.evaluation._
import com.intellij.debugger.engine.evaluation.expression.EvaluatorBuilder
import com.intellij.debugger.engine.events.SuspendContextCommandImpl
import com.intellij.debugger.impl._
import com.intellij.diagnostic.ThreadDumper
import com.intellij.execution.Executor
import com.intellij.execution.application.{ApplicationConfiguration, ApplicationConfigurationType}
import com.intellij.execution.configurations.RunnerSettings
import com.intellij.execution.executors.DefaultDebugExecutor
import com.intellij.execution.process.{ProcessAdapter, ProcessEvent, ProcessHandler, ProcessListener}
import com.intellij.execution.runners.{ExecutionEnvironmentBuilder, ProgramRunner}
import com.intellij.execution.ui.RunContentDescriptor
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.module.Module
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.{Disposer, Key, Ref}
import com.intellij.psi.PsiDocumentManager
import com.intellij.psi.search.GlobalSearchScope
import com.intellij.testFramework.{EdtTestUtil, ThreadTracker}
import com.intellij.util.concurrency.Semaphore
import com.intellij.xdebugger.XDebuggerManager
import com.intellij.xdebugger.breakpoints.XBreakpointType
import com.sun.jdi.VoidValue
import org.jetbrains.java.debugger.breakpoints.properties.JavaLineBreakpointProperties
import org.jetbrains.plugins.scala.base.ScalaSdkOwner
import org.jetbrains.plugins.scala.debugger.breakpoints.ScalaLineBreakpointType
import org.jetbrains.plugins.scala.debugger.evaluation.ScalaCodeFragmentFactory
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiManager
import org.junit.Assert
import scala.collection.mutable
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}
/**
* User: Alefas
* Date: 13.10.11
*/
/**
 * Base class for Scala-plugin debugger integration tests: builds the test
 * project, launches the debuggee under the debug executor, manages line
 * breakpoints, and provides expression-evaluation and location assertions on
 * top of the IntelliJ debugger API.
 */
abstract class ScalaDebuggerTestCase extends ScalaDebuggerTestBase with ScalaSdkOwner {

  /** Marker stripped by [[addFileWithBreakpoints]]; each occurrence registers a breakpoint on its line. */
  protected val bp = "<breakpoint>"

  // Registered breakpoints as (file name, 0-based line, lambda ordinal; -1 = whole line).
  private val breakpoints: mutable.Set[(String, Int, Integer)] = mutable.Set.empty

  @volatile
  private var breakpointTracker: BreakpointTracker = _

  private val threadLeakDisposable = new TestDisposable

  override def setUp(): Unit = {
    super.setUp()
    //todo: properly fix thread leak
    ThreadTracker.longRunningThreadCreated(threadLeakDisposable, "DebugProcessEvents")
    if (needMake) {
      compiler.rebuild().assertNoProblems(allowWarnings = true)
      saveChecksums()
    }
  }

  override protected def tearDown(): Unit = {
    super.tearDown()
    Disposer.dispose(threadLeakDisposable)
  }

  /**
   * Runs `mainClass` under the debugger, executes `callback`, then tears the
   * session down. Fails if `shouldStopAtBreakpoint` is set but no breakpoint
   * was ever hit; otherwise rethrows any failure from `callback`.
   */
  protected def runDebugger(mainClass: String = mainClassName,
                            debug: Boolean = false,
                            shouldStopAtBreakpoint: Boolean = true)(callback: => Unit): Unit = {
    setupBreakpoints()
    val processHandler = runProcess(mainClass, debug)
    val debugProcess = getDebugProcess

    def failedToStopAtBreakpoint: Boolean = shouldStopAtBreakpoint && !breakpointTracker.wasAtBreakpoint

    val result = Try {
      callback
    }
    if (failedToStopAtBreakpoint) {
      result.failed.foreach(_.printStackTrace())
      Assert.fail("Stop at breakpoint expected")
    }
    else {
      EdtTestUtil.runInEdtAndWait(() => {
        clearXBreakpoints()
        val requestBeforeStop = debugProcess.getManagerThread.getCurrentRequest
        debugProcess.stop(true)
        /**
         * Non 100% reliable attempt to avoid such exceptions:
         * {{{
         * java.lang.IllegalStateException: Expected com.intellij.debugger.impl.InvokeThread$WorkerThreadRequest@8a5ebc8 instead of com.intellij.debugger.impl.InvokeThread$WorkerThreadRequest@61cb3c3a closed=false
         * at com.intellij.debugger.impl.InvokeThread.lambda$run$0(InvokeThread.java:134)
         * }}}
         *
         * Such exceptions occur because `debugProcess.stop` does async stuff under the hood (see [[DebuggerManagerThreadImpl#terminateAndInvoke]]
         * and there is a race condition with `processHandler.destroyProcess()`
         * (just try to comment this sleep and see the logger exceptions)
         *
         * @todo create some reliable synchronized version of [[com.intellij.debugger.engine.DebugProcessImpl#stop(boolean)]] in platform
         */
        Thread.sleep(150)
        breakpointTracker.removeListener(debugProcess)
        breakpointTracker = null
        processHandler.destroyProcess()
        val timeout = 10.seconds
        Assert.assertTrue(s"Debuggee process have not exited for $timeout",
          processHandler.waitFor(timeout.toMillis))
        ThreadTracker.awaitJDIThreadsTermination(10, TimeUnit.SECONDS)
      })
      result.failed.foreach(throw _)
    }
  }

  /** Starts the debuggee and wires up stdout echoing (when `debug`) plus breakpoint tracking. */
  private def runProcess(mainClass: String = mainClassName, debug: Boolean = false): ProcessHandler = {
    val runner = ProgramRunner.PROGRAM_RUNNER_EP.getExtensions.find {
      _.getClass == classOf[GenericDebuggerRunner]
    }.get
    val processHandler = runProcess(mainClass, getModule, classOf[DefaultDebugExecutor], new ProcessAdapter {
      override def startNotified(event: ProcessEvent): Unit =
        tryInitBreakpointTracker()

      override def onTextAvailable(event: ProcessEvent, outputType: Key[_]): Unit = {
        val text = event.getText
        if (debug) print(text)
      }
    }, runner)
    // The tracker is initialized twice on purpose: startNotified may race with this call,
    // and tryInitBreakpointTracker is idempotent (synchronized null-check).
    tryInitBreakpointTracker()
    processHandler
  }

  /** Builds an ApplicationConfiguration for `className` and runs it, blocking until the handler is available. */
  private def runProcess(className: String,
                         module: Module,
                         executorClass: Class[_ <: Executor],
                         listener: ProcessListener,
                         runner: ProgramRunner[_ <: RunnerSettings]): ProcessHandler = {
    val configuration: ApplicationConfiguration = new ApplicationConfiguration("app", module.getProject, ApplicationConfigurationType.getInstance)
    configuration.setModule(module)
    configuration.setMainClassName(className)
    val executor: Executor = Executor.EXECUTOR_EXTENSION_NAME.findExtension(executorClass)
    val executionEnvironmentBuilder: ExecutionEnvironmentBuilder = new ExecutionEnvironmentBuilder(module.getProject, executor)
    executionEnvironmentBuilder.runProfile(configuration)
    val semaphore: Semaphore = new Semaphore
    semaphore.down()
    val processHandler: AtomicReference[ProcessHandler] = new AtomicReference[ProcessHandler]
    val environment = executionEnvironmentBuilder.build
    environment.setCallback { (descriptor: RunContentDescriptor) =>
      val handler: ProcessHandler = descriptor.getProcessHandler
      assert(handler != null)
      handler.addProcessListener(listener)
      processHandler.set(handler)
      semaphore.up()
    }
    EdtTestUtil.runInEdtAndWait(() =>
      runner.execute(environment)
    )
    // Block until the run-content callback above has published the handler.
    semaphore.waitFor()
    processHandler.get
  }

  protected def getDebugProcess: DebugProcessImpl =
    DebuggerManagerEx.getInstanceEx(getProject).getContext.getDebugProcess

  protected def positionManager: ScalaPositionManager = {
    val process = getDebugProcess
    ScalaPositionManager.instance(process).getOrElse {
      new ScalaPositionManager(process)
    }
  }

  /** Resumes the currently paused context and waits for the resume command to complete. */
  protected def resume(): Unit = {
    val resumeCommand = getDebugProcess.createResumeCommand(currentSuspendContext())
    getDebugProcess.getManagerThread.invokeAndWait(resumeCommand)
  }

  protected def addBreakpoint(line: Int, fileName: String = mainFileName, lambdaOrdinal: Integer = -1): Unit = {
    breakpoints += ((fileName, line, lambdaOrdinal))
  }

  protected def clearBreakpoints(): Unit = {
    breakpoints.clear()
    clearXBreakpoints()
  }

  /** Materializes the registered (file, line, ordinal) triples as real XLine breakpoints. */
  private def setupBreakpoints(): Unit = {
    invokeAndWaitInTransaction {
      breakpoints.foreach {
        case (fileName, line, ordinal) =>
          val ioFile = new File(srcDir, fileName)
          val file = getVirtualFile(ioFile)
          val xBreakpointManager = XDebuggerManager.getInstance(getProject).getBreakpointManager
          val properties = new JavaLineBreakpointProperties
          properties.setLambdaOrdinal(ordinal)
          inWriteAction {
            xBreakpointManager.addLineBreakpoint(scalaLineBreakpointType, file.getUrl, line, properties)
          }
      }
    }
  }

  /** Puts a breakpoint inside library method `classQName.methodName`, `relativeLineNumber` lines below its name. */
  protected def setupLibraryBreakpoint(classQName: String, methodName: String, relativeLineNumber: Int = 1): Unit = {
    invokeAndWaitInTransaction {
      implicit val project: Project = getProject
      val psiClass = ScalaPsiManager.instance.getCachedClass(GlobalSearchScope.allScope(getProject), classQName)
      val method = psiClass.map(_.getNavigationElement.asInstanceOf[ScTypeDefinition]).flatMap(_.functions.find(_.name == methodName))
      Assert.assertTrue(s"Method $methodName of $classQName not found", method.isDefined)
      val file = method.get.getContainingFile
      val document = PsiDocumentManager.getInstance(getProject).getDocument(file)
      val vFile = file.getVirtualFile
      val methodDefLine = method.get.nameId.getTextRange.getStartOffset // method element can contain doc comment
      val methodLine = document.getLineNumber(methodDefLine)
      val lineNumber = methodLine + relativeLineNumber
      val lineText = document.getImmutableCharSequence.subSequence(document.getLineStartOffset(lineNumber), document.getLineEndOffset(lineNumber))
      val xBreakpointManager = XDebuggerManager.getInstance(getProject).getBreakpointManager
      val properties = new JavaLineBreakpointProperties
      inWriteAction {
        xBreakpointManager.addLineBreakpoint(scalaLineBreakpointType, vFile.getUrl, methodLine + relativeLineNumber, properties)
        //println(s"Breakpoint set on line $lineNumber: `$lineText` in `$classQName``")
      }
    }
  }

  private def clearXBreakpoints(): Unit = {
    EdtTestUtil.runInEdtAndWait(() => {
      val xBreakpointManager = XDebuggerManager.getInstance(getProject).getBreakpointManager
      inWriteAction {
        xBreakpointManager.getAllBreakpoints.foreach(xBreakpointManager.removeBreakpoint)
      }
    })
  }

  protected def scalaLineBreakpointType = XBreakpointType.EXTENSION_POINT_NAME.findExtension(classOf[ScalaLineBreakpointType])

  /** Blocks until the debuggee pauses at a breakpoint and returns the paused context; fails otherwise. */
  protected def waitForBreakpoint(): SuspendContextImpl = {
    val suspendContext = waitForBreakpointInner()
    val debugProcessState = DebugProcessState(getDebugProcess)
    val message =
      if (!debugProcessState.isAttached) debugProcessState.description
      else "too long waiting for breakpoint"
    Assert.assertTrue(message, suspendContext != null)
    Assert.assertTrue("resumed context is not expected on breakpoint", !suspendContext.isResumed)
    suspendContext
  }

  /** True when the debuggee terminated (detached) without pausing at any breakpoint. */
  protected def processTerminatedNoBreakpoints(): Boolean = {
    waitForBreakpointInner()
    !DebugProcessState(getDebugProcess).isAttached
  }

  private def waitForBreakpointInner(): SuspendContextImpl = {
    assertNotManagerThread()
    breakpointTracker.waitBreakpoint(30000)
    currentSuspendContext()
  }

  private def assertNotManagerThread(): Unit = {
    Assert.assertTrue("Waiting on manager thread will cause deadlock",
      !DebuggerManagerThreadImpl.isManagerThread)
  }

  // Returns the currently paused suspend context, or null when not paused.
  protected def currentSuspendContext() = {
    val manager = Option(getDebugProcess).map(_.getSuspendManager)
    val context = manager.flatMap(_.getPausedContext.toOption)
    context.orNull
  }

  protected def currentLocation() = managed {
    val suspendContext = currentSuspendContext()
    suspendContext.getFrameProxy.getStackFrame.location
  }

  protected def evaluationContext() = managed {
    val suspendContext = currentSuspendContext()
    new EvaluationContextImpl(suspendContext, suspendContext.getFrameProxy, suspendContext.getFrameProxy.thisObject())
  }

  protected def currentSourcePosition = managed {
    ContextUtil.getSourcePosition(currentSuspendContext())
  }

  /**
   * Evaluates `codeText` in the current paused frame and renders the result.
   *
   * @param renderSelfAsString false to skip rendering resulting value as string. It can be helpful if the value
   *                           is ignored and `toString` takes long time for some object
   *                           (e.g. see [[renderers.ScalaCollectionRendererTestBase.testQueueWithLongToStringChildren]]
   */
  protected def evalResult(codeText: String, renderSelfAsString: Boolean = true): String = {
    val ctx = evaluationContext()
    val factory = new ScalaCodeFragmentFactory()
    val factoryWrapper = new CodeFragmentFactoryContextWrapper(factory)
    val evaluatorBuilder: EvaluatorBuilder = factory.getEvaluatorBuilder
    // Multi-line snippets are compiled as code blocks, one-liners as expressions.
    val kind =
      if (codeText.contains("\n")) CodeFragmentKind.CODE_BLOCK
      else CodeFragmentKind.EXPRESSION
    val contextElement = managed {
      ContextUtil.getContextElement(ctx)
    }
    val textWithImports = new TextWithImportsImpl(kind, codeText)
    val fragment = inReadAction {
      val fragment = factoryWrapper.createCodeFragment(textWithImports, contextElement, getProject)
      fragment.forceResolveScope(GlobalSearchScope.allScope(getProject))
      DebuggerUtils.checkSyntax(fragment)
      fragment
    }
    inSuspendContextAction(60.seconds, "Too long evaluate expression: " + codeText) {
      val value = Try {
        val evaluator = inReadAction {
          evaluatorBuilder.build(fragment, currentSourcePosition)
        }
        evaluator.evaluate(ctx)
      }
      // Evaluation failures are folded into the returned string so tests can match on them.
      value match {
        case Success(_: VoidValue) => "undefined"
        case Success(v) =>
          if (renderSelfAsString)
            DebuggerUtils.getValueAsString(ctx, v)
          else
            "<skipped rendering self as string>"
        case Failure(e: EvaluateException) => e.getMessage + stacktraces(e)
        case Failure(e: Throwable) => "Other error: " + e.getMessage + stacktraces(e)
      }
    }
  }

  private def stacktraces(e: Throwable): String = e match {
    case e: EvaluateException =>
      val cause = Option(e.getCause).map(cause => s"Caused by: ${stacktraces(cause)})").getOrElse("")
      s"""
         |${stacktrace(e)}
         |
         |$cause""".stripMargin
    case e =>
      s"""
         |${stacktrace(e)}""".stripMargin
  }

  private def stacktrace(e: Throwable): String = e.getStackTrace.mkString("\n")

  /** Runs `callback` via `schedule`, failing with a full thread dump if it doesn't finish within `timeout`. */
  private def waitScheduledAction[T](timeout: Duration, timeoutMsg: String, callback: => T)
                                    (schedule: (=> Unit) => Unit): T = {
    val result = Ref.create[T]()
    val semaphore = new Semaphore()
    semaphore.down()
    schedule {
      result.set(callback)
      semaphore.up()
    }
    val finished = semaphore.waitFor(timeout.toMillis)
    if (!finished) {
      semaphore.up()
      val messageWithDump =
        s"""$timeoutMsg
           |
           |${ThreadDumper.dumpThreadsToString()}
      """.stripMargin
      Assert.fail(messageWithDump)
    }
    result.get
  }

  /** Runs `callback` as a suspend-context command on the debugger manager thread. */
  protected def inSuspendContextAction[T](timeout: Duration, timeoutMsg: String)(callback: => T): T = {
    val context = currentSuspendContext()
    val process = getDebugProcess
    assertNotManagerThread()
    waitScheduledAction(timeout, timeoutMsg, callback) { body =>
      process.getManagerThread.schedule(new SuspendContextCommandImpl(context) {
        override def contextAction(suspendContext: SuspendContextImpl): Unit = body
      })
    }
  }

  /** Executes `callback` on the debugger manager thread (directly, when already on it). */
  protected def managed[T >: Null](callback: => T): T = {
    if (DebuggerManagerThreadImpl.isManagerThread) callback
    else {
      waitScheduledAction(30.seconds, "Too long debugger action", callback) { body =>
        getDebugProcess.getManagerThread.invoke(() => body)
      }
    }
  }

  protected def evalEquals(codeText: String, expected: String): Unit = {
    Assert.assertEquals(s"Evaluating:\n $codeText", expected, evalResult(codeText))
  }

  protected def evalStartsWith(codeText: String, startsWith: String): Unit = {
    val result = evalResult(codeText)
    Assert.assertTrue(s"Evaluating:\n $codeText,\n $result doesn't starts with $startsWith",
      result.startsWith(startsWith))
  }

  /** Runs the debugger and, at the first breakpoint, checks each (fragment, expected result) pair. */
  protected def evaluateCodeFragments(mainClass: String, fragmentsWithResults: (String, String)*): Unit = {
    runDebugger(mainClass) {
      waitForBreakpoint()
      fragmentsWithResults.foreach {
        case (fragment, result) => evalEquals(fragment.stripMargin.trim().replace("\r", ""), result)
      }
    }
  }

  protected def evaluateCodeFragments(fragmentsWithResults: (String, String)*): Unit =
    evaluateCodeFragments(mainClassName, fragmentsWithResults: _*)

  /** Resumes execution, waits for the next breakpoint, then runs `action`. */
  def atNextBreakpoint(action: => Unit): Unit = {
    resume()
    waitForBreakpoint()
    action
  }

  // Hook for subclasses that need extra libraries in the test project.
  protected def addOtherLibraries(): Unit = {}

  /** Asserts the debuggee is paused at `source`:`methodName`:`lineNumber` (1-based line). */
  def checkLocation(source: String, methodName: String, lineNumber: Int)(implicit suspendContext: SuspendContextImpl): Unit = {
    def format(s: String, mn: String, ln: Int) = s"$s:$mn:$ln"
    val location = currentLocation()
    val expected = format(source, methodName, lineNumber)
    val actualLine = inReadAction {
      positionManager.getSourcePosition(location).getLine
    }
    val actual = format(location.sourceName, location.method().name(), actualLine + 1)
    Assert.assertEquals("Wrong location:", expected, actual)
  }

  /** Adds a source file, stripping [[bp]] markers and registering a breakpoint per marked line. */
  protected def addFileWithBreakpoints(path: String, fileText: String): Unit = {
    val breakpointLines =
      for {
        (line, idx) <- fileText.linesIterator.zipWithIndex
        if line.contains(bp)
      } yield idx
    val cleanedText = fileText.replace(bp, "")
    addSourceFile(path, cleanedText)
    breakpointLines.foreach(addBreakpoint(_, path))
  }

  // Idempotent, synchronized: safe to call from both the process listener and the launcher.
  private def tryInitBreakpointTracker(): Unit = synchronized {
    if (breakpointTracker == null) {
      breakpointTracker = new BreakpointTracker
      breakpointTracker.addListener(getDebugProcess)
    }
  }

  //should be initialized before debug process is started
  private class BreakpointTracker() {
    // Down while the debuggee is running; raised when paused at a breakpoint or detached.
    private val breakpointSemaphore = new Semaphore()
    breakpointSemaphore.down()

    //safety net against not running tests at all
    private var _wasAtBreakpoint: Boolean = false

    private val breakpointListener = new DebugProcessAdapterImpl {
      override def resumed(suspendContext: SuspendContextImpl): Unit = {
        breakpointSemaphore.down()
      }

      override def paused(suspendContext: SuspendContextImpl): Unit = {
        _wasAtBreakpoint = true
        breakpointSemaphore.up()
      }

      override def processDetached(process: DebugProcessImpl, closedByUser: Boolean): Unit = {
        // Release waiters on detach so tests don't hang waiting for a breakpoint.
        breakpointSemaphore.up()
      }
    }

    def waitBreakpoint(msTimeout: Long): Boolean = breakpointSemaphore.waitFor(msTimeout)

    def wasAtBreakpoint: Boolean = _wasAtBreakpoint

    def addListener(process: DebugProcessImpl): Unit = {
      process.addDebugProcessListener(breakpointListener)
    }

    def removeListener(process: DebugProcessImpl): Unit = {
      process.removeDebugProcessListener(breakpointListener)
      breakpointSemaphore.up()
    }
  }
}
case class Loc(className: String, methodName: String, line: Int) | JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/debugger/ScalaDebuggerTestCase.scala | Scala | apache-2.0 | 19,292 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml
import org.apache.spark.SparkException
import org.apache.spark.ml.functions.vector_to_array
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.util.MLTest
import org.apache.spark.mllib.linalg.{Vectors => OldVectors}
import org.apache.spark.sql.functions.col
/** Tests for the ML `functions` helpers (currently `vector_to_array`). */
class FunctionsSuite extends MLTest {
  import testImplicits._
  test("test vector_to_array") {
    // Both new-style (ml) and old-style (mllib) vectors must convert; sparse
    // vectors are densified into full Double sequences.
    val df = Seq(
      (Vectors.dense(1.0, 2.0, 3.0), OldVectors.dense(10.0, 20.0, 30.0)),
      (Vectors.sparse(3, Seq((0, 2.0), (2, 3.0))), OldVectors.sparse(3, Seq((0, 20.0), (2, 30.0))))
    ).toDF("vec", "oldVec")
    val result = df.select(vector_to_array('vec), vector_to_array('oldVec))
      .as[(Seq[Double], Seq[Double])]
      .collect().toSeq
    val expected = Seq(
      (Seq(1.0, 2.0, 3.0), Seq(10.0, 20.0, 30.0)),
      (Seq(2.0, 0.0, 3.0), Seq(20.0, 0.0, 30.0))
    )
    assert(result === expected)
    // Null vectors and non-vector columns must be rejected with a clear error
    // raised during execution (hence intercepting around the action).
    val df2 = Seq(
      (Vectors.dense(1.0, 2.0, 3.0),
        OldVectors.dense(10.0, 20.0, 30.0), 1),
      (null, null, 0)
    ).toDF("vec", "oldVec", "label")
    for ((colName, valType) <- Seq(
        ("vec", "null"), ("oldVec", "null"), ("label", "java.lang.Integer"))) {
      val thrown1 = intercept[SparkException] {
        df2.select(vector_to_array(col(colName))).count
      }
      assert(thrown1.getCause.getMessage.contains(
        "function vector_to_array requires a non-null input argument and input type must be " +
        "`org.apache.spark.ml.linalg.Vector` or `org.apache.spark.mllib.linalg.Vector`, " +
        s"but got ${valType}"))
    }
  }
}
| ptkool/spark | mllib/src/test/scala/org/apache/spark/ml/FunctionsSuite.scala | Scala | apache-2.0 | 2,405 |
package com.munchii.sbt.resolver
import com.amazonaws.services.s3.{AmazonS3, AmazonS3URI}
import com.amazonaws.services.s3.model.{ListObjectsRequest, ObjectMetadata}
import java.io.ByteArrayInputStream
import java.nio.charset.StandardCharsets
import scala.annotation.tailrec
import scala.collection.JavaConverters._
/** Generates and uploads a static directory-listing HTML document for every
  * "directory" (common prefix) under the given S3 URI.
  *
  * @param uri                      bucket (and prefix) to index
  * @param s3Client                 client used for listing and uploading
  * @param directoryListingDocument object name of each generated index document
  */
case class S3DirectoryListing(uri: AmazonS3URI, s3Client: AmazonS3, directoryListingDocument: String) {

  /** Walks the bucket breadth-first starting at the root prefix and publishes
    * an index document for each directory encountered.
    */
  def publish(): Unit = {
    visit(Seq(""))
  }

  // Breadth-first traversal: publish the head prefix, then queue every
  // sub-directory (keys ending in '/') discovered under it.
  @tailrec
  private def visit(prefixes: Seq[String]): Unit = {
    if (prefixes.nonEmpty) {
      val initialRequest = new ListObjectsRequest()
        .withBucketName(uri.getBucket())
        .withPrefix(prefixes.head)
        .withDelimiter("/")
      val keys = list(initialRequest, Seq())
      publish(prefixes.head, keys)
      visit(prefixes.drop(1) ++ keys.filter(_.endsWith("/")))
    }
  }

  // Accumulates all keys under the request's prefix, following S3 pagination markers.
  @tailrec
  private def list(request: ListObjectsRequest, keys: Seq[String]): Seq[String] = {
    val listing = s3Client.listObjects(request)
    val result = keys ++
      listing.getCommonPrefixes().asScala ++ // Add "directories"
      listing.getObjectSummaries().asScala.map(_.getKey) // Add "files"
    Option(listing.getNextMarker) match {
      case Some(nextMarker) => list(request.withMarker(nextMarker), result)
      case None => result
    }
  }

  // Renders the HTML index for `prefix` and uploads it as `directoryListingDocument`.
  private def publish(prefix: String, keys: Seq[String]): Unit = {
    println(s"Publishing index of /$prefix")
    val directoryTemplate = new StringBuilder()
    directoryTemplate ++= s"""<!DOCTYPE html><html lang="en">"""
    directoryTemplate ++= s"""<head><meta charset="utf-8"><title>Index of /$prefix</title></head>"""
    directoryTemplate ++= s"""<body><h1>Index of /$prefix</h1><ul>"""
    if (!prefix.isEmpty()) {
      directoryTemplate ++= s"""<li><a href="..">Parent Directory</a></li>"""
    }
    // TODO: Sort for aesthetics?
    for (key <- keys
         if !key.endsWith(directoryListingDocument)) {
      directoryTemplate ++= s"""<li><a href="${key.stripPrefix(prefix)}">${key.stripPrefix(prefix)}</a></li>"""
    }
    directoryTemplate ++= s"""</ul><hr />Published using <a href="https://github.com/munchii/sbt-s3-resolver/">munchii / sbt-s3-resolver</a>"""
    directoryTemplate ++= s"""</body></html>"""
    // Fix: encode explicitly as UTF-8. The document declares charset=utf-8, so
    // using the platform default charset (the old `getBytes`) could corrupt
    // non-ASCII object keys on machines with a different default encoding.
    val bytes = directoryTemplate.toString.getBytes(StandardCharsets.UTF_8)
    val metadata = new ObjectMetadata()
    metadata.setContentType("text/html")
    metadata.setContentLength(bytes.length)
    s3Client.putObject(uri.getBucket(), prefix + directoryListingDocument, new ByteArrayInputStream(bytes), metadata)
  }
}
package io.catbird
package util
import cats.Eq
import cats.instances.int._
import cats.instances.tuple._
import cats.kernel.laws.discipline.{ MonoidTests, SemigroupTests }
import cats.laws.discipline._
import com.twitter.concurrent.AsyncStream
import com.twitter.conversions.DurationOps._
/** Law-checking suite for the cats instances defined on Twitter's `AsyncStream`. */
class AsyncStreamSuite extends CatbirdSuite with AsyncStreamInstances with ArbitraryInstances {
  // Eq instances compare streams after forcing them with a 1-second timeout.
  implicit val eqAsyncStreamInt: Eq[AsyncStream[Int]] = asyncStreamEq(1.second)
  implicit val eqAsyncStreamAsyncStreamInt: Eq[AsyncStream[AsyncStream[Int]]] = asyncStreamEq(1.second)
  implicit val eqAsyncStreamIntIntInt: Eq[AsyncStream[(Int, Int, Int)]] = asyncStreamEq[(Int, Int, Int)](1.second)
  checkAll("AsyncStream[Int]", AlternativeTests[AsyncStream].alternative[Int, Int, Int])
  checkAll("AsyncStream[Int]", MonadTests[AsyncStream].monad[Int, Int, Int])
  checkAll("AsyncStream[Int]", SemigroupTests[AsyncStream[Int]](asyncStreamSemigroup[Int]).semigroup)
  checkAll("AsyncStream[Int]", MonoidTests[AsyncStream[Int]].monoid)
}
| travisbrown/catbird | util/src/test/scala/io/catbird/util/asyncstream.scala | Scala | apache-2.0 | 1,026 |
package nodescala
import scala.language.postfixOps
import scala.util.{Try, Success, Failure}
import scala.collection._
import scala.concurrent._
import ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.async.Async.{async, await}
import org.scalatest._
import NodeScala._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
@RunWith(classOf[JUnitRunner])
class NodeScalaSuite extends FunSuite {

  test("A Future should always be completed") {
    val always = Future.always(517)
    // An "always" future is already completed, so a zero-timeout await must succeed.
    assert(Await.result(always, 0 nanos) == 517)
  }

  test("A Future should never be completed") {
    val never = Future.never[Int]
    try {
      // Awaiting a never-completing future must time out instead of yielding a value.
      Await.result(never, 1 second)
      assert(false)
    } catch {
      case t: TimeoutException => // ok!
    }
  }

  /** Test double for an HTTP exchange: collects written chunks into `response`
    * and completes `loaded` with the full response text on close().
    */
  class DummyExchange(val request: Request) extends Exchange {
    @volatile var response = ""
    val loaded = Promise[String]()
    def write(s: String) {
      response += s
    }
    def close() {
      loaded.success(response)
    }
  }

  /** Test double for a listener: remembers the installed handler and lets tests
    * push requests to it directly via emit().
    */
  class DummyListener(val port: Int, val relativePath: String) extends NodeScala.Listener {
    self =>
    @volatile private var started = false
    var handler: Exchange => Unit = null
    def createContext(h: Exchange => Unit) = this.synchronized {
      assert(started, "is server started?")
      handler = h
    }
    def removeContext() = this.synchronized {
      assert(started, "is server started?")
      handler = null
    }
    // Marks the listener started; the returned subscription un-starts it.
    def start() = self.synchronized {
      started = true
      new Subscription {
        def unsubscribe() = self.synchronized {
          started = false
        }
      }
    }
    // Delivers a request to the installed handler (if any) and returns the exchange.
    def emit(req: Request) = {
      val exchange = new DummyExchange(req)
      if (handler != null) handler(exchange)
      exchange
    }
  }

  /** Test double for the server: tracks one DummyListener per relative path. */
  class DummyServer(val port: Int) extends NodeScala {
    self =>
    val listeners = mutable.Map[String, DummyListener]()
    def createListener(relativePath: String) = {
      val l = new DummyListener(port, relativePath)
      listeners(relativePath) = l
      l
    }
    // Routes a request to the listener registered for `relativePath`.
    def emit(relativePath: String, req: Request) = this.synchronized {
      val l = listeners(relativePath)
      l.emit(req)
    }
  }

  test("Server should serve requests") {
    val dummy = new DummyServer(8191)
    // Handler echoes each key/value pair of the request, one per line.
    val dummySubscription = dummy.start("/testDir") {
      request => for (kv <- request.iterator) yield (kv + "\\n").toString
    }
    // wait until server is really installed
    Thread.sleep(500)
    // Emits a request and checks the served content matches the echoed pairs.
    def test(req: Request) {
      val webpage = dummy.emit("/testDir", req)
      val content = Await.result(webpage.loaded.future, 1 second)
      val expected = (for (kv <- req.iterator) yield (kv + "\\n").toString).mkString
      assert(content == expected, s"'$content' vs. '$expected'")
    }
    test(immutable.Map("StrangeRequest" -> List("Does it work?")))
    test(immutable.Map("StrangeRequest" -> List("It works!")))
    test(immutable.Map("WorksForThree" -> List("Always works. Trust me.")))
    dummySubscription.unsubscribe()
  }
}
| wezil/principles-reactive-programming | nodescala/tests.scala | Scala | mit | 3,077 |
package ohnosequences.tabula.impl
import ohnosequences.cosas._, records._, fns._, types._
import ohnosequences.cosas.ops.typeSets._
import ohnosequences.tabula._
import ImplicitConversions._
import com.amazonaws.services.dynamodbv2.model._
// TODO check region of clients
/** Bundles the implicit action-to-executor conversions for DynamoDB operations,
  * each one backed by the supplied client. Importing the members of an instance
  * brings the executors into implicit scope.
  */
case class DynamoDBExecutors(dynamoClient: AnyDynamoDBClient) {
  /* CREATE TABLE */
  implicit def createTableExecutor[A <: action.AnyCreateTable](a: A):
    CreateTableExecutor[A] =
    CreateTableExecutor[A](dynamoClient)
  /* DELETE TABLE */
  implicit def deleteTableExecutor[A <: action.AnyDeleteTable](a: A):
    DeleteTableExecutor[A] =
    DeleteTableExecutor[A](dynamoClient)
  /* DESCRIBE TABLE */
  implicit def describeTableExecutor[A <: action.AnyDescribeTable](a: A):
    DescribeTableExecutor[A] =
    DescribeTableExecutor[A](dynamoClient)
  /* UPDATE TABLE */
  implicit def updateTableExecutor[A <: action.AnyUpdateTable](a: A):
    UpdateTableExecutor[A] =
    UpdateTableExecutor[A](dynamoClient)
  /* PUT ITEM */
  // Requires a serializer from the item's raw representation to the SDK form.
  implicit def putItemExecutor[A <: action.AnyPutItem](a: A)(implicit
    serializer: A#Item#Raw SerializeTo SDKRep
  ): PutItemExecutor[A] =
    PutItemExecutor[A](dynamoClient, serializer)
  // /* GET ITEM */
  // // implicit def getItemHashKeyExecutor[A <: AnyGetItemHashKeyAction with SDKRepParser](a: A):
  // //   GetItemHashKeyExecutor[A] =
  // //   GetItemHashKeyExecutor[A](a)(dynamoClient)
  // // implicit def getItemCompositeKeyExecutor[A <: AnyGetItemCompositeKeyAction with SDKRepParser](a: A):
  // //   GetItemCompositeKeyExecutor[A] =
  // //   GetItemCompositeKeyExecutor[A](a)(dynamoClient)
  /* QUERY */
  // implicit def queryExecutor[A0 <: AnyQueryAction, A <: AnyQueryAction.Q[A0]](a: A)(implicit
  //   parser: (PropertiesOf[A#Item] ParseFrom SDKRep) with out[RawOf[A#Item]]
  // ): QueryExecutor[A0, A] =
  //   QueryExecutor[A0, A](a)(dynamoClient, parser)
  // implicit def queryExecutor[A <: AnyQuery](a: A)(implicit
  //   parser: (A#Item#Properties ParseFrom SDKRep) with out[A#Item#Raw]
  // ): QueryExecutor[A] =
  //   QueryExecutor[A](dynamoClient, parser)
  // /* DELETE ITEM */
  // // implicit def deleteItemHashKeyExecutor[A <: AnyDeleteItemHashKeyAction](a: A):
  // //   DeleteItemHashKeyExecutor[A] =
  // //   DeleteItemHashKeyExecutor[A](a)(dynamoClient)
  // // implicit def deleteItemCompositeKeyExecutor[A <: AnyDeleteItemCompositeKeyAction](a: A):
  // //   DeleteItemCompositeKeyExecutor[A] =
  // //   DeleteItemCompositeKeyExecutor[A](a)(dynamoClient)
}
| ohnosequences/tabula | src/main/scala/tabula/impl/DynamoDBExecutors.scala | Scala | agpl-3.0 | 2,554 |
package net.zzorn.controls
import org.scalaprops.Bean
import net.zzorn.Settings
/**
  * Movement speed settings for the player controls, one property per direction.
  * Each value is an editable bean property exposed as a 0-100 slider.
  */
class SpeedSettings extends Settings {
  val strafeSpeed = p('strafeSpeed, 10f).editor(makeSlider(0, 100))
  val forwardSpeed = p('forwardSpeed, 30f).editor(makeSlider(0, 100))
  val backSpeed = p('backSpeed, 5f).editor(makeSlider(0, 100))
  val upSpeed = p('upSpeed, 0f).editor(makeSlider(0, 100))
  val downSpeed = p('downSpeed, 0f).editor(makeSlider(0, 100))
}
//package demesne.module
//
//import akka.actor.{ActorSystem, Props}
//import com.typesafe.config.Config
//import shapeless.Lens
//import demesne._
//import demesne.testkit.AggregateRootSpec
//import org.scalatest.Tag
//import omnibus.archetype.domain.model.core.{Entity, EntityLensProvider}
//import org.scalatest.concurrent.ScalaFutures
//import demesne.index.IndexSpecification
//import demesne.module.SimpleAggregateModuleSpec.Foo
//import omnibus.identifier.Identifying
//
//object SimpleAggregateModuleSpec {
//
// trait Foo extends Entity[Foo] {
// def isActive: Boolean
// def f: Int
// def b: Double
// def z: String
// }
//
// object Foo extends EntityLensProvider[Foo] {
// override implicit val identifying: Identifying[Foo] = new Identifying.ByShortUuid[Foo]
//
// override val idLens: Lens[Foo, Foo#TID] = new Lens[Foo, Foo#TID] {
// override def get( f: Foo ): Foo#TID = f.id
// override def set( f: Foo )( id: Foo#TID ): Foo = {
// FooImpl(
// id = id,
// name = f.name,
// slug = f.slug,
// isActive = f.isActive,
// f = f.f,
// b = f.b,
// z = f.z
// )
// }
// }
//
// override val nameLens: Lens[Foo, String] = new Lens[Foo, String] {
// override def get( f: Foo ): String = f.name
// override def set( f: Foo )( n: String ): Foo = {
// FooImpl(
// id = f.id,
// name = n,
// slug = f.slug,
// isActive = f.isActive,
// f = f.f,
// b = f.b,
// z = f.z
// )
// }
// }
//
// val slugLens: Lens[Foo, String] = new Lens[Foo, String] {
// override def get( f: Foo ): String = f.slug
// override def set( f: Foo )( s: String ): Foo = {
// FooImpl(
// id = f.id,
// name = f.name,
// slug = s,
// isActive = f.isActive,
// f = f.f,
// b = f.b,
// z = f.z
// )
// }
// }
//
// val isActiveLens: Lens[Foo, Boolean] = new Lens[Foo, Boolean] {
// override def get( f: Foo ): Boolean = f.isActive
// override def set( f: Foo )( a: Boolean ): Foo = {
// FooImpl( id = f.id, name = f.name, slug = f.slug, isActive = a, f = f.f, b = f.b, z = f.z )
// }
// }
//
// final case class FooImpl(
// override val id: Foo#TID,
// override val name: String,
// override val slug: String,
// override val isActive: Boolean = true,
// override val f: Int = 0,
// override val b: Double = 0.0,
// override val z: String = ""
// ) extends Foo
//
// object Protocol extends AggregateProtocol[Foo] {
// case class Bar( targetId: Bar#TID, b: Int ) extends Command
// case class Barred( sourceId: Barred#TID, b: Int ) extends Event
// }
//
// object FooAggregateRoot {
//
// val module: SimpleAggregateModule[Foo] = {
// val b = SimpleAggregateModule.builderFor[Foo].make
// import b.P.{ Props => BProps, _ }
//
// b.builder
// // .set( BTag, 'fooTAG )
// .set( BProps, FooActor.props( _, _ ) )
// .build()
// }
//
// // object FooActor {
// // def props( model: DomainModel, meta: AggregateRootType ): Props = ???
// // }
// }
// }
//}
//
//abstract class SimpleAggregateModuleSpec extends AggregateRootSpec[Foo] with ScalaFutures {
// import SimpleAggregateModuleSpec.Foo
//
//// override type State = Foo
//// override type ID = ShortUUID
// override type Protocol = SimpleAggregateModuleSpec.Protocol.type
// override val protocol: Protocol = SimpleAggregateModuleSpec.Protocol
//
// override def createAkkaFixture( test: OneArgTest, system: ActorSystem, slug: String ): Fixture = {
// new TestFixture( slug, system )
// }
//
// override type Fixture = TestFixture
//
// class TestFixture( _slug: String, _system: ActorSystem )
// extends AggregateFixture( _slug, _system ) {
//
// override val module: AggregateRootModule[Foo] = {
// SimpleAggregateModuleSpec.FooAggregateRoot.module
// }
//
// override def rootTypes: Set[AggregateRootType] = Set( module.rootType )
//
// override def nextId(): TID = SimpleAggregateModuleSpec.Foo.identifying.next
// }
//
// object ADD extends Tag( "add" )
// object UPDATE extends Tag( "update" )
// object GOOD extends Tag( "good" )
//
// "Module should" should {
// import SimpleAggregateModuleSpec._
//
// "simple" in { fixture: Fixture =>
// import fixture._
// val expected = SimpleAggregateModule.SimpleAggregateModuleImpl[Foo, Foo#ID](
//// aggregateIdTag = 'fooTAG,
// aggregateRootPropsOp = FooAggregateRoot.FooActor.props( _, _ ),
// passivateTimeout = AggregateRootType.DefaultPassivation,
// snapshotPeriod = Some( AggregateRootType.DefaultSnapshotPeriod ),
// startTask = StartTask.empty( "simple" ),
// environment = AggregateEnvironment.Resolver.local,
// clusterRole = None,
// indexes = Seq.empty[IndexSpecification]
// )
//
// val b = SimpleAggregateModule.builderFor[Foo, Foo#ID].make
// import b.P.{ Props => BProps, _ }
// val actual = b.builder
//// .set( BTag, 'fooTAG )
// .set( BProps, FooAggregateRoot.FooActor.props( _, _ ) )
// .build
//
//// actual.aggregateIdTag must equal( 'fooTAG )
// }
//
// "build module" in { fixture: Fixture =>
// import fixture._
//
// val expected = SimpleAggregateModule.SimpleAggregateModuleImpl[Foo, Foo#ID](
//// aggregateIdTag = 'fooTAG,
// aggregateRootPropsOp = FooAggregateRoot.FooActor.props( _, _ ),
// passivateTimeout = AggregateRootType.DefaultPassivation,
// snapshotPeriod = Some( AggregateRootType.DefaultSnapshotPeriod ),
// startTask = StartTask.empty( "expected" ),
// environment = AggregateEnvironment.Resolver.local,
// clusterRole = None,
// indexes = Seq.empty[IndexSpecification]
// )
//
// FooAggregateRoot.module must equal( expected )
// }
//
////todo resolve test status -- incorporate into EntityAggregateModuleSpec?
//
// // "not respond before added" in { fixture: Fixture =>
// // import fixture._
//
// // system.eventStream.subscribe( bus.ref, classOf[Envelope] )
//
// // val id = Module.nextId
// // val t = Module aggregateOf id
// // t !+ Module.Rename( id, "foobar" )
// // bus.expectNoMsg( 200.millis.dilated )
// // }
//
// // "add foo" taggedAs(ADD) in { fixture: Fixture =>
// // import fixture._
//
// // system.eventStream.subscribe( bus.ref, classOf[Envelope] )
//
// // val id = Module.nextId
// // val foo = FooImpl(id, "foo1", "f1", true, 17, 3.14159, "zedster")
// // val f = Module aggregateOf id
// // f !+ Module.Add( foo )
// // bus.expectMsgPF( max = 800.millis.dilated, hint = "foo added" ) { //DMR: Is this sensitive to total num of tests executed?
// // case Envelope( payload: Module.Added, _ ) => payload.info.name mustBe "foo1"
// // }
// // }
//
// // "update name" taggedAs(UPDATE) in { fixture: Fixture =>
// // import fixture._
// // system.eventStream.subscribe( bus.ref, classOf[Envelope] )
//
// // val id = Module.nextId
// // val f = Module aggregateOf id
// // f !+ Module.Add( FooImpl(id, "foo1", "f1", true, 17, 3.14159, "zedster") )
// // bus.expectMsgPF( max = 800.millis.dilated, hint = "foo added" ) {
// // case Envelope( payload: Module.Added, _ ) => payload.info.name mustBe "foo1"
// // }
//
// // f !+ Module.Rename( id, "good-foo" )
// // bus.expectMsgPF( max = 800.millis.dilated, hint = "foo renamed" ) {
// // case Envelope( payload: Module.Renamed, _ ) => {
// // payload.sourceId mustBe id
// // payload.oldName mustBe "foo1"
// // payload.newName mustBe "good-foo"
// // }
// // }
// // }
//
// // "update slug" in { fixture: Fixture =>
// // import fixture._
// // system.eventStream.subscribe( bus.ref, classOf[Envelope] )
//
// // val id = Module.nextId
// // val f = Module aggregateOf id
// // f !+ Module.Add( FooImpl(id, "foo1", "f1", true, 17, 3.14159, "zedster") )
// // bus.expectMsgPF( max = 800.millis.dilated, hint = "foo added" ) {
// // case Envelope( payload: Module.Added, _ ) => payload.info.name mustBe "foo1"
// // }
//
// // val newSlug = "gt"
// // f !+ Module.Reslug( id, newSlug )
// // bus.expectMsgPF( max = 800.millis.dilated, hint = "foo slug changed" ) {
// // case Envelope( payload: Module.Reslugged, _ ) => {
// // payload.sourceId mustBe id
// // payload.oldSlug mustBe "f1"
// // payload.newSlug mustBe "gt"
// // }
// // }
// // }
//
// // "disable aggregate" in { fixture: Fixture =>
// // import fixture._
// // system.eventStream.subscribe( bus.ref, classOf[Envelope] )
//
// // val id = Module.nextId
// // val f = Module aggregateOf id
// // f !+ Module.Add( FooImpl(id, "foo1", "f1", true, 17, 3.14159, "zedster") )
// // bus.expectMsgPF( max = 800.millis.dilated, hint = "foo added" ) {
// // case Envelope( payload: Module.Added, _ ) => payload.info.name mustBe "foo1"
// // }
//
// // f !+ Module.Disable( id )
// // bus.expectMsgPF( max = 800.millis.dilated, hint = "foo disabled" ) {
// // case Envelope( payload: Module.Disabled, _ ) => {
// // payload.sourceId mustBe id
// // payload.slug mustBe "f1"
// // }
// // }
// // }
//
// // "enable from disable aggregate" in { fixture: Fixture =>
// // import fixture._
// // system.eventStream.subscribe( bus.ref, classOf[Envelope] )
//
// // val id = Module.nextId
// // val f = Module aggregateOf id
// // f !+ Module.Add( FooImpl(id, "foo1", "f1", true, 17, 3.14159, "zedster") )
// // bus.expectMsgPF( max = 800.millis.dilated, hint = "foo added" ) {
// // case Envelope( payload: Module.Added, _ ) => payload.info.name mustBe "foo1"
// // }
//
// // f !+ Module.Disable( id )
// // bus.expectMsgPF( max = 800.millis.dilated, hint = "foo disabled" ) {
// // case Envelope( payload: Module.Disabled, _ ) => {
// // payload.sourceId mustBe id
// // payload.slug mustBe "f1"
// // }
// // }
//
// // f !+ Module.Enable( id )
// // bus.expectMsgPF( max = 800.millis.dilated, hint = "foo enabled" ) {
// // case Envelope( payload: Module.Enabled, _ ) => {
// // payload.sourceId mustBe id
// // payload.slug mustBe "f1"
// // }
// // }
// // }
//
// // "recorded in slug index" in { fixture: Fixture =>
// // import fixture._
//
// // val id = Module.nextId
// // val f1 = FooImpl(id, "foo1", "f1", true, 17, 3.14159, "zedster")
//
// // system.eventStream.subscribe( bus.ref, classOf[Envelope] )
//
// // val f = Module aggregateOf id
// // f !+ Module.Add( f1 )
// // bus.expectMsgPF( max = 800.millis.dilated, hint = "foo added" ) { //DMR: Is this sensitive to total num of tests executed?
// // case Envelope( payload: Module.Added, _ ) => payload.info.name mustBe "foo1"
// // }
//
// // val countDown = new CountDownFunction[String]
// // countDown await 200.millis.dilated
//
// // whenReady( slugIndex.futureGet( "f1" ) ) { result => result mustBe Some(id) }
// // trace( s"""index:f1 = ${slugIndex.get("f1")}""" )
// // slugIndex.get( "f1" ) mustBe Some(id)
// // }
//
// // "enablement actions translate in slug index" taggedAs(WIP) in { fixture: Fixture =>
// // import fixture._
//
// // val id = Module.nextId
// // val f1 = FooImpl(id, "foo1", "f1", true, 17, 3.14159, "zedster")
//
// // system.eventStream.subscribe( bus.ref, classOf[Envelope] )
//
// // val f = Module aggregateOf id
// // f !+ Module.Add( f1 )
// // bus.expectMsgPF( max = 800.millis.dilated, hint = "foo added" ) { //DMR: Is this sensitive to total num of tests executed?
// // case Envelope( payload: Module.Added, _ ) => payload.info.name mustBe "foo1"
// // }
//
// // new CountDownFunction[String] await 200.millis.dilated
// // whenReady( slugIndex.futureGet( "f1" ) ) { result => result mustBe Some(id) }
// // trace( s"""index:f1 = ${slugIndex.get("f1")}""" )
// // slugIndex.get( "f1" ) mustBe Some(id)
//
// // f !+ Module.Disable( id )
// // new CountDownFunction[String] await 200.millis.dilated
// // whenReady( slugIndex.futureGet( "f1" ) ) { result => result mustBe None }
// // trace( s"""index:f1 = ${slugIndex.get("f1")}""" )
// // slugIndex.get( "f1" ) mustBe None
//
// // f !+ Module.Enable( id )
// // new CountDownFunction[String] await 200.millis.dilated
// // whenReady( slugIndex.futureGet( "f1" ) ) { result => result mustBe Some(id) }
// // trace( s"""index:f1 = ${slugIndex.get("f1")}""" )
// // slugIndex.get( "f1" ) mustBe Some(id)
//
// // f !+ Module.Enable( id )
// // new CountDownFunction[String] await 200.millis.dilated
// // whenReady( slugIndex.futureGet( "f1" ) ) { result => result mustBe Some(id) }
// // trace( s"""index:f1 = ${slugIndex.get("f1")}""" )
// // slugIndex.get( "f1" ) mustBe Some(id)
// // }
//
// }
//}
| dmrolfs/demesne | core/src/test/scala/demesne/module/SimpleAggregateModuleSpec.scala | Scala | apache-2.0 | 13,795 |
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import com.twitter.algebird.monad.Reader
import com.twitter.scalding.serialization.macros.impl.ordered_serialization.runtime_helpers.MacroEqualityOrderedSerialization
import com.twitter.scalding.serialization.OrderedSerialization
import java.nio.file.{FileSystems, Files, Path}
import java.util
import org.scalatest.{Matchers, WordSpec}
import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{Await, Future, Promise, ExecutionContext => ConcurrentExecutionContext}
import scala.util.Random
import scala.util.{Failure, Success, Try}
import ExecutionContext._
import com.twitter.scalding.Execution.TempFileCleanup
import org.apache.hadoop.conf.Configuration
/** Reusable Execution definitions shared by the tests below. */
object ExecutionTestJobs {
  /** Classic word count: reads text lines from `in`, writes (word, count) pairs to `out`. */
  def wordCount(in: String, out: String) =
    TypedPipe.from(TextLine(in))
      .flatMap(_.split("\\s+"))
      .map((_, 1L))
      .sumByKey
      .writeExecution(TypedTsv(out))
  /** Word count over an existing pipe, materialized as an in-memory iterable. */
  def wordCount2(in: TypedPipe[String]) =
    in
      .flatMap(_.split("\\s+"))
      .map((_, 1L))
      .sumByKey
      .toIterableExecution
  /** Sums each pipe independently and pairs the two single-value results. */
  def zipped(in1: TypedPipe[Int], in2: TypedPipe[Int]) =
    in1.groupAll.sum.values.toIterableExecution
      .zip(in2.groupAll.sum.values.toIterableExecution)
  /** Merges two branches fanned out from a single reduced source. */
  def mergeFanout(in: List[Int]): Execution[Iterable[(Int, Int)]] = {
    // Force a reduce, so no fancy optimizations kick in
    val source = TypedPipe.from(in).groupBy(_ % 3).head
    (source.mapValues(_ * 2) ++ (source.mapValues(_ * 3))).toIterableExecution
  }
  /** Builds an Execution.write that stages `testData` in `tempFile`, reads it
    * back as-is, and marks the temp file for deletion after completion.
    */
  def writeExecutionWithTempFile(tempFile: String, testData: List[String]): Execution[List[String]] = {
    val writeFn = { (conf: Config, mode: Mode) =>
      (TypedPipe.from(testData), TypedTsv[String](tempFile))
    }
    val readFn = { (conf: Config, mode: Mode) =>
      testData
    }
    val filesToDeleteFn = { (conf: Config, mode: Mode) =>
      Set(tempFile)
    }
    Execution.write(writeFn, readFn, filesToDeleteFn)
  }
}
/** Base class for execution test jobs that runs all "concurrent" work inline
  * on the calling thread and discards failure reports.
  */
abstract class TestExecutionJob[+T](args: Args) extends ExecutionJob[T](args) {
  // In tests, classloader issues with sbt mean we should not
  // really use threads, so we run immediately
  override def concurrentExecutionContext = new scala.concurrent.ExecutionContext {
    def execute(r: Runnable) = r.run
    def reportFailure(t: Throwable) = ()
  }
}
/** Word-count job driven by the `--input` and `--output` command-line args. */
class WordCountEc(args: Args) extends TestExecutionJob[Unit](args) {
  def execution = ExecutionTestJobs.wordCount(args("input"), args("output"))
}
/** Job wrapper around writeExecutionWithTempFile for a single temp file. */
class ExecutionWithTempFiles(args: Args, tempFile: String, testData: List[String]) extends TestExecutionJob[List[String]](args) {
  override def execution = ExecutionTestJobs.writeExecutionWithTempFile(tempFile, testData)
}
/** Job that stages two temp files and zips the two resulting executions,
  * yielding both data lists as a pair.
  */
class ZippedExecutionWithTempFiles(args: Args, tempFileOne: String, tempFileTwo: String, testDataOne: List[String], testDataTwo: List[String]) extends TestExecutionJob[(List[String], List[String])](args) {
  override def execution =
    ExecutionTestJobs
      .writeExecutionWithTempFile(tempFileOne, testDataOne)
      .zip(ExecutionTestJobs.writeExecutionWithTempFile(tempFileTwo, testDataTwo))
}
// Simple case class used as a custom payload type in the tests.
case class MyCustomType(s: String)
class ExecutionTest extends WordSpec with Matchers {
/** Implicit helpers: run an Execution locally (blocking) and assert on the outcome. */
implicit class ExecutionTestHelper[T](ex: Execution[T]) {
  /** Runs `ex` with the default config in local mode and returns its result,
    * failing the test (with the exception message) if the execution fails.
    */
  def shouldSucceed(): T = {
    val r = ex.waitFor(Config.default, Local(true))
    r match {
      case Success(s) => s
      case Failure(e) => fail(s"Failed running execution, exception:\n$e")
    }
  }
  /** Runs `ex` and asserts that it fails. */
  def shouldFail(): Unit = {
    val r = ex.waitFor(Config.default, Local(true))
    assert(r.isFailure)
  }
}
/** Returns the JVM's currently registered application shutdown hooks.
  *
  * The hook map is a private static field of `java.lang.ApplicationShutdownHooks`,
  * so it is read via reflection. Fix: once shutdown has begun the field is set
  * to null, which previously caused a NullPointerException on the cast; we now
  * return an empty sequence in that case.
  */
def getShutdownHooks: Seq[Thread] = {
  // The list of attached shutdown hooks are not accessible normally, so we must use reflection to get them
  val clazz = Class.forName("java.lang.ApplicationShutdownHooks")
  val hooksField = clazz.getDeclaredField("hooks")
  hooksField.setAccessible(true)
  Option(hooksField.get(null)) match {
    case Some(hooks) => hooks.asInstanceOf[util.IdentityHashMap[Thread, Thread]].asScala.keys.toSeq
    case None => Seq.empty
  }
}
/** True if `hook` is an instance of (a subclass of) Execution.TempFileCleanup. */
def isTempFileCleanupHook(hook: Thread): Boolean = {
  classOf[TempFileCleanup].isAssignableFrom(hook.getClass)
}
"An Execution" should {
"run" in {
ExecutionTestJobs.wordCount2(TypedPipe.from(List("a b b c c c", "d d d d")))
.waitFor(Config.default, Local(false)).get.toMap shouldBe Map("a" -> 1L, "b" -> 2L, "c" -> 3L, "d" -> 4L)
}
"run with zip" in {
(ExecutionTestJobs.zipped(TypedPipe.from(0 until 100), TypedPipe.from(100 until 200))
.shouldSucceed() match {
case (it1, it2) => (it1.head, it2.head)
}) shouldBe ((0 until 100).sum, (100 until 200).sum)
}
"lift to try" in {
val res = ExecutionTestJobs
.wordCount2(TypedPipe.from(List("a", "b")))
.liftToTry
.shouldSucceed()
assert(res.isSuccess)
}
"lift to try on exception" in {
val res: Try[Nothing] = ExecutionTestJobs
.wordCount2(TypedPipe.from(List("a", "b")))
.map(_ => throw new RuntimeException("Something went wrong"))
.liftToTry
.shouldSucceed()
assert(res.isFailure)
}
"merge fanouts without error" in {
def unorderedEq[T](l: Iterable[T], r: Iterable[T]): Boolean =
(l.size == r.size) && (l.toSet == r.toSet)
def correct(l: List[Int]): List[(Int, Int)] = {
val in = l.groupBy(_ % 3).mapValues(_.head)
in.mapValues(_ * 2).toList ++ in.mapValues(_ * 3)
}
val input = (0 to 100).toList
val result = ExecutionTestJobs.mergeFanout(input).waitFor(Config.default, Local(false)).get
val cres = correct(input)
unorderedEq(cres, result.toList) shouldBe true
}
"If either fails, zip fails, else we get success" in {
val neverHappens = Promise[Int]().future
Execution.fromFuture { _ => neverHappens }
.zip(Execution.failed(new Exception("oh no")))
.shouldFail()
Execution.failed(new Exception("oh no"))
.zip(Execution.fromFuture { _ => neverHappens })
.shouldFail()
// If both are good, we succeed:
Execution.from(1)
.zip(Execution.from("1"))
.shouldSucceed() shouldBe (1, "1")
}
"Config transformer will isolate Configs" in {
def doesNotHaveVariable(message: String) = Execution.getConfig.flatMap { cfg =>
if (cfg.get("test.cfg.variable").isDefined)
Execution.failed(new Exception(s"${message}\n: var: ${cfg.get("test.cfg.variable")}"))
else
Execution.from(())
}
val hasVariable = Execution.getConfig.flatMap { cfg =>
if (cfg.get("test.cfg.variable").isEmpty)
Execution.failed(new Exception("Should see variable inside of transform"))
else
Execution.from(())
}
def addOption(cfg: Config) = cfg.+ ("test.cfg.variable", "dummyValue")
doesNotHaveVariable("Should not see variable before we've started transforming")
.flatMap{ _ => Execution.withConfig(hasVariable)(addOption) }
.flatMap(_ => doesNotHaveVariable("Should not see variable in flatMap's after the isolation"))
.map(_ => true)
.shouldSucceed() shouldBe true
}
"Config transformer will interact correctly with the cache" in {
var incrementIfDefined = 0
var totalEvals = 0
val incrementor = Execution.getConfig.flatMap { cfg =>
totalEvals += 1
if (cfg.get("test.cfg.variable").isDefined)
incrementIfDefined += 1
Execution.from(())
}
def addOption(cfg: Config) = cfg.+ ("test.cfg.variable", "dummyValue")
// Here we run without the option, with the option, and finally without again.
incrementor
.flatMap{ _ => Execution.withConfig(incrementor)(addOption) }
.flatMap(_ => incrementor)
.map(_ => true)
.shouldSucceed() shouldBe true
assert(incrementIfDefined === 1)
// We should evaluate once for the default config, and once for the modified config.
assert(totalEvals === 2)
}
"Config transformer will interact correctly with the cache when writing" in {
import java.io._
val srcF = File.createTempFile("tmpoutputLocation", ".tmp").getAbsolutePath
val sinkF = File.createTempFile("tmpoutputLocation2", ".tmp").getAbsolutePath
def writeNums(nums: List[Int]): Unit = {
val pw = new PrintWriter(new File(srcF))
pw.write(nums.mkString("\n"))
pw.close
}
writeNums(List(1, 2, 3))
val sink = TypedTsv[Int](sinkF)
val src = TypedTsv[Int](srcF)
val operationTP = (TypedPipe.from(src) ++ TypedPipe.from((1 until 100).toList)).writeExecution(sink).getCounters.map(_._2.toMap)
def addOption(cfg: Config) = cfg.+ ("test.cfg.variable", "dummyValue")
// Here we run without the option, with the option, and finally without again.
val (oldCounters, newCounters) = operationTP
.flatMap{ oc =>
writeNums(List(1, 2, 3, 4, 5, 6, 7))
Execution.withConfig(operationTP)(addOption).map { nc => (oc, nc) }
}
.shouldSucceed()
assert(oldCounters != newCounters, "With new configs given the source changed we shouldn't cache so the counters should be different")
}
"correctly add cached file into config" in {
val execution = Execution.withCachedFile("/path/to/your/file.txt") { cachedFile =>
Execution.getConfig.map { config =>
config.getDistributedCachedFiles should contain only cachedFile
}
}
execution.waitFor(Config.default, Hdfs(strict = true, new Configuration(false))) match {
case Success(s) => s
case Failure(e) => fail(s"Failed running execution, exception:\n$e")
}
}
"correctly add cached files into config" in {
val execution =
Execution.withCachedFile("/path/to/your/one.txt") { one =>
Execution.withCachedFile("/path/to/your/second.txt") { second =>
Execution.getConfig.map { config =>
config.getDistributedCachedFiles should contain only (one, second)
}
}
}
execution.waitFor(Config.default, Hdfs(strict = true, new Configuration(false))) match {
case Success(s) => s
case Failure(e) => fail(s"Failed running execution, exception:\n$e")
}
}
}
"ExecutionApp" should {
val parser = new ExecutionApp { def job = Execution.from(()) }
"parse hadoop args correctly" in {
val conf = parser.config(Array("-Dmapred.reduce.tasks=100", "--local"))._1
conf.get("mapred.reduce.tasks") should contain ("100")
conf.getArgs.boolean("local") shouldBe true
val (conf1, Hdfs(_, hconf)) = parser.config(Array("--test", "-Dmapred.reduce.tasks=110", "--hdfs"))
conf1.get("mapred.reduce.tasks") should contain ("110")
conf1.getArgs.boolean("test") shouldBe true
hconf.get("mapred.reduce.tasks") shouldBe "110"
}
}
"An ExecutionJob" should {
"run correctly" in {
JobTest(new WordCountEc(_))
.arg("input", "in")
.arg("output", "out")
.source(TextLine("in"), List((0, "hello world"), (1, "goodbye world")))
.typedSink(TypedTsv[(String, Long)]("out")) { outBuf =>
outBuf.toMap shouldBe Map("hello" -> 1L, "world" -> 2L, "goodbye" -> 1L)
}
.run
.runHadoop
.finish()
}
}
"Executions" should {
"shutdown hook should clean up temporary files" in {
val tempFileOne = Files.createTempDirectory("scalding-execution-test")
val tempFileTwo = Files.createTempDirectory("scalding-execution-test")
val mode = Test(Map())
Files.exists(tempFileOne) should be(true)
Files.exists(tempFileTwo) should be(true)
val cleanupThread = TempFileCleanup(Seq(tempFileOne.toFile.getAbsolutePath, tempFileTwo.toFile.getAbsolutePath), mode)
cleanupThread.run()
Files.exists(tempFileOne) should be(false)
Files.exists(tempFileTwo) should be(false)
}
"clean up temporary files on exit" in {
val tempFile = Files.createTempDirectory("scalding-execution-test").toFile.getAbsolutePath
val testData = List("a", "b", "c")
getShutdownHooks.foreach { hook: Thread =>
isTempFileCleanupHook(hook) should be(false)
}
ExecutionTestJobs.writeExecutionWithTempFile(tempFile, testData).shouldSucceed()
// This is hacky, but there's a small chance that the new cleanup hook isn't registered by the time we get here
// A small sleep like this appears to be sufficient to ensure we can see it
Thread.sleep(1000)
val cleanupHook = getShutdownHooks.find(isTempFileCleanupHook)
cleanupHook shouldBe defined
cleanupHook.get.asInstanceOf[TempFileCleanup].filesToCleanup should contain theSameElementsAs Set(tempFile)
cleanupHook.get.run()
// Remove the hook so it doesn't show up in the list of shutdown hooks for other tests
Runtime.getRuntime.removeShutdownHook(cleanupHook.get)
}
"clean up temporary files on exit with a zip" in {
val tempFileOne = Files.createTempDirectory("scalding-execution-test").toFile.getAbsolutePath
val tempFileTwo = Files.createTempDirectory("scalding-execution-test").toFile.getAbsolutePath
val testDataOne = List("a", "b", "c")
val testDataTwo = List("x", "y", "z")
getShutdownHooks.foreach { hook: Thread =>
isTempFileCleanupHook(hook) should be(false)
}
ExecutionTestJobs.writeExecutionWithTempFile(tempFileOne, testDataOne)
.zip(ExecutionTestJobs.writeExecutionWithTempFile(tempFileTwo, testDataTwo)).shouldSucceed()
// This is hacky, but there's a small chance that the new cleanup hook isn't registered by the time we get here
// A small sleep like this appears to be sufficient to ensure we can see it
Thread.sleep(1000)
val cleanupHook = getShutdownHooks.find(isTempFileCleanupHook)
cleanupHook shouldBe defined
cleanupHook.get.asInstanceOf[TempFileCleanup].filesToCleanup should contain theSameElementsAs Set(tempFileOne, tempFileTwo)
cleanupHook.get.run()
// Remove the hook so it doesn't show up in the list of shutdown hooks for other tests
Runtime.getRuntime.removeShutdownHook(cleanupHook.get)
}
"evaluate once per run" in {
var first = 0
var second = 0
var third = 0
val e1 = Execution.from({ first += 1; 42 })
val e2 = e1.flatMap { x =>
second += 1
Execution.from(2 * x)
}
val e3 = e1.map { x => third += 1; x * 3 }
/**
* Notice both e3 and e2 need to evaluate e1.
*/
val res = e3.zip(e2)
res.shouldSucceed()
assert((first, second, third) == (1, 1, 1))
}
"zip does not duplicate counters" in {
val c1 = Execution.withId { implicit uid =>
val stat = Stat("test")
val e1 = TypedPipe.from(0 until 100).map { x =>
stat.inc
x
}
.writeExecution(source.NullSink)
e1.zip(e1)
}
.getCounters.map { case (_, c) => c("test") }
val c2 = Execution.withId { implicit uid =>
val stat = Stat("test")
val e2 = TypedPipe.from(0 until 100).map { x =>
stat.inc
x
}
.writeExecution(source.NullSink)
e2.flatMap(Execution.from(_)).zip(e2)
}
.getCounters.map { case (_, c) => c("test") }
c1.shouldSucceed() should ===(100)
c2.shouldSucceed() should ===(100)
}
"Running a large loop won't exhaust boxed instances" in {
var timesEvaluated = 0
import com.twitter.scalding.serialization.macros.impl.BinaryOrdering._
// Attempt to use up 4 boxed classes for every execution
def baseExecution(idx: Int): Execution[Unit] = TypedPipe.from(0 until 1000).map(_.toShort).flatMap { i =>
timesEvaluated += 1
List((i, i), (i, i))
}.sumByKey.map {
case (k, v) =>
(k.toInt, v)
}.sumByKey.map {
case (k, v) =>
(k.toLong, v)
}.sumByKey.map {
case (k, v) =>
(k.toString, v)
}.sumByKey.map {
case (k, v) =>
(MyCustomType(k), v)
}.sumByKey.writeExecution(TypedTsv(s"/tmp/asdf_${idx}"))
implicitly[OrderedSerialization[MyCustomType]] match {
case mos: MacroEqualityOrderedSerialization[_] => assert(mos.uniqueId == "com.twitter.scalding.MyCustomType")
case _ => sys.error("Ordered serialization should have been the MacroEqualityOrderedSerialization for this test")
}
def executionLoop(idx: Int): Execution[Unit] = {
if (idx > 0)
baseExecution(idx).flatMap(_ => executionLoop(idx - 1))
else
Execution.unit
}
executionLoop(55).shouldSucceed()
assert(timesEvaluated == 55 * 1000, "Should run the 55 execution loops for 1000 elements")
}
"evaluate shared portions just once, writeExecution" in {
var timesEvaluated = 0
val baseTp = TypedPipe.from(0 until 1000).flatMap { i =>
timesEvaluated += 1
List(i, i)
}.fork
val fde1 = baseTp.map{ _ * 3 }.writeExecution(TypedTsv("/tmp/asdf"))
val fde2 = baseTp.map{ _ * 5 }.writeExecution(TypedTsv("/tmp/asdf2"))
val res = fde1.zip(fde2)
res.shouldSucceed()
assert(timesEvaluated == 1000, "Should share the common sub section of the graph when we zip two write Executions")
}
"evaluate shared portions just once, forceToDiskExecution" in {
var timesEvaluated = 0
val baseTp = TypedPipe.from(0 until 1000).flatMap { i =>
timesEvaluated += 1
List(i, i)
}.fork
val fde1 = baseTp.map{ _ * 3 }.forceToDiskExecution
val fde2 = baseTp.map{ _ * 5 }.forceToDiskExecution
val res = fde1.zip(fde2)
res.shouldSucceed()
assert(timesEvaluated == 1000, "Should share the common sub section of the graph when we zip two write Executions")
}
"evaluate shared portions just once, forceToDiskExecution with execution cache" in {
var timesEvaluated = 0
val baseTp = TypedPipe.from(0 until 1000).flatMap { i =>
timesEvaluated += 1
List(i, i)
}.fork
val fde1 = baseTp.map{ _ * 3 }.forceToDiskExecution
val fde2 = baseTp.map{ _ * 5 }.forceToDiskExecution
val res = fde1.zip(fde2).flatMap{ _ => fde1 }.flatMap(_.toIterableExecution)
res.shouldSucceed()
assert(timesEvaluated == 1000, "Should share the common sub section of the graph when we zip two write Executions and then flatmap")
}
"Ability to do isolated caches so we don't exhaust memory" in {
def memoryWastingExecutionGenerator(id: Int): Execution[Array[Long]] = Execution.withNewCache(Execution.from(id).flatMap{ idx =>
Execution.from(Array.fill(4000000)(idx.toLong))
})
def writeAll(numExecutions: Int): Execution[Unit] = {
if (numExecutions > 0) {
memoryWastingExecutionGenerator(numExecutions).flatMap { _ =>
writeAll(numExecutions - 1)
}
} else {
Execution.from(())
}
}
writeAll(400).shouldSucceed()
}
"handle failure" in {
val result = Execution.withParallelism(Seq(Execution.failed(new Exception("failed"))), 1)
result.shouldFail()
}
"handle an error running in parallel" in {
val executions = Execution.failed(new Exception("failed")) :: 0.to(10).map(i => Execution.from[Int](i)).toList
val result = Execution.withParallelism(executions, 3)
result.shouldFail()
}
"run in parallel" in {
val executions = 0.to(10).map(i => Execution.from[Int](i)).toList
val result = Execution.withParallelism(executions, 3)
assert(result.shouldSucceed() == 0.to(10).toSeq)
}
"block correctly" in {
var seen = 0
def updateSeen(idx: Int): Unit = {
assert(seen === idx)
seen += 1
}
val executions = 0.to(10).map{ i =>
Execution
.from[Int](i)
.map{ i => Thread.sleep(10 - i); i }
.onComplete(t => updateSeen(t.get))
}.toList.reverse
val result = Execution.withParallelism(executions, 1)
assert(result.shouldSucceed() == 0.to(10).reverse)
}
"can hashCode, compare, and run a long sequence" in {
val execution = Execution.sequence((1 to 100000).toList.map(Execution.from(_)))
assert(execution.hashCode == execution.hashCode)
assert(execution == execution)
assert(execution.shouldSucceed() == (1 to 100000).toList)
}
"caches a withId Execution computation" in {
var called = false
val execution = Execution.withId { id =>
assert(!called)
called = true
Execution.from("foobar")
}
val doubleExecution = execution.zip(execution)
assert(doubleExecution.shouldSucceed() == ("foobar", "foobar"))
assert(called)
}
"maintains equality and hashCode after reconstruction" when {
// Make two copies of these. Comparison by reference
// won't match between the two.
val futureF = { _: ConcurrentExecutionContext => Future.successful(10) }
val futureF2 = { _: ConcurrentExecutionContext => Future.successful(10) }
val fnF = { (_: Config, _: Mode) => null }
val fnF2 = { (_: Config, _: Mode) => null }
val withIdF = { _: UniqueID => Execution.unit }
val withIdF2 = { _: UniqueID => Execution.unit }
val mapF = { _: Int => 12 }
val mapF2 = { _: Int => 12 }
def reconstructibleLaws[T](ex: => Execution[T], ex2: Execution[T]): Unit = {
assert(ex == ex)
assert(ex.hashCode == ex.hashCode)
assert(ex != ex2)
}
"Execution.fromFuture" in {
reconstructibleLaws(Execution.fromFuture(futureF), Execution.fromFuture(futureF2))
}
"Execution.fromFn" in {
reconstructibleLaws(Execution.fromFn(fnF), Execution.fromFn(fnF2))
}
"Execution.withId" in {
reconstructibleLaws(Execution.withId(withIdF), Execution.withId(withIdF2))
}
"Execution#map" in {
reconstructibleLaws(
Execution.fromFuture(futureF).map(mapF),
Execution.fromFuture(futureF).map(mapF2))
}
"Execution.zip" in {
reconstructibleLaws(
Execution.zip(Execution.fromFuture(futureF2), Execution.withId(withIdF)),
Execution.zip(Execution.fromFuture(futureF2), Execution.withId(withIdF2)))
}
"Execution.sequence" in {
reconstructibleLaws(
Execution.sequence(Seq(
Execution.fromFuture(futureF),
Execution.withId(withIdF),
Execution.fromFuture(futureF2).map(mapF))),
Execution.sequence(Seq(
Execution.fromFuture(futureF),
Execution.withId(withIdF),
Execution.fromFn(fnF))))
}
}
"Has consistent hashCode and equality for mutable" when {
// These cases are a bit convoluted, but we still
// want equality to be consistent
trait MutableX[T] {
protected var x: Int
def setX(newX: Int): Unit = { x = newX }
def makeExecution: Execution[T]
}
case class FromFutureMutable(var x: Int = 0) extends Function1[ConcurrentExecutionContext, Future[Int]] with MutableX[Int] {
def apply(context: ConcurrentExecutionContext) = Future.successful(x)
def makeExecution = Execution.fromFuture(this)
}
case class FromFnMutable(var x: Int = 0) extends Function2[Config, Mode, Null] with MutableX[Unit] {
def apply(config: Config, mode: Mode) = null
def makeExecution = Execution.fromFn(this)
}
case class WithIdMutable(var x: Int = 0) extends Function1[UniqueID, Execution[Int]] with MutableX[Int] {
def apply(id: UniqueID) = Execution.fromFuture(FromFutureMutable(x))
def makeExecution = Execution.withId(this)
}
val mapFunction = { x: Int => x * x }
case class MapMutable(var x: Int = 0) extends MutableX[Int] {
val m = FromFutureMutable(x)
override def setX(newX: Int) = {
x = newX
m.setX(x)
}
def makeExecution = m.makeExecution.map(mapFunction)
}
case class ZipMutable(var x: Int = 0) extends MutableX[(Int, Int)] {
val m1 = FromFutureMutable(x)
val m2 = WithIdMutable(x)
override def setX(newX: Int) = {
x = newX
m1.setX(x)
m2.setX(x + 20)
}
def makeExecution = m1.makeExecution.zip(m2.makeExecution)
}
case class SequenceMutable(var x: Int = 0) extends MutableX[Seq[Int]] {
val m1 = FromFutureMutable(x)
val m2 = WithIdMutable(x)
override def setX(newX: Int) = {
x = newX
m1.setX(x)
m2.setX(x * 3)
}
def makeExecution = Execution.sequence(Seq(m1.makeExecution, m2.makeExecution))
}
def mutableLaws[T, U <: MutableX[T]](
mutableGen: => U,
expectedOpt: Option[Int => T] = None): Unit = {
expectedOpt.foreach { expected =>
require(expected(10) != expected(20))
}
def validate(ex: Execution[T], seed: Int): Unit = {
expectedOpt.foreach { expected =>
assert(ex.shouldSucceed() == expected(seed))
}
}
val mutable1 = mutableGen
mutable1.setX(10)
val ex1 = mutable1.makeExecution
val mutable2 = mutableGen
mutable2.setX(10)
val ex2 = mutable2.makeExecution
assert(ex1 == ex2)
assert(ex1.hashCode == ex2.hashCode)
validate(ex1, 10)
validate(ex2, 10)
mutable2.setX(20)
// We may have the same hashCode still, but we don't need to
assert(ex1 != ex2)
validate(ex2, 20)
val mutable3 = mutableGen
mutable3.setX(20)
val ex3 = mutable3.makeExecution
assert(ex1 != ex3)
validate(ex3, 20)
mutable3.setX(10)
if (ex1 == ex3) {
// If they are made equal again, the hashCodes must match
assert(ex1.hashCode == ex3.hashCode)
}
validate(ex3, 10)
}
"Execution.fromFuture" in {
mutableLaws(FromFutureMutable(), Some({ x: Int => x }))
}
"Execution.fromFn" in {
mutableLaws(FromFnMutable(), Option.empty[Int => Unit])
}
"Execution.withId" in {
mutableLaws(WithIdMutable(), Some({ x: Int => x }))
}
"Execution#map" in {
mutableLaws(MapMutable(), Some({ x: Int => x * x }))
}
"Execution#zip" in {
mutableLaws(ZipMutable(), Some({ x: Int => (x, x + 20) }))
}
"Execution.sequence" in {
mutableLaws(SequenceMutable(), Some({ x: Int => Seq(x, x * 3) }))
}
}
}
}
| tresata/scalding | scalding-core/src/test/scala/com/twitter/scalding/ExecutionTest.scala | Scala | apache-2.0 | 27,527 |
import org.platanios.tensorflow.api._
import org.platanios.tensorflow.api.tf.learn._
import org.platanios.tensorflow.data.image.MNISTLoader
import java.nio.file.Paths
// Documentation snippet traits for the TensorFlow-Scala docs index page.
// The `// #name` marker pairs delimit regions extracted by the doc tooling —
// do not remove or rename them.
trait IndexTensorsExample {
  // Element-wise tensor arithmetic example.
  // #tensors_example
  val t1 = Tensor(1.2, 4.5)
  val t2 = Tensor(-0.2, 1.1)
  t1 + t2 == Tensor(1.0, 5.6)
  // #tensors_example
}
trait IndexLowLevelExample {
  // Graph-mode linear regression: placeholders, a variable, a squared loss,
  // and an AdaGrad minimization op.
  // #low_level_example
  val inputs = tf.placeholder[Float](Shape(-1, 10))
  val outputs = tf.placeholder[Float](Shape(-1, 10))
  val predictions = tf.nameScope("Linear") {
    val weights = tf.variable[Float]("weights", Shape(10, 1), tf.ZerosInitializer)
    tf.matmul(inputs, weights)
  }
  val loss = tf.sum(tf.square(predictions - outputs))
  val optimizer = tf.train.AdaGrad(1.0f)
  val trainOp = optimizer.minimize(loss)
  // #low_level_example
}
trait IndexSliceExample {
  // NumPy-style slicing with ranges and ellipsis.
  val tensor = Tensor.zeros[Float](Shape(10, 2, 3, 4, 5, 20))
  // #slice_example
  tensor(2 :: 5, ---, 1) // is equivalent to numpy's 'tensor[2:5, ..., 1]'
  // #slice_example
}
trait IndexMNISTExample {
  // End-to-end example: load MNIST, build a batched dataset pipeline, define a
  // 4-layer MLP with the learn API, and train it with an in-memory estimator.
  // #mnist_example
  // Load and batch data using pre-fetching.
  val dataset = MNISTLoader.load(Paths.get("/tmp"))
  val trainImages = tf.data.datasetFromTensorSlices(dataset.trainImages.toFloat)
  val trainLabels = tf.data.datasetFromTensorSlices(dataset.trainLabels.toLong)
  val trainData =
    trainImages.zip(trainLabels)
      .repeat()
      .shuffle(10000)
      .batch(256)
      .prefetch(10)
  // Create the MLP model.
  val input = Input(FLOAT32, Shape(-1, 28, 28))
  val trainInput = Input(INT64, Shape(-1))
  val layer = Flatten[Float]("Input/Flatten") >>
    Linear[Float]("Layer_0", 128) >> ReLU[Float]("Layer_0/Activation", 0.1f) >>
    Linear[Float]("Layer_1", 64) >> ReLU[Float]("Layer_1/Activation", 0.1f) >>
    Linear[Float]("Layer_2", 32) >> ReLU[Float]("Layer_2/Activation", 0.1f) >>
    Linear[Float]("OutputLayer", 10)
  val loss = SparseSoftmaxCrossEntropy[Float, Long, Float]("Loss") >>
    Mean("Loss/Mean")
  val optimizer = tf.train.GradientDescent(1e-6f)
  val model = Model.simpleSupervised(input, trainInput, layer, loss, optimizer)
  // Create an estimator and train the model.
  val estimator = InMemoryEstimator(model)
  estimator.train(() => trainData, StopCriteria(maxSteps = Some(1000000)))
  // #mnist_example
}
trait IndexTensorBoard extends IndexMNISTExample {
  // Extends the MNIST example with summary/checkpoint hooks and TensorBoard.
  // #tensorboard_example
  override val loss = SparseSoftmaxCrossEntropy[Float, Long, Float]("Loss") >>
    Mean("Loss/Mean") >>
    ScalarSummary(name = "Loss", tag = "Loss")
  val summariesDir = Paths.get("/tmp/summaries")
  override val estimator = InMemoryEstimator(
    modelFunction = model,
    configurationBase = Configuration(Some(summariesDir)),
    trainHooks = Set(
      SummarySaver(summariesDir, StepHookTrigger(100)),
      CheckpointSaver(summariesDir, StepHookTrigger(1000))),
    tensorBoardConfig = TensorBoardConfig(summariesDir))
  estimator.train(() => trainData, StopCriteria(maxSteps = Some(100000)))
  // #tensorboard_example
}
| eaplatanios/tensorflow_scala | docs/src/main/scala/Index.scala | Scala | apache-2.0 | 3,056 |
/*
* Copyright 2018 Heiko Seeberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rocks.heikoseeberger.accessus
import akka.actor.ActorSystem
import akka.http.scaladsl.client.RequestBuilding.Get
import akka.http.scaladsl.model.StatusCodes.OK
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
import akka.http.scaladsl.server.{ Directives, RejectionHandler, Route }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.stream.scaladsl.{ Keep, Sink, Source }
import scala.concurrent.duration.DurationInt
import scala.concurrent.{ Await, Future }
import utest._
// utest suite for Accessus: verifies that wrapping a Route/handler with an
// access log emits one timestamped (request, response) entry per request.
object AccessusTests extends TestSuite {
  import Accessus._

  private implicit val system: ActorSystem = ActorSystem()
  private implicit val mat: Materializer = ActorMaterializer()
  // Map rejections (e.g. unmatched "/") to 200 OK so both test requests succeed.
  private implicit val rejectionHandler: RejectionHandler =
    RejectionHandler.newBuilder().handleNotFound(Directives.complete(OK)).result()

  // Echoes the requested path segment, e.g. GET /test => "/test".
  private val route = {
    import Directives._
    path(Segment) { s =>
      get {
        complete(s"/$s")
      }
    }
  }

  override def tests: Tests =
    Tests {
      'routeOps - {
        'withTimestampedAccessLog - {
          runAndAssert(route.withTimestampedAccessLog(Sink.seq))
        }
        'withAccessLog - {
          runAndAssert(route.withAccessLog(() => now())(Sink.seq))
        }
      }
      'handlerOps - {
        'withTimestampedAccessLog - {
          runAndAssert(Route.handlerFlow(route).withTimestampedAccessLog(Sink.seq))
        }
        'withAccessLog - {
          runAndAssert(Route.handlerFlow(route).withAccessLog(() => now())(Sink.seq))
        }
      }
      'withAccessLog - {
        runAndAssert(withAccessLog(() => now())(Sink.seq, route))
      }
    }

  override def utestAfterAll(): Unit = {
    Await.ready(system.terminate(), 42.seconds)
    super.utestAfterAll()
  }

  // Streams GET /test and GET / through the handler and asserts the access-log
  // sink collected both entries with OK responses and timestamps after `t`.
  private def runAndAssert(handler: Handler[Future[Seq[((HttpRequest, Long), HttpResponse)]]]) = {
    import system.dispatcher
    val t = now()
    Source(List("/test", "/"))
      .map(Get.apply)
      .viaMat(handler)(Keep.right)
      .to(Sink.ignore)
      .run()
      .map {
        case Seq(((req1, t1), res1), ((req2, t2), res2)) =>
          assert(req1.uri.path.toString == "/test")
          assert(res1.status == OK)
          assert(t1 > t)
          assert(req2.uri.path.toString == "/")
          assert(res2.status == OK)
          assert(t2 > t)
      }
  }

  // Monotonic-ish timestamp source used by the access-log variants.
  private def now() = System.nanoTime()
}
| hseeberger/accessus | src/test/scala/rocks/heikoseeberger/accessus/AccessusTests.scala | Scala | apache-2.0 | 2,995 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.connector
import scala.annotation.tailrec
import scala.collection.immutable
import scala.concurrent.Future
import scala.concurrent.blocking
import scala.concurrent.duration._
import scala.util.Failure
import org.apache.kafka.clients.consumer.CommitFailedException
import akka.actor.FSM
import akka.pattern.pipe
import whisk.common.Logging
import whisk.common.TransactionId
/** Abstraction over a message-bus consumer (e.g. a Kafka consumer). */
trait MessageConsumer {
  /** The maximum number of messages peeked (i.e., max number of messages retrieved during a long poll). */
  val maxPeek: Int

  /**
   * Gets messages via a long poll. May or may not remove messages
   * from the message connector. Use commit() to ensure messages are
   * removed from the connector.
   *
   * @param duration for the long poll
   * @return iterable collection (topic, partition, offset, bytes)
   */
  def peek(duration: Duration): Iterable[(String, Int, Long, Array[Byte])]

  /**
   * Commits offsets from last peek operation to ensure they are removed
   * from the connector.
   */
  def commit(): Unit

  /** Closes consumer. */
  def close(): Unit
}
/** States, state data and protocol messages for the [[MessageFeed]] FSM. */
object MessageFeed {
  protected sealed trait FeedState
  // Waiting for a Ready signal before the first fill.
  protected[connector] case object Idle extends FeedState
  // A fill (long poll) is in flight; may keep filling while capacity remains.
  protected[connector] case object FillingPipeline extends FeedState
  // Pipeline is above the fill threshold; wait for Processed acks to drain.
  protected[connector] case object DrainingPipeline extends FeedState
  protected sealed trait FeedData
  // This FSM carries no state data; all state lives in the actor's fields.
  private case object NoData extends FeedData

  /** Indicates the consumer is ready to accept messages from the message bus for processing. */
  object Ready

  /** Steady state message, indicates capacity in downstream process to receive more messages. */
  object Processed

  /** Indicates the fill operation has completed. */
  private case class FillCompleted(messages: Seq[(String, Int, Long, Array[Byte])])
}
/**
 * This actor polls the message bus for new messages and dispatches them to the given
 * handler. The actor tracks the number of messages dispatched and will not dispatch new
 * messages until some number of them are acknowledged.
 *
 * This is used by the invoker to pull messages from the message bus and apply back pressure
 * when the invoker does not have resources to complete processing messages (i.e., no containers
 * are available to run new actions). It is also used in the load balancer to consume active
 * ack messages.
 * When the invoker releases resources (by reclaiming containers) it will send a message
 * to this actor which will then attempt to fill the pipeline with new messages.
 *
 * The actor tries to fill the pipeline with additional messages while the number
 * of outstanding requests is below the pipeline fill threshold.
 */
@throws[IllegalArgumentException]
class MessageFeed(description: String,
                  logging: Logging,
                  consumer: MessageConsumer,
                  maximumHandlerCapacity: Int,
                  longPollDuration: FiniteDuration,
                  handler: Array[Byte] => Future[Unit],
                  autoStart: Boolean = true,
                  logHandoff: Boolean = true)
    extends FSM[MessageFeed.FeedState, MessageFeed.FeedData] {
  import MessageFeed._

  // double-buffer to make up for message bus read overhead
  val maxPipelineDepth = maximumHandlerCapacity * 2
  // Fill only while the buffered backlog leaves room for a full peek batch.
  private val pipelineFillThreshold = maxPipelineDepth - consumer.maxPeek

  require(
    consumer.maxPeek <= maxPipelineDepth,
    "consumer may not yield more messages per peek than permitted by max depth")

  // Immutable Queue
  // although on the surface it seems to make sense to use an immutable variable with a mutable Queue,
  // Akka Actor state defies the usual "prefer immutable" guideline in Scala, esp. w/ Collections.
  // If, for some reason, this Queue was mutable and is accidentally leaked in say an Akka message,
  // another Actor or recipient would be able to mutate the internal state of this Actor.
  // Best practice dictates a mutable variable pointing at an immutable collection for this reason
  private var outstandingMessages = immutable.Queue.empty[(String, Int, Long, Array[Byte])]
  // Remaining downstream capacity; decremented on dispatch, restored on Processed.
  private var handlerCapacity = maximumHandlerCapacity

  private implicit val tid = TransactionId.dispatcher

  logging.info(
    this,
    s"handler capacity = $maximumHandlerCapacity, pipeline fill at = $pipelineFillThreshold, pipeline depth = $maxPipelineDepth")

  // Idle: do nothing until the downstream signals Ready, then start filling.
  when(Idle) {
    case Event(Ready, _) =>
      fillPipeline()
      goto(FillingPipeline)

    case _ => stay
  }

  // wait for fill to complete, and keep filling if there is
  // capacity otherwise wait to drain
  when(FillingPipeline) {
    case Event(Processed, _) =>
      updateHandlerCapacity()
      sendOutstandingMessages()
      stay

    case Event(FillCompleted(messages), _) =>
      outstandingMessages = outstandingMessages ++ messages
      sendOutstandingMessages()
      if (shouldFillQueue()) {
        fillPipeline()
        stay
      } else {
        goto(DrainingPipeline)
      }

    case _ => stay
  }

  // Draining: only Processed acks can free capacity; resume filling once the
  // backlog drops back below the threshold.
  when(DrainingPipeline) {
    case Event(Processed, _) =>
      updateHandlerCapacity()
      sendOutstandingMessages()
      if (shouldFillQueue()) {
        fillPipeline()
        goto(FillingPipeline)
      } else stay

    case _ => stay
  }

  // autoStart kicks the feed without requiring an external Ready message.
  onTransition { case _ -> Idle => if (autoStart) self ! Ready }
  startWith(Idle, MessageFeed.NoData)
  initialize()

  private implicit val ec = context.system.dispatcher

  // Long-polls the consumer on the dispatcher (wrapped in `blocking`) and pipes
  // a FillCompleted back to self; errors degrade to an empty batch.
  private def fillPipeline(): Unit = {
    if (outstandingMessages.size <= pipelineFillThreshold) {
      Future {
        blocking {
          // Grab next batch of messages and commit offsets immediately
          // essentially marking the activation as having satisfied "at most once"
          // semantics (this is the point at which the activation is considered started).
          // If the commit fails, then messages peeked are peeked again on the next poll.
          // While the commit is synchronous and will block until it completes, at steady
          // state with enough buffering (i.e., maxPipelineDepth > maxPeek), the latency
          // of the commit should be masked.
          val records = consumer.peek(longPollDuration)
          consumer.commit()
          FillCompleted(records.toSeq)
        }
      }.andThen {
          case Failure(e: CommitFailedException) =>
            logging.error(this, s"failed to commit $description consumer offset: $e")
          case Failure(e: Throwable) => logging.error(this, s"exception while pulling new $description records: $e")
        }
        .recover {
          case _ => FillCompleted(Seq.empty)
        }
        .pipeTo(self)
    } else {
      logging.error(this, s"dropping fill request until $description feed is drained")
    }
  }

  /** Send as many messages as possible to the handler. */
  @tailrec
  private def sendOutstandingMessages(): Unit = {
    val occupancy = outstandingMessages.size
    if (occupancy > 0 && handlerCapacity > 0) {
      // Easiest way with an immutable queue to cleanly dequeue
      // Head is the first elemeent of the queue, desugared w/ an assignment pattern
      // Tail is everything but the first element, thus mutating the collection variable
      val (topic, partition, offset, bytes) = outstandingMessages.head
      outstandingMessages = outstandingMessages.tail

      if (logHandoff) logging.info(this, s"processing $topic[$partition][$offset] ($occupancy/$handlerCapacity)")
      handler(bytes)
      handlerCapacity -= 1

      sendOutstandingMessages()
    }
  }

  // True while the buffered backlog is at or below the fill threshold.
  private def shouldFillQueue(): Boolean = {
    val occupancy = outstandingMessages.size
    if (occupancy <= pipelineFillThreshold) {
      logging.debug(
        this,
        s"$description pipeline has capacity: $occupancy <= $pipelineFillThreshold ($handlerCapacity)")
      true
    } else {
      logging.debug(this, s"$description pipeline must drain: $occupancy > $pipelineFillThreshold")
      false
    }
  }

  // Restores one unit of handler capacity, clamped at the configured maximum.
  private def updateHandlerCapacity(): Int = {
    logging.debug(self, s"$description received processed msg, current capacity = $handlerCapacity")

    if (handlerCapacity < maximumHandlerCapacity) {
      handlerCapacity += 1
      handlerCapacity
    } else {
      if (handlerCapacity > maximumHandlerCapacity) logging.error(self, s"$description capacity already at max")
      maximumHandlerCapacity
    }
  }
}
| duynguyen/incubator-openwhisk | common/scala/src/main/scala/whisk/core/connector/MessageConsumer.scala | Scala | apache-2.0 | 9,141 |
package demo
package components
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.html_<^._
import scalacss.ProdDefaults._
// scalajs-react component that renders the standard demo info box: the child
// description, optional style/browser sections, and a source-file credit link.
object InfoTemplate {

  // browsersTested: browser names to list (section hidden when empty);
  // componentFilePath: path suffix under the components source tree;
  // scalacss: whether to show the "Style: scalacss" section.
  case class Props(
      browsersTested: List[String],
      componentFilePath: String,
      scalacss: Boolean
  )

  case class Backend($ : BackendScope[Props, Unit]) {
    def render(P: Props, C: PropsChildren) = {
      <.div(^.cls := "info-template")(
        <.div(^.cls := "component-info")(C),
        // Only rendered when the component uses scalacss.
        (<.div(
          <.h4("Style :"),
          <.a(^.href := "#scalacss", "scalacss")
        )).when(P.scalacss),
        // Only rendered when at least one tested browser is listed.
        (<.div(^.marginTop := "10px")(
          <.h4("Tested Browsers List :"),
          <.ul(^.marginLeft := "50px")(P.browsersTested.map(s => <.li(s)).toTagMod)
        )).when(P.browsersTested.nonEmpty),
        <.div(^.marginTop := "10px")(
          ComponentCredits(
            filePath =
              s"core/src/main/scala/chandu0101/scalajs/react/components/${P.componentFilePath}")
        )
      )
    }
  }

  val component = ScalaComponent
    .builder[Props]("InfoTemplate")
    .renderBackendWithChildren[Backend]
    .build

  // NOTE(review): `content` appears unused within this object — presumably
  // referenced elsewhere or left over; verify before removing.
  object Style extends StyleSheet.Inline {
    import dsl._
    val content = style(
      textAlign.center,
      fontSize(30.px),
      paddingTop(40.px)
    )
  }

  def apply(
      componentFilePath: String,
      scalacss: Boolean = false,
      browsersTested: List[String] = List()
  )(children: VdomNode*) =
    // component.set(key, ref)(
    //   Props(browsersTested, componentFilePath, scalacss),
    //   children: _*
    // )
    component(Props(browsersTested, componentFilePath, scalacss))(children: _*)
}
| aparo/scalajs-react-components | demo/src/main/scala/demo/components/InfoTemplate.scala | Scala | apache-2.0 | 1,671 |
package mimir.lenses
import java.sql.SQLException
import mimir.Database
import mimir.algebra.Type.T
import mimir.algebra._
import mimir.ctables.{Model, VGTerm}
import org.apache.lucene.search.spell.{JaroWinklerDistance, LevensteinDistance, NGramDistance, StringDistance}
/**
* Created by vinayak on 7/20/15.
*/
/** Lens that maps the source operator's columns onto a user-supplied target
  * schema via string-distance based schema matching.
  *
  * `args` is expected to be a flat list of (column name, type) pairs that
  * together describe the target schema.
  */
class SchemaMatchingLens(name: String, args: List[Expression], source: Operator)
  extends Lens(name, args, source) {
  // Target schema (upper-cased column name -> type); populated by init().
  var targetSchema: Map[String, Type.T] = null
  // Schema of the wrapped source operator.
  var sourceSchema: Map[String, Type.T] = source.schema.toMap
  var db: Database = null
  var model: Model = null
  /** Parses `args` pairwise (name, then type) into `targetSchema`. */
  def init() = {
    println("args: "+args+"; "+ args.length) // NOTE(review): debug output left in
    if (args.length % 2 != 0)
      throw new SQLException("Incorrect parameters for " + lensType + " Lens")
    if (targetSchema == null) {
      targetSchema = Map[String, Type.T]()
      var i = 0
      // consume args two at a time: column name followed by its type
      while (i < args.length) {
        val col = args(i).toString.toUpperCase
        i += 1
        val t = Type.fromString(args(i).toString)
        i += 1
        targetSchema += (col -> t)
      }
    }
  }
  def schema(): List[(String, Type.T)] = targetSchema.toList
  def lensType = "SCHEMA_MATCHING"
  /**
   * `view` emits an Operator that defines the Virtual C-Table for the lens.
   * For each target column it builds a CASE over every same-typed source
   * column; the VG term decides per (target, source) pair whether the
   * mapping holds, and the first accepted pair supplies the value.
   */
  override def view: Operator = {
    Project(
      targetSchema.keys.toList.zipWithIndex.map { case (key, idx) => ProjectArg(
        key,
        CaseExpression(
          sourceSchema.filter(_._2 == targetSchema(key)).keys.toList.map(b => WhenThenClause(
            Comparison(Cmp.Eq,
              VGTerm((name, model), idx, List(StringPrimitive(key), StringPrimitive(b))),
              BoolPrimitive(true)),
            Var(b))
          ),
          NullPrimitive()
        ))
      },
      source
    )
  }
  /**
   * Initialize the lens' model by building it from scratch. Typically this involves
   * using `db` to evaluate `source`
   */
  override def build(db: Database): Unit = {
    init()
    this.db = db
    model = new SchemaMatchingModel(this)
    model.asInstanceOf[SchemaMatchingModel].learn(targetSchema, sourceSchema)
  }
}
/** Deferred expression node backed by a [[SchemaMatchingModel]]; evaluation
  * delegates to the model's best guess for variable `idx`. */
case class SchemaAnalysis(model: SchemaMatchingModel, idx: Int, args: List[Expression])
  extends Proc(args) {
  // Evaluate with concrete argument values by asking the model directly.
  override def get(v: List[PrimitiveValue]): PrimitiveValue = model.mostLikelyValue(idx, v)
  override def rebuild(c: List[Expression]): Expression = SchemaAnalysis(model, idx, c)
  // A schema-matching decision is always boolean ("does A map to B?").
  override def exprType(bindings: Map[String, T]): T = Type.TBool
}
/** Probabilistic model behind [[SchemaMatchingLens]]: scores every candidate
  * source column against each target column with a Lucene string-distance
  * measure and treats the best-scoring candidate as the mapping. */
class SchemaMatchingModel(lens: SchemaMatchingLens) extends Model {
  // Target schema (column name -> type); populated by learn().
  var schema: Map[String, Type.T] = null
  // Per target column: candidate source column -> normalized match score.
  var colMapping: Map[String, Map[String, Double]] = null

  /**
   * Scores `targetColumn` against every column in `sourceColumns`.
   *
   * @param criteria     one of "JaroWinklerDistance", "LevensteinDistance",
   *                     "NGramDistance" (anything else yields an NPE below;
   *                     only "NGramDistance" is used by learn())
   * @param targetColumn column name being matched
   * @param sourceColumns candidate source column names
   * @return map of candidate -> score, normalized to sum to 1 when possible
   */
  def getSchemaMatching(criteria: String, targetColumn: String, sourceColumns: List[String]): Map[String, Double] = {
    val matcher: StringDistance = criteria match {
      case "JaroWinklerDistance" => new JaroWinklerDistance()
      case "LevensteinDistance" => new LevensteinDistance()
      case "NGramDistance" => new NGramDistance()
      case _ => null // NOTE(review): consider throwing on unknown criteria
    }
    // Lucene getDistance returns a similarity in [0, 1] (higher = closer).
    // (The previous sortBy before toMap was dead work: toMap discards order.)
    val scores = sourceColumns.map(a => (a, matcher.getDistance(targetColumn, a).toDouble)).toMap
    val total = scores.values.sum
    // Normalize so scores sum to 1. If every score is 0 (or no candidates),
    // keep the raw zeros instead of dividing by zero and producing NaN.
    if (total == 0.0) scores else scores.map { case (k, v) => (k, v / total) }
  }

  /** Builds colMapping: only same-typed source columns are candidates. */
  def learn(targetSchema: Map[String, Type.T], sourceSchema: Map[String, Type.T]) = {
    colMapping = targetSchema.map { case (k, v) =>
      (k, getSchemaMatching("NGramDistance", k, sourceSchema.filter(_._2 == v).keys.toList))
    }
    schema = targetSchema
  }

  // One boolean variable per target column ("does the mapping hold?").
  override def varTypes: List[T] = List.fill(schema.size)(Type.TBool)
  // The model is deterministic: every sample is the most likely value.
  override def sample(seed: Long, idx: Int, args: List[PrimitiveValue]): PrimitiveValue = mostLikelyValue(idx, args)
  override def sampleGenerator(idx: Int, args: List[PrimitiveValue]): PrimitiveValue = sample(0, idx, args)

  /** args = (target column name, source column name); true iff the source
    * column is the best-scoring candidate for the target column. */
  override def mostLikelyValue(idx: Int, args: List[PrimitiveValue]): PrimitiveValue = {
    val targetCol = args(0).asString
    val sourceCol = args(1).asString
    BoolPrimitive(colMapping(targetCol).maxBy(_._2)._1.equals(sourceCol))
  }

  override def upperBoundExpr(idx: Int, args: List[Expression]): Expression = SchemaAnalysis(this, idx, args)
  override def upperBound(idx: Int, args: List[PrimitiveValue]): PrimitiveValue = mostLikelyValue(idx, args)
  override def sampleGenExpr(idx: Int, args: List[Expression]): Expression = SchemaAnalysis(this, idx, args)
  override def mostLikelyExpr(idx: Int, args: List[Expression]): Expression = SchemaAnalysis(this, idx, args)
  override def lowerBoundExpr(idx: Int, args: List[Expression]): Expression = SchemaAnalysis(this, idx, args)
  override def lowerBound(idx: Int, args: List[PrimitiveValue]): PrimitiveValue = mostLikelyValue(idx, args)

  /** Human-readable explanation for variable `idx`, used in provenance UIs. */
  override def reason(idx: Int, args: List[Expression]): (String, String) = {
    val target = schema.keys.toList(idx)
    val source = colMapping(target).maxBy(_._2)
    ("I assumed that " + source._1 + " maps to " + target + " ("+ (source._2 * 100).toInt +"% likely)", "SCHEMA_MATCHING")
  }
}
| Legacy25/mimir | mimircore/src/main/scala/mimir/lenses/SchemaMatchingLens.scala | Scala | apache-2.0 | 5,099 |
/************************************************************************
Tinaviz
* ************************************************************************
This application is part of the Tinasoft project: http://tinasoft.eu
Tinaviz main developer: julian.bilcke @ iscpif.fr (twitter.com/flngr)
Copyright (C) 2009-2011 CREA Lab, CNRS/Ecole Polytechnique UMR 7656 (Fr)
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
************************************************************************/
package eu.tinasoft.tinaviz.pipeline
import eu.tinasoft._
import tinaviz.graph._
import tinaviz.io.json.Json
import tinaviz.io.Webpage
import tinaviz.io.GEXF
import tinaviz.util.Vector._
import tinaviz.util.Maths
import tinaviz.layout._
import actors.Actor
import actors.Actor._
import compat.Platform
import com.lowagie.text.pdf.codec.Base64.OutputStream
import tinaviz.{Session, Main, Server}
import reflect.ValDef
/**
*
*/
/**
 * Central message-processing actor: owns all mutations of the pipeline's
 * input/output graphs, so rendering (draw()) never blocks on expensive work.
 *
 * Fixes vs. the previous version:
 *  - null guards used non-short-circuit `|` (`uuid == null | uuid.equals(...)`),
 *    which still evaluated the right operand and NPE'd on null input; now `||`.
 *  - the filter chain was duplicated verbatim in two handlers and recomputed
 *    `updateSelectedWithCategory` for every key, filter or not; it now lives in
 *    one helper invoked only for filter.* keys.
 *  - dead locals removed (unused `output: Graph = null`, unused `model2screen`,
 *    unused `pause` read in 'Drag) along with per-node debug printlns.
 */
class Workflow(val session: Session) extends Actor {

  /** Keys whose update requires re-running the filter chain on the scene. */
  private val filterKeys = Set(
    "filter.view",
    "filter.node.category",
    "filter.a.node.weight",
    "filter.a.edge.weight",
    "filter.b.node.weight",
    "filter.b.edge.weight",
    "filter.a.node.size",
    "filter.b.node.size")

  /**
   * Recomputes the output (visualized) graph: projects the current selection
   * back onto the input graph, applies node-weight, edge-weight and
   * weight-to-size filters, then category filtering and cleanup.
   * Equivalent to the chain previously duplicated per filter key (the
   * "filter.node.category" branch composed the exact same three filters).
   */
  private def refreshFilteredOutput(): Unit = {
    val pipeline = session.pipeline
    val f = pipeline.input.updateSelectedWithCategory(pipeline.output)
    val g = Filters.weightToSize(Filters.edgeWeight(Filters.nodeWeight2(f, f), f), f)
    pipeline.setOutput(Filters.clean(Filters.category(g)).callbackNodeCountChanged)
  }

  def act() {
    val pipeline = session.pipeline
    while (true) {
      receive {

        case 'exit =>
          println("Workflow: exiting")
          exit()

        /*
         * 'overwriteCoordinates -> Graph
         * This actor is the only one allowed to update the pipeline's layout.
         * We push the new coordinates into both graphs, then warm the lazy-val
         * cache here (off the rendering thread) so draw() stays responsive.
         */
        case ('overwriteCoordinates, g: Graph) =>
          pipeline.setInput(pipeline.input.updatePositionWithCategory(g))
          pipeline.setOutput(pipeline.output.updatePositionWithCategory(g).callbackPositionsChanged.warmCache)
          session.layout ! 'run

        // Request/response messages: `cb` is the javascript callback id the
        // webpage uses to route the reply; every branch yields the reply Map.
        case (cb: Int, something) =>
          session.webpage ! cb -> (something match {

            case ("pause", enable: Boolean) =>
              if (enable) {
                println("Workflow: pause enabled -> stopping layout")
                session.layout ! 'stop
              } else {
                println("Workflow: pause disabled -> starting layout")
                session.layout ! 'start
              }
              Map("pause" -> enable)

            case ('getNodeAttributes, uuid: String) =>
              // attributes are read from the full INPUT graph, not the view
              Map("nodes" -> pipeline.input.lessAttributes(uuid))

            case ('getNeighbourhood, view: String, todoList: List[String]) =>
              // "meso" reads from the visualized graph, anything else from input
              val container = if (view == "meso") pipeline.output else pipeline.input
              val neighbourList = Map(todoList.zipWithIndex: _*).map {
                case (uuid, i) => (uuid, container.neighbours(container.id(uuid)))
              }
              Map("nodes" -> pipeline.output.selectionUUID.toList, "neighbours" -> neighbourList)

            // "selection" is a plain String, so it falls through the List case above
            case ('getNeighbourhood, view: String, "selection") =>
              val out = pipeline.output
              val container = if (view == "meso") out else pipeline.input
              val neighbourList = Map(out.selectionUUID.zipWithIndex: _*).map {
                case (uuid, i) => (uuid, container.neighbours(container.id(uuid)))
              }
              Map("nodes" -> out.selectionUUID.toList, "neighbours" -> neighbourList)

            case ('getNodes, view: String, category: String) =>
              val all = (if (view == "meso") pipeline.output else pipeline.input).allNodes
              val result =
                if (category.equalsIgnoreCase("none")) all
                else all.filter {
                  case (uuid, attributes) => attributes("category").asInstanceOf[String].equals(category)
                }
              Map("nodes" -> result.values.toList)

            case ("select", uuidList: List[String]) =>
              println("selecting nodes: '" + uuidList + "'")
              val in = pipeline.input
              val out = pipeline.output
              val out2 =
                if (uuidList.isEmpty) {
                  // NOTE(review): in.clearSelection's result is discarded here
                  // (kept from the original code) — confirm intent.
                  in.clearSelection
                  out.clearSelection
                } else {
                  out + ("selected" -> out.uuid.zipWithIndex.map {
                    case (_uuid, i) => if (uuidList contains _uuid) true else out.selected(i)
                  })
                }
              pipeline.setOutput(out2.callbackSelectionChanged)
              self ! "filter.view" -> in.currentView
              Map("selection" -> pipeline.output.selectionAttributes, "mouse" -> "left")

            case ("select", uuid: String) =>
              val in = pipeline.input
              val out = pipeline.output
              // `||` (short-circuit) is required: the old `|` evaluated
              // uuid.equals even when uuid was null and threw an NPE.
              val out2 =
                if (uuid == null || uuid.equals(" ") || uuid.isEmpty) {
                  in.clearSelection
                  out.clearSelection
                } else {
                  out + ("selected" -> out.uuid.zipWithIndex.map {
                    case (_uuid, i) => if (_uuid equals uuid) true else out.selected(i)
                  })
                }
              pipeline.setOutput(out2.callbackSelectionChanged)
              Map("selection" -> pipeline.output.selectionAttributes, "mouse" -> "left")

            case ("selectByPattern", pattern: String) =>
              println("selectByPattern: " + pattern)
              val in = pipeline.input
              val out = pipeline.output
              pipeline.setOutput(
                (if (pattern == null || pattern.equals(" ") || pattern.isEmpty) {
                  in.clearSelection
                  out.clearSelection
                } else {
                  // select every node whose label contains the pattern
                  out + ("selected" -> out.label.zipWithIndex.map {
                    case (label, i) =>
                      if (label.toLowerCase contains pattern.toLowerCase) true else out.selected(i)
                  })
                }).callbackSelectionChanged
              )
              self ! "filter.view" -> in.currentView
              Map("selection" -> pipeline.output.selectionAttributes, "mouse" -> "left")

            /** Select nodes that have a neighbour of `category` whose label matches. */
            case ("selectByNeighbourPattern", pattern: String, category: String) =>
              val ref = pipeline.input
              val out = pipeline.output
              println("selectByNeighbourPattern(" + pattern + ", " + category + ")")
              pipeline.setOutput(
                (if (pattern == null || pattern.equals(" ") || pattern.isEmpty) {
                  ref.clearSelection
                  out.clearSelection
                } else {
                  out + ("selected" -> out.label.zipWithIndex.map {
                    case (label, i) =>
                      // the local (meso) graph uses relative ids; resolve the
                      // reference id from the shared UUID
                      val originalID = ref.id(out.uuid(i))
                      val matched = ref.label.zipWithIndex.exists {
                        case (potentialNeighbourLabel, potentialNeighbourID) =>
                          ref.hasAnyLink(potentialNeighbourID, originalID) &&
                            ref.category(potentialNeighbourID).equalsIgnoreCase(category) &&
                            (potentialNeighbourLabel.toLowerCase contains pattern.toLowerCase)
                      }
                      if (matched) true else out.selected(i)
                  })
                }).callbackSelectionChanged
              )
              self ! "filter.view" -> ref.currentView
              Map("selection" -> pipeline.output.selectionAttributes, "mouse" -> "left")

            case ("highlightByPattern", pattern: String) =>
              println("highlightByPattern: " + pattern)
              val out = pipeline.output
              pipeline.setOutput(out + ("highlighted" -> out.label.map {
                label =>
                  if (pattern == null || pattern.isEmpty) false
                  else (label.toLowerCase contains pattern.toLowerCase)
              }))
              // re-filter so the highlight section is refreshed
              self ! "filter.view" -> pipeline.input.currentView
              Map("selection" -> pipeline.output.selectionAttributes, "mouse" -> "left")

            case ("export", "GEXF") =>
              println("received a GEXF export query")
              (new GEXF(session)) ! pipeline.output
              Map()

            // Generic attribute update (with reply); re-filter only when needed.
            case (key: String, value) =>
              pipeline.applyKey(key, value)
              if (filterKeys contains key) refreshFilteredOutput()
              Map(key -> value)

            case any =>
              println("unknow cb -> msg " + any)
              Map()
          })

        case ("camera.mouse", kind: Symbol, side: Symbol, count: Symbol, position: (Double, Double)) =>
          val out = pipeline.output
          val (cz, cp, sr) = (out.cameraZoom, out.cameraPosition, out.selectionRadius)
          // convert screen coordinates to model space using camera zoom/pan
          def screen2model(p: (Double, Double)): (Double, Double) = ((p._1 - cp._1) / cz, (p._2 - cp._2) / cz)
          val o = screen2model(position)
          val r = sr / cz
          kind match {

            case 'Move =>
              // hover: highlight nodes under the cursor; push only on change
              var changed = false
              val out2 = out + ("highlighted" -> out.highlighted.zipWithIndex.map {
                case (before, i) =>
                  val l = out.size(i) // maths hack, kept from original
                  val p = out.position(i)
                  val hit = p.isInRange(o, r) || p.isInRange(o, l + (l / 2.0))
                  if (hit != before) changed = true
                  hit
              }.toArray)
              if (changed) pipeline.setOutput(out2.callbackSelectionChanged)

            case 'Click =>
              val doubleClicked = count match {
                case 'Simple => false
                case _ => true
              }
              // single click toggles the touched nodes; double click replaces
              // the selection with exactly the touched nodes
              val out2 = out + ("selected" -> out.selected.zipWithIndex.map {
                case (previousSelectionState, i) =>
                  val l = out.size(i)
                  val p = out.position(i)
                  val touched = p.isInRange(o, r) || p.isInRange(o, l + (l / 2.0))
                  if (touched) println("touched a node of degree " + out.degree(i))
                  if (doubleClicked) touched
                  else if (touched) !previousSelectionState
                  else previousSelectionState
              }.toArray)
              pipeline.setOutput(out2.callbackSelectionChanged)
              session.webpage ! session.webpage.CB_CLICK -> Map(
                "selection" -> pipeline.output.selectionAttributes,
                "mouse" -> (side match {
                  case 'Left => if (doubleClicked) "doubleLeft" else "left"
                  case 'Right => "right"
                  case _ => "none"
                })
              )

            case 'Drag => // pause-during-drag handling is currently disabled
            case 'Release => // nothing to restore while drag handling is disabled
            case _ =>
          }

        case ("export", "GEXF") =>
          (new GEXF(session)) ! pipeline.output

        // GEXF exporter replies with the XML document; forward it for download.
        case x: scala.xml.Elem =>
          session.webpage ! 'download -> x.toString

        // Generic attribute update without reply (same semantics as above).
        case (key: String, value) =>
          println("Workflow: " + key + " -> " + value)
          pipeline.applyKey(key, value)
          if (filterKeys contains key) refreshFilteredOutput()

        case s: String =>
        case msg => println("Workflow: unknow single msg: " + msg)
      }
    }
  }
}
| moma/tinaviz | src/main/scala/eu/tinasoft/tinaviz/pipeline/Workflow.scala | Scala | gpl-3.0 | 24,391 |
/*
* Copyright 2016 David Russell
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.onetapbeyond.opencpu.spark.executor
import io.onetapbeyond.opencpu.spark.executor.R._
import io.onetapbeyond.opencpu.r.executor._
import org.apache.spark._
import org.scalatest._
import scala.collection.JavaConverters._
/** Integration tests for the RDD[OCPUTask].analyze transformation, against a
  * single public OCPU server and a (simulated) cluster of servers. */
class OpenCPUSparkExecutorTestSpec
  extends FlatSpec with Matchers with BeforeAndAfter {

  private val batchDataSize = 10
  private val master = "local[2]"
  private val appName = "opencpu-spark-executor-test"

  private var sc: SparkContext = _

  // Prepare SparkContext (sc) ahead of each unit test.
  before {
    val conf = new SparkConf().setMaster(master)
                              .setAppName(appName)
    sc = new SparkContext(conf)
  }

  // Release SparkContext (sc) following each unit test.
  after {
    if (sc != null)
      sc.stop()
  }

  /**
   * Builds the RDD[OCPUTask] shared by both tests (previously duplicated
   * verbatim): for each number in the sample batch, an OpenCPU task invoking
   * stats::rnorm(n = num, mean = num).
   */
  private def sampleTaskRDD() = {
    val numRDD = sc.parallelize(1 to batchDataSize)
    numRDD.map(num => {
      // rnorm inputs must be passed as a java.util.Map
      val javaInputs = Map("n" -> num, "mean" -> num).asJava
      OCPU.R()
          .pkg("stats")
          .function("rnorm")
          .input(javaInputs)
          .library()
    })
  }

  "Spark RDD[OCPUTask] analyze transformation" should "execute OCPUTask on a dedicated OCPU server." in {

    // Dedicated OCPU sever endpoint.
    val endpoint = "http://public.opencpu.org/ocpu"

    // Generate RDD[OCPUResult] by executing the analyze operation
    // on RDD[OCPUTask] using a dedicated OCPU server.
    val resultRDD = sampleTaskRDD().analyze(endpoint)
    resultRDD.cache

    // Every task must have produced a successful result.
    val resultCount = resultRDD.count
    val successCount = resultRDD.filter(result => result.success).count
    assert(resultCount == batchDataSize)
    assert(resultCount == successCount)
  }

  "Spark RDD[OCPUTask] analyze transformation" should "execute OCPUTask across a cluster of OCPU servers." in {

    // Cluster (simulated) of OCPU sever endpoints.
    val endpoints = Array("http://public.opencpu.org/ocpu",
                          "http://public.opencpu.org/ocpu",
                          "http://public.opencpu.org/ocpu")

    // Generate RDD[OCPUResult] by executing the analyze operation
    // on RDD[OCPUTask] using a cluster of OCPU servers.
    val resultRDD = sampleTaskRDD().analyze(endpoints)
    resultRDD.cache

    // Every task must have produced a successful result.
    val resultCount = resultRDD.count
    val successCount = resultRDD.filter(result => result.success).count
    assert(resultCount == batchDataSize)
    assert(resultCount == successCount)
  }
}
| onetapbeyond/opencpu-spark-executor | src/test/scala/io/onetapbeyond/opencpu/spark/executor/OpenCPUSparkExecutorTestSpec.scala | Scala | apache-2.0 | 3,738 |
package it.seralf.apache.archive
import java.net.URL
import org.jsoup.Jsoup
import us.codecraft.xsoup.Xsoup
import scala.collection.JavaConversions._
object MBoxParser {
val timeout = 3000
  /** Fetches the Apache mod_mbox "boxlist" index for a mailing list and
    * returns the available mbox archive ids with the ".mbox" suffix stripped.
    *
    * @param project mailing list identifier used in the mod_mbox URL
    * @param period  archive period used to anchor the boxlist request
    */
  def getList(project: String, period: String) = {
    val url = s"http://mail-archives.apache.org/mod_mbox/$project/$period.mbox/ajax/boxlist"
    // blocking HTTP fetch + HTML parse; `timeout` is in milliseconds
    val doc = Jsoup.parse(new URL(url), timeout)
    val mbox_list = Xsoup.compile("//body/boxlist/mbox").evaluate(doc)
      .getElements
      .map { msgbox =>
        // ids look like "201501.mbox" -> keep "201501"
        msgbox.attr("id").replaceAll("(.*)\\.mbox", "$1")
      }
    mbox_list
  }
} | seralf/apart | src/main/scala/it/seralf/apache/archive/MBoxParser.scala | Scala | apache-2.0 | 592 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api
import org.apache.calcite.rel.RelNode
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.operators.join.JoinType
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.plan.logical.Minus
import org.apache.flink.table.expressions.{Alias, Asc, Call, Expression, ExpressionParser, Ordering, TableFunctionCall, UnresolvedAlias}
import org.apache.flink.table.plan.ProjectionTranslator._
import org.apache.flink.table.plan.logical._
import org.apache.flink.table.sinks.TableSink
import _root_.scala.collection.JavaConverters._
import _root_.scala.annotation.varargs
/**
* A Table is the core component of the Table API.
* Similar to how the batch and streaming APIs have DataSet and DataStream,
* the Table API is built around [[Table]].
*
* Use the methods of [[Table]] to transform data. Use [[TableEnvironment]] to convert a [[Table]]
* back to a DataSet or DataStream.
*
* When using Scala a [[Table]] can also be converted using implicit conversions.
*
* Example:
*
* {{{
* val env = ExecutionEnvironment.getExecutionEnvironment
* val tEnv = TableEnvironment.getTableEnvironment(env)
*
* val set: DataSet[(String, Int)] = ...
* val table = set.toTable(tEnv, 'a, 'b)
* ...
* val table2 = ...
* val set2: DataSet[MyType] = table2.toDataSet[MyType]
* }}}
*
* Operations such as [[join]], [[select]], [[where]] and [[groupBy]] either take arguments
* in a Scala DSL or as an expression String. Please refer to the documentation for the expression
* syntax.
*
* @param tableEnv The [[TableEnvironment]] to which the table is bound.
* @param logicalPlan logical representation
*/
class Table(
private[flink] val tableEnv: TableEnvironment,
private[flink] val logicalPlan: LogicalNode) {
  // Schema (field names + types) derived from the logical plan's output.
  private val tableSchema: TableSchema = new TableSchema(
    logicalPlan.output.map(_.name).toArray,
    logicalPlan.output.map(_.resultType).toArray)
  // Calcite RelBuilder of the owning environment; used to translate the plan.
  def relBuilder = tableEnv.getRelBuilder
  // Translates the logical plan into a Calcite relational expression tree.
  def getRelNode: RelNode = logicalPlan.toRelNode(relBuilder)
  /**
    * Returns the schema of this table.
    */
  def getSchema: TableSchema = tableSchema
  /**
    * Prints the schema of this table to the console in a tree format.
    */
  def printSchema(): Unit = print(tableSchema.toString)
/**
* Performs a selection operation. Similar to an SQL SELECT statement. The field expressions
* can contain complex expressions and aggregations.
*
* Example:
*
* {{{
* tab.select('key, 'value.avg + " The average" as 'average)
* }}}
*/
def select(fields: Expression*): Table = {
val expandedFields = expandProjectList(fields, logicalPlan, tableEnv)
val (aggNames, propNames) = extractAggregationsAndProperties(expandedFields, tableEnv)
if (propNames.nonEmpty) {
throw ValidationException("Window properties can only be used on windowed tables.")
}
if (aggNames.nonEmpty) {
val projectsOnAgg = replaceAggregationsAndProperties(
expandedFields, tableEnv, aggNames, propNames)
val projectFields = extractFieldReferences(expandedFields)
new Table(tableEnv,
Project(projectsOnAgg,
Aggregate(Nil, aggNames.map(a => Alias(a._1, a._2)).toSeq,
Project(projectFields, logicalPlan).validate(tableEnv)
).validate(tableEnv)
).validate(tableEnv)
)
} else {
new Table(tableEnv,
Project(expandedFields.map(UnresolvedAlias), logicalPlan).validate(tableEnv))
}
}
/**
* Performs a selection operation. Similar to an SQL SELECT statement. The field expressions
* can contain complex expressions and aggregations.
*
* Example:
*
* {{{
* tab.select("key, value.avg + ' The average' as average")
* }}}
*/
def select(fields: String): Table = {
val fieldExprs = ExpressionParser.parseExpressionList(fields)
select(fieldExprs: _*)
}
/**
* Renames the fields of the expression result. Use this to disambiguate fields before
* joining to operations.
*
* Example:
*
* {{{
* tab.as('a, 'b)
* }}}
*/
def as(fields: Expression*): Table = {
new Table(tableEnv, AliasNode(fields, logicalPlan).validate(tableEnv))
}
/**
 * Renames the fields of the expression result. Use this to disambiguate fields before
 * joining to operations.
 *
 * Example:
 *
 * {{{
 *   tab.as("a, b")
 * }}}
 */
def as(fields: String): Table =
  as(ExpressionParser.parseExpressionList(fields): _*)
/**
 * Filters out elements that don't pass the filter predicate. Similar to a SQL WHERE
 * clause.
 *
 * Example:
 *
 * {{{
 *   tab.filter('name === "Fred")
 * }}}
 */
def filter(predicate: Expression): Table = {
  val filtered = Filter(predicate, logicalPlan).validate(tableEnv)
  new Table(tableEnv, filtered)
}

/**
 * Filters out elements that don't pass the filter predicate. Similar to a SQL WHERE
 * clause.
 *
 * Example:
 *
 * {{{
 *   tab.filter("name = 'Fred'")
 * }}}
 */
def filter(predicate: String): Table =
  filter(ExpressionParser.parseExpression(predicate))
/**
 * Filters out elements that don't pass the filter predicate. Similar to a SQL WHERE
 * clause. Alias for [[filter]].
 *
 * Example:
 *
 * {{{
 *   tab.where('name === "Fred")
 * }}}
 */
def where(predicate: Expression): Table = filter(predicate)

/**
 * Filters out elements that don't pass the filter predicate. Similar to a SQL WHERE
 * clause. Alias for [[filter]].
 *
 * Example:
 *
 * {{{
 *   tab.where("name = 'Fred'")
 * }}}
 */
def where(predicate: String): Table = filter(predicate)
/**
 * Groups the elements on some grouping keys. Use this before a selection with aggregations
 * to perform the aggregation on a per-group basis. Similar to a SQL GROUP BY statement.
 *
 * Example:
 *
 * {{{
 *   tab.groupBy('key).select('key, 'value.avg)
 * }}}
 */
def groupBy(fields: Expression*): GroupedTable = new GroupedTable(this, fields)

/**
 * Groups the elements on some grouping keys. Use this before a selection with aggregations
 * to perform the aggregation on a per-group basis. Similar to a SQL GROUP BY statement.
 *
 * Example:
 *
 * {{{
 *   tab.groupBy("key").select("key, value.avg")
 * }}}
 */
def groupBy(fields: String): GroupedTable =
  groupBy(ExpressionParser.parseExpressionList(fields): _*)
/**
 * Removes duplicate values and returns only distinct (different) values.
 *
 * Example:
 *
 * {{{
 *   tab.select("key, value").distinct()
 * }}}
 */
def distinct(): Table = {
  val deduplicated = Distinct(logicalPlan).validate(tableEnv)
  new Table(tableEnv, deduplicated)
}
/**
 * Joins two [[Table]]s. Similar to an SQL join. The fields of the two joined
 * operations must not overlap, use [[as]] to rename fields if necessary. You can use
 * where and select clauses after a join to further specify the behaviour of the join.
 *
 * Note: Both tables must be bound to the same [[TableEnvironment]].
 *
 * Example:
 *
 * {{{
 *   left.join(right).where('a === 'b && 'c > 3).select('a, 'b, 'd)
 * }}}
 */
def join(right: Table): Table = join(right, None, JoinType.INNER)

/**
 * Joins two [[Table]]s on a string join predicate. Similar to an SQL join. The fields of
 * the two joined operations must not overlap, use [[as]] to rename fields if necessary.
 *
 * Note: Both tables must be bound to the same [[TableEnvironment]].
 *
 * Example:
 *
 * {{{
 *   left.join(right, "a = b")
 * }}}
 */
def join(right: Table, joinPredicate: String): Table =
  join(right, joinPredicate, JoinType.INNER)

/**
 * Joins two [[Table]]s on an expression join predicate. Similar to an SQL join. The fields
 * of the two joined operations must not overlap, use [[as]] to rename fields if necessary.
 *
 * Note: Both tables must be bound to the same [[TableEnvironment]].
 *
 * Example:
 *
 * {{{
 *   left.join(right, 'a === 'b).select('a, 'b, 'd)
 * }}}
 */
def join(right: Table, joinPredicate: Expression): Table =
  join(right, Some(joinPredicate), JoinType.INNER)
/**
 * Joins two [[Table]]s. Similar to an SQL left outer join. The fields of the two joined
 * operations must not overlap, use [[as]] to rename fields if necessary.
 *
 * Note: Both tables must be bound to the same [[TableEnvironment]] and its [[TableConfig]]
 * must have nullCheck enabled.
 *
 * Example:
 *
 * {{{
 *   left.leftOuterJoin(right, "a = b").select('a, 'b, 'd)
 * }}}
 */
def leftOuterJoin(right: Table, joinPredicate: String): Table =
  join(right, joinPredicate, JoinType.LEFT_OUTER)

/**
 * Joins two [[Table]]s. Similar to an SQL left outer join. The fields of the two joined
 * operations must not overlap, use [[as]] to rename fields if necessary.
 *
 * Note: Both tables must be bound to the same [[TableEnvironment]] and its [[TableConfig]]
 * must have nullCheck enabled.
 *
 * Example:
 *
 * {{{
 *   left.leftOuterJoin(right, 'a === 'b).select('a, 'b, 'd)
 * }}}
 */
def leftOuterJoin(right: Table, joinPredicate: Expression): Table =
  join(right, Some(joinPredicate), JoinType.LEFT_OUTER)
/**
 * Joins two [[Table]]s. Similar to an SQL right outer join. The fields of the two joined
 * operations must not overlap, use [[as]] to rename fields if necessary.
 *
 * Note: Both tables must be bound to the same [[TableEnvironment]] and its [[TableConfig]]
 * must have nullCheck enabled.
 *
 * Example:
 *
 * {{{
 *   left.rightOuterJoin(right, "a = b").select('a, 'b, 'd)
 * }}}
 */
def rightOuterJoin(right: Table, joinPredicate: String): Table =
  join(right, joinPredicate, JoinType.RIGHT_OUTER)

/**
 * Joins two [[Table]]s. Similar to an SQL right outer join. The fields of the two joined
 * operations must not overlap, use [[as]] to rename fields if necessary.
 *
 * Note: Both tables must be bound to the same [[TableEnvironment]] and its [[TableConfig]]
 * must have nullCheck enabled.
 *
 * Example:
 *
 * {{{
 *   left.rightOuterJoin(right, 'a === 'b).select('a, 'b, 'd)
 * }}}
 */
def rightOuterJoin(right: Table, joinPredicate: Expression): Table =
  join(right, Some(joinPredicate), JoinType.RIGHT_OUTER)
/**
 * Joins two [[Table]]s. Similar to an SQL full outer join. The fields of the two joined
 * operations must not overlap, use [[as]] to rename fields if necessary.
 *
 * Note: Both tables must be bound to the same [[TableEnvironment]] and its [[TableConfig]]
 * must have nullCheck enabled.
 *
 * Example:
 *
 * {{{
 *   left.fullOuterJoin(right, "a = b").select('a, 'b, 'd)
 * }}}
 */
def fullOuterJoin(right: Table, joinPredicate: String): Table =
  join(right, joinPredicate, JoinType.FULL_OUTER)

/**
 * Joins two [[Table]]s. Similar to an SQL full outer join. The fields of the two joined
 * operations must not overlap, use [[as]] to rename fields if necessary.
 *
 * Note: Both tables must be bound to the same [[TableEnvironment]] and its [[TableConfig]]
 * must have nullCheck enabled.
 *
 * Example:
 *
 * {{{
 *   left.fullOuterJoin(right, 'a === 'b).select('a, 'b, 'd)
 * }}}
 */
def fullOuterJoin(right: Table, joinPredicate: Expression): Table =
  join(right, Some(joinPredicate), JoinType.FULL_OUTER)
/** Parses the string predicate and delegates to the expression-based join. */
private def join(right: Table, joinPredicate: String, joinType: JoinType): Table =
  join(right, Some(ExpressionParser.parseExpression(joinPredicate)), joinType)

/** Builds and validates the logical join node for all public join variants. */
private def join(right: Table, joinPredicate: Option[Expression], joinType: JoinType): Table = {
  // Tables from different environments cannot be translated into a single plan.
  if (this.tableEnv != right.tableEnv) {
    throw new ValidationException("Only tables from the same TableEnvironment can be joined.")
  }
  val joinNode =
    Join(this.logicalPlan, right.logicalPlan, joinType, joinPredicate, correlated = false)
  new Table(tableEnv, joinNode.validate(tableEnv))
}
/**
 * Minus of two [[Table]]s with duplicate records removed.
 * Similar to a SQL EXCEPT clause. Minus returns records from the left table that do not
 * exist in the right table. Duplicate records in the left table are returned
 * exactly once, i.e., duplicates are removed. Both tables must have identical field types.
 *
 * Note: Both tables must be bound to the same [[TableEnvironment]].
 *
 * Example:
 *
 * {{{
 *   left.minus(right)
 * }}}
 */
def minus(right: Table): Table = {
  // Tables from different environments cannot be combined in one plan.
  if (this.tableEnv != right.tableEnv) {
    throw new ValidationException(
      "Only tables from the same TableEnvironment can be subtracted.")
  }
  new Table(tableEnv,
    Minus(logicalPlan, right.logicalPlan, all = false).validate(tableEnv))
}

/**
 * Minus of two [[Table]]s. Similar to a SQL EXCEPT ALL clause. MinusAll returns the records
 * that do not exist in the right table. A record that is present n times in the left table
 * and m times in the right table is returned (n - m) times, i.e., as many duplicates as are
 * present in the right table are removed. Both tables must have identical field types.
 *
 * Note: Both tables must be bound to the same [[TableEnvironment]].
 *
 * Example:
 *
 * {{{
 *   left.minusAll(right)
 * }}}
 */
def minusAll(right: Table): Table = {
  // Tables from different environments cannot be combined in one plan.
  if (this.tableEnv != right.tableEnv) {
    throw new ValidationException(
      "Only tables from the same TableEnvironment can be subtracted.")
  }
  new Table(tableEnv,
    Minus(logicalPlan, right.logicalPlan, all = true).validate(tableEnv))
}
/**
 * Unions two [[Table]]s with duplicate records removed.
 * Similar to an SQL UNION. The fields of the two union operations must fully overlap.
 *
 * Note: Both tables must be bound to the same [[TableEnvironment]].
 *
 * Example:
 *
 * {{{
 *   left.union(right)
 * }}}
 */
def union(right: Table): Table = {
  // Tables from different environments cannot be combined in one plan.
  if (this.tableEnv != right.tableEnv) {
    throw new ValidationException("Only tables from the same TableEnvironment can be unioned.")
  }
  new Table(tableEnv,
    Union(logicalPlan, right.logicalPlan, all = false).validate(tableEnv))
}

/**
 * Unions two [[Table]]s. Similar to an SQL UNION ALL. The fields of the two union operations
 * must fully overlap.
 *
 * Note: Both tables must be bound to the same [[TableEnvironment]].
 *
 * Example:
 *
 * {{{
 *   left.unionAll(right)
 * }}}
 */
def unionAll(right: Table): Table = {
  // Tables from different environments cannot be combined in one plan.
  if (this.tableEnv != right.tableEnv) {
    throw new ValidationException("Only tables from the same TableEnvironment can be unioned.")
  }
  new Table(tableEnv,
    Union(logicalPlan, right.logicalPlan, all = true).validate(tableEnv))
}
/**
 * Intersects two [[Table]]s with duplicate records removed. Intersect returns records that
 * exist in both tables. If a record is present in one or both tables more than once, it is
 * returned just once, i.e., the resulting table has no duplicate records. Similar to an
 * SQL INTERSECT. The fields of the two intersect operations must fully overlap.
 *
 * Note: Both tables must be bound to the same [[TableEnvironment]].
 *
 * Example:
 *
 * {{{
 *   left.intersect(right)
 * }}}
 */
def intersect(right: Table): Table = {
  // Tables from different environments cannot be combined in one plan.
  if (this.tableEnv != right.tableEnv) {
    throw new ValidationException(
      "Only tables from the same TableEnvironment can be intersected.")
  }
  new Table(tableEnv,
    Intersect(logicalPlan, right.logicalPlan, all = false).validate(tableEnv))
}

/**
 * Intersects two [[Table]]s. IntersectAll returns records that exist in both tables.
 * If a record is present in both tables more than once, it is returned as many times as it
 * is present in both tables, i.e., the resulting table might have duplicate records. Similar
 * to an SQL INTERSECT ALL. The fields of the two intersect operations must fully overlap.
 *
 * Note: Both tables must be bound to the same [[TableEnvironment]].
 *
 * Example:
 *
 * {{{
 *   left.intersectAll(right)
 * }}}
 */
def intersectAll(right: Table): Table = {
  // Tables from different environments cannot be combined in one plan.
  if (this.tableEnv != right.tableEnv) {
    throw new ValidationException(
      "Only tables from the same TableEnvironment can be intersected.")
  }
  new Table(tableEnv,
    Intersect(logicalPlan, right.logicalPlan, all = true).validate(tableEnv))
}
/**
 * Sorts the given [[Table]]. Similar to SQL ORDER BY.
 * The resulting Table is globally sorted across all parallel partitions.
 *
 * Example:
 *
 * {{{
 *   tab.orderBy('name.desc)
 * }}}
 */
def orderBy(fields: Expression*): Table = {
  // Any field that is not already an ordering is sorted ascending by default.
  val orderings = fields.map {
    case ordering: Ordering => ordering
    case expr => Asc(expr)
  }
  new Table(tableEnv, Sort(orderings, logicalPlan).validate(tableEnv))
}

/**
 * Sorts the given [[Table]]. Similar to SQL ORDER BY.
 * The resulting Table is globally sorted across all parallel partitions.
 *
 * Example:
 *
 * {{{
 *   tab.orderBy("name.desc")
 * }}}
 */
def orderBy(fields: String): Table =
  orderBy(ExpressionParser.parseExpressionList(fields): _*)
/**
 * Limits a sorted result from an offset position.
 * Similar to a SQL LIMIT clause. Limit is technically part of the Order By operator and
 * thus must be preceded by it.
 *
 * Example:
 *
 * {{{
 *   // returns unlimited number of records beginning with the 4th record
 *   tab.orderBy('name.desc).limit(3)
 * }}}
 *
 * @param offset number of records to skip
 */
def limit(offset: Int): Table = {
  val limited = Limit(offset = offset, child = logicalPlan).validate(tableEnv)
  new Table(tableEnv, limited)
}

/**
 * Limits a sorted result to a specified number of records from an offset position.
 * Similar to a SQL LIMIT clause. Limit is technically part of the Order By operator and
 * thus must be preceded by it.
 *
 * Example:
 *
 * {{{
 *   // returns 5 records beginning with the 4th record
 *   tab.orderBy('name.desc).limit(3, 5)
 * }}}
 *
 * @param offset number of records to skip
 * @param fetch  number of records to be returned
 */
def limit(offset: Int, fetch: Int): Table = {
  val limited = Limit(offset, fetch, logicalPlan).validate(tableEnv)
  new Table(tableEnv, limited)
}
/**
 * Joins this [[Table]] to a user-defined [[org.apache.calcite.schema.TableFunction]]. Similar
 * to an SQL cross join, but it works with a table function. It returns rows from the outer
 * table (table on the left of the operator) that produces matching values from the table
 * function (which is defined in the expression on the right side of the operator).
 *
 * Example:
 *
 * {{{
 *   class MySplitUDTF extends TableFunction[String] {
 *     def eval(str: String): Unit = {
 *       str.split("#").foreach(collect)
 *     }
 *   }
 *
 *   val split = new MySplitUDTF()
 *   table.join(split('c) as ('s)).select('a,'b,'c,'s)
 * }}}
 */
def join(udtf: Expression): Table = joinUdtfInternal(udtf, JoinType.INNER)

/**
 * Joins this [[Table]] to a user-defined [[org.apache.calcite.schema.TableFunction]]. Similar
 * to an SQL cross join, but it works with a table function. It returns rows from the outer
 * table (table on the left of the operator) that produces matching values from the table
 * function (which is defined in the expression on the right side of the operator).
 *
 * Example:
 *
 * {{{
 *   class MySplitUDTF extends TableFunction<String> {
 *     public void eval(String str) {
 *       str.split("#").forEach(this::collect);
 *     }
 *   }
 *
 *   TableFunction<String> split = new MySplitUDTF();
 *   tableEnv.registerFunction("split", split);
 *
 *   table.join("split(c) as (s)").select("a, b, c, s");
 * }}}
 */
def join(udtf: String): Table = joinUdtfInternal(udtf, JoinType.INNER)

/**
 * Joins this [[Table]] to a user-defined [[org.apache.calcite.schema.TableFunction]]. Similar
 * to an SQL left outer join with ON TRUE, but it works with a table function. It returns all
 * the rows from the outer table (table on the left of the operator), and rows that do not
 * match the condition from the table function (which is defined in the expression on the
 * right side of the operator). Rows with no matching condition are filled with null values.
 *
 * Example:
 *
 * {{{
 *   class MySplitUDTF extends TableFunction[String] {
 *     def eval(str: String): Unit = {
 *       str.split("#").foreach(collect)
 *     }
 *   }
 *
 *   val split = new MySplitUDTF()
 *   table.leftOuterJoin(split('c) as ('s)).select('a,'b,'c,'s)
 * }}}
 */
def leftOuterJoin(udtf: Expression): Table = joinUdtfInternal(udtf, JoinType.LEFT_OUTER)

/**
 * Joins this [[Table]] to a user-defined [[org.apache.calcite.schema.TableFunction]]. Similar
 * to an SQL left outer join with ON TRUE, but it works with a table function. It returns all
 * the rows from the outer table (table on the left of the operator), and rows that do not
 * match the condition from the table function (which is defined in the expression on the
 * right side of the operator). Rows with no matching condition are filled with null values.
 *
 * Example:
 *
 * {{{
 *   class MySplitUDTF extends TableFunction<String> {
 *     public void eval(String str) {
 *       str.split("#").forEach(this::collect);
 *     }
 *   }
 *
 *   TableFunction<String> split = new MySplitUDTF();
 *   tableEnv.registerFunction("split", split);
 *
 *   table.leftOuterJoin("split(c) as (s)").select("a, b, c, s");
 * }}}
 */
def leftOuterJoin(udtf: String): Table = joinUdtfInternal(udtf, JoinType.LEFT_OUTER)
/** Parses the UDTF call string and delegates to the expression-based variant. */
private def joinUdtfInternal(udtfString: String, joinType: JoinType): Table =
  joinUdtfInternal(ExpressionParser.parseExpression(udtfString), joinType)
/**
 * Joins this table with a table-function expression by building a correlated logical join.
 * Unwraps the given expression down to a [[TableFunctionCall]], capturing any alias names
 * found along the way.
 */
private def joinUdtfInternal(udtf: Expression, joinType: JoinType): Table = {
  // Field name(s) captured from a surrounding `as` alias, e.g. `split('c) as ('s)`.
  var alias: Option[Seq[String]] = None
  // unwrap an Expression until we get a TableFunctionCall
  def unwrap(expr: Expression): TableFunctionCall = expr match {
    case Alias(child, name, extraNames) =>
      // Record the alias (plus any extra field names) and keep unwrapping.
      alias = Some(Seq(name) ++ extraNames)
      unwrap(child)
    case Call(name, args) =>
      // Resolve the named call against the function catalog and unwrap the result.
      val function = tableEnv.getFunctionCatalog.lookupFunction(name, args)
      unwrap(function)
    case c: TableFunctionCall => c
    case _ =>
      throw new TableException(
        "Cross/Outer Apply operators only accept expressions that define table functions.")
  }
  // Build a validated logical table-function call and join it (correlated) to this table.
  val call = unwrap(udtf)
    .as(alias)
    .toLogicalTableFunctionCall(this.logicalPlan)
    .validate(tableEnv)
  new Table(
    tableEnv,
    Join(this.logicalPlan, call, joinType, None, correlated = true).validate(tableEnv))
}
/**
 * Writes the [[Table]] to a [[TableSink]]. A [[TableSink]] defines an external storage
 * location.
 *
 * A batch [[Table]] can only be written to a
 * [[org.apache.flink.table.sinks.BatchTableSink]], a streaming [[Table]] requires a
 * [[org.apache.flink.table.sinks.StreamTableSink]].
 *
 * @param sink The [[TableSink]] to which the [[Table]] is written.
 * @tparam T The data type that the [[TableSink]] expects.
 */
def writeToSink[T](sink: TableSink[T]): Unit = {
  // Derive the table's field names and types from its logical row type.
  val rowType = getRelNode.getRowType
  val names: Array[String] = rowType.getFieldNames.asScala.toArray
  val types: Array[TypeInformation[_]] = rowType.getFieldList.asScala
    .map(field => FlinkTypeFactory.toTypeInfo(field.getType))
    .toArray
  // Let the sink adapt itself to this schema, then hand the pair to the environment.
  val configuredSink = sink.configure(names, types)
  tableEnv.writeToSink(this, configuredSink)
}
/**
 * Groups the records of a table by assigning them to windows defined by a time or row
 * interval.
 *
 * For streaming tables of infinite size, grouping into windows is required to define finite
 * groups on which group-based aggregates can be computed.
 *
 * For batch tables of finite size, windowing essentially provides shortcuts for time-based
 * groupBy.
 *
 * __Note__: Computing windowed aggregates on a streaming table is only a parallel operation
 * if additional grouping attributes are added to the `groupBy(...)` clause.
 * If the `groupBy(...)` only references a window alias, the streamed table will be processed
 * by a single task, i.e., with parallelism 1.
 *
 * @param window window that specifies how elements are grouped.
 * @return A windowed table.
 */
def window(window: Window): WindowedTable = new WindowedTable(this, window)
/**
 * Defines over-windows on the records of a table.
 *
 * An over-window defines for each record an interval of records over which aggregation
 * functions can be computed.
 *
 * Example:
 *
 * {{{
 *   table
 *     .window(Over partitionBy 'c orderBy 'rowTime preceding 10.seconds as 'ow)
 *     .select('c, 'b.count over 'ow, 'e.sum over 'ow)
 * }}}
 *
 * __Note__: Computing over window aggregates on a streaming table is only a parallel
 * operation if the window is partitioned. Otherwise, the whole stream will be processed by a
 * single task, i.e., with parallelism 1.
 *
 * __Note__: Over-windows for batch tables are currently not supported.
 *
 * @param overWindows windows that specify the record interval over which aggregations are
 *                    computed.
 * @return An OverWindowedTable to specify the aggregations.
 */
@varargs
def window(overWindows: OverWindow*): OverWindowedTable = {
  if (tableEnv.isInstanceOf[BatchTableEnvironment]) {
    // Fixed message typo: was "not supported.." (double period).
    throw TableException("Over-windows for batch tables are currently not supported.")
  }
  if (overWindows.size != 1) {
    // Fixed ungrammatical message: was "are currently only supported single window."
    throw TableException("Over-windows are currently only supported with a single window.")
  }
  new OverWindowedTable(this, overWindows.toArray)
}
// Unique name under which this table is registered in its TableEnvironment;
// stays null until toString is first invoked.
var tableName: String = _
/**
 * Registers a unique table name under the table environment
 * and returns the registered table name.
 */
override def toString: String = {
  // NOTE(review): toString has a side effect on first use — it registers this table
  // in the TableEnvironment under a generated unique name.
  if (tableName == null) {
    tableName = "UnnamedTable$" + tableEnv.attrNameCntr.getAndIncrement()
    tableEnv.registerTable(tableName, this)
  }
  tableName
}
}
/**
 * A table that has been grouped on a set of grouping keys.
 */
class GroupedTable(
    private[flink] val table: Table,
    private[flink] val groupKey: Seq[Expression]) {

  /**
   * Performs a selection operation on a grouped table. Similar to an SQL SELECT statement.
   * The field expressions can contain complex expressions and aggregations.
   *
   * Example:
   *
   * {{{
   *   tab.groupBy('key).select('key, 'value.avg + " The average" as 'average)
   * }}}
   */
  def select(fields: Expression*): Table = {
    val (aggregations, windowProperties) =
      extractAggregationsAndProperties(fields, table.tableEnv)
    // Window properties are only meaningful after a window(...) clause.
    if (windowProperties.nonEmpty) {
      throw ValidationException("Window properties can only be used on windowed tables.")
    }
    val outputProjection = replaceAggregationsAndProperties(
      fields, table.tableEnv, aggregations, windowProperties)
    // The aggregate input must expose the referenced fields and the grouping keys.
    val inputFields = extractFieldReferences(fields ++ groupKey)
    val aggregated = Aggregate(
      groupKey,
      aggregations.map { case (agg, name) => Alias(agg, name) }.toSeq,
      Project(inputFields, table.logicalPlan).validate(table.tableEnv)
    ).validate(table.tableEnv)
    new Table(table.tableEnv,
      Project(outputProjection, aggregated).validate(table.tableEnv))
  }

  /**
   * Performs a selection operation on a grouped table. Similar to an SQL SELECT statement.
   * The field expressions can contain complex expressions and aggregations.
   *
   * Example:
   *
   * {{{
   *   tab.groupBy("key").select("key, value.avg + ' The average' as average")
   * }}}
   */
  def select(fields: String): Table =
    select(ExpressionParser.parseExpressionList(fields): _*)
}
class WindowedTable(
    private[flink] val table: Table,
    private[flink] val window: Window) {

  /**
   * Groups the elements by a mandatory window and one or more optional grouping attributes.
   * The window is specified by referring to its alias.
   *
   * If no additional grouping attribute is specified and if the input is a streaming table,
   * the aggregation will be performed by a single task, i.e., with parallelism 1.
   *
   * Aggregations are performed per group and defined by a subsequent `select(...)` clause
   * similar to a SQL SELECT-GROUP-BY query.
   *
   * Example:
   *
   * {{{
   *   tab.window([window] as 'w)).groupBy('w, 'key).select('key, 'value.avg)
   * }}}
   */
  def groupBy(fields: Expression*): WindowGroupedTable = {
    // Split the grouping expressions into references to the window alias and real keys.
    val (windowRefs, groupKeys) = fields.partition(window.alias.equals(_))
    if (windowRefs.size != 1) {
      throw new ValidationException("GroupBy must contain exactly one window alias.")
    }
    new WindowGroupedTable(table, groupKeys, window)
  }

  /**
   * Groups the elements by a mandatory window and one or more optional grouping attributes.
   * The window is specified by referring to its alias.
   *
   * If no additional grouping attribute is specified and if the input is a streaming table,
   * the aggregation will be performed by a single task, i.e., with parallelism 1.
   *
   * Aggregations are performed per group and defined by a subsequent `select(...)` clause
   * similar to a SQL SELECT-GROUP-BY query.
   *
   * Example:
   *
   * {{{
   *   tab.window([window].as("w")).groupBy("w, key").select("key, value.avg")
   * }}}
   */
  def groupBy(fields: String): WindowGroupedTable =
    groupBy(ExpressionParser.parseExpressionList(fields): _*)
}
class OverWindowedTable(
    private[flink] val table: Table,
    private[flink] val overWindows: Array[OverWindow]) {

  /** Performs a selection with over-window aggregations, e.g. `'b.count over 'ow`. */
  def select(fields: Expression*): Table = {
    val expanded = expandProjectList(fields, table.logicalPlan, table.tableEnv)
    // Resolve every `agg over 'alias` expression against the defined over-windows.
    val resolved = resolveOverWindows(expanded, overWindows, table.tableEnv)
    new Table(
      table.tableEnv,
      Project(resolved.map(UnresolvedAlias), table.logicalPlan).validate(table.tableEnv))
  }

  /** String variant of [[select]]; parses the expression list and delegates. */
  def select(fields: String): Table =
    select(ExpressionParser.parseExpressionList(fields): _*)
}
class WindowGroupedTable(
    private[flink] val table: Table,
    private[flink] val groupKeys: Seq[Expression],
    private[flink] val window: Window) {

  /**
   * Performs a selection operation on a window grouped table. Similar to an SQL SELECT
   * statement. The field expressions can contain complex expressions and aggregations.
   *
   * Example:
   *
   * {{{
   *   windowGroupedTable.select('key, 'window.start, 'value.avg as 'valavg)
   * }}}
   */
  def select(fields: Expression*): Table = {
    val (aggregations, windowProperties) =
      extractAggregationsAndProperties(fields, table.tableEnv)
    val outputProjection = replaceAggregationsAndProperties(
      fields, table.tableEnv, aggregations, windowProperties)
    // In a batch environment the event-time attribute can be an arbitrary input field,
    // so it must also be part of the aggregate's input projection.
    val inputFields = (table.tableEnv, window) match {
      case (_: BatchTableEnvironment, w: EventTimeWindow) =>
        extractFieldReferences(fields ++ groupKeys ++ Seq(w.timeField))
      case _ =>
        extractFieldReferences(fields ++ groupKeys)
    }
    val windowAggregate = WindowAggregate(
      groupKeys,
      window.toLogicalWindow,
      windowProperties.map { case (prop, name) => Alias(prop, name) }.toSeq,
      aggregations.map { case (agg, name) => Alias(agg, name) }.toSeq,
      Project(inputFields, table.logicalPlan).validate(table.tableEnv)
    ).validate(table.tableEnv)
    new Table(table.tableEnv,
      Project(outputProjection, windowAggregate).validate(table.tableEnv))
  }

  /**
   * Performs a selection operation on a window grouped table. Similar to an SQL SELECT
   * statement. The field expressions can contain complex expressions and aggregations.
   *
   * Example:
   *
   * {{{
   *   windowGroupedTable.select("key, window.start, value.avg as valavg")
   * }}}
   */
  def select(fields: String): Table =
    select(ExpressionParser.parseExpressionList(fields): _*)
}
| hwstreaming/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala | Scala | apache-2.0 | 34,642 |
package pl.touk.nussknacker.engine.avro.sink.flink
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient
import org.apache.avro.{AvroRuntimeException, Schema}
import org.scalatest.BeforeAndAfter
import pl.touk.nussknacker.engine.avro.KafkaAvroIntegrationMockSchemaRegistry.schemaRegistryMockClient
import pl.touk.nussknacker.engine.avro.encode.{BestEffortAvroEncoder, ValidationMode}
import pl.touk.nussknacker.engine.avro.helpers.KafkaAvroSpecMixin
import pl.touk.nussknacker.engine.avro.schema.TestSchemaWithRecord
import pl.touk.nussknacker.engine.avro.schemaregistry.confluent.ConfluentSchemaRegistryProvider
import pl.touk.nussknacker.engine.avro.schemaregistry.confluent.client.{ConfluentSchemaRegistryClientFactory, MockConfluentSchemaRegistryClientFactory}
import pl.touk.nussknacker.engine.avro.schemaregistry.{ExistingSchemaVersion, SchemaRegistryProvider}
import pl.touk.nussknacker.engine.avro.{AvroUtils, KafkaAvroTestProcessConfigCreator}
import pl.touk.nussknacker.engine.graph.expression
import pl.touk.nussknacker.engine.process.compiler.FlinkProcessCompiler
import pl.touk.nussknacker.engine.process.registrar.FlinkProcessRegistrar
import pl.touk.nussknacker.engine.spel.Implicits.asSpelExpression
import pl.touk.nussknacker.engine.testing.LocalModelData
/** Test fixtures: a strict Avro encoder, a record schema with union/array/nested fields,
  * and the set of schemas registered as topics by the companion test class. */
private object KafkaAvroSinkFactoryWithEditorIntegrationTest {
  // Strict validation: encoding fails instead of coercing mismatched data.
  val avroEncoder = BestEffortAvroEncoder(ValidationMode.strict)
  // Encodes `a` against `schema`; accumulated validation errors are collapsed into a
  // single AvroRuntimeException with comma-joined messages.
  def encode(a: Any, schema: Schema): AnyRef =
    avroEncoder.encode(a, schema)
      .valueOr(es => throw new AvroRuntimeException(es.toList.mkString(",")))
  // Record fixture exercising a union-typed field ("amount"), an array field ("arr")
  // and a nested record ("nested").
  object MyRecord extends TestSchemaWithRecord {
    override val stringSchema: String =
      s"""
      |{
      | "type": "record",
      | "name": "MyRecord",
      | "fields": [
      | {
      | "name": "id",
      | "type": "string"
      | },
      | { "name": "arr", "type": { "type": "array", "items": "long" } },
      | {
      | "name": "amount",
      | "type": ["double", "string"]
      | },
      | {
      | "name": "nested",
      | "type": {
      | "type": "record",
      | "name": "nested",
      | "fields": [
      | {
      | "name": "id",
      | "type": "string"
      | }
      | ]
      | }
      | }
      | ]
      |}
      """.stripMargin
    // SpEL expressions used to populate the sink's per-field editor parameters.
    val toSampleParams: List[(String, expression.Expression)] = List(
      "id" -> "'record1'",
      "amount" -> "20.0",
      "arr" -> "{1L}",
      "nested.id" -> "'nested_record1'"
    )
    // Plain-value counterpart of toSampleParams, used to build the expected record.
    override def exampleData: Map[String, Any] = Map(
      "id" -> "record1",
      "amount" -> 20.0,
      "arr" -> List(1L),
      "nested" -> Map(
        "id" -> "nested_record1"
      )
    )
  }
  // One entry per test topic: record, bare primitive, and top-level array.
  val topicSchemas = Map(
    "record" -> MyRecord.schema,
    "long" -> AvroUtils.parseSchema("""{"type": "long"}"""),
    "array" -> AvroUtils.parseSchema("""{"type": "array", "items": "long"}""")
  )
}
/** Integration tests for the editor-based Kafka Avro sink against a mocked schema registry. */
class KafkaAvroSinkFactoryWithEditorIntegrationTest extends KafkaAvroSpecMixin with BeforeAndAfter {
  import KafkaAvroSinkFactoryWithEditorIntegrationTest._
  // Topic name -> registered topic configuration; populated once in beforeAll.
  private var topicConfigs: Map[String, TopicConfig] = Map.empty
  // Process config creator backed by the shared mock schema registry client.
  private lazy val processConfigCreator: KafkaAvroTestProcessConfigCreator = new KafkaAvroTestProcessConfigCreator {
    override protected def createSchemaRegistryProvider: SchemaRegistryProvider =
      ConfluentSchemaRegistryProvider(new MockConfluentSchemaRegistryClientFactory(schemaRegistryMockClient))
  }
  override protected def schemaRegistryClient: SchemaRegistryClient = schemaRegistryMockClient
  override protected def confluentClientFactory: ConfluentSchemaRegistryClientFactory = new MockConfluentSchemaRegistryClientFactory(schemaRegistryMockClient)
  override protected def beforeAll(): Unit = {
    super.beforeAll()
    val modelData = LocalModelData(config, processConfigCreator)
    registrar = FlinkProcessRegistrar(new FlinkProcessCompiler(modelData), executionConfigPreparerChain(modelData))
    // Create and register one Kafka topic per schema under test.
    topicSchemas.foreach { case (topicName, schema) =>
      topicConfigs = topicConfigs + (topicName -> createAndRegisterTopicConfig(topicName, schema))
    }
  }
  // Round-trips a full record through source and editor-based sink.
  test("record") {
    val topicConfig = topicConfigs("record")
    val sourceParam = SourceAvroParam.forGeneric(topicConfig, ExistingSchemaVersion(1))
    val sinkParam = SinkAvroParam(topic = topicConfig.output, versionOption = ExistingSchemaVersion(1),
      valueParams = MyRecord.toSampleParams, key = "", validationMode = None, sinkId = "kafka-avro")
    val process = createAvroProcess(sourceParam, sinkParam)
    runAndVerifyResult(process, topicConfig, event = MyRecord.record, expected = MyRecord.record)
  }
  // A bare long at the top level of the schema is supported.
  test("primitive at top level") {
    val topicConfig = topicConfigs("long")
    val sourceParam = SourceAvroParam.forGeneric(topicConfig, ExistingSchemaVersion(1))
    val sinkParam = SinkAvroParam(topicConfig, ExistingSchemaVersion(1), "42L", validationMode = None).copy(sinkId = "kafka-avro")
    val process = createAvroProcess(sourceParam, sinkParam)
    val encoded = encode(42L, topicSchemas("long"))
    runAndVerifyResult(process, topicConfig, event = encoded, expected = encoded)
  }
  // A top-level array schema must be rejected at compilation time.
  test("array at top level") {
    val topicConfig = topicConfigs("array")
    val sourceParam = SourceAvroParam.forGeneric(topicConfig, ExistingSchemaVersion(1))
    val sinkParam = SinkAvroParam(topicConfig, ExistingSchemaVersion(1), "{42L}").copy(sinkId = "kafka-avro")
    val process = createAvroProcess(sourceParam, sinkParam)
    val thrown = intercept[IllegalArgumentException] {
      runAndVerifyResult(process, topicConfig, event = null, expected = null)
    }
    thrown.getMessage shouldBe "Compilation errors: CustomNodeError(end,Unsupported Avro type. Top level Arrays are not supported,None)"
  }
}
| TouK/nussknacker | engine/flink/avro-components-utils/src/test/scala/pl/touk/nussknacker/engine/avro/sink/flink/KafkaAvroSinkFactoryWithEditorIntegrationTest.scala | Scala | apache-2.0 | 5,909 |
package magnolia1.examples
import magnolia1.*
/** Type class witnessing equality for values of type `T`. */
trait Eq[T]:
  def equal(value: T, value2: T): Boolean
/** Magnolia-derived [[Eq]] instances plus hand-written base instances. */
object Eq extends AutoDerivation[Eq]:
  // Product types: equal when every field compares equal pairwise.
  def join[T](ctx: CaseClass[Eq, T]): Eq[T] = (v1, v2) =>
    ctx.params.forall { p => p.typeclass.equal(p.deref(v1), p.deref(v2)) }

  // Sum types: dispatch on v1's subtype and compare within it.
  // NOTE(review): `sub.cast(v2)` assumes v2 is the same subtype as v1 —
  // confirm magnolia's `cast` semantics when the subtypes differ.
  override def split[T](ctx: SealedTrait[Eq, T]): Eq[T] = (v1, v2) =>
    ctx.choose(v1) { sub =>
      sub.typeclass.equal(sub.value, sub.cast(v2))
    }

  given Eq[String] = _ == _
  given Eq[Int] = _ == _

  // Options: equal when both empty, or both defined with equal contents.
  given [T: Eq]: Eq[Option[T]] =
    case (Some(v1), Some(v2)) => summon[Eq[T]].equal(v1, v2)
    case (None, None) => true
    case _ => false

  // Iterables: equal when sizes match and elements compare equal positionally.
  given [T: Eq, C[x] <: Iterable[x]]: Eq[C[T]] = (v1, v2) =>
    v1.size == v2.size && (v1.iterator zip v2.iterator).forall(
      (summon[Eq[T]].equal).tupled
    )
| propensive/magnolia | src/examples/eq.scala | Scala | apache-2.0 | 824 |
// Per-method stats holder: request/success/failure counters plus a scope for
// per-exception-class failure counters, keyed by the wire-level method name.
private[this] object {{__stats_name}} {
  val RequestsCounter = scopedStats.scope("{{clientFuncNameForWire}}").counter("requests")
  val SuccessCounter = scopedStats.scope("{{clientFuncNameForWire}}").counter("success")
  val FailuresCounter = scopedStats.scope("{{clientFuncNameForWire}}").counter("failures")
  val FailuresScope = scopedStats.scope("{{clientFuncNameForWire}}").scope("failures")
}
{{#functionInfo}}
{{>function}} = {
  {{__stats_name}}.RequestsCounter.incr()
  val inputArgs = {{funcObjectName}}.Args({{argNames}})
  // Decodes the raw wire response into either the successful reply value or a
  // thrift-declared exception (surfaced as a Throw).
  val replyDeserializer: Array[Byte] => _root_.com.twitter.util.Try[{{typeName}}] =
    response => {
      val decodeResult: _root_.com.twitter.util.Try[{{funcObjectName}}.Result] =
        _root_.com.twitter.util.Try {
          decodeResponse(response, {{funcObjectName}}.Result)
        }
      decodeResult match {
        case t@_root_.com.twitter.util.Throw(_) =>
          t.cast[{{typeName}}]
        case _root_.com.twitter.util.Return(result) =>
          // First declared thrift exception present in the result struct, or null.
          val serviceException: Throwable =
            {{#hasThrows}}
            if (false)
              null // can never happen, but needed to open a block
            {{#throws}}
            else if (result.{{throwName}}.isDefined)
              setServiceName(result.{{throwName}}.get)
            {{/throws}}
            else
              null
            {{/hasThrows}}
            {{^hasThrows}}
            null
            {{/hasThrows}}
          {{#isVoid}}
          if (serviceException != null) _root_.com.twitter.util.Throw(serviceException)
          else _root_.com.twitter.util.Return.Unit
          {{/isVoid}}
          {{^isVoid}}
          if (result.success.isDefined)
            _root_.com.twitter.util.Return(result.success.get)
          else if (serviceException != null)
            _root_.com.twitter.util.Throw(serviceException)
          else
            _root_.com.twitter.util.Throw(missingResult("{{clientFuncNameForWire}}"))
          {{/isVoid}}
      }
    }
  // Install the request/deserializer pair in the local context so the
  // transport layer can decode the response for this specific call.
  val serdeCtx = new _root_.com.twitter.finagle.thrift.DeserializeCtx[{{typeName}}](inputArgs, replyDeserializer)
  _root_.com.twitter.finagle.context.Contexts.local.let(
    _root_.com.twitter.finagle.thrift.DeserializeCtx.Key,
    serdeCtx
  ) {
    val serialized = encodeRequest("{{clientFuncNameForWire}}", inputArgs)
    this.service(serialized).flatMap { response =>
      Future.const(serdeCtx.deserialize(response))
    }.respond { response =>
      // Classify the response (application-level, not just transport-level)
      // before bumping success/failure stats.
      val responseClass = responseClassifier.applyOrElse(
        ctfs.ReqRep(inputArgs, response),
        ctfs.ResponseClassifier.Default)
      responseClass match {
        case ctfs.ResponseClass.Successful(_) =>
          {{__stats_name}}.SuccessCounter.incr()
        case ctfs.ResponseClass.Failed(_) =>
          {{__stats_name}}.FailuresCounter.incr()
          response match {
            case Throw(ex) =>
              setServiceName(ex)
              {{__stats_name}}.FailuresScope.counter(Throwables.mkString(ex): _*).incr()
            case _ =>
          }
      }
    }
  }
}
{{/functionInfo}}
| thirstycrow/scrooge | scrooge-generator/src/main/resources/scalagen/finagleClientFunction.scala | Scala | apache-2.0 | 2,950 |
/*
* Copyright 2016 Frugal Mechanic (http://frugalmechanic.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fm.common.rich
import org.scalajs.dom.raw.Event
/** Zero-allocation enrichment wrapper for a DOM [[Event]]. */
final class RichEvent(val self: Event) extends AnyVal {
  // Intentionally empty: extension methods for Event can be added here.
}
| frugalmechanic/fm-common | js/src/main/scala/fm/common/rich/RichEvent.scala | Scala | apache-2.0 | 744 |
/*******************************************************************************
* Copyright 2017 Capital One Services, LLC and Bitwise, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package hydrograph.engine.spark.components.base
import org.apache.spark.sql._
/**
 * The Class StraightPullComponentBase.
 *
 * Base contract for Spark components in this engine; implementations build
 * their component and expose the resulting output DataFrames.
 *
 * @author Bitwise
 *
 */
abstract class StraightPullComponentBase {
  // Builds the component; returns output DataFrames keyed by output name
  // (presumably the out-socket id — TODO confirm against callers).
  def createComponent():Map[String,DataFrame]
}
| capitalone/Hydrograph | hydrograph.engine/hydrograph.engine.spark/src/main/scala/hydrograph/engine/spark/components/base/StraightPullComponentBase.scala | Scala | apache-2.0 | 1,020 |
package artie.instances
import artie.Util
import org.specs2.mutable.Specification
/** Integration spec for database-backed generators reading from an in-memory
  * H2 database running in MySQL compatibility mode.
  */
final class DatabaseGeneratorInstancesSpec extends Specification {

  // Bring generator and column-reader instances into scope the same way a user would.
  object autoimport extends DatabaseGeneratorInstances with DatabaseColumnReaderInstances
  import autoimport._

  val table = "db_table"

  // NOTE: the column list must not end with a trailing comma — MySQL-mode H2
  // rejects `i INT,)` as a syntax error, which would make table setup fail.
  val createTable =
    s"""CREATE TABLE $table (
        |  l BIGINT PRIMARY KEY,
        |  s VARCHAR(10),
        |  d DOUBLE,
        |  f FLOAT,
        |  i INT
        |)""".stripMargin

  val insertRows =
    s"INSERT INTO $table (l, s, d, f, i) VALUES (0, 'hello', 0.0, 1.0, 1)"

  val db = artie.h2("mem:test_db;DB_CLOSE_DELAY=-1;MODE=MySQL", "user", "pwd")

  val query = s"select * from $table"

  "DatabaseGenerator instances" >> {
    // One-time setup: create the table and seed the single test row.
    step {
      Util.prepare(db, Seq(createTable, insertRows))
    }

    "read single values" >> {
      dbSingleGen[Int].apply(query, db) === Seq(0)
      dbSingleGen[Long].apply(query, db) === Seq(0L)
      dbSingleGen[Float].apply(query, db) === Seq(0.0f)
      dbSingleGen[Double].apply(query, db) === Seq(0.0)
      dbSingleGen[String].apply(query, db) === Seq("0")
    }

    "read tuples of different sizes" >> {
      dbTuple2Gen[Long, String].apply(query, db) === Seq((0L, "hello"))
      dbTuple3Gen[Long, String, Double].apply(query, db) === Seq((0L, "hello", 0.0))
      dbTuple4Gen[Long, String, Double, Float].apply(query, db) === Seq((0L, "hello", 0.0, 1.0f))
      dbTuple5Gen[Long, String, Double, Float, Int].apply(query, db) === Seq((0L, "hello", 0.0, 1.0f, 1))
    }

    "read generic nested tuple n" >> {
      dbSingleGen[(Long, (String, Double))].apply(query, db) === Seq((0L, ("hello", 0.0)))
    }
  }
}
| pheymann/artie | core/src/it/scala/artie/instances/DatabaseGeneratorInstancesSpec.scala | Scala | mit | 1,665 |
package org.http4s
import java.time.{ZoneOffset, Instant}
import org.http4s.headers.{`Set-Cookie`, `Content-Type`}
import scalaz.\\/
import scalaz.concurrent.Task
trait MessageOps extends Any {
  type Self

  /** Keep only the headers that satisfy the predicate.
    *
    * Note: the predicate selects headers to *retain*; headers for which it
    * returns `false` are removed (cf. [[removeHeader]], which passes `isNot`).
    *
    * @param f predicate; headers for which it returns `false` are dropped
    * @return a new message object retaining only the matching headers
    */
  final def filterHeaders(f: Header => Boolean): Self =
    transformHeaders(_.filter(f))

  /** Generates a new message object with the specified key/value pair appended to the [[org.http4s.AttributeMap]]
    *
    * @param key [[AttributeKey]] with which to associate the value
    * @param value value associated with the key
    * @tparam A type of the value to store
    * @return a new message object with the key/value pair appended
    */
  def withAttribute[A](key: AttributeKey[A], value: A): Self

  /** Generates a new message object with the specified key/value pair appended to the [[org.http4s.AttributeMap]]
    *
    * @param entry [[AttributeEntry]] entry to add
    * @tparam V type of the value to store
    * @return a new message object with the key/value pair appended
    */
  def withAttribute[V](entry: AttributeEntry[V]): Self = withAttribute(entry.key, entry.value)

  def transformHeaders(f: Headers => Headers): Self

  /** Adds the [[org.http4s.headers.`Content-Type`]] header to the response */
  final def withType(t: MediaType): Self =
    putHeaders(`Content-Type`(t))

  /** Sets the `Content-Type` header when defined, removes it when `None`. */
  final def withContentType(contentType: Option[`Content-Type`]): Self =
    contentType match {
      case Some(t) => putHeaders(t)
      // Bug fix: previously this used `_.is(`Content-Type`)`, which *kept only*
      // the Content-Type header (filterHeaders retains matches). `None` must
      // remove the header, so filter on `isNot`.
      case None => filterHeaders(_.isNot(`Content-Type`))
    }

  final def removeHeader(key: HeaderKey): Self = filterHeaders(_ isNot key)

  /** Replaces the [[Header]]s of the incoming Request object
    *
    * @param headers [[Headers]] containing the desired headers
    * @return a new Request object
    */
  final def replaceAllHeaders(headers: Headers): Self =
    transformHeaders(_ => headers)

  /** Replace the existing headers with those provided */
  final def replaceAllHeaders(headers: Header*): Self =
    replaceAllHeaders(Headers(headers.toList))

  /** Add the provided headers to the existing headers, replacing those of the same header name
    * The passed headers are assumed to contain no duplicate Singleton headers.
    */
  final def putHeaders(headers: Header*): Self =
    transformHeaders(_.put(headers: _*))

  final def withTrailerHeaders(trailerHeaders: Task[Headers]): Self =
    withAttribute(Message.Keys.TrailerHeaders, trailerHeaders)

  /** Decode the [[Message]] to the specified type
    *
    * @param decoder [[EntityDecoder]] used to decode the [[Message]]
    * @tparam T type of the result
    * @return the `Task` which will generate the `DecodeResult[T]`
    */
  def attemptAs[T](implicit decoder: EntityDecoder[T]): DecodeResult[T]

  /** Decode the [[Message]] to the specified type
    *
    * If no valid [[Status]] has been described, allow Ok
    * @param decoder [[EntityDecoder]] used to decode the [[Message]]
    * @tparam T type of the result
    * @return the `Task` which will generate the T
    */
  final def as[T](implicit decoder: EntityDecoder[T]): Task[T] =
    attemptAs(decoder).fold(throw _, identity)
}
trait RequestOps extends Any with MessageOps {

  /** Replaces the path info of this request. */
  def withPathInfo(pi: String): Self

  /** Helper method for decoding [[Request]]s
    *
    * Attempt to decode the [[Request]] and, if successful, execute the continuation to get a [[Response]].
    * If decoding fails, a BadRequest [[Response]] is generated.
    */
  final def decode[A](f: A => Task[Response])(implicit decoder: EntityDecoder[A]): Task[Response] =
    decodeWith(decoder, strict = false)(f)

  /** Helper method for decoding [[Request]]s
    *
    * Attempt to decode the [[Request]] and, if successful, execute the continuation to get a [[Response]].
    * If decoding fails, a BadRequest [[Response]] is generated. If the decoder does not support the
    * [[MediaType]] of the [[Request]], a `UnsupportedMediaType` [[Response]] is generated instead.
    */
  final def decodeStrict[A](f: A => Task[Response])(implicit decoder: EntityDecoder[A]): Task[Response] =
    // Named argument for consistency with `decode` above.
    decodeWith(decoder, strict = true)(f)

  /** Like [[decode]], but with an explicit decoder.
    * @param strict If strict, will return a [[Status.UnsupportedMediaType]] http Response if this message's
    *               [[MediaType]] is not supported by the provided decoder
    */
  def decodeWith[A](decoder: EntityDecoder[A], strict: Boolean)(f: A => Task[Response]): Task[Response]
}
trait ResponseOps extends Any with MessageOps {

  /** Change the status of this response object
    *
    * @param status value to replace on the response object
    * @return a new response object with the new status code
    */
  def withStatus(status: Status): Self

  /** Add a Set-Cookie header for the provided [[Cookie]] */
  final def addCookie(cookie: Cookie): Self = putHeaders(`Set-Cookie`(cookie))

  /** Add a Set-Cookie header with the provided values */
  final def addCookie(name: String,
                      content: String,
                      expires: Option[Instant] = None): Self = {
    val cookie = Cookie(name, content, expires)
    addCookie(cookie)
  }

  /** Add a [[org.http4s.headers.`Set-Cookie`]] which will remove the specified cookie from the client */
  final def removeCookie(cookie: Cookie): Self = {
    // Expire the cookie immediately by clearing its content and dating it to the epoch.
    val expired = cookie.copy(content = "", expires = Some(Instant.ofEpochSecond(0)), maxAge = Some(0))
    putHeaders(`Set-Cookie`(expired))
  }

  /** Add a [[org.http4s.headers.`Set-Cookie`]] which will remove the specified cookie from the client */
  final def removeCookie(name: String): Self = {
    val expired = Cookie(name, "", expires = Some(Instant.ofEpochSecond(0)), maxAge = Some(0))
    putHeaders(`Set-Cookie`(expired))
  }
}
| hvesalai/http4s | core/src/main/scala/org/http4s/MessageOps.scala | Scala | apache-2.0 | 5,791 |
package barneshut
import java.awt._
import java.awt.event._
import javax.swing._
import javax.swing.event._
import scala.collection.parallel.TaskSupport
import scala.collection.parallel.Combiner
import scala.collection.parallel.mutable.ParHashSet
import common._
/** Runs one Barnes-Hut n-body simulation step, with each phase timed and
  * executed on the provided parallel `taskSupport`.
  */
class Simulator(val taskSupport: TaskSupport, val timeStats: TimeStatistics) {

  // Grows `boundaries` in place so they enclose `body`; used as the seq-op of aggregate.
  def updateBoundaries(boundaries: Boundaries, body: Body): Boundaries = {
    boundaries.minX = math.min(boundaries.minX, body.x)
    boundaries.minY = math.min(boundaries.minY, body.y)
    boundaries.maxX = math.max(boundaries.maxX, body.x)
    boundaries.maxY = math.max(boundaries.maxY, body.y)
    boundaries
  }

  // Merges `b` into `a` in place; used as the comb-op of aggregate.
  def mergeBoundaries(a: Boundaries, b: Boundaries): Boundaries = {
    a.minX = math.min(a.minX, b.minX)
    a.minY = math.min(a.minY, b.minY)
    a.maxX = math.max(a.maxX, b.maxX)
    a.maxY = math.max(a.maxY, b.maxY)
    a
  }

  // Computes the bounding box of all bodies in parallel.
  def computeBoundaries(bodies: Seq[Body]): Boundaries = timeStats.timed("boundaries") {
    val parBodies = bodies.par
    parBodies.tasksupport = taskSupport
    parBodies.aggregate(new Boundaries)(updateBoundaries, mergeBoundaries)
  }

  // Bins all bodies into a SectorMatrix covering `boundaries`, in parallel.
  def computeSectorMatrix(bodies: Seq[Body], boundaries: Boundaries): SectorMatrix = timeStats.timed("matrix") {
    val parBodies = bodies.par
    parBodies.tasksupport = taskSupport
    parBodies.aggregate(new SectorMatrix(boundaries, SECTOR_PRECISION))((agg, b) => agg += b, (a, b) => a.combine(b))
  }

  // Converts the sector matrix into a quad tree.
  def computeQuad(sectorMatrix: SectorMatrix): Quad = timeStats.timed("quad") {
    sectorMatrix.toQuad(taskSupport.parallelismLevel)
  }

  // Advances every body one time step against the quad tree, in parallel.
  def updateBodies(bodies: Seq[Body], quad: Quad): Seq[Body] = timeStats.timed("update") {
    val parBodies = bodies.par
    parBodies.tasksupport = taskSupport
    parBodies.map(b => b.updated(quad)).seq
  }

  // Drops bodies that have effectively escaped the system, so the bounding box
  // does not grow without limit. Only border sectors are scanned, since an
  // escaping body must pass through the edge of the grid.
  def eliminateOutliers(bodies: Seq[Body], sectorMatrix: SectorMatrix, quad: Quad): Seq[Body] = timeStats.timed("eliminate") {
    def isOutlier(b: Body): Boolean = {
      val dx = quad.massX - b.x
      val dy = quad.massY - b.y
      val d = math.sqrt(dx * dx + dy * dy)
      // object is far away from the center of the mass
      if (d > eliminationThreshold * sectorMatrix.boundaries.size) {
        val nx = dx / d
        val ny = dy / d
        val relativeSpeed = b.xspeed * nx + b.yspeed * ny
        // object is moving away from the center of the mass
        if (relativeSpeed < 0) {
          val escapeSpeed = math.sqrt(2 * gee * quad.mass / d)
          // object has the espace velocity
          -relativeSpeed > 2 * escapeSpeed
        } else false
      } else false
    }

    // Collects the outliers of one border sector into a combiner.
    def outliersInSector(x: Int, y: Int): Combiner[Body, ParHashSet[Body]] = {
      val combiner = ParHashSet.newCombiner[Body]
      combiner ++= sectorMatrix(x, y).filter(isOutlier)
      combiner
    }

    // Coordinates of all sectors on the edge of the grid (top/bottom rows plus
    // left/right columns, without double-counting the corners).
    val sectorPrecision = sectorMatrix.sectorPrecision
    val horizontalBorder = for (x <- 0 until sectorPrecision; y <- Seq(0, sectorPrecision - 1)) yield (x, y)
    val verticalBorder = for (y <- 1 until sectorPrecision - 1; x <- Seq(0, sectorPrecision - 1)) yield (x, y)
    val borderSectors = horizontalBorder ++ verticalBorder

    // compute the set of outliers
    val parBorderSectors = borderSectors.par
    parBorderSectors.tasksupport = taskSupport
    val outliers = parBorderSectors.map({ case (x, y) => outliersInSector(x, y) }).reduce(_ combine _).result

    // filter the bodies that are outliers
    val parBodies = bodies.par
    parBodies.filter(!outliers(_)).seq
  }

  // One full simulation step; returns the surviving bodies and the quad tree
  // built this step (the latter is used for rendering).
  def step(bodies: Seq[Body]): (Seq[Body], Quad) = {
    // 1. compute boundaries
    val boundaries = computeBoundaries(bodies)

    // 2. compute sector matrix
    val sectorMatrix = computeSectorMatrix(bodies, boundaries)

    // 3. compute quad tree
    val quad = computeQuad(sectorMatrix)

    // 4. eliminate outliers
    val filteredBodies = eliminateOutliers(bodies, sectorMatrix, quad)

    // 5. update body velocities and positions
    val newBodies = updateBodies(filteredBodies, quad)

    (newBodies, quad)
  }
}
| adihubba/progfun1 | parprog1-barneshut/src/main/scala/barneshut/Simulator.scala | Scala | mit | 4,023 |
package ammonite.runtime
import java.io._
/** Helpers for round-tripping objects through Java serialization. */
object Ser {

  /** Serializes `m` with Java serialization and returns the raw bytes.
    *
    * The stream is explicitly flushed before the buffer is read:
    * `ObjectOutputStream` buffers block data internally, and the `try` block's
    * result (`baos.toByteArray`) is computed *before* the `finally` clause
    * closes the stream, so without the flush the returned array could miss
    * buffered trailing bytes.
    */
  def serialize(m: AnyRef): Array[Byte] = {
    val baos = new ByteArrayOutputStream()
    val oos = new ObjectOutputStream(baos)
    try {
      oos.writeObject(m)
      oos.flush()
      baos.toByteArray
    }
    finally oos.close()
  }

  /** Deserializes bytes produced by [[serialize]], resolving classes through `loader`. */
  def deserialize(b: Array[Byte], loader: ClassLoader): AnyRef = {
    val bais = new ByteArrayInputStream(b)
    val ois = new ClassLoaderObjectInputStream(loader, bais)
    try ois.readObject()
    finally ois.close()
  }
}
// from akka.util
/**
* ClassLoaderObjectInputStream tries to utilize the provided ClassLoader to load Classes and falls
* back to ObjectInputStreams resolver.
*
* @param classLoader - the ClassLoader which is to be used primarily
* @param is - the InputStream that is wrapped
*/
class ClassLoaderObjectInputStream(classLoader: ClassLoader, is: InputStream) extends ObjectInputStream(is) {
  // Resolve via the provided loader first; on failure, delegate to the
  // default ObjectInputStream resolution strategy.
  override protected def resolveClass(objectStreamClass: ObjectStreamClass): Class[_] = {
    try {
      Class.forName(objectStreamClass.getName, false, classLoader)
    } catch {
      case _: ClassNotFoundException => super.resolveClass(objectStreamClass)
    }
  }
}
} | alexarchambault/ammonium | amm/runtime/src/main/scala/ammonite/runtime/Ser.scala | Scala | mit | 1,165 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.ml.util.DefaultReadWriteTest
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.types.{LongType, StructField, StructType}
import org.apache.spark.storage.StorageLevel
class SQLTransformerSuite
  extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {

  import testImplicits._

  test("params") {
    ParamsSuite.checkParams(new SQLTransformer())
  }

  test("transform numeric data") {
    val original = Seq((0, 1.0, 3.0), (2, 2.0, 5.0)).toDF("id", "v1", "v2")
    // __THIS__ is the SQLTransformer placeholder for the input DataFrame's temp view.
    val sqlTrans = new SQLTransformer().setStatement(
      "SELECT *, (v1 + v2) AS v3, (v1 * v2) AS v4 FROM __THIS__")
    val result = sqlTrans.transform(original)
    val resultSchema = sqlTrans.transformSchema(original.schema)
    val expected = Seq((0, 1.0, 3.0, 4.0, 3.0), (2, 2.0, 5.0, 7.0, 10.0))
      .toDF("id", "v1", "v2", "v3", "v4")
    assert(result.schema.toString == resultSchema.toString)
    assert(resultSchema == expected.schema)
    assert(result.collect().toSeq == expected.collect().toSeq)
    // The transformer must clean up its temporary view after transforming.
    assert(original.sparkSession.catalog.listTables().count() == 0)
  }

  test("read/write") {
    val t = new SQLTransformer()
      .setStatement("select * from __THIS__")
    testDefaultReadWrite(t)
  }

  test("transformSchema") {
    val df = spark.range(10)
    val outputSchema = new SQLTransformer()
      .setStatement("SELECT id + 1 AS id1 FROM __THIS__")
      .transformSchema(df.schema)
    val expected = StructType(Seq(StructField("id1", LongType, nullable = false)))
    assert(outputSchema === expected)
  }

  test("SPARK-22538: SQLTransformer should not unpersist given dataset") {
    val df = spark.range(10)
    df.cache()
    df.count()
    assert(df.storageLevel != StorageLevel.NONE)
    new SQLTransformer()
      .setStatement("SELECT id + 1 AS id1 FROM __THIS__")
      .transform(df)
    // Regression check: transform() must not drop the caller's cached data.
    assert(df.storageLevel != StorageLevel.NONE)
  }
}
| ron8hu/spark | mllib/src/test/scala/org/apache/spark/ml/feature/SQLTransformerSuite.scala | Scala | apache-2.0 | 2,848 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.