| code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5-1M) |
|---|---|---|---|---|---|
package motylwg.camusic.test
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import motylwg.camusic._
@RunWith(classOf[JUnitRunner])
class PlayerTest extends FunSuite {
test("readBytes") {
val data = List(
List(true, true, true, true, false, false, false, false, true, true),
List(false, false, false, false, true, true, true, true, false, false))
val pm = new PitchMapper(MinorPitches)
val pitchMap = pm.getShuffledMap
val dm = new DurationMapper(BalancedDurations)
val durationMap = dm.getShuffledMap
val source = new CaSource(pitchMap, durationMap)
assert(source.readBytes(data) === (List(0xf0, 0x0f), List(List(true, true), List(false, false))))
}
/*
test("musicString") {
val ints = List(0, 1, 2, 3)
val caPlayer = new CaPlayer()
assert(caPlayer.musicString(ints) === "C5i G5w C5h D5q")
}
*/
}
|
motylwg/CaMusic
|
src/test/scala/PlayerTest.scala
|
Scala
|
mit
| 922
|
package hubcat
import dispatch._
import org.json4s.JsonDSL._
import org.json4s.native.Printer.compact
import org.json4s.native.JsonMethods.render
trait RepoPulls
extends Client.Completion { self: RepoRequests =>
private def base = apiHost / "repos" / user / repo / "pulls"
class Pulls extends Client.Completion {
class PullsComments extends Client.Completion {
private [this] def base = apiHost / "repos" / user / repo / "pulls" / "comments"
case class Filter(
_sort: Option[String] = None,
_direction: Option[String] = None)
extends Client.Completion {
/** http://developer.github.com/v3/pulls/comments/#list-comments-on-a-pull-request */
override def apply[T](handler: Client.Handler[T]) =
request(base <<? Map.empty[String, String] ++
_sort.map("sort" -> _) ++
_direction.map("direction" -> _))(handler)
}
case class Comment(id: Int) extends Client.Completion {
/** http://developer.github.com/v3/pulls/comments/#get-a-single-comment */
override def apply[T](handler: Client.Handler[T]) =
request(base / id)(handler)
/** http://developer.github.com/v3/pulls/comments/#edit-a-comment */
def edit(body: String) = complete(base.POST / id << compact(render(("body" -> body))))
/** http://developer.github.com/v3/pulls/comments/#delete-a-comment */
def delete = complete(base.DELETE / id)
}
override def apply[T](handler: Client.Handler[T]) =
filter(handler)
def filter = Filter()
def get(id: Int) = Comment(id)
}
case class Filter(
_state: Option[String] = None,
_head: Option[String] = None,
_base: Option[String] = None,
_accept: String = Accept.GithubJson)
extends Client.Completion {
def state(s: String) = copy(_state = Some(s))
def head(h: String) = copy(_head = Some(h))
def base(b: String) = copy(_base = Some(b))
def accepting = new {
def raw = copy(_accept = Accept.RawJson)
def text = copy(_accept = Accept.TextJson)
def html = copy(_accept = Accept.HtmlJson)
def fullJson = copy(_accept = Accept.FullJson)
}
override def apply[T](handler: Client.Handler[T]) =
request(RepoPulls.this.base <:< Map("Accept" -> _accept) <<? Map.empty[String, String] ++
_state.map("state" -> _) ++
_head.map("head" -> _) ++
_base.map("base" -> _))(handler)
}
/** http://developer.github.com/v3/pulls/#list-pull-requests */
def filter = Filter()
/** http://developer.github.com/v3/pulls/#list-pull-requests */
override def apply[T](handler: Client.Handler[T]) =
filter(handler)
/** http://developer.github.com/v3/pulls/#create-a-pull-request */
def create(title: String, head: String) =
PullBuilder(title, head)
def comments = new PullsComments
}
/** Operations defined for a specific pull request */
case class Pull(
id: Int, _accept: String = Accept.GithubJson)
extends Client.Completion {
private def acceptHeader = Map("Accept" -> _accept)
class PullComments extends Client.Completion {
private [this] def base = apiHost / "repos" / user / repo / "pulls" / id / "comments"
/** http://developer.github.com/v3/pulls/comments/#list-comments-on-a-pull-request */
override def apply[T](handler: Client.Handler[T]) =
request(base)(handler)
/** Starts a new thread of review. http://developer.github.com/v3/pulls/comments/#create-a-comment */
def create(body: String, commit: String, path: String, position: Int) =
complete(base.POST << compact(
render(("body" -> body) ~
("commit_id" -> commit) ~
("path" -> path) ~
("position" -> position))))
/** Creates a response in reply to a thread of review. http://developer.github.com/v3/pulls/comments/#create-a-comment */
def reply(to: Int, body: String) =
complete(base.POST << compact(render(
("body" -> body) ~ ("in_reply_to" -> to))))
}
/** Update operation fields */
case class Update(
_title: Option[String] = None,
_body: Option[String] = None,
_state: Option[String] = None)
extends Client.Completion {
def title(t: String) = copy(_title = Some(t))
def body(b: String) = copy(_body = Some(b))
def state(s: String) = copy(_state = Some(s))
override def apply[T](handler: Client.Handler[T]) =
request(base.PATCH / id << pmap)(handler)
private def pmap =
compact(render(
("title" -> _title) ~
("body" -> _body) ~
("state" -> _state)))
}
def accepting = new {
def raw = copy(_accept = Accept.RawJson)
def text = copy(_accept = Accept.TextJson)
def html = copy(_accept = Accept.HtmlJson)
def fullJson = copy(_accept = Accept.FullJson)
def diff = copy(_accept = Accept.Diff)
def patch = copy(_accept = Accept.Patch)
}
/** http://developer.github.com/v3/pulls/#get-a-single-pull-request */
override def apply[T](handler: Client.Handler[T]) =
request(base / id <:< acceptHeader)(handler)
/** http://developer.github.com/v3/pulls/#update-a-pull-request */
def update = Update()
/** http://developer.github.com/v3/pulls/#list-commits-on-a-pull-request */
def commits = complete(base / id / "commits" <:< acceptHeader)
/** http://developer.github.com/v3/pulls/#list-pull-requests-files */
def files = complete(base / id / "files" <:< acceptHeader)
/** http://developer.github.com/v3/pulls/#get-if-a-pull-request-has-been-merged */
def merged = complete(base / id / "merge")
/** http://developer.github.com/v3/pulls/#merge-a-pull-request-merge-buttontrade */
def merge(msg: Option[String] = None) =
complete(base.PUT / id / "merge" << compact(render(("commit_message" -> msg))))
def comments = new PullComments
}
/** Builder for creating a new pull request */
case class PullBuilder(
title: String,
head: String,
_base: String = "master",
_body: Option[String] = None,
_issue: Option[Int] = None)
extends Client.Completion {
def body(b: String) = copy(_body = Some(b))
def base(b: String) = copy(_base = b)
override def apply[T](handler: Client.Handler[T]) =
request(RepoPulls.this.base.POST << pmap)(handler)
def pmap =
compact(render(
("title" -> title) ~
("body" -> _body) ~
("base" -> _base) ~
("head" -> head) ~
("issue" -> _issue)))
}
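// Illustrative usage sketch (not from the original source; `repo` stands for any
// RepoRequests instance mixing in this trait, and `handler` is a hypothetical Client.Handler):
//   repo.pulls.filter.state("open").head("feature-branch")(handler)   // list matching pull requests
//   repo.pull(42).comments.reply(to = 7, body = "Looks good")         // reply within a review thread
// Both expressions ultimately issue requests against the /repos/:user/:repo/pulls endpoints built above.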
def pulls = new Pulls
def pull(id: Int): Pull = Pull(id)
}
|
rintcius/hubcat
|
src/main/scala/pulls.scala
|
Scala
|
mit
| 6,768
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution.command
import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
import org.apache.spark.sql.execution.command.{CarbonDropTableCommand, DropDatabaseCommand, RunnableCommand}
case class CarbonDropDatabaseCommand(command: DropDatabaseCommand)
extends RunnableCommand {
override val output = command.output
override def run(sparkSession: SparkSession): Seq[Row] = {
val dbName = command.databaseName
// DropHiveDB command will fail if cascade is false and one or more tables exist in the database
val rows = command.run(sparkSession)
if (command.cascade) {
val tablesInDB = CarbonEnv.get.carbonMetastore.getAllTables()
.filter(_.database.exists(_.equalsIgnoreCase(dbName)))
tablesInDB.foreach { tableName =>
CarbonDropTableCommand(true, Some(dbName), tableName.table).run(sparkSession)
}
}
CarbonEnv.get.carbonMetastore.dropDatabaseDirectory(dbName)
rows
}
}
|
Sephiroth-Lin/incubator-carbondata
|
integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonHiveCommands.scala
|
Scala
|
apache-2.0
| 1,778
|
package org.automanlang.core.question
import org.automanlang.core.AutomanAdapter
import org.automanlang.core.answer.{Answer, ScalarOutcome, AbstractScalarAnswer}
import scala.concurrent.ExecutionContext.Implicits.global
abstract class DiscreteScalarQuestion extends Question {
type AA = AbstractScalarAnswer[A]
type O = ScalarOutcome[A]
protected var _confidence: Double = 0.95
def confidence_=(c: Double) { _confidence = c }
def confidence: Double = _confidence
def num_possibilities: BigInt
protected[automanlang] def getOutcome(adapter: AutomanAdapter) : O = {
ScalarOutcome(this, schedulerFuture(adapter))
}
protected[automanlang] def composeOutcome(o: O, adapter: AutomanAdapter) : O = {
// unwrap future from previous Outcome
val f = o.f map {
case Answer(value, cost, conf, id, dist) =>
if (this.confidence <= conf) {
Answer(
value,
BigDecimal(0.00).setScale(2, math.BigDecimal.RoundingMode.FLOOR),
conf,
id,
dist
)
} else {
startScheduler(adapter)
}
case _ => startScheduler(adapter)
}
ScalarOutcome(this, f)
}
}
|
dbarowy/AutoMan
|
libautoman/src/main/scala/org/automanlang/core/question/DiscreteScalarQuestion.scala
|
Scala
|
gpl-2.0
| 1,193
|
/*
* Copyright (c) 2016 SnappyData, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package org.apache.spark.sql.streaming
import scala.reflect.runtime.universe.TypeTag
import org.apache.spark.sql.catalyst.encoders.{ExpressionEncoder, RowEncoder}
import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.catalyst.{InternalRow, JavaTypeInference}
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.hive.SnappyStoreHiveCatalog
import org.apache.spark.sql.sources.SchemaRelationProvider
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{AnalysisException, Row}
import org.apache.spark.streaming.SnappyStreamingContext
import org.apache.spark.streaming.api.java.JavaDStream
import org.apache.spark.streaming.dstream.DStream
object StreamSqlHelper {
def registerRelationDestroy(): Unit = {
SnappyStoreHiveCatalog.registerRelationDestroy()
}
def clearStreams(): Unit = {
StreamBaseRelation.clearStreams()
}
/**
* Returns a Catalyst Schema for the given java bean class.
*/
protected def getSchema(beanClass: Class[_]): Seq[AttributeReference] = {
val (dataType, _) = JavaTypeInference.inferDataType(beanClass)
dataType.asInstanceOf[StructType].fields.map { f =>
AttributeReference(f.name, f.dataType, f.nullable)()
}
}
def getSchemaDStream(ssc: SnappyStreamingContext, tableName: String): SchemaDStream = {
val catalog = ssc.snappySession.sessionState.catalog
catalog.lookupRelation(catalog.newQualifiedTableName(tableName)) match {
case LogicalRelation(sr: StreamPlan, _, _) => new SchemaDStream(ssc,
LogicalDStreamPlan(sr.schema.toAttributes, sr.rowStream)(ssc))
case _ =>
throw new AnalysisException(s"Table $tableName not a stream table")
}
}
/**
* Creates a [[SchemaDStream]] from a DStream of Product (e.g. case classes).
*/
def createSchemaDStream[A <: Product : TypeTag](ssc: SnappyStreamingContext,
stream: DStream[A]): SchemaDStream = {
val encoder = ExpressionEncoder[A]()
val schema = encoder.schema
val logicalPlan = LogicalDStreamPlan(schema.toAttributes,
stream.map(encoder.toRow(_).copy()))(ssc)
new SchemaDStream(ssc, logicalPlan)
}
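// Illustrative sketch (the names below are hypothetical, not from this file): given
//   case class Tick(symbol: String, price: Double)
//   val ticks: DStream[Tick] = ...
// calling createSchemaDStream(snsc, ticks) derives a two-column schema
// (symbol STRING, price DOUBLE) via ExpressionEncoder and wraps the stream
// in a SchemaDStream backed by a LogicalDStreamPlan.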
def createSchemaDStream(ssc: SnappyStreamingContext, rowStream: DStream[Row],
schema: StructType): SchemaDStream = {
val encoder = RowEncoder(schema)
val logicalPlan = LogicalDStreamPlan(schema.toAttributes,
rowStream.map(encoder.toRow(_).copy()))(ssc)
new SchemaDStream(ssc, logicalPlan)
}
def createSchemaDStream(ssc: SnappyStreamingContext,
rowStream: JavaDStream[_], beanClass: Class[_]): SchemaDStream = {
val encoder = ExpressionEncoder.javaBean(beanClass.asInstanceOf[Class[Any]])
val schema = encoder.schema
val logicalPlan = LogicalDStreamPlan(schema.toAttributes,
rowStream.dstream.map(encoder.toRow(_).copy()))(ssc)
new SchemaDStream(ssc, logicalPlan)
}
}
trait StreamPlan {
def rowStream: DStream[InternalRow]
def schema: StructType
}
trait StreamPlanProvider extends SchemaRelationProvider
|
vjr/snappydata
|
core/src/main/scala/org/apache/spark/sql/streaming/StreamSqlHelper.scala
|
Scala
|
apache-2.0
| 3,733
|
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.nlp
/**
* Trait used to create annotators with input columns of variable length.
* */
trait HasMultipleInputAnnotationCols extends HasInputAnnotationCols {
/** Annotator reference id. The Annotator type is the same for any of the input columns */
val inputAnnotatorType: String
lazy override val inputAnnotatorTypes: Array[String] = getInputCols.map(_ => inputAnnotatorType)
/**
* Columns that contain annotations necessary to run this annotator.
* The AnnotatorType is the same for all input columns in that annotator.
*/
override def setInputCols(value: Array[String]): this.type = {
set(inputCols, value)
}
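// Illustrative sketch (column names are hypothetical): for an annotator whose
// inputAnnotatorType is, say, CHUNK, calling
//   setInputCols(Array("chunk_a", "chunk_b", "chunk_c"))
// makes inputAnnotatorTypes evaluate to Array(CHUNK, CHUNK, CHUNK): one entry per
// input column, all of the same annotator type, however many columns are set.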
}
|
JohnSnowLabs/spark-nlp
|
src/main/scala/com/johnsnowlabs/nlp/HasMultipleInputAnnotationCols.scala
|
Scala
|
apache-2.0
| 1,271
|
package org.jetbrains.plugins.scala
package worksheet.processor
import com.intellij.openapi.vfs.newvfs.FileAttribute
import com.intellij.psi.PsiFile
/**
* User: Dmitry.Naydanov
* Date: 30.07.14.
*/
trait WorksheetPerFileConfig {
protected val enabled = "enabled"
protected val disabled = "disable"
def isEnabled(file: PsiFile, attribute: FileAttribute): Boolean = FileAttributeUtilCache.readAttribute(attribute, file).contains("enabled")
def setEnabled(file: PsiFile, attribute: FileAttribute, e: Boolean) {
FileAttributeUtilCache.writeAttribute(attribute, file, if (e) enabled else disabled)
}
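// Illustrative usage (the file and attribute values are hypothetical): a component
// mixing in this trait can persist a per-file flag with
//   setEnabled(psiFile, someFileAttribute, e = true)
// and later read it back with isEnabled(psiFile, someFileAttribute), which returns
// true only if the stored attribute value is "enabled".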
}
|
ilinum/intellij-scala
|
src/org/jetbrains/plugins/scala/worksheet/processor/WorksheetPerFileConfig.scala
|
Scala
|
apache-2.0
| 618
|
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frsse2008.calculations
import org.joda.time.{LocalDate, Period, PeriodType}
object PeriodCalculator {
def periodHeadingComponents(startDate: LocalDate, endDate: LocalDate): PeriodHeadingComponents = {
val friendlyEndDate = endDate.toString("d MMM yyyy")
val yearEndDate = endDate.toString("yyyy")
val months = monthsInPeriod(startDate, endDate)
months match {
case 12 => PeriodHeadingComponents(monthCount = 12, messageKey = "", dateText = yearEndDate)
case months if (months > 1) => PeriodHeadingComponents(monthCount = months, messageKey = "periodHeader.plural", dateText = friendlyEndDate)
case _ => PeriodHeadingComponents(monthCount = months, messageKey = "periodHeader.singular", dateText = friendlyEndDate)
}
}
private def monthsInPeriod(startDate: LocalDate, endDate: LocalDate): Int = {
val period = new Period(startDate.minusDays(1), endDate, PeriodType.yearMonthDay().withYearsRemoved())
period match {
case p if p.getDays > 15 => p.getMonths + 1
case p if p.getMonths < 1 => 1
case p => p.getMonths
}
}
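// Worked example (illustrative, not from the original source): for a period of
// 2016-01-01 to 2016-12-31 the day-adjusted Period is exactly 12 months, so
// periodHeadingComponents renders only the year ("2016") with an empty message key.
// For 2016-01-01 to 2016-06-20 the 20 leftover days (> 15) round the count up to
// 6 months, selecting the "periodHeader.plural" message key and "20 Jun 2016".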
}
case class PeriodHeadingComponents(monthCount: Int, messageKey: String, dateText: String)
|
liquidarmour/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/accounts/frsse2008/calculations/PeriodCalculator.scala
|
Scala
|
apache-2.0
| 1,829
|
package com.cloudray.scalapress.search
import org.scalatest.{OneInstancePerTest, FunSuite}
import org.scalatest.mock.MockitoSugar
/** @author Stephen Samuel */
class FacetFieldTest extends FunSuite with MockitoSugar with OneInstancePerTest {
test("facet field apply") {
assert(TagsFacetField === FacetField("tags").get)
assert(AttributeFacetField(4) === FacetField("attr_facet_4").get)
assert(None === FacetField("asqew"))
}
}
|
vidyacraghav/scalapress
|
src/test/scala/com/cloudray/scalapress/search/FacetFieldTest.scala
|
Scala
|
apache-2.0
| 447
|
package controllers.admin
import scalaz._
import Scalaz._
import scalaz.Validation
import scalaz.Validation.FlatMap._
import scalaz.NonEmptyList._
import net.liftweb.json._
import net.liftweb.json.JsonParser._
import io.megam.auth.funnel.{ FunnelResponse, FunnelResponses }
import io.megam.auth.funnel.FunnelErrors._
import io.megam.auth.stack.Role._
import play.api.mvc._
import controllers.stack.Results
import controllers.stack.{APIAuthElement, PermissionElement}
object Balances extends Controller with APIAuthElement with PermissionElement {
def update = StackAction(parse.tolerantText, AuthorityKey -> Administrator) { implicit request =>
(Validation.fromTryCatchThrowable[Result, Throwable] {
reqFunneled match {
case Success(succ) => {
val freq = succ.getOrElse(throw new CannotAuthenticateError("Invalid header.", "Read docs.megam.io/api."))
val email = freq.maybeEmail.getOrElse(throw new CannotAuthenticateError("Email not found (or) invalid.", "Read docs.megam.io/api."))
val org = freq.maybeOrg.getOrElse(throw new CannotAuthenticateError("Org not found (or) invalid.", "Read docs.megam.io/api."))
val admin = canPermit(grabAuthBag).getOrElse(throw new PermissionNotThere("admin authority is required to access this resource.", "Read docs.megam.io/api."))
val clientAPIBody = freq.clientAPIBody.getOrElse(throw new Error("Body not found (or) invalid."))
models.admin.Balances.update(clientAPIBody) match {
case Success(succ) =>
Status(CREATED)(
FunnelResponse(CREATED, "Your balances updated successfully.", "Megam::Balances").toJson(true))
case Failure(err) =>
val rn: FunnelResponse = new HttpReturningError(err)
Status(rn.code)(rn.toJson(true))
}
}
case Failure(err) => {
val rn: FunnelResponse = new HttpReturningError(err)
Status(rn.code)(rn.toJson(true))
}
}
}).fold(succ = { a: Result => a }, fail = { t: Throwable => { val rn: FunnelResponse = new HttpReturningError(nels(t)); Status(rn.code)(rn.toJson(true)) } })
}
}
|
indykish/vertice_gateway
|
app/controllers/admin/Balances.scala
|
Scala
|
mit
| 2,186
|
/**
* Copyright 2015 Zaradai
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.zaradai.lattrac
import com.zaradai.lattrac.capture._
import com.zaradai.lattrac.store.Observer
class LatencyServiceDecorator(builder: LatencyBuilder) extends LatencyTracerService {
private[this] lazy val service = builder.buildWithAutoLifecycle
override def trace(id: String, location: String): Unit =
service.trace(id, location)
override def observe(observer: Observer[TraceEvent]): Unit =
service.observe(observer)
}
object FineLatencyTracer extends LatencyServiceDecorator(LatencyTracerBuilder.nanos.inMemory.disruptored)
object FineLatencyTracerBackedByMongo extends LatencyServiceDecorator(LatencyTracerBuilder.nanos.usingMongo().disruptored)
object CourseLatencyTracer extends LatencyServiceDecorator(LatencyTracerBuilder.millis.inMemory.queued)
object CourseLatencyTracerBackByMongo extends LatencyServiceDecorator(LatencyTracerBuilder.millis.usingMongo().queued)
|
zaradai/lattrac
|
src/main/scala/com/zaradai/lattrac/LatencyTracer.scala
|
Scala
|
apache-2.0
| 1,493
|
package com.ibm.spark.kernel.api
import java.io.{InputStream, PrintStream}
import com.ibm.spark.comm.CommManager
import com.ibm.spark.interpreter._
import com.ibm.spark.kernel.protocol.v5._
import com.ibm.spark.kernel.protocol.v5.kernel.ActorLoader
import com.ibm.spark.magic.MagicLoader
import com.typesafe.config.Config
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfter, FunSpec, Matchers}
import com.ibm.spark.global.ExecuteRequestState
class KernelSpec extends FunSpec with Matchers with MockitoSugar
with BeforeAndAfter
{
private val BadCode = Some("abc foo bar")
private val GoodCode = Some("val foo = 1")
private val ErrorCode = Some("val foo = bar")
private val ErrorMsg = "Name: error\n" +
"Message: bad\n" +
"StackTrace: 1"
private var mockConfig: Config = _
private var mockActorLoader: ActorLoader = _
private var mockInterpreter: Interpreter = _
private var mockCommManager: CommManager = _
private var mockMagicLoader: MagicLoader = _
private var kernel: KernelLike = _
before {
mockConfig = mock[Config]
mockInterpreter = mock[Interpreter]
when(mockInterpreter.interpret(BadCode.get))
.thenReturn((Results.Incomplete, null))
when(mockInterpreter.interpret(GoodCode.get))
.thenReturn((Results.Success, Left(new ExecuteOutput("ok"))))
when(mockInterpreter.interpret(ErrorCode.get))
.thenReturn((Results.Error, Right(ExecuteError("error","bad", List("1")))))
mockCommManager = mock[CommManager]
mockActorLoader = mock[ActorLoader]
mockMagicLoader = mock[MagicLoader]
kernel = new Kernel(
mockConfig, mockActorLoader, mockInterpreter, mockCommManager,
mockMagicLoader
)
}
after {
ExecuteRequestState.reset()
}
describe("Kernel") {
describe("#eval") {
it("should return syntax error") {
kernel eval BadCode should be((false, "Syntax Error!"))
}
it("should return ok") {
kernel eval GoodCode should be((true, "ok"))
}
it("should return error") {
kernel eval ErrorCode should be((false, ErrorMsg))
}
it("should return error on None") {
kernel eval None should be ((false, "Error!"))
}
}
describe("#out") {
it("should throw an exception if the ExecuteRequestState has not been set") {
intercept[IllegalArgumentException] {
kernel.out
}
}
it("should create a new PrintStream instance if the ExecuteRequestState has been set") {
ExecuteRequestState.processIncomingKernelMessage(
new KernelMessage(Nil, "", mock[Header], mock[ParentHeader],
mock[Metadata], "")
)
kernel.out shouldBe a [PrintStream]
}
}
describe("#err") {
it("should throw an exception if the ExecuteRequestState has not been set") {
intercept[IllegalArgumentException] {
kernel.err
}
}
it("should create a new PrintStream instance if the ExecuteRequestState has been set") {
ExecuteRequestState.processIncomingKernelMessage(
new KernelMessage(Nil, "", mock[Header], mock[ParentHeader],
mock[Metadata], "")
)
// TODO: Access the underlying streamType field to assert stderr?
kernel.err shouldBe a [PrintStream]
}
}
describe("#in") {
it("should throw an exception if the ExecuteRequestState has not been set") {
intercept[IllegalArgumentException] {
kernel.in
}
}
it("should create a new InputStream instance if the ExecuteRequestState has been set") {
ExecuteRequestState.processIncomingKernelMessage(
new KernelMessage(Nil, "", mock[Header], mock[ParentHeader],
mock[Metadata], "")
)
kernel.in shouldBe a [InputStream]
}
}
describe("#stream") {
it("should throw an exception if the ExecuteRequestState has not been set") {
intercept[IllegalArgumentException] {
kernel.stream
}
}
it("should create a StreamMethods instance if the ExecuteRequestState has been set") {
ExecuteRequestState.processIncomingKernelMessage(
new KernelMessage(Nil, "", mock[Header], mock[ParentHeader],
mock[Metadata], "")
)
kernel.stream shouldBe a [StreamMethods]
}
}
}
}
|
codeaudit/spark-kernel
|
kernel/src/test/scala/com/ibm/spark/kernel/api/KernelSpec.scala
|
Scala
|
apache-2.0
| 4,425
|
package com.xored.scalajs.react.examples.hello
import com.xored.scalajs.react._
object HelloMessage extends TypedReactSpec {
case class State()
case class Props(name: String)
def getInitialState(self: This) = State()
@scalax
def render(self: This) = {
<div>Hello {self.props.name}</div>
}
}
|
Aste88/scala-js-react
|
scalajs-react-examples/src/main/scala/com/xored/scalajs/react/examples/hello/HelloMessage.scala
|
Scala
|
apache-2.0
| 312
|
package feature.format
import feature._
/** A record representing the information contained in one line of a file in GTF2.2 format
* ([[http://mblab.wustl.edu/GTF22.html format specification]])
*
* @param line String containing one valid GTF2.2 line with or without trailing newline character
*/
class GTF22Record(private val line: String) extends FeatureBuilderModifier {
// Remove trailing comment, then trailing whitespace, then final semicolon
private val trimmed: String = line.split("#")(0).replaceAll("""\s+$""", "").replaceAll(";$", "")
// Fields are tab-separated
private val tokens: Array[String] = trimmed.split("\\t")
validate()
private def validate(): Unit = {
if (!isComment) {
// Can't have two tabs in a row
if (tokens.contains(""))
throw new IllegalArgumentException(s"Malformed GTF2.2 line. Empty field.\n$line")
// Must have at least 9 tab-separated fields
if (tokens.length < 9)
throw new IllegalArgumentException(s"Invalid GTF2.2 line. Must have at least 9 tab-separated fields.\n$line")
if (start >= end)
throw new IllegalArgumentException(s"Invalid GTF2.2 line. Invalid start and end.\n$line")
if (start < 0)
throw new IllegalArgumentException(s"Invalid GTF2.2 line. Invalid start position.\n$line")
if (frame.isDefined && (frame.get < 0 || frame.get > 2))
throw new IllegalArgumentException(s"Invalid GTF2.2 line. Invalid frame.\n$line")
// Compute lazy orientation and score so they get validated
val o = orientation
val s = score
if (!ignore) {
featureType match {
case _: MatureRNA =>
// Check for required attributes gene_id and transcript_id
if (!attributes.contains("gene_id"))
throw new IllegalArgumentException(s"Invalid GTF2.2 line. Must have gene_id attribute.\n$line")
if (!attributes.contains("transcript_id"))
throw new IllegalArgumentException(s"Invalid GTF2.2 line. Must have transcript_id attribute.\n$line")
case _ => Unit
}
}
}
}
/** True if the line represents a comment (starting with '#'), false otherwise. */
lazy val isComment: Boolean = line.startsWith("#")
/** True if the line is to be ignored, either because it is a comment or because
* the feature type is not supported (see [[http://mblab.wustl.edu/GTF22.html format specification]]).
*/
lazy val ignore: Boolean = isComment || featureType == Ignore
/** The chromosome or reference sequence name. */
lazy val chr: String = tokens(0).replaceFirst("^chr", "")
/** The annotation source (see [[http://mblab.wustl.edu/GTF22.html format specification]]). */
lazy val source: String = tokens(1)
/** The feature type. For valid values see [[http://mblab.wustl.edu/GTF22.html format specification]]. */
lazy val featureType: FeatureType = FeatureType.forLabel(tokens(2))
/** Zero-based start position in reference coordinates, inclusive.
*
* GTF2.2 format uses 1-based positions. This library uses 0-based positions.
* Therefore, this value has been converted to a 0-based position. The start position
* is the first position that is included in the feature.
*/
lazy val start: Int = Utils.zeroBasedInclusiveStart(tokens(3).toInt)
/** Zero-based end position in reference coordinates, exclusive.
*
* GTF2.2 format uses 1-based positions, and furthermore, uses fully closed
* intervals so the end position is the last position included in the feature.
* This library uses 0-based positions and half open intervals, so the end
* position is the position after the last position included in the feature.
* Therefore, this value is equal to the integer in the GTF2.2 file: the shift to
* 0-based coordinates and the shift to an exclusive end cancel each other out.
*
*/
lazy val end: Int = Utils.zeroBasedExclusiveEnd(tokens(4).toInt)
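// Worked example (illustrative): a GTF2.2 line with 1-based, fully-closed
// coordinates start=100, end=200 becomes the 0-based, half-open interval [99, 200):
//   Utils.zeroBasedInclusiveStart(100) == 99   // start shifts down by one
//   Utils.zeroBasedExclusiveEnd(200)   == 200  // end keeps the file's value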
/** The floating point score, or None if '.' is specified in the line. */
lazy val score: Option[Float] = tokens(5) match {
case "." => None
case s: String => {
try {
Some(s.toFloat)
} catch {
case n: NumberFormatException => throw new IllegalArgumentException(s"Invalid GTF2 line. Invalid score.\n$line")
}
}
}
/** The [[Orientation]].
*
* This value is equal to [[Plus]] if '+' is specified in the file or [[Minus]] if
* '-' is specified.
*
*/
lazy val orientation: Orientation = tokens(6) match {
case "+" => Plus
case "-" => Minus
case _ => throw new IllegalArgumentException(s"Invalid GTF2 orientation.\n$line")
}
/** The frame, or None if '.' is specified in the file. */
lazy val frame: Option[Int] = tokens(7) match {
case "." => None
case s: String => Some(s.toInt)
}
/** Map of attribute name to value.
*
* Empty attributes (having value "") are not included in this map.
*
* See [[http://mblab.wustl.edu/GTF22.html format specification]] for details on GTF2.2 attributes.
*
*/
lazy val attributes: Map[String, String] = {
// Map of attribute name to attribute value
val as = new scala.collection.mutable.HashMap[String, String]
// Attribute key/value pairs are separated by semicolons
tokens(8).split("; ").foreach(s => {
// Key and value are separated by a single space
val ss: Array[String] = s.split(" ")
if(ss.length < 2) throw new IllegalArgumentException(s"Invalid GTF2 attribute: $s\n$line")
val k: String = ss.head
// There can only be one value between double quotes
if(ss.tail.mkString(" ").split("\"").length > 2)
throw new IllegalArgumentException(s"Invalid GTF2 attribute spacing:\n$line")
val v: String = ss.tail.mkString(" ").replace("\"", "") // String values are between double quotes
if(k == "" || k.startsWith(" ")) throw new IllegalArgumentException(s"Invalid GTF2 attribute spacing: $s\n$line")
if(v.startsWith(" ")) throw new IllegalArgumentException(s"Invalid GTF2 attribute spacing:\n$line")
// Skip empty string attribute values
if(v != "") as.put(k, v)
})
// Convert to an immutable map
as.toMap
}
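// Illustrative parse (the attribute text is hypothetical): a ninth field such as
//   gene_id "g1"; transcript_id "t1"; note ""
// yields Map("gene_id" -> "g1", "transcript_id" -> "t1"); the empty-valued "note"
// attribute is skipped, matching the behaviour documented above.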
/** Transcript ID, or None if the attribute is absent or empty */
lazy val transcriptId: Option[String] = attributes.get("transcript_id")
/** Gene ID, or None if the attribute is absent or empty */
lazy val geneId: Option[String] = attributes.get("gene_id")
/** Returns a new [[FeatureBuilder]] consisting of the passed [[FeatureBuilder]] with
* the information in this [[GTF22Record]] added to it.
*
* The feature type must not be one of the CNS types (see [[http://mblab.wustl.edu/GTF22.html format specification]])
* as these features are specified on a single GTF2.2 line and should not be incorporated into an existing
* [[FeatureBuilder]]. The feature type must also not be an ignored type.
*
* @param fb Original [[FeatureBuilder]] with the same transcript ID and gene ID as this record
* @return New [[FeatureBuilder]] with additional information from this record incorporated
*/
override def op(fb: FeatureBuilder): FeatureBuilder = {
// Only operate on the FeatureBuilder if it has the same transcript ID and gene ID as this record
if(transcriptId != fb.featureId)
throw new IllegalArgumentException(s"Transcript ID ($transcriptId) must be equal to existing feature ID (${fb.featureId})")
if(geneId != fb.geneId)
throw new IllegalArgumentException(s"Gene ID ($geneId) must be equal to existing gene ID (${fb.geneId})")
// Create the block for this record
val blk = Block(chr, start, end, orientation)
featureType match {
// Assume the CDS will be specified so don't do anything special with start codon, 5'-UTR, 3'-UTR
case Exon | StartCodon | UTR5 | UTR3 => fb.addBlock(blk)
// Include the stop codon in our CDS
case CDS | StopCodon =>
fb.addBlock(blk).setCdsStart(Utils.updateCdsStart(fb, start)).setCdsEnd(Utils.updateCdsEnd(fb, end))
// These feature types must have only one block
case Intergenic | IntergenicCNS | IntronicCNS | Ignore =>
throw new IllegalArgumentException("Only feature types representing parts of transcripts can be used" +
s"to update a ${classOf[FeatureBuilder].getName}. GTF2.2 line:\n$line")
}
}
}
private object Utils {
/*
Update CDS start after encountering a new block.
Replace existing start if new start is less, or add new start if doesn't exist yet
*/
def updateCdsStart(fb: FeatureBuilder, ns: Int): Int = if(fb.cdsStart.isDefined) math.min(ns, fb.cdsStart.get) else ns
/*
Update CDS end after encountering a new block.
Replace existing end if new end is greater, or add new end if doesn't exist yet
*/
def updateCdsEnd(fb: FeatureBuilder, ne: Int): Int = if(fb.cdsEnd.isDefined) math.max(ne, fb.cdsEnd.get) else ne
def zeroBasedInclusiveStart(gtf2start: Int): Int = gtf2start - 1
def zeroBasedExclusiveEnd(gtf2end: Int): Int = gtf2end
}
/** A GTF2.2 feature type */
sealed trait FeatureType {val name: String}
/** A GTF2.2 feature type that represents part of a mature RNA transcript */
sealed trait MatureRNA extends FeatureType
/** GTF2.2 CDS feature type */
case object CDS extends MatureRNA {val name = "CDS"}
/** GTF2.2 start codon feature type */
case object StartCodon extends MatureRNA {val name = "start_codon"}
/** GTF2.2 stop codon feature type */
case object StopCodon extends MatureRNA {val name = "stop_codon"}
/** GTF2.2 5'-UTR feature type */
case object UTR5 extends MatureRNA {val name = "5UTR"}
/** GTF2.2 3'-UTR feature type */
case object UTR3 extends MatureRNA {val name = "3UTR"}
/** GTF2.2 intergenic feature type */
case object Intergenic extends FeatureType {val name = "inter"}
/** GTF2.2 intergenic CNS feature type */
case object IntergenicCNS extends FeatureType {val name = "inter_CNS"}
/** GTF2.2 intronic CNS feature type */
case object IntronicCNS extends FeatureType {val name = "intron_CNS"}
/** GTF2.2 exon feature type */
case object Exon extends MatureRNA {val name = "exon"}
/** Nonstandard feature type. See [[http://mblab.wustl.edu/GTF22.html format specification]] for details. */
case object Ignore extends FeatureType {val name = "ignore"}
/** Utilities for GTF2.2 [[FeatureType]]s */
object FeatureType {
/** Returns the [[FeatureType]] specified by the GTF2.2 value
*
* @param label GTF2.2 feature type label
*/
def forLabel(label: String): FeatureType = {
label match {
case Exon.name => Exon
case CDS.name => CDS
case StartCodon.name => StartCodon
case StopCodon.name => StopCodon
case UTR5.name => UTR5
case UTR3.name => UTR3
case Intergenic.name => Intergenic
case IntergenicCNS.name => IntergenicCNS
case IntronicCNS.name => IntronicCNS
case _ => Ignore
}
}
}
|
pamelarussell/sgxlib
|
src/main/scala/feature/format/GTF22Record.scala
|
Scala
|
mit
| 11,045
|
package com.anchortab.snippet
import java.util.Locale
import scala.math._
import net.liftweb._
import common._
import http._
import LiftRules._
import rest._
import js._
import JE._
import JsExp._
import util._
import Helpers._
import json._
import Extraction._
import mongodb._
import BsonDSL._
import org.bson.types.ObjectId
import com.anchortab.model._
import com.anchortab.actor._
import com.newrelic.api.agent._
object Api extends RestHelper with Loggable with AccountDeletion {
private val localizationCache = new collection.mutable.HashMap[Locale, JObject] with collection.mutable.SynchronizedMap[Locale, JObject]
def statelessRewrite : RewritePF = {
case RewriteRequest(ParsePath("api" :: "v1" :: "user" :: userId :: "tabs" :: Nil, _, _, _), _, _) =>
RewriteResponse("api" :: "v1" :: "user" :: userId :: "tabs" :: "0" :: Nil)
}
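// Illustrative effect of the rewrite above (the user id is hypothetical): a request
// for "api" :: "v1" :: "user" :: "someUserId" :: "tabs" :: Nil is rewritten to the
// same path with a trailing "0" segment, so the un-suffixed form is handled by the
// same route as the explicit "0" variant.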
serve {
//////////
// API "embed" resource.
//
// Calls in this namespace are used by the Tab itself to retrieve the information
// it needs to render, or to add an email address to the Tab's email list.
//////////
case req @ Req("api" :: "v1" :: "embed" :: tabId :: Nil, _, GetRequest) =>
{
NewRelic.setTransactionName("API", "/api/v1/embed")
NewRelic.addCustomParameter("tabId", tabId)
for {
tab <- (Tab.find(tabId):Box[Tab]) ?~ {
NewRelic.ignoreTransaction
"Unknown tab."
} ~> 404
user <- tab.user.filter(_.tabsActive_?) ?~ {
NewRelic.ignoreTransaction
"This tab has been disabled."
} ~> 403
plan = user.plan
callbackFnName <- req.param("callback") ?~ "Callback not specified." ~> 400
} yield {
val remoteIp = req.header("X-Forwarded-For") openOr req.remoteAddr
val userAgent = req.userAgent openOr "unknown"
val domain = req.header("X-Embedded-Domain")
Tab.update("_id" -> tab._id, "$inc" -> ("stats.views" -> 1))
if (user.firstSteps.get(UserFirstStep.Keys.EmbedYourTab).isDefined) {
User.update("_id" -> user._id,
"$unset" -> (
("firstSteps." + UserFirstStep.Keys.EmbedYourTab) -> true
)
)
}
User.update("_id" -> user._id,
"$inc" -> (
("quotaCounts." + Plan.Quotas.Views) -> 1
)
)
QuotasActor ! CheckQuotaCounts(user._id)
EventActor ! TrackEvent(Event(
Event.Types.TabView,
Some(remoteIp),
Some(userAgent),
Some(user._id),
Some(tab._id),
domain = domain
))
val whitelabelTab = tab.appearance.whitelabel && plan.hasFeature_?(Plan.Features.WhitelabeledTabs)
val colorScheme = {
if (tab.appearance.colorScheme.name == TabColorScheme.Custom.name && ! plan.hasFeature_?(Plan.Features.CustomColorSchemes)) {
TabColorScheme.Red
} else {
tab.appearance.colorScheme
}
}
val localizedContent = localizationCache.getOrElseUpdate(S.locale, {
List(
"tab-firstName",
"tab-emailAddress",
"tab-submit",
"tab-subscribe",
"tab-minimizeAnchorTab",
"tab-maximizeAnchorTab",
"tab-somethingWentWrong",
"tab-invalidEmail"
).map({ localizationKey =>
(localizationKey -> S.?(localizationKey))
}).foldLeft(JObject(Nil))(_ ~ _)
})
val tabJson =
("delay" -> tab.appearance.delay) ~
("colorScheme" -> decompose(colorScheme)) ~
("whitelabel" -> whitelabelTab) ~
("customText" -> tab.appearance.customText) ~
("submitButtonText" -> tab.appearance.customSubmitButtonText) ~
("mobileTabText" -> tab.appearance.customMobileTabText) ~
("collectName" -> tab.appearance.collectName) ~
("i18n" -> localizedContent)
Call(callbackFnName, tabJson)
}
}
// JSONP can't be semantic. :(
case req @ Req("api" :: "v1" :: "embed" :: tabId :: "submit" :: Nil, _, GetRequest) =>
{
NewRelic.setTransactionName("API", "/api/v1/embed/star/submit")
NewRelic.addCustomParameter("tabId", tabId)
for {
tab <- (Tab.find(tabId):Box[Tab]) ?~ {
NewRelic.ignoreTransaction
"Unknown tab."
} ~> 404
user <- tab.user.filter(_.tabsActive_?) ?~ {
NewRelic.ignoreTransaction
"This tab has been disabled."
} ~> 403
callbackFnName <- req.param("callback") ?~! "Callback not specified." ~> 400
email <- req.param("email").filter(_.trim.nonEmpty) ?~! "Email was not specified." ~> 400
name = req.param("name").map(_.trim).filter(_.nonEmpty)
} yield {
val remoteIp = req.header("X-Forwarded-For") openOr req.remoteAddr
val userAgent = req.userAgent openOr "unknown"
val domain = req.header("X-Embedded-Domain")
val submitResult = {
val successMessage = "tab-successConfirm"
val iFrameParameters =
tab.service.iFrameParameters.map(decompose _)
("success" -> 1) ~
("email" -> email) ~
("message" -> S.?(successMessage)) ~
("iFrame" -> iFrameParameters)
}
QuotasActor ! CheckQuotaCounts(user._id)
EventActor ! TrackEvent(Event(
Event.Types.TabSubmit,
Some(remoteIp),
Some(userAgent),
Some(user._id),
Some(tab._id),
domain = domain
))
ServiceWrapperSubmissionActor ! SubscribeEmailToServiceWrapper(tab, email, name)
Call(callbackFnName, submitResult)
}
}
//////////
// API "user" resource.
//////////
case Req("api" :: "v1" :: "user" :: Nil, _, GetRequest) =>
{
for {
user <- statelessUser.is
} yield {
user.asJson
}
} ?~ "Authentication Failed." ~> 401
//////////
// API "tab" resource
//////////
case Req("api" :: "v1" :: "tab" :: tabId :: Nil, _, GetRequest) =>
{
for {
currentUser <- statelessUser.is ?~ "Authentication Failed." ~> 401
possibleTab = (Tab.find(tabId):Box[Tab])
tab <- possibleTab.filter { tabInQuestion =>
tabInQuestion.userId == currentUser._id || currentUser.admin_?
} ?~ "Unknown tab." ~> 404
} yield {
tab.asJson
}
}
//////////
// API "admin" resource.
//////////
case req @ Req("api" :: "v1" :: "admin" :: "users" :: Nil, _, GetRequest) =>
{
for {
possibleAdminUser <- (statelessUser.is ?~ "Authentication Failed." ~> 401)
adminUser <- (Full(possibleAdminUser).filter(_.admin_?) ?~ "Not authorized." ~> 403)
} yield {
decompose(User.findAll.map(_.asJson))
}
}
case req @ Req("api" :: "v1" :: "admin" :: "users" :: Nil, _, PostRequest) =>
{
for {
possibleAdminUser <- (statelessUser.is ?~ "Authentication Failed." ~> 401)
adminUser <- (Full(possibleAdminUser).filter(_.admin_?) ?~ "Not authorized." ~> 403)
requestBody <- req.body ?~ "No request body." ~> 400
requestJson <- tryo(Serialization.read[JValue](new String(requestBody))) ?~! "Invalid JSON." ~> 400
email <- tryo(requestJson \\ "email").map(_.extract[String]) ?~ "Email is missing" ~> 400
password <- tryo(requestJson \\ "password").map(_.extract[String]) ?~ "Password is missing." ~> 400
} yield {
val user = User(email, User.hashPassword(password))
user.save
("id" -> user._id.toString):JObject
}
}
case req @ Req("api" :: "v1" :: "admin" :: "user" :: id :: Nil, _, GetRequest) =>
{
for {
possibleAdminUser <- (statelessUser.is ?~ "Authentication Failed." ~> 401)
adminUser <- (Full(possibleAdminUser).filter(_.admin_?) ?~ "Not authorized." ~> 403)
user <- (User.find(id):Box[User]) ?~! "User not found." ~> 404
} yield {
user.asJson
}
}
case req @ Req("api" :: "v1" :: "admin" :: "user" :: id :: Nil, _, DeleteRequest) =>
{
for {
possibleAdminUser <- (statelessUser.is ?~ "Authentication Failed." ~> 401)
adminUser <- (Full(possibleAdminUser).filter(_.admin_?) ?~ "Not authorized." ~> 403)
user <- (User.find(id):Box[User]) ?~! "User not found." ~> 404
//deleteResult <- deleteAccount(user)
} yield {
user.delete
OkResponse()
}
}
}
}
|
farmdawgnation/anchortab
|
src/main/scala/com/anchortab/snippet/Api.scala
|
Scala
|
apache-2.0
| 8,963
|
/*
* Copyright 2013 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
package parser
import com.comcast.ip4s._
import org.http4s.headers.Origin
import org.http4s.syntax.all._
class OriginHeaderSuite extends munit.FunSuite {
val host1 = Origin.Host(Uri.Scheme.http, Uri.RegName("www.foo.com"), Some(12345))
val host2 = Origin.Host(Uri.Scheme.https, Uri.Ipv4Address(ipv4"127.0.0.1"), None)
val hostString1 = "http://www.foo.com:12345"
val hostString2 = "https://127.0.0.1"
test("Origin value method should Render a host with a port number") {
val origin: Origin = host1
assertEquals(origin.value, hostString1)
}
test("Origin value method should Render a host without a port number") {
val origin: Origin = host2
assertEquals(origin.value, hostString2)
}
test("Origin value method should Render an empty origin") {
val origin: Origin = Origin.`null`
assertEquals(origin.value, "null")
}
test("OriginHeader parser should Parse a host with a port number") {
val text = hostString1
val origin = host1
val headers = Headers(("Origin", text))
val extracted = headers.get[Origin]
assertEquals(extracted, Some(origin))
}
test("OriginHeader parser should Parse a host without a port number") {
val text = hostString2
val origin = host2
val headers = Headers(("Origin", text))
val extracted = headers.get[Origin]
assertEquals(extracted, Some(origin))
}
test("OriginHeader parser should Parse a 'null' origin") {
val text = "null"
val origin = Origin.`null`
val headers = Headers(("Origin", text))
val extracted = headers.get[Origin]
assertEquals(extracted, Some(origin))
}
test("OriginHeader should fail on a list of multiple hosts") {
val text = s"$hostString1 $hostString2"
val headers = Headers(("Origin", text))
val extracted = headers.get[Origin]
assertEquals(extracted, None)
}
test("OriginHeader should fail on an empty string") {
val text = ""
val headers = Headers(("Origin", text))
val extracted = headers.get[Origin]
assertEquals(extracted, None)
}
}
|
http4s/http4s
|
tests/shared/src/test/scala/org/http4s/parser/OriginHeaderSuite.scala
|
Scala
|
apache-2.0
| 2,661
|
package com.clarifi.reporting.ermine
import com.clarifi.reporting.Supply
import com.clarifi.reporting.ermine.Kind.{ zipKinds, kindVars, subKind }
import com.clarifi.reporting.ermine.Type.{ typeVars, allTypeVars }
import scala.collection.immutable.List
/** A type annotation with possible (pseudo)-existential holes
*
* @author EAK, DJD
*/
case class Annot(loc: Loc, eksists: List[KindVar], exists: List[V[Kind]], body: Type) extends Located {
def map(f: Type => Type): Annot = Annot(loc, eksists, exists, f(body))
def close(implicit su: Supply): Annot = {
val nbody = body.closeWith(exists).nf
val dks = (kindVars(exists) ++ kindVars(nbody) -- eksists).toList
val ndks = refreshList(Bound, loc, dks)
val km = zipKinds(dks, ndks)
Annot(loc, eksists ++ ndks, subKind(km, exists), subKind(km, nbody))
}
def subst(ks: PartialFunction[V[Unit],Kind], ts: PartialFunction[V[Kind],Type]): Annot =
Annot(loc, eksists, exists map { v => v map { k => k subst ks } }, body.subst(ks, ts))
}
object Annot {
val annotAny: Annot = {
val v = V(Loc.builtin,-1,None,Bound,Star(Loc.builtin))
Annot(Loc.builtin, List(), List(v), VarT(v))
}
def plain(l: Loc, ty: Type): Annot = Annot(l, List(), List(), ty)
implicit def relocatableAnnot: Relocatable[Annot] = new Relocatable[Annot] {
def setLoc(a: Annot, l: Loc) = Annot(l, a.eksists, a.exists, a.body)
}
implicit def annotHasTypeVars: HasTypeVars[Annot] = new HasTypeVars[Annot] {
def vars(a: Annot) = typeVars(a.body) -- a.exists
def allVars(a: Annot) = allTypeVars(a.body) -- a.exists
def sub(ks: PartialFunction[KindVar, Kind], ts: PartialFunction[TypeVar, Type], a: Annot): Annot = a.subst(ks, ts)
}
}
|
ermine-language/ermine-legacy
|
src/main/scala/com/clarifi/reporting/ermine/Annot.scala
|
Scala
|
bsd-2-clause
| 1,718
|
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package java.util
/** A subclass of `HashMap` that systematically rejects `null` keys and values.
*
* This class is used as the implementation of some other hashtable-like data
* structures that require non-`null` keys and values to correctly implement
* their specifications.
*/
private[util] class NullRejectingHashMap[K, V](
initialCapacity: Int, loadFactor: Float)
extends HashMap[K, V](initialCapacity, loadFactor) {
def this() =
this(HashMap.DEFAULT_INITIAL_CAPACITY, HashMap.DEFAULT_LOAD_FACTOR)
def this(initialCapacity: Int) =
this(initialCapacity, HashMap.DEFAULT_LOAD_FACTOR)
def this(m: Map[_ <: K, _ <: V]) = {
this(m.size())
putAll(m)
}
// Use Nodes that will reject `null`s in `setValue()`
override private[util] def newNode(key: K, hash: Int, value: V,
previous: HashMap.Node[K, V], next: HashMap.Node[K, V]): HashMap.Node[K, V] = {
new NullRejectingHashMap.Node(key, hash, value, previous, next)
}
override def get(key: Any): V = {
if (key == null)
throw new NullPointerException()
super.get(key)
}
override def containsKey(key: Any): Boolean = {
if (key == null)
throw new NullPointerException()
super.containsKey(key)
}
override def put(key: K, value: V): V = {
if (key == null || value == null)
throw new NullPointerException()
super.put(key, value)
}
override def putIfAbsent(key: K, value: V): V = {
if (value == null)
throw new NullPointerException()
val old = get(key) // throws if `key` is null
if (old == null)
super.put(key, value)
old
}
@noinline
override def putAll(m: Map[_ <: K, _ <: V]): Unit = {
/* The only purpose of `impl` is to capture the wildcards as named types,
* so that we prevent type inference from inferring deprecated existential
* types.
*/
@inline
def impl[K1 <: K, V1 <: V](m: Map[K1, V1]): Unit = {
val iter = m.entrySet().iterator()
while (iter.hasNext()) {
val entry = iter.next()
put(entry.getKey(), entry.getValue())
}
}
impl(m)
}
override def remove(key: Any): V = {
if (key == null)
throw new NullPointerException()
super.remove(key)
}
override def remove(key: Any, value: Any): Boolean = {
val old = get(key) // throws if `key` is null
if (old != null && old.equals(value)) { // false if `value` is null
super.remove(key)
true
} else {
false
}
}
override def replace(key: K, oldValue: V, newValue: V): Boolean = {
if (oldValue == null || newValue == null)
throw new NullPointerException()
val old = get(key) // throws if `key` is null
if (oldValue.equals(old)) { // false if `old` is null
super.put(key, newValue)
true
} else {
false
}
}
override def replace(key: K, value: V): V = {
if (value == null)
throw new NullPointerException()
val old = get(key) // throws if `key` is null
if (old != null)
super.put(key, value)
old
}
override def containsValue(value: Any): Boolean = {
if (value == null)
throw new NullPointerException()
super.containsValue(value)
}
override def clone(): AnyRef =
new NullRejectingHashMap[K, V](this)
}
private object NullRejectingHashMap {
private final class Node[K, V](key: K, hash: Int, value: V,
previous: HashMap.Node[K, V], next: HashMap.Node[K, V])
extends HashMap.Node[K, V](key, hash, value, previous, next) {
override def setValue(v: V): V = {
if (v == null)
throw new NullPointerException()
super.setValue(v)
}
}
}
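// Illustrative sketch of the null-rejecting behaviour (not part of the original file):
//   val m = new NullRejectingHashMap[String, String]()
//   m.put("k", "v")    // stores the mapping, as in a plain HashMap
//   m.put(null, "v")   // throws NullPointerException
//   m.put("k", null)   // throws NullPointerException
//   m.get(null)        // throws NullPointerException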
|
scala-js/scala-js
|
javalib/src/main/scala/java/util/NullRejectingHashMap.scala
|
Scala
|
apache-2.0
| 3,925
|
package org.elasticmq
import org.joda.time.{Duration, DateTime}
case class QueueData(
name: String,
defaultVisibilityTimeout: MillisVisibilityTimeout,
delay: Duration,
receiveMessageWait: Duration,
created: DateTime,
lastModified: DateTime,
deadLettersQueue: Option[DeadLettersQueueData] = None,
isFifo: Boolean = false,
hasContentBasedDeduplication: Boolean = false,
copyMessagesTo: Option[String] = None,
moveMessagesTo: Option[String] = None,
tags: Map[String, String] = Map[String, String]()
)
case class DeadLettersQueueData(name: String, maxReceiveCount: Int)
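// Illustrative construction (the values and the MillisVisibilityTimeout factory shape
// are assumptions, not taken from this file):
//   QueueData(
//     name = "orders.fifo",
//     defaultVisibilityTimeout = MillisVisibilityTimeout(30000L),
//     delay = Duration.ZERO,
//     receiveMessageWait = Duration.ZERO,
//     created = DateTime.now(),
//     lastModified = DateTime.now(),
//     isFifo = true,
//     deadLettersQueue = Some(DeadLettersQueueData("orders-dlq.fifo", maxReceiveCount = 3)))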
|
adamw/elasticmq
|
core/src/main/scala/org/elasticmq/QueueData.scala
|
Scala
|
apache-2.0
| 617
|
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.guacamole.commands
import org.bdgenomics.guacamole.TestUtil
import org.bdgenomics.guacamole.TestUtil.SparkFunSuite
import org.bdgenomics.guacamole.pileup.Pileup
import org.scalatest.Matchers
class SomaticPoCIndelCallerSuite extends SparkFunSuite with Matchers {
test("no indels") {
val normalReads = Seq(
TestUtil.makeRead("TCGATCGA", "8M", "8", 0),
TestUtil.makeRead("TCGATCGA", "8M", "8", 0),
TestUtil.makeRead("TCGATCGA", "8M", "8", 0)
)
val normalPileup = Pileup(normalReads, 2)
val tumorReads = Seq(
TestUtil.makeRead("TCGGTCGA", "8M", "3G4", 0),
TestUtil.makeRead("TCGGTCGA", "8M", "3G4", 0),
TestUtil.makeRead("TCGGTCGA", "8M", "3G4", 0)
)
val tumorPileup = Pileup(tumorReads, 2)
SomaticPoCIndelCaller.callSimpleIndelsAtLocus(tumorPileup, normalPileup).size should be(0)
}
test("single-base deletion") {
val normalReads = Seq(
TestUtil.makeRead("TCGATCGA", "8M", "8", 0),
TestUtil.makeRead("TCGATCGA", "8M", "8", 0),
TestUtil.makeRead("TCGATCGA", "8M", "8", 0))
val normalPileup = Pileup(normalReads, 2)
val tumorReads = Seq(
TestUtil.makeRead("TCGTCGA", "3M1D4M", "3^A4", 0),
TestUtil.makeRead("TCGTCGA", "3M1D4M", "3^A4", 0),
TestUtil.makeRead("TCGTCGA", "3M1D4M", "3^A4", 0))
val tumorPileup = Pileup(tumorReads, 2)
val genotypes = SomaticPoCIndelCaller.callSimpleIndelsAtLocus(tumorPileup, normalPileup)
genotypes.size should be(1)
val genotype = genotypes(0)
val variant = genotype.getVariant
variant.getReferenceAllele should be("GA")
variant.getAlternateAllele should be("G")
}
test("multiple-base deletion") {
val normalReads = Seq(
TestUtil.makeRead("TCGAAGCTTCGAAGCT", "16M", "16", 0),
TestUtil.makeRead("TCGAAGCTTCGAAGCT", "16M", "16", 0),
TestUtil.makeRead("TCGAAGCTTCGAAGCT", "16M", "16", 0)
)
val normalPileup = Pileup(normalReads, 4)
val tumorReads = Seq(
TestUtil.makeRead("TCGAAAAGCT", "5M6D5M", "5^GCTTCG5", 0),
TestUtil.makeRead("TCGAAAAGCT", "5M6D5M", "5^GCTTCG5", 0),
TestUtil.makeRead("TCGAAAAGCT", "5M6D5M", "5^GCTTCG5", 0)
)
val tumorPileup = Pileup(tumorReads, 4)
val genotypes = SomaticPoCIndelCaller.callSimpleIndelsAtLocus(tumorPileup, normalPileup)
genotypes.size should be(1)
val genotype = genotypes(0)
val variant = genotype.getVariant
variant.getReferenceAllele should be("AGCTTCG")
variant.getAlternateAllele should be("A")
}
test("single-base insertion") {
val normalReads = Seq(
TestUtil.makeRead("TCGATCGA", "8M", "8", 0),
TestUtil.makeRead("TCGATCGA", "8M", "8", 0),
TestUtil.makeRead("TCGATCGA", "8M", "8", 0)
)
val normalPileup = Pileup(normalReads, 2)
val tumorReads = Seq(
TestUtil.makeRead("TCGAGTCGA", "4M1I4M", "8", 0),
TestUtil.makeRead("TCGAGTCGA", "4M1I4M", "8", 0),
TestUtil.makeRead("TCGAGTCGA", "4M1I4M", "8", 0)
)
val tumorPileup = Pileup(tumorReads, 3)
val genotypes = SomaticPoCIndelCaller.callSimpleIndelsAtLocus(tumorPileup, normalPileup)
genotypes.size should be(1)
val genotype = genotypes(0)
val variant = genotype.getVariant
variant.getReferenceAllele should be("A")
variant.getAlternateAllele should be("AG")
}
test("multiple-base insertion") {
val normalReads = Seq(
TestUtil.makeRead("TCGATCGA", "8M", "8", 0),
TestUtil.makeRead("TCGATCGA", "8M", "8", 0),
TestUtil.makeRead("TCGATCGA", "8M", "8", 0)
)
val tumorReads = Seq(
TestUtil.makeRead("TCGAGGTCTCGA", "4M4I4M", "8", 0),
TestUtil.makeRead("TCGAGGTCTCGA", "4M4I4M", "8", 0),
TestUtil.makeRead("TCGAGGTCTCGA", "4M4I4M", "8", 0)
)
val genotypes = SomaticPoCIndelCaller.callSimpleIndelsAtLocus(Pileup(tumorReads, 3), Pileup(normalReads, 3))
genotypes.size should be(1)
val genotype = genotypes(0)
val variant = genotype.getVariant
variant.getReferenceAllele should be("A")
variant.getAlternateAllele should be("AGGTC")
}
test("insertions and deletions") {
/*
idx: 01234 56 7890123456
ref: TCGAA TC GATCGATCGA
seq: TC ATCTCAAAAGA GATCGA
*/
val normalReads = Seq(
TestUtil.makeRead("TCGAATCGATCGATCGA", "17M", "17", 10),
TestUtil.makeRead("TCGAATCGATCGATCGA", "17M", "17", 10),
TestUtil.makeRead("TCGAATCGATCGATCGA", "17M", "17", 10)
)
val tumorReads = Seq(
TestUtil.makeRead("TCATCTCAAAAGAGATCGA", "2M2D1M2I2M4I2M2D6M", "2^GA5^TC6", 10),
TestUtil.makeRead("TCATCTCAAAAGAGATCGA", "2M2D1M2I2M4I2M2D6M", "2^GA5^TC6", 10),
TestUtil.makeRead("TCATCTCAAAAGAGATCGA", "2M2D1M2I2M4I2M2D6M", "2^GA5^TC6", 10)
)
def testLocus(locus: Int, refBases: String, altBases: String) = {
val tumorPileup = Pileup(tumorReads, locus)
val normalPileup = Pileup(normalReads, locus)
val genotypes = SomaticPoCIndelCaller.callSimpleIndelsAtLocus(tumorPileup, normalPileup)
genotypes.size should be(1)
val genotype = genotypes(0)
val variant = genotype.getVariant
variant.getReferenceAllele should be(refBases)
variant.getAlternateAllele should be(altBases)
}
testLocus(11, "CGA", "C")
testLocus(14, "A", "ATC")
testLocus(16, "C", "CAAAA")
testLocus(18, "ATC", "A")
}
}
|
ryan-williams/guacamole
|
src/test/scala/org/bdgenomics/guacamole/commands/SomaticPoCIndelCallerSuite.scala
|
Scala
|
apache-2.0
| 6,173
|
package leo.modules.agent.rules
package control_rules
import leo.Configuration
import leo.datastructures.{AnnotatedClause, Clause, Signature}
import leo.datastructures.blackboard.{DataType, Delta}
import leo.modules.GeneralState
/**
* This rule selects a clause from Unprocessed
* to be processed next.
*/
class SelectionRule(inType : DataType[AnnotatedClause],
outType : DataType[AnnotatedClause],
canSelectNext : () => Boolean,
unprocessed : UnprocessedSet,
blockable : Seq[DataType[Any]] = Seq()
)
(implicit state : GeneralState[AnnotatedClause])
extends Rule{
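  // Configuration knobs (semantics inferred from their use in canApply below):
  // "ll" caps the number of selection rounds (default -1 = unlimited),
  // "nSelect" is the number of clauses moved per round (default 1).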
private val maxRound = try {Configuration.valueOf("ll").get.head.toInt} catch {case e : Exception => -1}
var actRound = 0
var work : Int = 0
val maxWork : Int = try {Configuration.valueOf("nSelect").get.head.toInt} catch {case e : Exception => 1}
override val name: String = "selection_rule"
override val inTypes: Seq[DataType[Any]] = inType +: blockable
override val outTypes: Seq[DataType[Any]] = Seq(outType)
override def canApply(r: Delta): Seq[Hint] = {
unprocessed.synchronized{
work -= r.removes(inType).size // TODO Save selected and only delete those
val canSelect = canSelectNext()
if(work == 0 && canSelect && unprocessed.unprocessedLeft){
if(actRound >= maxRound && maxRound > 0){
leo.Out.debug(s"[Selection] (Round = ${actRound}) : Maximum number of iterations reached.")
return Seq()
}
state.incProofLoopCount()
actRound += 1
var res : Seq[Hint] = Seq()
while(work < maxWork && unprocessed.unprocessedLeft){
val c = unprocessed.nextUnprocessed
if (Clause.effectivelyEmpty(c.cl)) {
return Seq()
} else {
work += 1
leo.Out.debug(s"[Selection] (Round = ${actRound}) : ${c.pretty(state.signature)}")
res = new MoveHint(c, inType, outType) +: res
}
}
res
} else {
// println(s"UnprocessedLeft : ${unprocessed.unprocessedLeft}\\n work : ${work}\\n canSelect : ${canSelect}")
Seq()
}
}
}
}
|
lex-lex/Leo-III
|
oldsrc/main/scala/leo/modules/agent/rules/control_rules/SelectionRule.scala
|
Scala
|
bsd-3-clause
| 2,234
|
package com.harrys.hyppo.worker.actor.amqp
import java.util.UUID
import akka.testkit.TestActorRef
import com.harrys.hyppo.worker.actor.RabbitMQTests
import com.harrys.hyppo.worker.api.proto.CreateIngestionTasksRequest
import com.harrys.hyppo.worker.{TestConfig, TestObjects}
/**
* Created by jpetty on 9/17/15.
*/
class QueueProxyActorTests extends RabbitMQTests("QueueProxyActorTests", TestConfig.coordinatorWithRandomQueuePrefix()) {
val injector = TestConfig.localCoordinatorInjector(system, config)
"The Queue Proxy" must {
val proxy = TestActorRef(injector.getInstance(classOf[EnqueueWorkQueueProxy]), "enqueue")
"successfully enqueue messages" in {
val source = TestObjects.testIngestionSource(name = "queue proxy")
val integration = TestObjects.testProcessedDataIntegration(source)
val testJob = TestObjects.testIngestionJob(source)
val work = CreateIngestionTasksRequest(integration, UUID.randomUUID(), Seq(), testJob)
val queueName = naming.integrationWorkQueueName(work)
proxy ! work
within(config.rabbitMQTimeout){
awaitAssert(helpers.checkQueueSize(connection, queueName) shouldBe 1)
}
}
}
}
|
harrystech/hyppo-worker
|
worker/src/test/scala/com/harrys/hyppo/worker/actor/amqp/QueueProxyActorTests.scala
|
Scala
|
mit
| 1,208
|
package emmy.distribution
import emmy.autodiff._
import scalaz.Scalaz.Id
trait NormalFactor[U[_], S] extends Factor with Node {
def mu: Expression[U, Double, S]
def sigma: Expression[U, Double, S]
def variable: Expression[U, Double, S]
override def parents = Seq(mu, sigma)
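  // Log-density of the normal distribution, dropping the constant -0.5 * log(2 * pi):
  // log p(v) = -log(sigma) - (v - mu)^2 / (2 * sigma^2), summed over the container.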
override lazy val logp: Expression[Id, Double, Any] = {
implicit val ops = variable.ops
val x = (variable - mu) / sigma
sum(-(log(sigma) + x * x / 2.0)).toDouble
}
}
class UnitNormalSample[U[_], S](implicit
val ops: ContainerOps.Aux[U, S],
val so: ScalarOps[U[Double], U[Double]],
val vt: Evaluable[ValueOps[U, Double, S]]
)
extends Expression[U, Double, S] {
override def eval(ec: GradientContext): Evaluable[U[Double]] = {
ctx ⇒
{
val valueT = vt(ctx)
valueT.rnd
}
}
}
class NormalSample[U[_], S](
val mu: Expression[U, Double, S],
val sigma: Expression[U, Double, S]
)(implicit val ops: ContainerOps.Aux[U, S])
extends ContinuousVariable[U, S] with NormalFactor[U, S] {
override val variable: NormalSample[U, S] = this
override val vt: Evaluable[ValueOps[U, Double, S]] =
mu.vt
override val so: ScalarOps[U[Double], U[Double]] =
ScalarOps.liftBoth[U, Double, Double](ScalarOps.doubleOps, ops)
private val unit = new UnitNormalSample()(ops, so, vt)
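  // Reparameterization: a draw from N(mu, sigma) is expressed as mu + sigma * epsilon,
  // where epsilon is a unit-normal draw, so gradients flow through mu and sigma via `upstream`.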
private val upstream = mu + unit * sigma
override def eval(ec: GradientContext): Evaluable[U[Double]] = {
ec(upstream)
}
override def grad[W[_], T](gc: GradientContext, v: Parameter[W, T]): Gradient[W, U] = {
gc(upstream, v)
}
override def toString: String = {
s"~ N($mu, $sigma)"
}
}
case class Normal[U[_], S](
mu: Expression[U, Double, S],
sigma: Expression[U, Double, S]
)(implicit ops: ContainerOps.Aux[U, S])
extends Distribution[U, Double, S] {
override def sample: ContinuousVariable[U, S] =
new NormalSample(mu, sigma)
def factor(v: Expression[U, Double, S]) = {
val self = this
new NormalFactor[U, S] {
override val mu: Expression[U, Double, S] = self.mu
override val sigma: Expression[U, Double, S] = self.sigma
override val variable: Expression[U, Double, S] = v
}
}
override def observe(data: U[Double]): Observation[U, Double, S] =
new NormalObservation(mu, sigma, data)
class NormalObservation private[Normal] (
val mu: Expression[U, Double, S],
val sigma: Expression[U, Double, S],
val value: Evaluable[U[Double]]
)(implicit val ops: ContainerOps.Aux[U, S])
extends Observation[U, Double, S] with NormalFactor[U, S] {
override val variable: NormalObservation = this
override val vt: Evaluable[ValueOps[U, Double, S]] =
mu.vt
override val so: ScalarOps[U[Double], U[Double]] =
ScalarOps.liftBoth[U, Double, Double](ScalarOps.doubleOps, ops)
override def toString: String = {
s"($value <- N($mu, $sigma))"
}
}
}
|
fvlankvelt/emmy
|
src/main/scala/emmy/distribution/Normal.scala
|
Scala
|
apache-2.0
| 2,998
|
package org.leebli.parser.jar
import java.io.File
import java.io.FileInputStream
import org.junit.Test
import junit.framework.Assert
class JarParserTestCase {
@Test
def testParseJarOnly() {
val cl = ClassLoader.getSystemClassLoader();
val urls = System.getProperty("java.class.path").split(File.pathSeparator);
println(urls.mkString(","))
val jarUrl = urls find (_.contains("parser-jar-testproject"))
Assert.assertTrue(jarUrl.isDefined)
val jarFile = JarParser.parseJarFile("parser-jar-testproject.jar",
Some(10), new FileInputStream(jarUrl.head))
Assert.assertEquals(jarFile.name, "parser-jar-testproject.jar")
Assert.assertEquals(jarFile.size, Some(10))
Assert.assertFalse(jarFile.classes.isEmpty)
Assert.assertEquals(jarFile.classes.size, 4)
Assert.assertTrue(jarFile.mavenInfo.isDefined)
val mavenInfo = jarFile.mavenInfo.head
Assert.assertEquals(mavenInfo.groupId, "org.leebli.parsers")
Assert.assertEquals(mavenInfo.artifactId, "parser-jar-testproject")
Assert.assertEquals(mavenInfo.version, "1.0")
println(jarFile.classes.mkString(","))
}
@Test
def testParseJarAndClasses() {
val cl = ClassLoader.getSystemClassLoader();
val urls = System.getProperty("java.class.path").split(File.pathSeparator);
println(urls.mkString(","))
val jarUrl = urls find (_.contains("parser-jar-testproject"))
Assert.assertTrue(jarUrl.isDefined)
val jarFile = JarParser.parseJarFile("parser-jar-testproject.jar",
Some(10), new FileInputStream(jarUrl.head), true)
Assert.assertEquals(jarFile.name, "parser-jar-testproject.jar")
Assert.assertEquals(jarFile.size, Some(10))
Assert.assertFalse(jarFile.classes.isEmpty)
Assert.assertEquals(jarFile.classes.size, 4)
Assert.assertTrue(jarFile.mavenInfo.isDefined)
val mavenInfo = jarFile.mavenInfo.head
Assert.assertEquals(mavenInfo.groupId, "org.leebli.parsers")
Assert.assertEquals(mavenInfo.artifactId, "parser-jar-testproject")
Assert.assertEquals(mavenInfo.version, "1.0")
println(jarFile.classes.mkString(","))
val javaClass = jarFile.classes find (_.name == "org.leebli.parser.bytecode.test.TestAttribute")
Assert.assertTrue(javaClass.isDefined)
}
}
|
jonathan-macke/leebli
|
parsers/archive-parser/src/test/scala/org/leebli/parser/jar/JarParserTestCase.scala
|
Scala
|
apache-2.0
| 2,267
|
/*
* Copyright (c) 2014-2019 Israel Herraiz <isra@herraiz.org>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// ---------------------
// Test for example 3.13
// ---------------------
package chap03
import org.specs2.mutable._
object Ex13Spec extends Specification {
"The foldRight function" should {
"behave like foldLeft with empty lists" in {
Ex13.foldRight(Nil: List[Int], 0)(_+_) mustEqual Ex10.foldLeft(Nil: List[Int],0)(_+_)
}
"behave like List's foldRight with empty lists" in {
Ex13.foldRight(Nil: List[Int], 0)(_+_) mustEqual (Nil: List[Int]).foldRight(0)(_+_)
}
"behave like foldLeft reversedly with non-empty lists" in {
Ex13.foldRight(List(1,3,5,4),Nil: List[Int]) {
(x,y) => x :: y
} mustEqual Ex10.foldLeft(List(4,5,3,1),Nil: List[Int]) {
(x,y) => y :: x
}
}
"behave like List's foldRight with non-empty lists" in {
Ex13.foldRight(List(1,2),List(3,4)) {
(x,y) => x :: y
} mustEqual (List(1,2)).foldRight(List(3,4)) {
(x,y) => x :: y
}
}
}
"The foldLeft function" should {
"behave like foldLeft (ex. 10) with empty lists" in {
Ex13.foldLeft(Nil: List[Int], 0)(_+_) mustEqual Ex10.foldLeft(Nil: List[Int],0)(_+_)
}
"behave like foldLeft (ex. 10) with non-empty lists" in {
Ex13.foldLeft(List(1,3,5,4),Nil: List[Int]) {
(x,y) => y :: x
} mustEqual Ex10.foldLeft(List(1,3,5,4),Nil: List[Int]) {
(x,y) => y :: x
}
}
"behave like List's foldLeft with empty lists" in {
Ex13.foldLeft(Nil: List[Int], 0)(_+_) mustEqual (Nil: List[Int]).foldLeft(0)(_+_)
}
"behave like List's foldLeft with non-empty lists" in {
Ex13.foldLeft(List(1,2),List(3,4)) {
(x,y) => y :: x
} mustEqual (List(1,2)).foldLeft(List(3,4)) {
(x,y) => y :: x
}
}
}
}
|
iht/fpinscala
|
src/test/scala/chap03/ex13Spec.scala
|
Scala
|
mit
| 2,931
|
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600e.v2
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.computations.HmrcAccountingPeriod
import uk.gov.hmrc.ct.ct600.v2.calculations.CorporationTaxCalculator
import uk.gov.hmrc.ct.ct600e.v2.retriever.CT600EBoxRetriever
case class E1033(value: Int) extends CtBoxIdentifier("First Financial Year") with CtInteger
object E1033 extends CorporationTaxCalculator with Calculated[E1033, CT600EBoxRetriever] {
override def calculate(fieldValueRetriever: CT600EBoxRetriever): E1033 = {
E1033(financialYear1(HmrcAccountingPeriod(fieldValueRetriever.e1021(), fieldValueRetriever.e1022)))
}
}
|
hmrc/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/ct600e/v2/E1033.scala
|
Scala
|
apache-2.0
| 1,226
|
package com.sksamuel.elastic4s
sealed trait Response[+U] {
/**
* Returns the http status code of the response.
*/
def status: Int
/**
* Returns the body included in the HTTP response or None if this response did not include a body.
*
* @return
*/
def body: Option[String]
/**
* Returns any HTTP headers that were included in the response.
*/
def headers: Map[String, String]
/**
* Returns the marshalled response U if this is an instance of [[RequestSuccess]], otherwise
* throws an exception.
*/
def result: U
/**
* Returns error details as an instance of [[ElasticError]] if this is [[RequestFailure]].
* Otherwise throws an exception.
*/
def error: ElasticError
/**
* Returns true if this response is an error state.
*
* @return
*/
def isError: Boolean
/**
* Returns true if this response was successful
*/
final def isSuccess: Boolean = !isError
def map[V](f: U => V): Response[V]
def flatMap[V](f: U => Response[V]): Response[V]
final def fold[V](ifError: => V)(f: U => V): V = if (isError) ifError else f(result)
final def fold[V](onError: RequestFailure => V, onSuccess: U => V): V = this match {
case failure: RequestFailure => onError(failure)
case RequestSuccess(_, _, _, result) => onSuccess(result)
}
final def foreach[V](f: U => V): Unit = if (!isError) f(result)
final def toOption: Option[U] = if (isError) None else Some(result)
final def toEither: Either[ElasticError, U] = if (isError) Left(error) else Right(result)
}
case class RequestSuccess[U](override val status: Int, // the http status code of the response
override val body: Option[String], // the http response body if the response included one
override val headers: Map[String, String], // any http headers included in the response
override val result: U) extends Response[U] {
override def isError = false
override def error = throw new NoSuchElementException(s"Request success $result")
final def map[V](f: U => V): Response[V] = RequestSuccess(status, body, headers, f(result))
final def flatMap[V](f: U => Response[V]): Response[V] = f(result)
}
case class RequestFailure(override val status: Int, // the http status code of the response
override val body: Option[String], // the http response body if the response included one
override val headers: Map[String, String], // any http headers included in the response
override val error: ElasticError) extends Response[Nothing] {
override def result = throw new NoSuchElementException(s"Request Failure $error")
override def isError = true
final def map[V](f: Nothing => V): Response[V] = this
final def flatMap[V](f: Nothing => Response[V]): Response[V] = this
}
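// --- Illustrative usage sketch (not part of the original file) ---
// Shows how calling code might consume a Response[U] with the combinators above.
// The response value below is hypothetical; only RequestSuccess from this file is used.
object ResponseUsageSketch {
  def main(args: Array[String]): Unit = {
    val ok: Response[String] = RequestSuccess(200, Some("""{"acknowledged":true}"""), Map.empty, "created")
    // fold picks a branch without pattern matching on the concrete subclasses
    val summary = ok.fold((f: RequestFailure) => s"failed: ${f.status}", (r: String) => s"ok: $r")
    println(summary)          // "ok: created"
    println(ok.toEither)      // Right(created)
    println(ok.map(_.length)) // prints something like RequestSuccess(200, ..., 7)
  }
}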
|
sksamuel/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/responses.scala
|
Scala
|
apache-2.0
| 2,921
|
/*******************************************************************************
* Copyright (c) 2019. Carl Minden
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package com.anathema_roguelike
package fov
import com.anathema_roguelike.main.display.DisplayCellTransformation
abstract class LightLevelShader(lightLevels: LightLevels) extends DisplayCellTransformation {
protected def getLightLevels: LightLevels = lightLevels
}
|
carlminden/anathema-roguelike
|
src/com/anathema_roguelike/fov/LightLevelShader.scala
|
Scala
|
gpl-3.0
| 1,114
|
package com.datawizards.dmg.customizations
import com.datawizards.dmg.TestModel._
import com.datawizards.dmg.dialects.{H2Dialect, HiveDialect, MySQLDialect, RedshiftDialect}
import com.datawizards.dmg.generator.HiveGenerator
import com.datawizards.dmg.{DataModelGenerator, DataModelGeneratorBaseTest}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class UnderscoreConversionTest extends DataModelGeneratorBaseTest {
test("Underscore conversion - H2") {
val expected =
"""CREATE TABLE person_with_underscore_with_multiple_names(
| person_name VARCHAR,
| person_age INT
|);""".stripMargin
assertResultIgnoringNewLines(expected) {
DataModelGenerator.generate[PersonWithUnderscoreWithMultipleNames](H2Dialect)
}
}
test("Underscore conversion - Hive") {
val expected =
"""CREATE TABLE PEOPLE(
| name STRING,
| person_age INT
|)
|TBLPROPERTIES( 'MODEL_GENERATOR_METADATA_HASH' = '1071146412')
|;""".stripMargin
assertResultIgnoringNewLines(expected) {
DataModelGenerator.generate[PersonWithUnderscoreWithMultipleNames](new HiveGenerator)
}
}
test("Underscore conversion - Redshift") {
val expected =
"""CREATE TABLE person_with_underscore_with_multiple_names(
| name VARCHAR,
| age INTEGER
|);""".stripMargin
assertResultIgnoringNewLines(expected) {
DataModelGenerator.generate[PersonWithUnderscoreWithMultipleNames](RedshiftDialect)
}
}
test("Underscore conversion - MySQL") {
val expected =
"""CREATE TABLE PersonWithUnderscoreWithMultipleNames(
| personName VARCHAR,
| personAge INT
|);""".stripMargin
assertResultIgnoringNewLines(expected) {
DataModelGenerator.generate[PersonWithUnderscoreWithMultipleNames](MySQLDialect)
}
}
}
|
mateuszboryn/data-model-generator
|
src/test/scala/com/datawizards/dmg/customizations/UnderscoreConversionTest.scala
|
Scala
|
apache-2.0
| 1,935
|
package is.hail.utils.prettyPrint
import java.io.{StringWriter, Writer}
import java.util.ArrayDeque
import is.hail.utils.ArrayBuilder
import scala.annotation.tailrec
object Doc {
def render(doc: Doc, width: Int, ribbonWidth: Int, _maxLines: Int, out: Writer): Unit = {
// All groups whose formatting is still undetermined. The innermost group is at the end.
// A group which has been determined to be non-flat is popped from the front, even if the
// group is still open. This is safe because top-level Lines (not in any group) always print
// as newlines.
// Each GroupN contains the contents of the group that has been scanned so far.
val pendingGroups = new ArrayDeque[GroupN]
// Represents the rest of the document past the node currently being scanned.
val kont = new ArrayDeque[KontNode]
// The current position in the document, if it had been formatted entirely in one line.
// This is only used to take the difference between two globalPos values, to determine
// the length of a group if it is formatted flat.
var globalPos: Int = 0
val maxLines = if (_maxLines > 0) _maxLines else Int.MaxValue
var lines: Int = 0
var remainingInLine: Int = math.min(width, ribbonWidth)
var indentation: Int = 0
var currentNode = doc
// Group openings and closes are deferred until the next Line. This forces any Text to be
// considered part of the group containing the previous Line. For opens, this is a slight
// performance optimization, because text at the beginning of a group can be written eagerly,
// regardless of how the group is formatted. For closes, this is more correct: if text
// immediately follows the end of a group, then the group may fit on the current line, yet
// the following text would exceed the max width.
var pendingOpens: Int = 0
var pendingCloses: Int = 0
def scan(node: ScannedNode, size: Int): Unit = {
globalPos += size
if (pendingGroups.isEmpty) {
printNode(node, false)
} else {
pendingGroups.getLast.contents += node
while (!pendingGroups.isEmpty && globalPos - pendingGroups.getFirst.start > remainingInLine) {
val head = pendingGroups.removeFirst()
head.end = globalPos
printNode(head, false)
}
}
}
// Process the top of kont until a non-empty ConcatK is found; move the first contained node
// to currentNode.
@tailrec def advance(): Unit = {
if (kont.isEmpty) {
currentNode = null
} else {
kont.peek() match {
case ConcatK(k) =>
if (k.isEmpty) {
kont.pop()
advance()
} else {
currentNode = k.next()
}
case PopGroupK =>
if (pendingOpens > 0) pendingOpens -= 1 else pendingCloses += 1
kont.pop()
advance()
case UnindentK(i) =>
indentation -= i
kont.pop()
advance()
}
}
}
def printNode(node: ScannedNode, flatten: Boolean): Unit = node match {
case TextN(t) =>
remainingInLine -= t.length
out.write(t)
case LineN(i, ifFlat: String) =>
if (flatten) {
remainingInLine -= ifFlat.length
out.write(ifFlat)
} else {
lines += 1
if (lines >= maxLines) throw new MaxLinesExceeded()
out.write('\n')
out.write(" " * i)
remainingInLine = math.min(width - i, ribbonWidth)
}
case GroupN(contents, start, stop) =>
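        // A group is rendered flat iff its fully-flattened width (end - start, measured
        // in flat positions) fits in the space left on the current line.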
val h = stop - start <= remainingInLine
var i = 0
while (i < contents.size) {
printNode(contents(i), h)
i += 1
}
}
def closeGroups(): Unit =
while (pendingCloses > 0) {
if (pendingGroups.isEmpty) {
pendingCloses = 0
return
}
val last = pendingGroups.removeLast()
last.end = globalPos
if (pendingGroups.isEmpty) {
printNode(last, true)
} else {
pendingGroups.getLast.contents += last
}
pendingCloses -= 1
}
def openGroups(): Unit = {
while (pendingOpens > 0) {
pendingGroups.addLast(GroupN(new ArrayBuilder[ScannedNode](), globalPos, -1))
pendingOpens -= 1
}
}
try {
while (currentNode != null) {
currentNode match {
case Text(t) =>
scan(TextN(t), t.length)
advance()
case Line(ifFlat) =>
closeGroups()
openGroups()
scan(LineN(indentation, ifFlat), ifFlat.length)
advance()
case Group(body) =>
kont.push(PopGroupK)
pendingOpens += 1
currentNode = body
case Indent(i, body) =>
indentation += i
kont.push(UnindentK(i))
currentNode = body
case Concat(bodyIt) =>
kont.push(ConcatK(bodyIt.iterator))
advance()
}
}
closeGroups()
} catch {
case _: MaxLinesExceeded =>
// 'maxLines' have been printed, so break out of the loop and stop printing.
}
}
}
abstract class Doc {
def render(width: Int, ribbonWidth: Int, maxLines: Int, out: Writer): Unit =
Doc.render(this, width, ribbonWidth, maxLines, out)
def render(width: Int, ribbonWidth: Int, maxLines: Int): String = {
val out = new StringWriter()
render(width, ribbonWidth, maxLines, out)
out.toString
}
}
private[prettyPrint] case class Text(t: String) extends Doc
private[prettyPrint] case class Line(ifFlat: String) extends Doc
private[prettyPrint] case class Group(body: Doc) extends Doc
private[prettyPrint] case class Indent(i: Int, body: Doc) extends Doc
private[prettyPrint] case class Concat(it: Iterable[Doc]) extends Doc
private[prettyPrint] abstract class ScannedNode
private[prettyPrint] case class TextN(t: String) extends ScannedNode
private[prettyPrint] case class LineN(indentation: Int, ifFlat: String) extends ScannedNode
private[prettyPrint] case class GroupN(contents: ArrayBuilder[ScannedNode], start: Int, var end: Int) extends ScannedNode
private[prettyPrint] abstract class KontNode
private[prettyPrint] case object PopGroupK extends KontNode
private[prettyPrint] case class UnindentK(indent: Int) extends KontNode
private[prettyPrint] case class ConcatK(kont: Iterator[Doc]) extends KontNode
private[prettyPrint] class MaxLinesExceeded() extends Exception
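// --- Illustrative usage sketch (not part of the original file) ---
// The Doc constructors are private[prettyPrint], so this assumes code living in the same
// package. It shows the core behaviour: a Group renders flat when it fits the width, and
// otherwise its Lines break onto new lines at the current indentation.
object DocUsageSketch {
  def main(args: Array[String]): Unit = {
    val doc: Doc = Group(Concat(List(Text("foo"), Line(" "), Text("bar"))))
    println(doc.render(80, 80, 0)) // fits: "foo bar"
    println(doc.render(5, 5, 0))   // too narrow: "foo" and "bar" on separate lines
  }
}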
|
danking/hail
|
hail/src/main/scala/is/hail/utils/prettyPrint/PrettyPrintWriter.scala
|
Scala
|
mit
| 6,555
|
package recipestore.db.tinkerpop
import com.google.common.base.Strings
/**
 * DSE, as of when this class was created, still lacks a Gremlin fluent API. Hence
 * this object provides a bunch of helpers for some common DSE Graph operations.
*
*/
object GremlinQueryFactory {
def addVertexScript(label: String, propertyMap: Map[String, AnyRef]): String = {
val propertyChain = if (propertyMap == null) ""
else propertyMap
.filterNot(entry => entry._1.toLowerCase().equals("label"))
.filterNot(_._2 == null)
.filterNot(e => e._1.startsWith("id"))
      // Not a mistake: we are not interpolating the actual value here; instead we
      // rely on the parameter-binding mechanism available in the DSE graph API.
.map(e => s"'${e._1}', ${e._1}")
.mkString(",")
val propertyValuesString = if (Strings.isNullOrEmpty(propertyChain))
""
else s",$propertyChain"
s"g.addV(label,'$label'$propertyValuesString)"
}
def addEdgeScript(edgeLabel: String, propertyMap: Map[String, AnyRef] = Map()) = {
    val propertyChain = if (propertyMap == null || propertyMap.isEmpty) ""
else
"," + propertyMap
.filterNot(e => e._1.equals("id"))
.filterNot(e => e._1.toLowerCase().equals("label"))
.filter(_._2 != null)
.map(e => s"'${e._1}', ${e._1}")
.mkString(",")
s"""def v1 = g.V().has('resourceId',id1).next()\n def v2 = g.V().has('resourceId',id2).next()\n v1.addEdge('$edgeLabel', v2 $propertyChain)"""
}
}
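// --- Illustrative usage sketch (not part of the original file) ---
// The label and property names below are hypothetical; the property *values* are not
// interpolated into the script, they are supplied later through DSE's parameter bindings.
object GremlinQueryFactorySketch {
  def main(args: Array[String]): Unit = {
    val script = GremlinQueryFactory.addVertexScript(
      "recipe",
      Map("name" -> "pancakes", "servings" -> Integer.valueOf(4)))
    // Prints something like: g.addV(label,'recipe','name', name,'servings', servings)
    println(script)
  }
}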
|
prad-a-RuntimeException/semantic-store
|
src/main/scala/recipestore/db/tinkerpop/GremlinQueryFactory.scala
|
Scala
|
mit
| 1,467
|
package com.sksamuel.avro4s.schemas
import com.sksamuel.avro4s.SchemaFor
import org.apache.avro.{Schema, SchemaBuilder}
import scala.quoted.Quotes
import scala.quoted.Expr
import scala.quoted.Type
object ScalaEnums:
inline def schema[T]: SchemaFor[T] = ${ schema }
def schema[T:Type](using quotes: Quotes): Expr[SchemaFor[T]] =
import quotes.reflect.*
val tpe = TypeRepr.of[T]
val t = tpe.typeSymbol.tree
println(t)
println(tpe.toString)
'{ new SchemaFor[T] {
println("hello")
override def schema: Schema = SchemaBuilder.builder().intType()
} }
|
sksamuel/avro4s
|
avro4s-core/src/main/scala/com/sksamuel/avro4s/schemas/scalaenums.scala
|
Scala
|
apache-2.0
| 593
|
/*
* Copyright 2011-2018 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.cdevreeze.nta.common.taxonomy
import java.net.URI
import eu.cdevreeze.tqa.base.dom.TaxonomyDocument
import eu.cdevreeze.tqa.base.dom.XsdSchema
import eu.cdevreeze.tqa.base.taxonomy.BasicTaxonomy
import eu.cdevreeze.tqa.base.taxonomybuilder.DocumentCollector
import eu.cdevreeze.tqa.docbuilder.DocumentBuilder
import eu.cdevreeze.yaidom.queryapi.BackingDocumentApi
/**
* A taxonomy, holding a collection of DTSes (fitting entirely in that taxonomy).
*
* It does not know anything about the part of the taxonomy that must be validated by NTA validators.
*
 * Note that memory usage may be quite large, so by all means back the documents with Saxon tiny trees or
 * some other thread-safe, low-footprint DOM-like element implementation.
*
* @author Chris de Vreeze
*/
final class Taxonomy private (
val universeTaxonomy: BasicTaxonomy,
val dtsMap: Map[Set[URI], BasicTaxonomy]) {
assert(dtsMap.forall { case (ep, taxo) => ep.subsetOf(taxo.taxonomyBase.taxonomyDocUriMap.keySet) })
assert(dtsMap.values.forall(dts =>
dts.taxonomyBase.taxonomyDocUriMap.keySet.subsetOf(universeTaxonomy.taxonomyBase.taxonomyDocUriMap.keySet)))
def findAllDocumentUris: Set[URI] = {
universeTaxonomy.taxonomyBase.taxonomyDocUriMap.keySet
}
def findDts(entrypoint: Set[URI]): Option[BasicTaxonomy] = {
dtsMap.get(entrypoint)
}
def getDts(entrypoint: Set[URI]): BasicTaxonomy = {
findDts(entrypoint).getOrElse(sys.error(s"No DTS found for entrypoint ${entrypoint.mkString(", ")}"))
}
def findDocument(uri: URI): Option[TaxonomyDocument] = {
universeTaxonomy.taxonomyBase.taxonomyDocUriMap.get(uri)
}
def getDocument(uri: URI): TaxonomyDocument = {
findDocument(uri).getOrElse(sys.error(s"No document found with URI '$uri'"))
}
def isEntrypointDocument(doc: TaxonomyDocument): Boolean = {
dtsMap.keySet.exists(_.contains(doc.uri))
}
def isEntrypointSchemaDocument(doc: TaxonomyDocument): Boolean = {
doc.documentElement.isInstanceOf[XsdSchema] && isEntrypointDocument(doc)
}
def filterEntrypointsReturningCombinedDtsAsUriSet(p: Set[URI] => Boolean): Set[URI] = {
dtsMap.filterKeys(p)
.values.flatMap { dts =>
dts.taxonomyBase.taxonomyDocUriMap.keySet
}.toSet
}
}
object Taxonomy {
def build(
universeTaxonomy: BasicTaxonomy,
dtsDocumentCollector: DocumentCollector,
entrypoints: Set[Set[URI]]): Taxonomy = {
val docBuilder = new DocBuilder(universeTaxonomy)
val dtsMap: Map[Set[URI], BasicTaxonomy] =
entrypoints.toSeq.map { entrypointUris =>
val dtsUris: Set[URI] =
dtsDocumentCollector.collectTaxonomyDocuments(entrypointUris, docBuilder).map(_.uri).toSet
val dts: BasicTaxonomy = universeTaxonomy.filteringDocumentUris(dtsUris)
entrypointUris -> dts
}.toMap
new Taxonomy(universeTaxonomy, dtsMap)
}
/**
* Very fast DocumentBuilder that only looks up documents in the passed "universe taxonomy", instead of parsing.
*/
final class DocBuilder(val universeTaxonomy: BasicTaxonomy) extends DocumentBuilder {
type BackingDoc = BackingDocumentApi
def build(uri: URI): BackingDocumentApi = {
universeTaxonomy.taxonomyBase.taxonomyDocUriMap
.getOrElse(uri, sys.error(s"Missing document with URI '$uri'"))
.backingDocument
}
}
}
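// --- Illustrative helper sketch (not part of the original file) ---
// Shows how validator code might use the API above: given an already-built Taxonomy and
// one hypothetical entrypoint, collect the document URIs of that entrypoint's DTS.
object TaxonomyUsageSketch {
  def dtsDocumentUris(taxonomy: Taxonomy, entrypoint: Set[URI]): Set[URI] =
    taxonomy.findDts(entrypoint) match {
      case Some(dts) => dts.taxonomyBase.taxonomyDocUriMap.keySet
      case None      => Set.empty
    }
}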
|
dvreeze/nta
|
src/main/scala/eu/cdevreeze/nta/common/taxonomy/Taxonomy.scala
|
Scala
|
apache-2.0
| 3,958
|
package edu.cmu.lti.nlp.amr.ConceptInvoke
import edu.cmu.lti.nlp.amr._
import edu.cmu.lti.nlp.amr.Train._
import edu.cmu.lti.nlp.amr.BasicFeatureVector._
import java.io.File
import java.io.FileOutputStream
import java.io.PrintStream
import java.io.BufferedOutputStream
import java.io.OutputStreamWriter
import java.lang.Math.abs
import java.lang.Math.log
import java.lang.Math.exp
import java.lang.Math.random
import java.lang.Math.floor
import java.lang.Math.min
import java.lang.Math.max
import scala.io.Source
import scala.util.matching.Regex
import scala.collection.mutable.Map
import scala.collection.mutable.Set
import scala.collection.mutable.ArrayBuffer
abstract class Decoder(featureNames: List[String]) {
val features = new Features(featureNames) // maybe this should be renamed ff?
def decode(input: Input) : DecoderResult
}
|
avikalpg/jamr
|
src/ConceptInvoke/Decoder.scala
|
Scala
|
bsd-2-clause
| 848
|
/*
* Copyright 2015 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.csv
package laws
trait VersionSpecificReaderEngineLaws { self: ReaderEngineLaws =>
def withFilter(csv: List[List[Cell]], f: List[Cell] => Boolean): Boolean =
asReader(csv).withFilter(f).toList == asReader(csv).filter(f).toList
def toStream(csv: List[List[Cell]]): Boolean =
asReader(csv).toStream == csv.toStream
def toTraversable(csv: List[List[Cell]]): Boolean =
asReader(csv).toTraversable == csv.toTraversable
def toIterator(csv: List[List[Cell]]): Boolean =
asReader(csv).toIterator.sameElements(csv.toIterator)
}
|
nrinaudo/scala-csv
|
laws/shared/src/main/scala-2.12/kantan/csv/laws/VersionSpecificReaderEngineLaws.scala
|
Scala
|
mit
| 1,163
|
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.storage.common.partitions
import org.locationtech.geomesa.fs.storage.api.PartitionScheme.SimplifiedFilter
import org.locationtech.geomesa.fs.storage.api.{NamedOptions, PartitionScheme, PartitionSchemeFactory}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
object FlatScheme extends PartitionScheme {
override val depth: Int = 0
override def getPartitionName(feature: SimpleFeature): String = ""
override def getSimplifiedFilters(filter: Filter, partition: Option[String]): Option[Seq[SimplifiedFilter]] =
Some(Seq(SimplifiedFilter(filter, Seq(""), partial = false)))
class FlatPartitionSchemeFactory extends PartitionSchemeFactory {
override def load(sft: SimpleFeatureType, config: NamedOptions): Option[PartitionScheme] =
if (config.name.equalsIgnoreCase("flat")) { Some(FlatScheme) } else { None }
}
}
|
elahrvivaz/geomesa
|
geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-common/src/main/scala/org/locationtech/geomesa/fs/storage/common/partitions/FlatScheme.scala
|
Scala
|
apache-2.0
| 1,391
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.master.ui
import org.apache.spark.Logging
import org.apache.spark.deploy.master.Master
import org.apache.spark.status.api.v1.{ApiRootResource, ApplicationsListResource, ApplicationInfo,
UIRoot}
import org.apache.spark.ui.{SparkUI, WebUI}
import org.apache.spark.ui.JettyUtils._
import org.apache.spark.util.RpcUtils
/**
* Web UI server for the standalone master.
*/
private[master]
class MasterWebUI(val master: Master, requestedPort: Int)
extends WebUI(master.securityMgr, requestedPort, master.conf, name = "MasterUI") with Logging
with UIRoot {
val masterActorRef = master.self
val timeout = RpcUtils.askTimeout(master.conf)
val killEnabled = master.conf.getBoolean("spark.ui.killEnabled", true)
val masterPage = new MasterPage(this)
initialize()
/** Initialize all components of the server. */
def initialize() {
val masterPage = new MasterPage(this)
attachPage(new ApplicationPage(this))
attachPage(new HistoryNotFoundPage(this))
attachPage(masterPage)
attachHandler(createStaticHandler(MasterWebUI.STATIC_RESOURCE_DIR, "/static"))
attachHandler(ApiRootResource.getServletHandler(this))
attachHandler(createRedirectHandler(
"/app/kill", "/", masterPage.handleAppKillRequest, httpMethods = Set("POST")))
attachHandler(createRedirectHandler(
"/driver/kill", "/", masterPage.handleDriverKillRequest, httpMethods = Set("POST")))
}
/** Attach a reconstructed UI to this Master UI. Only valid after bind(). */
def attachSparkUI(ui: SparkUI) {
assert(serverInfo.isDefined, "Master UI must be bound to a server before attaching SparkUIs")
ui.getHandlers.foreach(attachHandler)
}
/** Detach a reconstructed UI from this Master UI. Only valid after bind(). */
def detachSparkUI(ui: SparkUI) {
assert(serverInfo.isDefined, "Master UI must be bound to a server before detaching SparkUIs")
ui.getHandlers.foreach(detachHandler)
}
def getApplicationInfoList: Iterator[ApplicationInfo] = {
val state = masterPage.getMasterState
val activeApps = state.activeApps.sortBy(_.startTime).reverse
val completedApps = state.completedApps.sortBy(_.endTime).reverse
activeApps.iterator.map { ApplicationsListResource.convertApplicationInfo(_, false) } ++
completedApps.iterator.map { ApplicationsListResource.convertApplicationInfo(_, true) }
}
def getSparkUI(appId: String): Option[SparkUI] = {
val state = masterPage.getMasterState
val activeApps = state.activeApps.sortBy(_.startTime).reverse
val completedApps = state.completedApps.sortBy(_.endTime).reverse
(activeApps ++ completedApps).find { _.id == appId }.flatMap {
master.rebuildSparkUI
}
}
}
private[master] object MasterWebUI {
private val STATIC_RESOURCE_DIR = SparkUI.STATIC_RESOURCE_DIR
}
|
andrewor14/iolap
|
core/src/main/scala/org/apache/spark/deploy/master/ui/MasterWebUI.scala
|
Scala
|
apache-2.0
| 3,633
|
/*
* Grammar of Graphics in Scala
* Copyright (c) 2011, ggscala.org
*/
package org.ggscala.model
import org.scalatest._
import flatspec._
import matchers._
import org.ggscala.test.TestUtils
import org.ggscala.model.DataFrame._
class DataFrameSpec extends AnyFlatSpec with should.Matchers {
"A MemoryDataFrame" should "be simple to construct" in
{
val data = Array( 1.0, 2.0, 3.0 )
// make data frame with a single numeric column
val dataFrame = MemoryDataFrame( d("values",data) )
dataFrame.ncol should be (1)
dataFrame.names.toArray should equal ( Array("values") )
}
it should "be simple to construct with multiple columns" in
{
val data1 = Array( 1.0, 2.0, 3.0 )
val data2 = Array( "A", "B", "C" )
// make data frame with two columns
val dataFrame = MemoryDataFrame( d("values",data1), s("letters",data2) )
dataFrame.ncol should be (2)
val expectNames = Array( "values", "letters" )
dataFrame.names.toArray should equal (expectNames)
}
it should "be row-bindable" in
{
// construct start state
val (data1,data2,data3,data4) = ( Array( 1.0, 2.0, 3.0 ),
Array( "A", "B", "C" ),
Array( 4.0, 5.0, 6.0 ),
Array( "A", "B", "C" )
)
val dataFrame1 = MemoryDataFrame( d("values",data1), s("letters",data2) )
val dataFrame2 = MemoryDataFrame( d("values",data3), s("letters",data4) )
// action
val dataFrame3 = dataFrame1.rbind(dataFrame2)
// asserts
val expectNames = Array( "values", "letters" )
val expectData1 = Array( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 )
val expectData2 = Array( "A", "B", "C", "A", "B", "C" )
dataFrame3.ncol should be (2)
dataFrame3.names.toArray should equal (expectNames)
dataFrame3.$d("values").toArray should equal (expectData1)
dataFrame3.$s("letters").toArray should equal (expectData2)
}
it should "support row iteration" in
{
val (data1,data2) = (Array( 1.0, 2.0, 3.0 ),Array( "A", "B", "C" ))
val dataFrame = MemoryDataFrame( d("values",data1), s("letters",data2) )
var count = 0
for( row <- dataFrame.rowIterator )
{
row.length should be (2)
// TODO assert casts (might be better way to do this with ScalaTest)
count += 1
}
count should be (3)
}
}
object DataFrameSpec {
def main(args:Array[String]) =
{
TestUtils.timing {
(new DataFrameSpec).execute()
}
}
}
|
drkeoni/ggscala
|
src/test/scala/org/ggscala/model/DataFrameSpec.scala
|
Scala
|
mit
| 2,419
|
package org.jetbrains.plugins.scala
package lang
package references
import java.util
import java.util.Collections
import com.intellij.openapi.diagnostic.Logger
import com.intellij.openapi.module.{Module, ModuleUtilCore}
import com.intellij.openapi.roots.ModuleRootManager
import com.intellij.openapi.util.{Condition, TextRange}
import com.intellij.openapi.vfs.VirtualFile
import com.intellij.patterns.PlatformPatterns
import com.intellij.psi._
import com.intellij.psi.impl.source.resolve.reference.impl.providers.{FileReference, FileReferenceSet}
import com.intellij.util.ProcessingContext
import com.intellij.util.containers.ContainerUtil
import org.jetbrains.annotations.NotNull
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScInterpolationPattern
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScInterpolatedStringLiteral, ScLiteral}
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScReferenceExpression
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.impl.expr.ScInterpolatedStringPartReference
import scala.collection.JavaConversions
class ScalaReferenceContributor extends PsiReferenceContributor {
def registerReferenceProviders(registrar: PsiReferenceRegistrar) {
registrar.registerReferenceProvider(PlatformPatterns.psiElement(classOf[ScLiteral]), new FilePathReferenceProvider())
registrar.registerReferenceProvider(PlatformPatterns.psiElement(classOf[ScLiteral]), new InterpolatedStringReferenceProvider())
}
}
class InterpolatedStringReferenceProvider extends PsiReferenceProvider {
override def getReferencesByElement(element: PsiElement, context: ProcessingContext): Array[PsiReference] = {
element match {
case _: ScInterpolatedStringLiteral => Array.empty
case l: ScLiteral if (l.isString || l.isMultiLineString) && l.getText.contains("$") =>
val interpolated = ScalaPsiElementFactory.createExpressionFromText("s" + l.getText, l.getContext)
interpolated.getChildren.filter {
case _: ScInterpolatedStringPartReference => false
case _: ScReferenceExpression => true
case _ => false
}.map {
case ref: ScReferenceExpression =>
new PsiReference {
override def getVariants: Array[AnyRef] = Array.empty
override def getCanonicalText: String = ref.getCanonicalText
override def getElement: PsiElement = l
override def isReferenceTo(element: PsiElement): Boolean = ref.isReferenceTo(element)
override def bindToElement(element: PsiElement): PsiElement = ref
override def handleElementRename(newElementName: String): PsiElement = ref
override def isSoft: Boolean = true
override def getRangeInElement: TextRange = {
val range = ref.getTextRange
val startOffset = interpolated.getTextRange.getStartOffset + 1
new TextRange(range.getStartOffset - startOffset, range.getEndOffset - startOffset)
}
override def resolve(): PsiElement = null
}
}
case _ => Array.empty
}
}
}
// todo: Copy of the corresponding class from IDEA, changed to use ScLiteral rather than PsiLiteralExpr
class FilePathReferenceProvider extends PsiReferenceProvider {
private val LOG: Logger = Logger.getInstance("#org.jetbrains.plugins.scala.lang.references.FilePathReferenceProvider")
@NotNull def getRoots(thisModule: Module, includingClasses: Boolean): java.util.Collection[PsiFileSystemItem] = {
if (thisModule == null) return Collections.emptyList[PsiFileSystemItem]
val modules: java.util.List[Module] = new util.ArrayList[Module]
modules.add(thisModule)
var moduleRootManager: ModuleRootManager = ModuleRootManager.getInstance(thisModule)
ContainerUtil.addAll(modules, moduleRootManager.getDependencies: _*)
val result: java.util.List[PsiFileSystemItem] = new java.util.ArrayList[PsiFileSystemItem]
val psiManager: PsiManager = PsiManager.getInstance(thisModule.getProject)
if (includingClasses) {
val libraryUrls: Array[VirtualFile] = moduleRootManager.orderEntries.getAllLibrariesAndSdkClassesRoots
for (file <- libraryUrls) {
val directory: PsiDirectory = psiManager.findDirectory(file)
if (directory != null) {
result.add(directory)
}
}
}
for (module <- JavaConversions.iterableAsScalaIterable(modules)) {
moduleRootManager = ModuleRootManager.getInstance(module)
val sourceRoots: Array[VirtualFile] = moduleRootManager.getSourceRoots
for (root <- sourceRoots) {
val directory: PsiDirectory = psiManager.findDirectory(root)
if (directory != null) {
val aPackage: PsiPackage = JavaDirectoryService.getInstance.getPackage(directory)
if (aPackage != null && aPackage.name != null) {
try {
val createMethod = Class.forName("com.intellij.psi.impl.source.resolve.reference.impl.providers.PackagePrefixFileSystemItemImpl").getMethod("create", classOf[PsiDirectory])
createMethod.setAccessible(true)
createMethod.invoke(directory)
} catch {
case t: Exception => LOG.warn(t)
}
}
else {
result.add(directory)
}
}
}
}
result
}
@NotNull def getReferencesByElement(element: PsiElement, text: String, offset: Int, soft: Boolean): Array[PsiReference] = {
new FileReferenceSet(text, element, offset, this, true, myEndingSlashNotAllowed) {
protected override def isSoft: Boolean = soft
override def isAbsolutePathReference: Boolean = true
override def couldBeConvertedTo(relative: Boolean): Boolean = !relative
override def absoluteUrlNeedsStartSlash: Boolean = {
val s: String = getPathString
s != null && s.length > 0 && s.charAt(0) == '/'
}
@NotNull override def computeDefaultContexts: java.util.Collection[PsiFileSystemItem] = {
val module: Module = ModuleUtilCore.findModuleForPsiElement(getElement)
getRoots(module, includingClasses = true)
}
override def createFileReference(range: TextRange, index: Int, text: String): FileReference = {
FilePathReferenceProvider.this.createFileReference(this, range, index, text)
}
protected override def getReferenceCompletionFilter: Condition[PsiFileSystemItem] = {
new Condition[PsiFileSystemItem] {
def value(element: PsiFileSystemItem): Boolean = {
isPsiElementAccepted(element)
}
}
}
}.getAllReferences.map(identity)
}
override def acceptsTarget(@NotNull target: PsiElement): Boolean = {
target.isInstanceOf[PsiFileSystemItem]
}
protected def isPsiElementAccepted(element: PsiElement): Boolean = {
!(element.isInstanceOf[PsiJavaFile] && element.isInstanceOf[PsiCompiledElement])
}
protected def createFileReference(referenceSet: FileReferenceSet, range: TextRange, index: Int, text: String): FileReference = {
new FileReference(referenceSet, range, index, text)
}
def getReferencesByElement(element: PsiElement, context: ProcessingContext): Array[PsiReference] = {
element match {
case interpolated: ScInterpolationPattern =>
val refs = interpolated.getReferencesToStringParts
val start: Int = interpolated.getTextRange.getStartOffset
return refs.flatMap{ r =>
val offset = r.getElement.getTextRange.getStartOffset - start
getReferencesByElement(r.getElement, r.getElement.getText, offset, soft = true)}
case interpolatedString: ScInterpolatedStringLiteral =>
val refs = interpolatedString.getReferencesToStringParts
val start: Int = interpolatedString.getTextRange.getStartOffset
return refs.flatMap{ r =>
val offset = r.getElement.getTextRange.getStartOffset - start
getReferencesByElement(r.getElement, r.getElement.getText, offset, soft = true)
}
case literal: ScLiteral =>
literal.getValue match {
case text: String =>
if (text == null) return PsiReference.EMPTY_ARRAY
return getReferencesByElement(element, text, 1, soft = true)
case _ =>
}
case _ =>
}
PsiReference.EMPTY_ARRAY
}
private final val myEndingSlashNotAllowed: Boolean = false
}
|
ilinum/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/references/ScalaReferenceContributor.scala
|
Scala
|
apache-2.0
| 8,559
|
package org.jetbrains.plugins.scala
package codeInspection.typeChecking
import java.util.Comparator
import com.intellij.codeInsight.PsiEquivalenceUtil
import com.intellij.codeInspection.{ProblemHighlightType, ProblemsHolder}
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiElement
import com.intellij.psi.tree.IElementType
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.codeInspection.typeChecking.TypeCheckCanBeMatchInspection.{inspectionId, inspectionName}
import org.jetbrains.plugins.scala.codeInspection.typeChecking.TypeCheckToMatchUtil._
import org.jetbrains.plugins.scala.codeInspection.{AbstractFixOnTwoPsiElements, AbstractInspection}
import org.jetbrains.plugins.scala.extensions.{PsiElementExt, inWriteAction}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.ScalaRecursiveElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScBindingPattern, ScPattern}
import org.jetbrains.plugins.scala.lang.psi.api.base.types.{ScExistentialClause, ScTypeElement, ScTypeElementExt}
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScPatternDefinition
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScParameter
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScNamedElement
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createExpressionFromText
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.SyntheticNamedElement
import org.jetbrains.plugins.scala.lang.psi.types.ScTypeExt
import org.jetbrains.plugins.scala.lang.psi.{ScalaPsiElement, ScalaPsiUtil}
import org.jetbrains.plugins.scala.lang.refactoring.namesSuggester.NameSuggester
import org.jetbrains.plugins.scala.lang.refactoring.util.{InplaceRenameHelper, ScalaVariableValidator}
import scala.annotation.tailrec
import scala.collection.mutable
/**
* Nikolay.Tropin
* 5/6/13
*/
object TypeCheckCanBeMatchInspection {
val inspectionId = "TypeCheckCanBeMatch"
val inspectionName = "Type check can be replaced by pattern matching"
}
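// Illustration of the rewrite this inspection offers (hypothetical user code):
//   if (x.isInstanceOf[String]) {
//     val s = x.asInstanceOf[String]
//     s.length
//   } else 0
// becomes
//   x match {
//     case s: String => s.length
//     case _ => 0
//   }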
class TypeCheckCanBeMatchInspection extends AbstractInspection(inspectionId, inspectionName){
def actionFor(holder: ProblemsHolder): PartialFunction[PsiElement, Any] = {
case IsInstanceOfCall(call) =>
for {
ifStmt <- Option(PsiTreeUtil.getParentOfType(call, classOf[ScIfStmt]))
condition <- ifStmt.condition
iioCall <- findIsInstanceOfCalls(condition, onlyFirst = true)
if iioCall == call
if typeCheckIsUsedEnough(ifStmt, call)
} {
val fix = new TypeCheckCanBeMatchQuickFix(call, ifStmt)
holder.registerProblem(call, inspectionName, ProblemHighlightType.GENERIC_ERROR_OR_WARNING, fix)
}
}
private def typeCheckIsUsedEnough(ifStmt: ScIfStmt, isInstOf: ScGenericCall): Boolean = {
val chainSize = listOfIfAndIsInstOf(ifStmt, isInstOf, onlyFirst = true).size
val typeCastsNumber = findAsInstOfCalls(ifStmt.condition, isInstOf).size + findAsInstOfCalls(ifStmt.thenBranch, isInstOf).size
chainSize > 1 || typeCastsNumber > 0
}
}
class TypeCheckCanBeMatchQuickFix(isInstOfUnderFix: ScGenericCall, ifStmt: ScIfStmt)
extends AbstractFixOnTwoPsiElements(inspectionName, isInstOfUnderFix, ifStmt) {
def doApplyFix(project: Project) {
val isInstOf = getFirstElement
val ifSt = getSecondElement
if (!ifSt.isValid || !isInstOf.isValid) return
val (matchStmtOption, renameData) = buildMatchStmt(ifSt, isInstOf, onlyFirst = true)
for (matchStmt <- matchStmtOption) {
val newMatch = inWriteAction {
ifSt.replaceExpression(matchStmt, removeParenthesis = true).asInstanceOf[ScMatchStmt]
}
if (!ApplicationManager.getApplication.isUnitTestMode) {
val renameHelper = new InplaceRenameHelper(newMatch)
setElementsForRename(newMatch, renameHelper, renameData)
renameHelper.startRenaming()
}
}
}
}
object TypeCheckToMatchUtil {
type RenameData = collection.mutable.ArrayBuffer[(Int, Seq[String])]
def buildMatchStmt(ifStmt: ScIfStmt, isInstOfUnderFix: ScGenericCall, onlyFirst: Boolean): (Option[ScMatchStmt], RenameData) = {
import ifStmt.projectContext
baseExpr(isInstOfUnderFix) match {
case Some(expr: ScExpression) =>
val matchedExprText = expr.getText
val (caseClausesText, renameData) = buildCaseClausesText(ifStmt, isInstOfUnderFix, onlyFirst)
val matchStmtText = s"$matchedExprText match { \n " + caseClausesText + "}"
val matchStmt = createExpressionFromText(matchStmtText).asInstanceOf[ScMatchStmt]
(Some(matchStmt), renameData)
case _ => (None, null)
}
}
private def buildCaseClauseText(ifStmt: ScIfStmt, isInstOf: ScGenericCall, caseClauseIndex: Int, renameData: RenameData): Option[String] = {
var definedName: Option[String] = None
var definition: Option[ScPatternDefinition] = None
    // Finds and remembers a named type cast, i.e. a val bound to an asInstanceOf call
def checkAndStoreNameAndDef(asInstOfCall: ScGenericCall): Boolean = {
ScalaPsiUtil.getContextOfType(asInstOfCall, strict = true, classOf[ScPatternDefinition]) match {
case patternDef: ScPatternDefinition =>
val bindings = patternDef.bindings
          // the pattern definition consists of a single binding whose right-hand side is the asInstanceOf call
if (bindings.size == 1 && patternDef.expr.get == asInstOfCall) {
definition match {
            // store the first occurrence of the pattern definition and its name
case Some(oldDef) if oldDef.getTextOffset < patternDef.getTextOffset => true
case _ =>
definedName = Some(bindings.head.getName)
definition = Some(patternDef)
true
}
} else false
case null => false
}
}
def typeNeedParentheses(typeElem: ScTypeElement): Boolean = {
PsiTreeUtil.getChildOfType(typeElem, classOf[ScExistentialClause]) != null
}
for {
args <- isInstOf.typeArgs
condition <- ifStmt.condition
if args.typeArgs.size == 1
} yield {
val typeElem = args.typeArgs.head
val typeName0 = typeElem.getText
val typeName =
if (typeNeedParentheses(typeElem)) s"($typeName0)"
else typeName0
val asInstOfInBody = findAsInstOfCalls(ifStmt.thenBranch, isInstOf)
val guardCond = guardCondition(condition, isInstOf)
val asInstOfInGuard = findAsInstOfCalls(guardCond, isInstOf)
val asInstOfEverywhere = asInstOfInBody ++ asInstOfInGuard
implicit val projectContext = ifStmt.projectContext
if (asInstOfInBody.count(checkAndStoreNameAndDef) == 0) {
//no usage of asInstanceOf
if (asInstOfEverywhere.isEmpty) {
buildCaseClauseText("_ : " + typeName, guardCond, ifStmt.thenBranch, ifStmt.getProject)
}
//no named usage
else {
val suggestedNames = NameSuggester.suggestNames(asInstOfEverywhere.head)(
new ScalaVariableValidator(ifStmt, false, ifStmt.getParent, ifStmt.getParent)
)
val name = suggestedNames.head
asInstOfEverywhere.foreach { c =>
val newExpr = createExpressionFromText(name)
inWriteAction {
c.replaceExpression(newExpr, removeParenthesis = true)
}
}
renameData += ((caseClauseIndex, suggestedNames.toSeq))
buildCaseClauseText(s"$name : $typeName", guardCond, ifStmt.thenBranch, ifStmt.getProject)
}
}
      // there is a named usage: reuse that name as the binding in the case clause pattern
      else {
        // delete the now-redundant val declaration; the case clause binding replaces it
val patternDef = definition.get
inWriteAction {
patternDef.delete()
}
val name = definedName.get
val newExpr = createExpressionFromText(name)
inWriteAction {
asInstOfEverywhere.foreach(_.replaceExpression(newExpr, removeParenthesis = true))
}
buildCaseClauseText(s"$name : $typeName", guardCond, ifStmt.thenBranch, ifStmt.getProject)
}
}
}
private def buildDefaultCaseClauseText(body: Option[ScExpression], project: Project): Option[String] = {
Some(buildCaseClauseText("_ ", None, body, project))
}
private def buildCaseClauseText(patternText: String, guardCondition: Option[ScExpression],
body: Option[ScExpression], project: Project): String = {
val builder = new StringBuilder
builder.append("case ").append(patternText)
guardCondition.map(cond => builder.append(" if " + cond.getText))
val arrow = ScalaPsiUtil.functionArrow(project)
builder.append(s" $arrow")
body match {
case Some(block: ScBlock) =>
for (elem <- block.children) {
val elementType: IElementType = elem.getNode.getElementType
if (elementType != ScalaTokenTypes.tLBRACE && elementType != ScalaTokenTypes.tRBRACE)
builder.append(elem.getText)
}
case Some(expr: ScExpression) => builder.append(expr.getText)
case None =>
}
if (!builder.last.isWhitespace) builder.append("\n")
builder.toString()
}
def listOfIfAndIsInstOf(currentIfStmt: ScIfStmt, currentCall: ScGenericCall, onlyFirst: Boolean): List[(ScIfStmt, ScGenericCall)] = {
for (currentBase <- baseExpr(currentCall)) {
currentIfStmt.elseBranch match {
case Some(nextIfStmt: ScIfStmt) =>
for {
nextCond <- nextIfStmt.condition
nextCall <- findIsInstanceOfCalls(nextCond, onlyFirst)
nextBase <- baseExpr(nextCall)
if equiv(currentBase, nextBase)
} {
return (currentIfStmt, currentCall) :: listOfIfAndIsInstOf(nextIfStmt, nextCall, onlyFirst)
}
return (currentIfStmt, currentCall) :: Nil
case _ => return (currentIfStmt, currentCall) :: Nil
}
}
Nil
}
private def buildCaseClausesText(ifStmt: ScIfStmt, isInstOfUnderFix: ScGenericCall, onlyFirst: Boolean): (String, RenameData) = {
implicit val project = ifStmt.getProject
val builder = new StringBuilder
val (ifStmts, isInstOf) = listOfIfAndIsInstOf(ifStmt, isInstOfUnderFix, onlyFirst).unzip
val renameData = new RenameData()
for {
index <- ifStmts.indices
text <- buildCaseClauseText(ifStmts(index), isInstOf(index), index, renameData)
} {
builder.append(text)
}
if (ifStmts != Nil) {
val lastElse = ifStmts.last.elseBranch
val defaultText: Option[String] = buildDefaultCaseClauseText(lastElse, project)
defaultText.foreach(builder.append)
}
(builder.toString(), renameData)
}
@tailrec
def findIsInstanceOfCalls(condition: ScExpression, onlyFirst: Boolean): List[ScGenericCall] = {
if (onlyFirst) {
condition match {
case IsInstanceOfCall(call) => List(call)
case infixExpr: ScInfixExpr if infixExpr.operation.refName == "&&" => findIsInstanceOfCalls(infixExpr.lOp, onlyFirst)
case parenth: ScParenthesisedExpr => findIsInstanceOfCalls(parenth.expr.orNull, onlyFirst)
case _ => Nil
}
}
else {
separateConditions(condition).collect {case IsInstanceOfCall(call) => call}
}
}
def findAsInstOfCalls(body: Option[ScExpression], isInstOfCall: ScGenericCall): Seq[ScGenericCall] = {
import isInstOfCall.projectContext
def isAsInstOfCall(genCall: ScGenericCall) = {
genCall.referencedExpr match {
case ref: ScReferenceExpression if ref.refName == "asInstanceOf" =>
ref.resolve() match {
case _: SyntheticNamedElement => true
case _ => false
}
case _ => false
}
}
def equalTypes(firstCall: ScGenericCall, secondCall: ScGenericCall): Boolean = {
val option = for {
firstArgs <- firstCall.typeArgs
secondArgs <- secondCall.typeArgs
firstTypes = firstArgs.typeArgs
secondTypes = secondArgs.typeArgs
if firstTypes.size == 1 && secondTypes.size == 1
} yield {
val firstType = firstTypes.head.calcType
val secondType = secondTypes.head.calcType
firstType.equiv(secondType)
}
option.getOrElse(false)
}
val result = collection.mutable.ArrayBuffer[ScGenericCall]()
val visitor = new ScalaRecursiveElementVisitor() {
override def visitGenericCallExpression(call: ScGenericCall) {
for {
base1 <- baseExpr(isInstOfCall)
base2 <- baseExpr(call)
if isAsInstOfCall(call)
if equalTypes(call, isInstOfCall)
if equiv(base1, base2)
} {
result += call
}
super.visitGenericCallExpression(call)
}
}
for (expr <- body) expr.accept(visitor)
result
}
def setElementsForRename(matchStmt: ScMatchStmt, renameHelper: InplaceRenameHelper, renameData: RenameData) {
val caseClauses = matchStmt.caseClauses.toList
for {
(index, suggestedNames) <- renameData
caseClause = caseClauses(index)
name = suggestedNames.head
} {
val primary = mutable.ArrayBuffer[ScNamedElement]()
val dependents = mutable.SortedSet()(Ordering.by[ScalaPsiElement, Int](_.getTextOffset))
val patternVisitor = new ScalaRecursiveElementVisitor() {
override def visitPattern(pat: ScPattern) {
pat match {
case bp: ScBindingPattern if bp.name == name =>
primary += bp
case _ =>
}
super.visitPattern(pat)
}
}
val referenceVisitor = new ScalaRecursiveElementVisitor() {
override def visitReferenceExpression(ref: ScReferenceExpression) {
for (prim <- primary) {
if (ref.refName == name && ref.resolve() == prim)
dependents += ref
}
super.visitReferenceExpression(ref)
}
}
caseClause.accept(patternVisitor)
caseClause.accept(referenceVisitor)
for (prim <- primary) renameHelper.addGroup(prim, dependents.toSeq, suggestedNames)
}
}
def baseExpr(gCall: ScGenericCall): Option[ScExpression] = gCall.referencedExpr match {
case ref: ScReferenceExpression => ref.qualifier
case _ => None
}
private def guardCondition(condition: ScExpression, isInstOfCall: ScGenericCall): Option[ScExpression] = {
val conditions = separateConditions(condition)
conditions match {
case Nil => None
case _ =>
val guardConditions: List[ScExpression] = conditions.filterNot(equiv(_, isInstOfCall))
val guardConditionsText: String = guardConditions.map(_.getText).mkString(" && ")
val guard = createExpressionFromText(guardConditionsText, condition).asInstanceOf[ScExpression]
Option(guard)
}
}
def equiv(elem1: PsiElement, elem2: PsiElement): Boolean = {
val comparator = new Comparator[PsiElement]() {
def compare(element1: PsiElement, element2: PsiElement): Int = {
if (element1 == element2) return 0
(element1, element2) match {
case (par1: ScParameter, par2: ScParameter) =>
val name1 = par1.name
val name2 = par2.name
if (name1 != null && name2 != null) name1.compareTo(name2)
else 1
case _ => 1
}
}
}
PsiEquivalenceUtil.areElementsEquivalent(elem1, elem2, comparator, false)
}
def separateConditions(expr: ScExpression): List[ScExpression] = {
expr match {
case parenth: ScParenthesisedExpr => parenth.expr match {
case Some(infixExpr: ScInfixExpr) if infixExpr.operation.refName == "&&" =>
separateConditions(infixExpr.lOp) ::: separateConditions(infixExpr.rOp) ::: Nil
case genCall: ScGenericCall => genCall :: Nil
case _ => parenth :: Nil
}
case infixExpr: ScInfixExpr if infixExpr.operation.refName == "&&" =>
separateConditions(infixExpr.lOp) ::: separateConditions(infixExpr.rOp) ::: Nil
case _ => expr :: Nil
}
}
}
|
ilinum/intellij-scala
|
src/org/jetbrains/plugins/scala/codeInspection/typeChecking/TypeCheckCanBeMatchInspection.scala
|
Scala
|
apache-2.0
| 16,207
|
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.stats
import org.junit.runner.RunWith
import org.locationtech.geomesa.curve.TimePeriod
import org.locationtech.geomesa.utils.geotools.GeoToolsDateFormat
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class Z3FrequencyTest extends Specification with StatTestHelper {
def createStat(precision: Int, observe: Boolean): Z3Frequency = {
val s = Stat(sft, Stat.Z3Frequency("geom", "dtg", TimePeriod.Week, precision))
if (observe) {
features.foreach { s.observe }
}
s.asInstanceOf[Z3Frequency]
}
def createStat(observe: Boolean = true): Z3Frequency = createStat(25, observe)
def toDate(string: String) = java.util.Date.from(java.time.LocalDateTime.parse(string, GeoToolsDateFormat).toInstant(java.time.ZoneOffset.UTC))
def toGeom(string: String) = WKTUtils.read(string)
"FrequencyZ3 stat" should {
"work with geometries and dates" >> {
"be empty initially" >> {
val stat = createStat(observe = false)
stat.isEmpty must beTrue
stat.size mustEqual 0
}
"correctly bin values" >> {
val stat = createStat()
stat.isEmpty must beFalse
stat.size mustEqual 100
forall(0 until 100) { i =>
stat.count(toGeom(s"POINT(-$i ${i / 2})"), toDate(f"2012-01-01T${i%24}%02d:00:00.000Z")) must beBetween(1L, 6L)
}
}
"serialize and deserialize" >> {
val stat = createStat()
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Z3Frequency]
unpacked.asInstanceOf[Z3Frequency].geomIndex mustEqual stat.geomIndex
unpacked.asInstanceOf[Z3Frequency].dtgIndex mustEqual stat.dtgIndex
unpacked.asInstanceOf[Z3Frequency].precision mustEqual stat.precision
unpacked.asInstanceOf[Z3Frequency].toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = createStat(observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Z3Frequency]
unpacked.asInstanceOf[Z3Frequency].geomIndex mustEqual stat.geomIndex
unpacked.asInstanceOf[Z3Frequency].dtgIndex mustEqual stat.dtgIndex
unpacked.asInstanceOf[Z3Frequency].precision mustEqual stat.precision
unpacked.asInstanceOf[Z3Frequency].toJson mustEqual stat.toJson
}
"deserialize as immutable value" >> {
val stat = createStat()
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed, immutable = true)
unpacked must beAnInstanceOf[Z3Frequency]
unpacked.asInstanceOf[Z3Frequency].geomIndex mustEqual stat.geomIndex
unpacked.asInstanceOf[Z3Frequency].dtgIndex mustEqual stat.dtgIndex
unpacked.asInstanceOf[Z3Frequency].precision mustEqual stat.precision
unpacked.asInstanceOf[Z3Frequency].toJson mustEqual stat.toJson
unpacked.clear must throwAn[Exception]
unpacked.+=(stat) must throwAn[Exception]
unpacked.observe(features.head) must throwAn[Exception]
unpacked.unobserve(features.head) must throwAn[Exception]
}
"clear" >> {
val stat = createStat()
stat.clear()
stat.isEmpty must beTrue
stat.size mustEqual 0
forall(0 until 100) { i =>
stat.count(toGeom(s"POINT(-$i ${i / 2})"), toDate(f"2012-01-01T${i%24}%02d:00:00.000Z")) mustEqual 0
}
stat.count(toGeom("POINT(-180 -90)"), toDate("2012-01-01T00:00:00.000Z")) mustEqual 0
}
}
}
}
|
ronq/geomesa
|
geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/stats/Z3FrequencyTest.scala
|
Scala
|
apache-2.0
| 4,307
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.util.{Locale, ServiceConfigurationError, ServiceLoader}
import scala.collection.JavaConverters._
import scala.language.{existentials, implicitConversions}
import scala.util.{Failure, Success, Try}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogUtils}
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.datasources.csv.CSVFileFormat
import org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider
import org.apache.spark.sql.execution.datasources.json.JsonFileFormat
import org.apache.spark.sql.execution.datasources.orc.OrcFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.sources.{RateStreamProvider, TextSocketSourceProvider}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.{CalendarIntervalType, StructField, StructType}
import org.apache.spark.sql.util.SchemaUtils
import org.apache.spark.util.Utils
/**
* The main class responsible for representing a pluggable Data Source in Spark SQL. In addition to
* acting as the canonical set of parameters that can describe a Data Source, this class is used to
* resolve a description to a concrete implementation that can be used in a query plan
* (either batch or streaming) or to write out data using an external library.
*
* From an end user's perspective a DataSource description can be created explicitly using
* [[org.apache.spark.sql.DataFrameReader]] or CREATE TABLE USING DDL. Additionally, this class is
* used when resolving a description from a metastore to a concrete implementation.
*
* Many of the arguments to this class are optional, though depending on the specific API being used
* these optional arguments might be filled in during resolution using either inference or external
* metadata. For example, when reading a partitioned table from a file system, partition columns
* will be inferred from the directory layout even if they are not specified.
*
* @param paths A list of file system paths that hold data. These will be globbed and
*              qualified before use. This option only works when reading from a [[FileFormat]].
* @param userSpecifiedSchema An optional specification of the schema of the data. When present
* we skip attempting to infer the schema.
* @param partitionColumns A list of column names that the relation is partitioned by. This list is
* generally empty during the read path, unless this DataSource is managed
* by Hive. In these cases, during `resolveRelation`, we will call
* `getOrInferFileFormatSchema` for file based DataSources to infer the
* partitioning. In other cases, if this list is empty, then this table
* is unpartitioned.
* @param bucketSpec An optional specification for bucketing (hash-partitioning) of the data.
* @param catalogTable Optional catalog table reference that can be used to push down operations
* over the datasource to the catalog service.
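*
* For illustration only (an added sketch, not original documentation): a rough example of
* constructing this class directly and resolving it, assuming an active `SparkSession` named
* `spark` and a hypothetical parquet directory:
* {{{
*   val ds = DataSource(
*     sparkSession = spark,
*     className = "parquet",
*     paths = Seq("/tmp/events"))
*   val relation = ds.resolveRelation() // infers schema and partitioning from the files
* }}}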
*/
case class DataSource(
sparkSession: SparkSession,
className: String,
paths: Seq[String] = Nil,
userSpecifiedSchema: Option[StructType] = None,
partitionColumns: Seq[String] = Seq.empty,
bucketSpec: Option[BucketSpec] = None,
options: Map[String, String] = Map.empty,
catalogTable: Option[CatalogTable] = None) extends Logging {
case class SourceInfo(name: String, schema: StructType, partitionColumns: Seq[String])
lazy val providingClass: Class[_] =
DataSource.lookupDataSource(className, sparkSession.sessionState.conf)
lazy val sourceInfo: SourceInfo = sourceSchema()
private val caseInsensitiveOptions = CaseInsensitiveMap(options)
private val equality = sparkSession.sessionState.conf.resolver
bucketSpec.foreach { bucket =>
SchemaUtils.checkColumnNameDuplication(
bucket.bucketColumnNames, "in the bucket definition", equality)
SchemaUtils.checkColumnNameDuplication(
bucket.sortColumnNames, "in the sort definition", equality)
}
/**
* In the read path, only Hive-managed tables provide the partition columns properly when
* initializing this class. All other file-based data sources will try to infer the partitioning,
* and then cast the inferred types to the user-specified dataTypes if the partition columns exist
* inside `userSpecifiedSchema`; otherwise we can hit data corruption bugs like SPARK-18510, or
* inconsistent data types as reported in SPARK-21463.
* @param fileIndex A FileIndex that will perform partition inference
* @return The PartitionSchema resolved from inference and cast according to `userSpecifiedSchema`
*/
private def combineInferredAndUserSpecifiedPartitionSchema(fileIndex: FileIndex): StructType = {
val resolved = fileIndex.partitionSchema.map { partitionField =>
// SPARK-18510: try to get schema from userSpecifiedSchema, otherwise fallback to inferred
userSpecifiedSchema.flatMap(_.find(f => equality(f.name, partitionField.name))).getOrElse(
partitionField)
}
StructType(resolved)
}
/**
* Get the schema of the given FileFormat, if provided by `userSpecifiedSchema`, or try to infer
* it. In the read path, only Hive-managed tables provide the partition columns properly when
* initializing this class. All other file-based data sources will try to infer the partitioning,
* and then cast the inferred types to the user-specified dataTypes if the partition columns exist
* inside `userSpecifiedSchema`; otherwise we can hit data corruption bugs like SPARK-18510.
* This method will try to skip file scanning when both `userSpecifiedSchema` and
* `partitionColumns` are provided. Here are some code paths that use this method:
* 1. `spark.read` (no schema): Most amount of work. Infer both schema and partitioning columns
* 2. `spark.read.schema(userSpecifiedSchema)`: Parse partitioning columns, cast them to the
*      dataTypes provided in `userSpecifiedSchema` if they exist, or fall back to the inferred
*      dataType if they don't.
* 3. `spark.readStream.schema(userSpecifiedSchema)`: For streaming use cases, users have to
* provide the schema. Here, we also perform partition inference like 2, and try to use
* dataTypes in `userSpecifiedSchema`. All subsequent triggers for this stream will re-use
* this information, therefore calls to this method should be very cheap, i.e. there won't
* be any further inference in any triggers.
*
* @param format the file format object for this DataSource
* @param fileStatusCache the shared cache for file statuses to speed up listing
* @return A pair of the data schema (excluding partition columns) and the schema of the partition
* columns.
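*
* For illustration only (an added sketch, not part of the original documentation), the three
* code paths above roughly correspond to these user-level calls, assuming an active
* `SparkSession` named `spark`, a hypothetical path and a user-provided `userSchema`:
* {{{
*   spark.read.parquet("/data/t")                           // 1. infer schema and partitions
*   spark.read.schema(userSchema).parquet("/data/t")        // 2. cast partitions to userSchema types
*   spark.readStream.schema(userSchema).parquet("/data/t")  // 3. streaming: schema must be provided
* }}}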
*/
private def getOrInferFileFormatSchema(
format: FileFormat,
fileStatusCache: FileStatusCache = NoopCache): (StructType, StructType) = {
// The operations below are expensive, so try not to do them unless needed. For example, in
// streaming mode we have already inferred and registered the partition columns, so we will
// never have to materialize the lazy val below.
lazy val tempFileIndex = {
val allPaths = caseInsensitiveOptions.get("path") ++ paths
val hadoopConf = sparkSession.sessionState.newHadoopConf()
val globbedPaths = allPaths.toSeq.flatMap { path =>
val hdfsPath = new Path(path)
val fs = hdfsPath.getFileSystem(hadoopConf)
val qualified = hdfsPath.makeQualified(fs.getUri, fs.getWorkingDirectory)
SparkHadoopUtil.get.globPathIfNecessary(fs, qualified)
}.toArray
new InMemoryFileIndex(sparkSession, globbedPaths, options, None, fileStatusCache)
}
val partitionSchema = if (partitionColumns.isEmpty) {
// Try to infer partitioning, because no DataSource in the read path provides the partitioning
// columns properly unless it is a Hive DataSource
combineInferredAndUserSpecifiedPartitionSchema(tempFileIndex)
} else {
// Maintain the old behavior from before SPARK-18510: if userSpecifiedSchema is empty, use the
// inferred partitioning.
if (userSpecifiedSchema.isEmpty) {
val inferredPartitions = tempFileIndex.partitionSchema
inferredPartitions
} else {
val partitionFields = partitionColumns.map { partitionColumn =>
userSpecifiedSchema.flatMap(_.find(c => equality(c.name, partitionColumn))).orElse {
val inferredPartitions = tempFileIndex.partitionSchema
val inferredOpt = inferredPartitions.find(p => equality(p.name, partitionColumn))
if (inferredOpt.isDefined) {
logDebug(
s"""Type of partition column: $partitionColumn not found in specified schema
|for $format.
|User Specified Schema
|=====================
|${userSpecifiedSchema.orNull}
|
|Falling back to inferred dataType if it exists.
""".stripMargin)
}
inferredOpt
}.getOrElse {
throw new AnalysisException(s"Failed to resolve the schema for $format for " +
s"the partition column: $partitionColumn. It must be specified manually.")
}
}
StructType(partitionFields)
}
}
val dataSchema = userSpecifiedSchema.map { schema =>
StructType(schema.filterNot(f => partitionSchema.exists(p => equality(p.name, f.name))))
}.orElse {
format.inferSchema(
sparkSession,
caseInsensitiveOptions,
tempFileIndex.allFiles())
}.getOrElse {
throw new AnalysisException(
s"Unable to infer schema for $format. It must be specified manually.")
}
// We just print a warning message if the data schema and partition schema have duplicate
// columns. This is because we allowed users to do so in previous Spark releases and
// we have existing tests for those cases (e.g., `ParquetHadoopFsRelationSuite`).
// See SPARK-18108 and SPARK-21144 for related discussions.
try {
SchemaUtils.checkColumnNameDuplication(
(dataSchema ++ partitionSchema).map(_.name),
"in the data schema and the partition schema",
equality)
} catch {
case e: AnalysisException => logWarning(e.getMessage)
}
(dataSchema, partitionSchema)
}
/** Returns the name and schema of the source that can be used to continually read data. */
private def sourceSchema(): SourceInfo = {
providingClass.newInstance() match {
case s: StreamSourceProvider =>
val (name, schema) = s.sourceSchema(
sparkSession.sqlContext, userSpecifiedSchema, className, caseInsensitiveOptions)
SourceInfo(name, schema, Nil)
case format: FileFormat =>
val path = caseInsensitiveOptions.getOrElse("path", {
throw new IllegalArgumentException("'path' is not specified")
})
// Check whether the path exists if it is not a glob pattern.
// For a glob pattern, we do not check it, because the pattern might only make sense
// once the streaming job starts and some upstream source starts dropping data.
val hdfsPath = new Path(path)
if (!SparkHadoopUtil.get.isGlobPath(hdfsPath)) {
val fs = hdfsPath.getFileSystem(sparkSession.sessionState.newHadoopConf())
if (!fs.exists(hdfsPath)) {
throw new AnalysisException(s"Path does not exist: $path")
}
}
val isSchemaInferenceEnabled = sparkSession.sessionState.conf.streamingSchemaInference
val isTextSource = providingClass == classOf[text.TextFileFormat]
// If schema inference is disabled, every source except text requires the schema to be specified
if (!isSchemaInferenceEnabled && !isTextSource && userSpecifiedSchema.isEmpty) {
throw new IllegalArgumentException(
"Schema must be specified when creating a streaming source DataFrame. " +
"If some files already exist in the directory, then depending on the file format " +
"you may be able to create a static DataFrame on that directory with " +
"'spark.read.load(directory)' and infer schema from it.")
}
val (dataSchema, partitionSchema) = getOrInferFileFormatSchema(format)
SourceInfo(
s"FileSource[$path]",
StructType(dataSchema ++ partitionSchema),
partitionSchema.fieldNames)
case _ =>
throw new UnsupportedOperationException(
s"Data source $className does not support streamed reading")
}
}
/** Returns a source that can be used to continually read data. */
def createSource(metadataPath: String): Source = {
providingClass.newInstance() match {
case s: StreamSourceProvider =>
s.createSource(
sparkSession.sqlContext,
metadataPath,
userSpecifiedSchema,
className,
caseInsensitiveOptions)
case format: FileFormat =>
val path = caseInsensitiveOptions.getOrElse("path", {
throw new IllegalArgumentException("'path' is not specified")
})
new FileStreamSource(
sparkSession = sparkSession,
path = path,
fileFormatClassName = className,
schema = sourceInfo.schema,
partitionColumns = sourceInfo.partitionColumns,
metadataPath = metadataPath,
options = caseInsensitiveOptions)
case _ =>
throw new UnsupportedOperationException(
s"Data source $className does not support streamed reading")
}
}
/** Returns a sink that can be used to continually write data. */
def createSink(outputMode: OutputMode): Sink = {
providingClass.newInstance() match {
case s: StreamSinkProvider =>
s.createSink(sparkSession.sqlContext, caseInsensitiveOptions, partitionColumns, outputMode)
case fileFormat: FileFormat =>
val path = caseInsensitiveOptions.getOrElse("path", {
throw new IllegalArgumentException("'path' is not specified")
})
if (outputMode != OutputMode.Append) {
throw new AnalysisException(
s"Data source $className does not support $outputMode output mode")
}
new FileStreamSink(sparkSession, path, fileFormat, partitionColumns, caseInsensitiveOptions)
case _ =>
throw new UnsupportedOperationException(
s"Data source $className does not support streamed writing")
}
}
/**
* Create a resolved [[BaseRelation]] that can be used to read data from or write data into this
* [[DataSource]]
*
* @param checkFilesExist Whether to confirm that the files exist when generating the
*                        non-streaming file-based datasource. Structured Streaming jobs already
*                        verify file existence while listing files, and when generating incremental
*                        batches, each batch is treated as a non-streaming file-based data source.
*                        Since we know the files already exist, we don't need to check them again.
*/
def resolveRelation(checkFilesExist: Boolean = true): BaseRelation = {
val relation = (providingClass.newInstance(), userSpecifiedSchema) match {
// TODO: Throw when too much is given.
case (dataSource: SchemaRelationProvider, Some(schema)) =>
dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions, schema)
case (dataSource: RelationProvider, None) =>
dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions)
case (_: SchemaRelationProvider, None) =>
throw new AnalysisException(s"A schema needs to be specified when using $className.")
case (dataSource: RelationProvider, Some(schema)) =>
val baseRelation =
dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions)
if (baseRelation.schema != schema) {
throw new AnalysisException(s"$className does not allow user-specified schemas.")
}
baseRelation
// We are reading from the results of a streaming query. Load files from the metadata log
// instead of listing them using HDFS APIs.
case (format: FileFormat, _)
if FileStreamSink.hasMetadata(
caseInsensitiveOptions.get("path").toSeq ++ paths,
sparkSession.sessionState.newHadoopConf()) =>
val basePath = new Path((caseInsensitiveOptions.get("path").toSeq ++ paths).head)
val tempFileCatalog = new MetadataLogFileIndex(sparkSession, basePath, None)
val fileCatalog = if (userSpecifiedSchema.nonEmpty) {
val partitionSchema = combineInferredAndUserSpecifiedPartitionSchema(tempFileCatalog)
new MetadataLogFileIndex(sparkSession, basePath, Option(partitionSchema))
} else {
tempFileCatalog
}
val dataSchema = userSpecifiedSchema.orElse {
format.inferSchema(
sparkSession,
caseInsensitiveOptions,
fileCatalog.allFiles())
}.getOrElse {
throw new AnalysisException(
s"Unable to infer schema for $format at ${fileCatalog.allFiles().mkString(",")}. " +
"It must be specified manually")
}
HadoopFsRelation(
fileCatalog,
partitionSchema = fileCatalog.partitionSchema,
dataSchema = dataSchema,
bucketSpec = None,
format,
caseInsensitiveOptions)(sparkSession)
// This is a non-streaming file based datasource.
case (format: FileFormat, _) =>
val allPaths = caseInsensitiveOptions.get("path") ++ paths
val hadoopConf = sparkSession.sessionState.newHadoopConf()
val globbedPaths = allPaths.flatMap(
DataSource.checkAndGlobPathIfNecessary(hadoopConf, _, checkFilesExist)).toArray
val fileStatusCache = FileStatusCache.getOrCreate(sparkSession)
val (dataSchema, partitionSchema) = getOrInferFileFormatSchema(format, fileStatusCache)
val fileCatalog = if (sparkSession.sqlContext.conf.manageFilesourcePartitions &&
catalogTable.isDefined && catalogTable.get.tracksPartitionsInCatalog) {
val defaultTableSize = sparkSession.sessionState.conf.defaultSizeInBytes
new CatalogFileIndex(
sparkSession,
catalogTable.get,
catalogTable.get.stats.map(_.sizeInBytes.toLong).getOrElse(defaultTableSize))
} else {
new InMemoryFileIndex(
sparkSession, globbedPaths, options, Some(partitionSchema), fileStatusCache)
}
HadoopFsRelation(
fileCatalog,
partitionSchema = partitionSchema,
dataSchema = dataSchema.asNullable,
bucketSpec = bucketSpec,
format,
caseInsensitiveOptions)(sparkSession)
case _ =>
throw new AnalysisException(
s"$className is not a valid Spark SQL Data Source.")
}
relation match {
case hs: HadoopFsRelation =>
SchemaUtils.checkColumnNameDuplication(
hs.dataSchema.map(_.name),
"in the data schema",
equality)
SchemaUtils.checkColumnNameDuplication(
hs.partitionSchema.map(_.name),
"in the partition schema",
equality)
case _ =>
SchemaUtils.checkColumnNameDuplication(
relation.schema.map(_.name),
"in the data schema",
equality)
}
relation
}
/**
* Creates a command node to write the given [[LogicalPlan]] out to the given [[FileFormat]].
* The returned command is unresolved and needs to be analyzed.
*/
private def planForWritingFileFormat(
format: FileFormat, mode: SaveMode, data: LogicalPlan): InsertIntoHadoopFsRelationCommand = {
// Don't glob path for the write path. The contracts here are:
// 1. Only one output path can be specified on the write path;
// 2. Output path must be a legal HDFS style file system path;
// 3. It's OK that the output path doesn't exist yet;
val allPaths = paths ++ caseInsensitiveOptions.get("path")
val outputPath = if (allPaths.length == 1) {
val path = new Path(allPaths.head)
val fs = path.getFileSystem(sparkSession.sessionState.newHadoopConf())
path.makeQualified(fs.getUri, fs.getWorkingDirectory)
} else {
throw new IllegalArgumentException("Expected exactly one path to be specified, but " +
s"got: ${allPaths.mkString(", ")}")
}
val caseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
PartitioningUtils.validatePartitionColumn(data.schema, partitionColumns, caseSensitive)
val fileIndex = catalogTable.map(_.identifier).map { tableIdent =>
sparkSession.table(tableIdent).queryExecution.analyzed.collect {
case LogicalRelation(t: HadoopFsRelation, _, _, _) => t.location
}.head
}
// For partitioned relation r, r.schema's column ordering can be different from the column
// ordering of data.logicalPlan (partition columns are all moved after the data columns). This
// will be adjusted within InsertIntoHadoopFsRelation.
InsertIntoHadoopFsRelationCommand(
outputPath = outputPath,
staticPartitions = Map.empty,
ifPartitionNotExists = false,
partitionColumns = partitionColumns.map(UnresolvedAttribute.quoted),
bucketSpec = bucketSpec,
fileFormat = format,
options = options,
query = data,
mode = mode,
catalogTable = catalogTable,
fileIndex = fileIndex,
outputColumns = data.output)
}
/**
* Writes the given [[LogicalPlan]] out to this [[DataSource]] and returns a [[BaseRelation]] for
* subsequent reading.
*
* @param mode The save mode for this writing.
* @param data The input query plan that produces the data to be written. Note that this plan
* is analyzed and optimized.
* @param outputColumns The original output columns of the input query plan. The optimizer may not
*                      preserve the case of the output columns' names, so we need this parameter
* instead of `data.output`.
* @param physicalPlan The physical plan of the input query plan. We should run the writing
* command with this physical plan instead of creating a new physical plan,
* so that the metrics can be correctly linked to the given physical plan and
* shown in the web UI.
*/
def writeAndRead(
mode: SaveMode,
data: LogicalPlan,
outputColumns: Seq[Attribute],
physicalPlan: SparkPlan): BaseRelation = {
if (outputColumns.map(_.dataType).exists(_.isInstanceOf[CalendarIntervalType])) {
throw new AnalysisException("Cannot save interval data type into external storage.")
}
providingClass.newInstance() match {
case dataSource: CreatableRelationProvider =>
dataSource.createRelation(
sparkSession.sqlContext, mode, caseInsensitiveOptions, Dataset.ofRows(sparkSession, data))
case format: FileFormat =>
val cmd = planForWritingFileFormat(format, mode, data)
val resolvedPartCols = cmd.partitionColumns.map { col =>
// The partition columns created in `planForWritingFileFormat` should always be
// `UnresolvedAttribute` with a single name part.
assert(col.isInstanceOf[UnresolvedAttribute])
val unresolved = col.asInstanceOf[UnresolvedAttribute]
assert(unresolved.nameParts.length == 1)
val name = unresolved.nameParts.head
outputColumns.find(a => equality(a.name, name)).getOrElse {
throw new AnalysisException(
s"Unable to resolve $name given [${data.output.map(_.name).mkString(", ")}]")
}
}
val resolved = cmd.copy(partitionColumns = resolvedPartCols, outputColumns = outputColumns)
resolved.run(sparkSession, physicalPlan)
// Replace the schema with that of the DataFrame we just wrote out to avoid re-inferring
copy(userSpecifiedSchema = Some(outputColumns.toStructType.asNullable)).resolveRelation()
case _ =>
sys.error(s"${providingClass.getCanonicalName} does not allow create table as select.")
}
}
/**
* Returns a logical plan to write the given [[LogicalPlan]] out to this [[DataSource]].
*/
def planForWriting(mode: SaveMode, data: LogicalPlan): LogicalPlan = {
if (data.schema.map(_.dataType).exists(_.isInstanceOf[CalendarIntervalType])) {
throw new AnalysisException("Cannot save interval data type into external storage.")
}
providingClass.newInstance() match {
case dataSource: CreatableRelationProvider =>
SaveIntoDataSourceCommand(data, dataSource, caseInsensitiveOptions, mode)
case format: FileFormat =>
DataSource.validateSchema(data.schema)
planForWritingFileFormat(format, mode, data)
case _ =>
sys.error(s"${providingClass.getCanonicalName} does not allow create table as select.")
}
}
}
object DataSource extends Logging {
/** A map to maintain backward compatibility in case we move data sources around. */
private val backwardCompatibilityMap: Map[String, String] = {
val jdbc = classOf[JdbcRelationProvider].getCanonicalName
val json = classOf[JsonFileFormat].getCanonicalName
val parquet = classOf[ParquetFileFormat].getCanonicalName
val csv = classOf[CSVFileFormat].getCanonicalName
val libsvm = "org.apache.spark.ml.source.libsvm.LibSVMFileFormat"
val orc = "org.apache.spark.sql.hive.orc.OrcFileFormat"
val nativeOrc = classOf[OrcFileFormat].getCanonicalName
val socket = classOf[TextSocketSourceProvider].getCanonicalName
val rate = classOf[RateStreamProvider].getCanonicalName
Map(
"org.apache.spark.sql.jdbc" -> jdbc,
"org.apache.spark.sql.jdbc.DefaultSource" -> jdbc,
"org.apache.spark.sql.execution.datasources.jdbc.DefaultSource" -> jdbc,
"org.apache.spark.sql.execution.datasources.jdbc" -> jdbc,
"org.apache.spark.sql.json" -> json,
"org.apache.spark.sql.json.DefaultSource" -> json,
"org.apache.spark.sql.execution.datasources.json" -> json,
"org.apache.spark.sql.execution.datasources.json.DefaultSource" -> json,
"org.apache.spark.sql.parquet" -> parquet,
"org.apache.spark.sql.parquet.DefaultSource" -> parquet,
"org.apache.spark.sql.execution.datasources.parquet" -> parquet,
"org.apache.spark.sql.execution.datasources.parquet.DefaultSource" -> parquet,
"org.apache.spark.sql.hive.orc.DefaultSource" -> orc,
"org.apache.spark.sql.hive.orc" -> orc,
"org.apache.spark.sql.execution.datasources.orc.DefaultSource" -> nativeOrc,
"org.apache.spark.sql.execution.datasources.orc" -> nativeOrc,
"org.apache.spark.ml.source.libsvm.DefaultSource" -> libsvm,
"org.apache.spark.ml.source.libsvm" -> libsvm,
"com.databricks.spark.csv" -> csv,
"org.apache.spark.sql.execution.streaming.TextSocketSourceProvider" -> socket,
"org.apache.spark.sql.execution.streaming.RateSourceProvider" -> rate
)
}
/**
* Classes that were removed in Spark 2.0. Used to detect libraries that are incompatible with Spark 2.0.
*/
private val spark2RemovedClasses = Set(
"org.apache.spark.sql.DataFrame",
"org.apache.spark.sql.sources.HadoopFsRelationProvider",
"org.apache.spark.Logging")
/** Given a provider name, look up the data source class definition. */
def lookupDataSource(provider: String, conf: SQLConf): Class[_] = {
val provider1 = backwardCompatibilityMap.getOrElse(provider, provider) match {
case name if name.equalsIgnoreCase("orc") &&
conf.getConf(SQLConf.ORC_IMPLEMENTATION) == "native" =>
classOf[OrcFileFormat].getCanonicalName
case name if name.equalsIgnoreCase("orc") &&
conf.getConf(SQLConf.ORC_IMPLEMENTATION) == "hive" =>
"org.apache.spark.sql.hive.orc.OrcFileFormat"
case name => name
}
val provider2 = s"$provider1.DefaultSource"
val loader = Utils.getContextOrSparkClassLoader
val serviceLoader = ServiceLoader.load(classOf[DataSourceRegister], loader)
try {
serviceLoader.asScala.filter(_.shortName().equalsIgnoreCase(provider1)).toList match {
// the provider format did not match any given registered aliases
case Nil =>
try {
Try(loader.loadClass(provider1)).orElse(Try(loader.loadClass(provider2))) match {
case Success(dataSource) =>
// Found the data source using fully qualified path
dataSource
case Failure(error) =>
if (provider1.startsWith("org.apache.spark.sql.hive.orc")) {
throw new AnalysisException(
"Hive built-in ORC data source must be used with Hive support enabled. " +
"Please use the native ORC data source by setting 'spark.sql.orc.impl' to " +
"'native'")
} else if (provider1.toLowerCase(Locale.ROOT) == "avro" ||
provider1 == "com.databricks.spark.avro") {
throw new AnalysisException(
s"Failed to find data source: ${provider1.toLowerCase(Locale.ROOT)}. " +
"Please find an Avro package at " +
"http://spark.apache.org/third-party-projects.html")
} else {
throw new ClassNotFoundException(
s"Failed to find data source: $provider1. Please find packages at " +
"http://spark.apache.org/third-party-projects.html",
error)
}
}
} catch {
case e: NoClassDefFoundError => // This one won't be caught by Scala NonFatal
// NoClassDefFoundError's class name uses "/" rather than "." for packages
val className = e.getMessage.replaceAll("/", ".")
if (spark2RemovedClasses.contains(className)) {
throw new ClassNotFoundException(s"$className was removed in Spark 2.0. " +
"Please check if your library is compatible with Spark 2.0", e)
} else {
throw e
}
}
case head :: Nil =>
// there is exactly one registered alias
head.getClass
case sources =>
// There are multiple registered aliases for the input. If there is a single datasource
// whose class name starts with the "org.apache.spark" package prefix, we use it, treating it
// as an internal datasource within Spark.
val sourceNames = sources.map(_.getClass.getName)
val internalSources = sources.filter(_.getClass.getName.startsWith("org.apache.spark"))
if (internalSources.size == 1) {
logWarning(s"Multiple sources found for $provider1 (${sourceNames.mkString(", ")}), " +
s"defaulting to the internal datasource (${internalSources.head.getClass.getName}).")
internalSources.head.getClass
} else {
throw new AnalysisException(s"Multiple sources found for $provider1 " +
s"(${sourceNames.mkString(", ")}), please specify the fully qualified class name.")
}
}
} catch {
case e: ServiceConfigurationError if e.getCause.isInstanceOf[NoClassDefFoundError] =>
// NoClassDefFoundError's class name uses "/" rather than "." for packages
val className = e.getCause.getMessage.replaceAll("/", ".")
if (spark2RemovedClasses.contains(className)) {
throw new ClassNotFoundException(s"Detected an incompatible DataSourceRegister. " +
"Please remove the incompatible library from classpath or upgrade it. " +
s"Error: ${e.getMessage}", e)
} else {
throw e
}
}
}
/**
* When creating a data source table, the `path` option has a special meaning: the table location.
* This method extracts the `path` option and treats it as the table location when building a
* [[CatalogStorageFormat]]. Note that the `path` option is removed from the options after this.
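*
* For illustration only (an added sketch, not original documentation), with hypothetical options:
* {{{
*   buildStorageFormatFromOptions(Map("path" -> "/warehouse/t1", "compression" -> "snappy"))
*   // roughly: locationUri = Some(URI for /warehouse/t1), properties = Map("compression" -> "snappy")
* }}}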
*/
def buildStorageFormatFromOptions(options: Map[String, String]): CatalogStorageFormat = {
val path = CaseInsensitiveMap(options).get("path")
val optionsWithoutPath = options.filterKeys(_.toLowerCase(Locale.ROOT) != "path")
CatalogStorageFormat.empty.copy(
locationUri = path.map(CatalogUtils.stringToURI), properties = optionsWithoutPath)
}
/**
* If `path` is a file pattern, return all the paths that match it. Otherwise, return the path itself.
* If `checkFilesExist` is `true`, also check the file existence.
*/
private def checkAndGlobPathIfNecessary(
hadoopConf: Configuration,
path: String,
checkFilesExist: Boolean): Seq[Path] = {
val hdfsPath = new Path(path)
val fs = hdfsPath.getFileSystem(hadoopConf)
val qualified = hdfsPath.makeQualified(fs.getUri, fs.getWorkingDirectory)
val globPath = SparkHadoopUtil.get.globPathIfNecessary(fs, qualified)
if (globPath.isEmpty) {
throw new AnalysisException(s"Path does not exist: $qualified")
}
// Sufficient to check head of the globPath seq for non-glob scenario
// Don't need to check once again if files exist in streaming mode
if (checkFilesExist && !fs.exists(globPath.head)) {
throw new AnalysisException(s"Path does not exist: ${globPath.head}")
}
globPath
}
/**
* Called before writing into a FileFormat-based data source to make sure the
* supplied schema is not empty.
* @param schema the schema of the data to be written
*/
private def validateSchema(schema: StructType): Unit = {
def hasEmptySchema(schema: StructType): Boolean = {
schema.size == 0 || schema.find {
case StructField(_, b: StructType, _, _) => hasEmptySchema(b)
case _ => false
}.isDefined
}
if (hasEmptySchema(schema)) {
throw new AnalysisException(
s"""
|Datasource does not support writing empty or nested empty schemas.
|Please make sure the data schema has at least one column.
""".stripMargin)
}
}
}
|
brad-kaiser/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
|
Scala
|
apache-2.0
| 36,094
|
package scodec.bits
import org.scalatest._
import org.scalatest.prop.GeneratorDrivenPropertyChecks
class BitsSuite extends FunSuite with Matchers with GeneratorDrivenPropertyChecks {
protected def serializationShouldRoundtrip[A](x: A): Unit = {
import java.io.{ ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream }
val bout = new ByteArrayOutputStream
val out = new ObjectOutputStream(bout)
out.writeObject(x)
out.close()
val in = new ObjectInputStream(new ByteArrayInputStream(bout.toByteArray))
val deserialized = in.readObject.asInstanceOf[A]
deserialized shouldBe x
}
}
|
aloiscochard/scodec-bits
|
core/src/test/scala/scodec/bits/BitsSuite.scala
|
Scala
|
bsd-3-clause
| 645
|
package scray.hdfs
import scray.querying.queries.DomainQuery
import scray.querying.description.TableIdentifier
import com.twitter.util.FuturePool
import scray.querying.source.store.QueryableStoreSource
import scray.querying.description.Column
import HDFSQueryableSource._
import scray.querying.queries.KeyedQuery
import com.twitter.util.Future
import scray.querying.description.Row
import scray.hdfs.index.HDFSBlobResolver
import org.apache.hadoop.io.Writable
import scray.querying.description.SimpleRow
import scala.collection.mutable.ArrayBuffer
import scray.querying.description.RowColumn
import scray.querying.description.ArrayByteColumn
import scray.querying.description.internal.SingleValueDomain
import com.twitter.concurrent.Spool
import scray.querying.description.ColumnFactory
import com.typesafe.scalalogging.LazyLogging
/**
* A queryable source for reading Blobs from HDFS.
*/
class HDFSQueryableSource[Q <: DomainQuery, T <: Writable](
val ti: TableIdentifier,
val resolver: HDFSBlobResolver[org.apache.hadoop.io.Text],
futurePool: FuturePool) extends QueryableStoreSource[Q](ti, getRowKeyColumns(ti), Set(), getAllColumns(ti), false)
with LazyLogging {
val rowColumn = Column(rowKeyColumnName, ti)
val valueColumn = Column(valueColumnName, ti)
override def request(query: Q): scray.querying.source.LazyDataFuture = {
requestIterator(query).map { it =>
it.hasNext match {
case true => it.next() *:: Future.value(Spool.empty[Row])
case false => Spool.empty[Row]
}
}
}
override def requestIterator(query: Q): Future[Iterator[Row]] = {
futurePool {
val key = query.domains.find(domain => domain.column == rowColumn).flatMap {
case single: SingleValueDomain[_] => Some(single.value)
case _ => None
}
key.map { mkey =>
val value = resolver.getBlob(HDFSBlobResolver.transformHadoopTypes(mkey).asInstanceOf[org.apache.hadoop.io.Text])
new OneHDFSBlobIterator(rowColumn, Some(mkey), valueColumn, value)
}.getOrElse {
new OneHDFSBlobIterator(rowColumn, None, valueColumn, None)
}
}
}
override def keyedRequest(query: KeyedQuery): Future[Iterator[Row]] = {
futurePool {
val key = query.keys.find(rowcol => rowcol.column.columnName == rowKeyColumnName).map(_.value)
key.map { mkey =>
val value = resolver.getBlob(mkey.asInstanceOf[org.apache.hadoop.io.Text])
new OneHDFSBlobIterator(rowColumn, Some(mkey), valueColumn, value)
}.getOrElse {
new OneHDFSBlobIterator(rowColumn, None, valueColumn, None)
}
}
}
}
object HDFSQueryableSource {
val rowKeyColumnName = "KEY"
val valueColumnName = "VALUE"
val allColumnNames = Set(rowKeyColumnName, valueColumnName)
def getRowKeyColumns(ti: TableIdentifier): Set[Column] = Set(Column(rowKeyColumnName, ti))
def getValueColumn(ti: TableIdentifier): Set[Column] = Set(Column(valueColumnName, ti))
def getAllColumns(ti: TableIdentifier): Set[Column] = allColumnNames.map(name => Column(name, ti))
}
class OneHDFSBlobIterator(keyCol: Column, key: Option[Any], col: Column, value: Option[Array[Byte]]) extends Iterator[Row] with LazyLogging {
var retrieved = value.isEmpty
override def hasNext: Boolean = !retrieved
override def next(): Row = {
retrieved = true
value.map { entry =>
val entries = new ArrayBuffer[RowColumn[_]]()
key.foreach(k => entries += ColumnFactory.getColumnFromValue(keyCol, k))
entries += new ArrayByteColumn(col, entry)
logger.info(s" found and fetched value $value")
SimpleRow(entries)
}.getOrElse(null)
}
}
|
scray/scray
|
scray-hdfs/modules/scray-hdfs-service-adapter/src/main/scala/scray/hdfs/HDFSQueryableSource.scala
|
Scala
|
apache-2.0
| 3,699
|
/* PageLayout.scala
*
* Jim McBeath, November 3, 2005 (as PageLayout.java)
* converted to scala June 21, 2008
*/
package net.jimmc.mimprint
import net.jimmc.util.SResources
import java.awt.Dimension
import java.awt.Insets
import java.io.File
import java.io.PrintWriter
import java.text.MessageFormat
import java.util.Stack
import javax.xml.parsers.SAXParser
import javax.xml.parsers.SAXParserFactory
import org.xml.sax.Attributes
import org.xml.sax.helpers.DefaultHandler
import org.xml.sax.SAXException
object PageLayout {
val BORDER_THICKNESS = 20
//Values for page units
val UNIT_CM = 0 //metric
val UNIT_INCH = 1 //english
}
/** The layout for a page of images.
*/
class PageLayout(app:SResources) {
import PageLayout._ //get all fields from our companion object
private var description:String = _ //description of this layout
private var pageUnit:Int = _ //page units, one of UNIT_CM or UNIT_INCH
//The actual values we store in our various dimension fields
//are the units times the multiplier. For example, we
//would represent 8.5 inches as 8500.
private var pageWidth:Int = _ //width of the page in pageUnits
private var pageHeight:Int = _ //height of the page in pageUnits
private var areaLayout:AreaLayout = _
private var currentArea:AreaLayout = _ //when loading an XML file
def setDefaultLayout() {
pageUnit = UNIT_INCH
pageWidth = 8500 //American standard paper size
pageHeight = 11000
val margin = 500 //margin on outer edges
val spacing = 250 //spacing between areas
areaLayout = AreaLayoutFactory.createDefaultTopLayout()
areaLayout.setMargins(margin)
areaLayout.setSpacing(spacing)
areaLayout.setBorderThickness(BORDER_THICKNESS)
setAreaLayout(areaLayout)
}
private def setAreaLayoutBounds() {
if (areaLayout!=null)
areaLayout.setBounds(0,0,pageWidth,pageHeight)
}
/** Set the descriptive text for this layout. */
def setDescription(description:String) = this.description = description
def getDescription():String = description
/** Set the page units.
* @param unit One of UNIT_CM or UNIT_INCH.
*/
def setPageUnit(unit:Int) {
if (unit==pageUnit)
return //no change
unit match {
case UNIT_CM => pageUnit = unit
case UNIT_INCH => pageUnit = unit
case _ =>
throw new IllegalArgumentException("bad units "+unit)
}
}
/** Get the current page units, either UNIT_CM or UNIT_INCH. */
def getPageUnit():Int = pageUnit
def setPageWidth(width:Int) {
this.pageWidth = width
if (areaLayout==null)
return
setAreaLayoutBounds()
areaLayout.revalidate()
}
def getPageWidth():Int = pageWidth
def setPageHeight(height:Int) {
this.pageHeight = height
if (areaLayout==null)
return
setAreaLayoutBounds()
areaLayout.revalidate()
}
def getPageHeight():Int = pageHeight
//Set our top-level AreaLayout
def setAreaLayout(areaLayout:AreaLayout) {
this.areaLayout = areaLayout
setAreaLayoutBounds()
areaLayout.setBorderThickness(BORDER_THICKNESS)
areaLayout.revalidate()
areaLayout.setParent(null) //top level layout
areaLayout.setTreeLocation(null)
areaLayout.setSubTreeLocations()
areaLayout.setTreeDepth(0)
areaLayout.setSubTreeDepths()
fixImageIndexes()
}
def fixImageIndexes() {
areaLayout.setImageIndexes(0)
}
def getAreaLayout():AreaLayout = areaLayout
/** Write the current layout template. */
def writeLayoutTemplate(pw:PrintWriter) {
pw.println("<?xml version=\\"1.0\\" encoding=\\"utf-8\\"?>") //XML header line
//TODO - write out DTD line?
val pageLineFmt = "<page width=\\"{0}\\" height=\\"{1}\\" unit=\\"{2}\\">"
val pageLineArgs = Array(
PageValue.formatPageValue(getPageWidth()),
PageValue.formatPageValue(getPageHeight()),
if (getPageUnit()==PageLayout.UNIT_CM) "cm" else "in"
)
pw.println(MessageFormat.format(pageLineFmt,
(pageLineArgs.asInstanceOf[Array[Object]]:_*)))
if (description!=null)
pw.println(" <description>"+description+"</description>")
areaLayout.writeTemplate(pw,1)
pw.println("</page>")
}
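// Illustrative sketch (not in the original source) of the template format this method emits and
// loadLayoutTemplate parses back, assuming the default 8.5x11 inch page; exact numeric formatting
// depends on PageValue.formatPageValue:
//   <?xml version="1.0" encoding="utf-8"?>
//   <page width="8.5" height="11" unit="in">
//     <description>Default layout</description>
//     ... area layout elements written by areaLayout.writeTemplate ...
//   </page>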
/** Read in the specified layout template. */
def loadLayoutTemplate(f:File) {
var parser:SAXParser = null
try {
parser = SAXParserFactory.newInstance().newSAXParser()
//TODO - create factory only once
} catch {
case ex:Exception =>
throw new RuntimeException("Exception creating SAXParser",ex)
}
//System.out.println("Created SAXParser")
val handler:DefaultHandler = new PageLayoutHandler()
try {
parser.parse(f,handler)
} catch {
case ex:Exception => //SAXException, IOException
throw new RuntimeException("Error parsing xml",ex)
}
}
/** Get a string from our resources. */
def getResourceString(name:String) = app.getResourceString(name)
/** Get a string from our resources. */
def getResourceFormatted(name:String, arg:String) =
app.getResourceFormatted(name, arg)
class PageLayoutHandler extends DefaultHandler {
private var areaStack:Stack[AreaLayout] = _
private var lastText:String = _ //most recent parsed text
override def startDocument() {
//System.out.println("startDocument")
areaStack = new Stack()
}
override def endDocument() {
//System.out.println("endDocument")
}
override def startElement(url:String, localName:String,
qName:String, attributes:Attributes) {
//System.out.println("startElement "+uri+","+localName+","+
// qName+",attrs="+attributes)
if (qName.equals("description"))
() //ignore the start, pick up the text on the end
else if (qName.equals("page"))
loadPageAttributes(attributes)
else if (qName.equals("margins"))
loadMargins(attributes)
else if (qName.equals("spacing"))
loadSpacing(attributes)
else {
val newArea:AreaLayout = AreaLayoutFactory.newAreaLayout(qName)
newArea.setBorderThickness(BORDER_THICKNESS)
newArea.setXmlAttributes(attributes)
//TODO - ensure that newArea.areas has been allocated
areaStack.push(currentArea)
currentArea = newArea
}
}
private def loadMargins(attrs:Attributes) {
val leftStr = attrs.getValue("left")
val rightStr = attrs.getValue("right")
val topStr = attrs.getValue("top")
val bottomStr = attrs.getValue("bottom")
val left = PageValue.parsePageValue(leftStr,0)
val right = PageValue.parsePageValue(rightStr,0)
val top = PageValue.parsePageValue(topStr,0)
val bottom = PageValue.parsePageValue(bottomStr,0)
if (currentArea!=null)
currentArea.setMargins(new Insets(top,left,bottom,right))
else
throw new IllegalArgumentException(
"Can't set margins directly on a Page")
}
private def loadSpacing(attrs:Attributes) {
val widthStr = attrs.getValue("width")
val heightStr = attrs.getValue("height")
val width = PageValue.parsePageValue(widthStr,0)
val height = PageValue.parsePageValue(heightStr,0)
if (currentArea!=null)
currentArea.setSpacing(new Dimension(width,height))
else
throw new IllegalArgumentException(
"Can't set spacing directly on a Page")
}
private def loadPageAttributes(attrs:Attributes) {
val heightStr = attrs.getValue("height")
val widthStr = attrs.getValue("width")
if (heightStr==null || widthStr==null ||
heightStr.trim().equals("") || widthStr.trim().equals("")) {
val msg = getResourceString("error.PageDimensionsRequired")
throw new IllegalArgumentException(msg)
}
setPageWidth(PageValue.parsePageValue(widthStr))
setPageHeight(PageValue.parsePageValue(heightStr))
val unitStr = attrs.getValue("unit")
if ("cm".equalsIgnoreCase(unitStr))
setPageUnit(UNIT_CM)
else if ("in".equalsIgnoreCase(unitStr))
setPageUnit(UNIT_INCH)
else {
val msg = getResourceString("error.BadPageUnit")
throw new IllegalArgumentException(msg)
}
}
override def characters(ch:Array[Char], start:Int, end:Int) =
lastText = new String(ch,start,end)
override def endElement(url:String, localName:String, qName:String) {
//System.out.println("endElement "+uri+","+localName+","+qName)
//TODO - validate end element matches start element
if (qName=="description") {
if (lastText!=null)
setDescription(lastText)
return
} else if (qName=="page") {
if (currentArea!=null) {
val msg = getResourceString("error.MissingEndAreaElement")
throw new RuntimeException(msg)
}
return //done with the page
} else if (qName=="margins") {
return
} else if (qName=="spacing") {
return
} else {
//nothing here
}
val newArea:AreaLayout = currentArea
currentArea = areaStack.pop()
if (currentArea==null)
setAreaLayout(newArea) //set page layout
else
currentArea.addAreaLayout(newArea)
}
}
}
|
jimmc/mimprint
|
src/net/jimmc/mimprint/PageLayout.scala
|
Scala
|
gpl-2.0
| 10,365
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
/**
* A backend interface for scheduling systems that allows plugging in different ones under
* ClusterScheduler. We assume a Mesos-like model where the application gets resource offers as
* machines become available and can launch tasks on them.
*/
private[spark] trait SchedulerBackend {
def start(): Unit
def stop(): Unit
def reviveOffers(): Unit
def defaultParallelism(): Int
def killTask(taskId: Long, executorId: String): Unit = throw new UnsupportedOperationException
}
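// For illustration only (not part of the original file): a minimal, hypothetical no-op
// implementation sketch showing what a concrete SchedulerBackend must provide.
private[spark] class NoOpSchedulerBackend extends SchedulerBackend {
  override def start(): Unit = {}
  override def stop(): Unit = {}
  override def reviveOffers(): Unit = {}
  // A simple default: one task slot per core available on the driver machine.
  override def defaultParallelism(): Int = Runtime.getRuntime.availableProcessors()
}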
|
sryza/spark
|
core/src/main/scala/org/apache/spark/scheduler/SchedulerBackend.scala
|
Scala
|
apache-2.0
| 1,330
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import java.{util => ju}
import java.util.concurrent.Executors
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.Duration
import scala.util.control.NonFatal
import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, KafkaConsumer, OffsetAndTimestamp}
import org.apache.kafka.common.TopicPartition
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.util.{ThreadUtils, UninterruptibleThread}
/**
* This class uses Kafka's own [[KafkaConsumer]] API to read data offsets from Kafka.
* The [[ConsumerStrategy]] class defines which Kafka topics and partitions should be read
* by this source. These strategies directly correspond to the consumption options available
* to the source. This class is designed to return a configured [[KafkaConsumer]] that is used by the
* [[KafkaSource]] to query for the offsets. See the docs on
* [[org.apache.spark.sql.kafka010.ConsumerStrategy]]
* for more details.
*
* Note: This class is not ThreadSafe
*/
private[kafka010] class KafkaOffsetReader(
consumerStrategy: ConsumerStrategy,
val driverKafkaParams: ju.Map[String, Object],
readerOptions: CaseInsensitiveMap[String],
driverGroupIdPrefix: String) extends Logging {
/**
* Used to ensure that fetch operations execute in an UninterruptibleThread
*/
val kafkaReaderThread = Executors.newSingleThreadExecutor((r: Runnable) => {
val t = new UninterruptibleThread("Kafka Offset Reader") {
override def run(): Unit = {
r.run()
}
}
t.setDaemon(true)
t
})
val execContext = ExecutionContext.fromExecutorService(kafkaReaderThread)
/**
* Place [[groupId]] and [[nextId]] here so that they are initialized before any consumer is
* created -- see SPARK-19564.
*/
private var groupId: String = null
private var nextId = 0
/**
* A KafkaConsumer used in the driver to query the latest Kafka offsets. This only queries the
* offsets and never commits them.
*/
@volatile protected var _consumer: Consumer[Array[Byte], Array[Byte]] = null
protected def consumer: Consumer[Array[Byte], Array[Byte]] = synchronized {
assert(Thread.currentThread().isInstanceOf[UninterruptibleThread])
if (_consumer == null) {
val newKafkaParams = new ju.HashMap[String, Object](driverKafkaParams)
if (driverKafkaParams.get(ConsumerConfig.GROUP_ID_CONFIG) == null) {
newKafkaParams.put(ConsumerConfig.GROUP_ID_CONFIG, nextGroupId())
}
_consumer = consumerStrategy.createConsumer(newKafkaParams)
}
_consumer
}
private[kafka010] val maxOffsetFetchAttempts =
readerOptions.getOrElse(KafkaSourceProvider.FETCH_OFFSET_NUM_RETRY, "3").toInt
private[kafka010] val offsetFetchAttemptIntervalMs =
readerOptions.getOrElse(KafkaSourceProvider.FETCH_OFFSET_RETRY_INTERVAL_MS, "1000").toLong
private def nextGroupId(): String = {
groupId = driverGroupIdPrefix + "-" + nextId
nextId += 1
groupId
}
override def toString(): String = consumerStrategy.toString
/**
* Closes the connection to Kafka, and cleans up state.
*/
def close(): Unit = {
if (_consumer != null) runUninterruptibly { stopConsumer() }
kafkaReaderThread.shutdown()
}
/**
* @return The Set of TopicPartitions for a given topic
*/
def fetchTopicPartitions(): Set[TopicPartition] = runUninterruptibly {
assert(Thread.currentThread().isInstanceOf[UninterruptibleThread])
// Poll to get the latest assigned partitions
consumer.poll(0)
val partitions = consumer.assignment()
consumer.pause(partitions)
partitions.asScala.toSet
}
/**
* Fetch the partition offsets for the topic partitions that are indicated
* in the [[ConsumerStrategy]] and [[KafkaOffsetRangeLimit]].
*/
def fetchPartitionOffsets(
offsetRangeLimit: KafkaOffsetRangeLimit,
isStartingOffsets: Boolean): Map[TopicPartition, Long] = {
def validateTopicPartitions(partitions: Set[TopicPartition],
partitionOffsets: Map[TopicPartition, Long]): Map[TopicPartition, Long] = {
assert(partitions == partitionOffsets.keySet,
"If startingOffsets contains specific offsets, you must specify all TopicPartitions.\\n" +
"Use -1 for latest, -2 for earliest.\\n" +
s"Specified: ${partitionOffsets.keySet} Assigned: ${partitions}")
logDebug(s"Partitions assigned to consumer: $partitions. Seeking to $partitionOffsets")
partitionOffsets
}
val partitions = fetchTopicPartitions()
// Obtain TopicPartition offsets with late binding support
offsetRangeLimit match {
case EarliestOffsetRangeLimit => partitions.map {
case tp => tp -> KafkaOffsetRangeLimit.EARLIEST
}.toMap
case LatestOffsetRangeLimit => partitions.map {
case tp => tp -> KafkaOffsetRangeLimit.LATEST
}.toMap
case SpecificOffsetRangeLimit(partitionOffsets) =>
validateTopicPartitions(partitions, partitionOffsets)
case SpecificTimestampRangeLimit(partitionTimestamps) =>
fetchSpecificTimestampBasedOffsets(partitionTimestamps,
failsOnNoMatchingOffset = isStartingOffsets).partitionToOffsets
}
}
/**
* Resolves the specific offsets based on Kafka seek positions.
* This method resolves offset value -1 to the latest and -2 to the
* earliest Kafka seek position.
*
* @param partitionOffsets the specific offsets to resolve
* @param reportDataLoss callback to either report or log data loss depending on setting
*/
def fetchSpecificOffsets(
partitionOffsets: Map[TopicPartition, Long],
reportDataLoss: String => Unit): KafkaSourceOffset = {
val fnAssertParametersWithPartitions: ju.Set[TopicPartition] => Unit = { partitions =>
assert(partitions.asScala == partitionOffsets.keySet,
"If startingOffsets contains specific offsets, you must specify all TopicPartitions.\\n" +
"Use -1 for latest, -2 for earliest, if you don't care.\\n" +
s"Specified: ${partitionOffsets.keySet} Assigned: ${partitions.asScala}")
logDebug(s"Partitions assigned to consumer: $partitions. Seeking to $partitionOffsets")
}
val fnRetrievePartitionOffsets: ju.Set[TopicPartition] => Map[TopicPartition, Long] = { _ =>
partitionOffsets
}
val fnAssertFetchedOffsets: Map[TopicPartition, Long] => Unit = { fetched =>
partitionOffsets.foreach {
case (tp, off) if off != KafkaOffsetRangeLimit.LATEST &&
off != KafkaOffsetRangeLimit.EARLIEST =>
if (fetched(tp) != off) {
reportDataLoss(
s"startingOffsets for $tp was $off but consumer reset to ${fetched(tp)}")
}
case _ =>
// no real way to check that beginning or end is reasonable
}
}
fetchSpecificOffsets0(fnAssertParametersWithPartitions, fnRetrievePartitionOffsets,
fnAssertFetchedOffsets)
}
def fetchSpecificTimestampBasedOffsets(
partitionTimestamps: Map[TopicPartition, Long],
failsOnNoMatchingOffset: Boolean): KafkaSourceOffset = {
val fnAssertParametersWithPartitions: ju.Set[TopicPartition] => Unit = { partitions =>
assert(partitions.asScala == partitionTimestamps.keySet,
"If starting/endingOffsetsByTimestamp contains specific offsets, you must specify all " +
s"topics. Specified: ${partitionTimestamps.keySet} Assigned: ${partitions.asScala}")
logDebug(s"Partitions assigned to consumer: $partitions. Seeking to $partitionTimestamps")
}
val fnRetrievePartitionOffsets: ju.Set[TopicPartition] => Map[TopicPartition, Long] = { _ => {
val converted = partitionTimestamps.map { case (tp, timestamp) =>
tp -> java.lang.Long.valueOf(timestamp)
}.asJava
val offsetForTime: ju.Map[TopicPartition, OffsetAndTimestamp] =
consumer.offsetsForTimes(converted)
offsetForTime.asScala.map { case (tp, offsetAndTimestamp) =>
if (failsOnNoMatchingOffset) {
assert(offsetAndTimestamp != null, "No offset matched from request of " +
s"topic-partition $tp and timestamp ${partitionTimestamps(tp)}.")
}
if (offsetAndTimestamp == null) {
tp -> KafkaOffsetRangeLimit.LATEST
} else {
tp -> offsetAndTimestamp.offset()
}
}.toMap
}
}
val fnAssertFetchedOffsets: Map[TopicPartition, Long] => Unit = { _ => }
fetchSpecificOffsets0(fnAssertParametersWithPartitions, fnRetrievePartitionOffsets,
fnAssertFetchedOffsets)
}
private def fetchSpecificOffsets0(
fnAssertParametersWithPartitions: ju.Set[TopicPartition] => Unit,
fnRetrievePartitionOffsets: ju.Set[TopicPartition] => Map[TopicPartition, Long],
fnAssertFetchedOffsets: Map[TopicPartition, Long] => Unit): KafkaSourceOffset = {
val fetched = partitionsAssignedToConsumer {
partitions => {
fnAssertParametersWithPartitions(partitions)
val partitionOffsets = fnRetrievePartitionOffsets(partitions)
partitionOffsets.foreach {
case (tp, KafkaOffsetRangeLimit.LATEST) =>
consumer.seekToEnd(ju.Arrays.asList(tp))
case (tp, KafkaOffsetRangeLimit.EARLIEST) =>
consumer.seekToBeginning(ju.Arrays.asList(tp))
case (tp, off) => consumer.seek(tp, off)
}
partitionOffsets.map {
case (tp, _) => tp -> consumer.position(tp)
}
}
}
fnAssertFetchedOffsets(fetched)
KafkaSourceOffset(fetched)
}
/**
* Fetch the earliest offsets for the topic partitions that are indicated
* in the [[ConsumerStrategy]].
*/
def fetchEarliestOffsets(): Map[TopicPartition, Long] = partitionsAssignedToConsumer(
partitions => {
logDebug("Seeking to the beginning")
consumer.seekToBeginning(partitions)
val partitionOffsets = partitions.asScala.map(p => p -> consumer.position(p)).toMap
logDebug(s"Got earliest offsets for partition : $partitionOffsets")
partitionOffsets
}, fetchingEarliestOffset = true)
/**
* Fetch the latest offsets for the topic partitions that are indicated
* in the [[ConsumerStrategy]].
*
* Kafka may return earliest offsets when we are requesting latest offsets if `poll` is called
* right before `seekToEnd` (KAFKA-7703). As a workaround, we will call `position` right after
* `poll` to wait until the potential offset request triggered by `poll(0)` is done.
*
* In addition, to avoid other unknown issues, we also use the given `knownOffsets` to audit the
* latest offsets returned by Kafka. If we find some incorrect offsets (a latest offset is less
* than an offset in `knownOffsets`), we will retry at most `maxOffsetFetchAttempts` times. When
* a topic is recreated, the latest offsets may be less than offsets in `knownOffsets`. We cannot
* distinguish this with KAFKA-7703, so we just return whatever we get from Kafka after retrying.
*/
def fetchLatestOffsets(
knownOffsets: Option[PartitionOffsetMap]): PartitionOffsetMap =
partitionsAssignedToConsumer { partitions => {
logDebug("Seeking to the end.")
if (knownOffsets.isEmpty) {
consumer.seekToEnd(partitions)
partitions.asScala.map(p => p -> consumer.position(p)).toMap
} else {
var partitionOffsets: PartitionOffsetMap = Map.empty
/**
* Compare `knownOffsets` and `partitionOffsets`. Returns all partitions that have incorrect
       * latest offset (the offset in `knownOffsets` is greater than the one in `partitionOffsets`).
*/
def findIncorrectOffsets(): Seq[(TopicPartition, Long, Long)] = {
var incorrectOffsets = ArrayBuffer[(TopicPartition, Long, Long)]()
partitionOffsets.foreach { case (tp, offset) =>
knownOffsets.foreach(_.get(tp).foreach { knownOffset =>
if (knownOffset > offset) {
val incorrectOffset = (tp, knownOffset, offset)
incorrectOffsets += incorrectOffset
}
})
}
incorrectOffsets
}
// Retry to fetch latest offsets when detecting incorrect offsets. We don't use
// `withRetriesWithoutInterrupt` to retry because:
//
// - `withRetriesWithoutInterrupt` will reset the consumer for each attempt but a fresh
// consumer has a much bigger chance to hit KAFKA-7703.
// - Avoid calling `consumer.poll(0)` which may cause KAFKA-7703.
var incorrectOffsets: Seq[(TopicPartition, Long, Long)] = Nil
var attempt = 0
do {
consumer.seekToEnd(partitions)
partitionOffsets = partitions.asScala.map(p => p -> consumer.position(p)).toMap
attempt += 1
incorrectOffsets = findIncorrectOffsets()
if (incorrectOffsets.nonEmpty) {
logWarning("Found incorrect offsets in some partitions " +
s"(partition, previous offset, fetched offset): $incorrectOffsets")
if (attempt < maxOffsetFetchAttempts) {
logWarning("Retrying to fetch latest offsets because of incorrect offsets")
Thread.sleep(offsetFetchAttemptIntervalMs)
}
}
} while (incorrectOffsets.nonEmpty && attempt < maxOffsetFetchAttempts)
logDebug(s"Got latest offsets for partition : $partitionOffsets")
partitionOffsets
}
}
}
/**
* Fetch the earliest offsets for specific topic partitions.
* The return result may not contain some partitions if they are deleted.
*/
def fetchEarliestOffsets(
newPartitions: Seq[TopicPartition]): Map[TopicPartition, Long] = {
if (newPartitions.isEmpty) {
Map.empty[TopicPartition, Long]
} else {
partitionsAssignedToConsumer(partitions => {
// Get the earliest offset of each partition
consumer.seekToBeginning(partitions)
val partitionOffsets = newPartitions.filter { p =>
// When deleting topics happen at the same time, some partitions may not be in
// `partitions`. So we need to ignore them
partitions.contains(p)
}.map(p => p -> consumer.position(p)).toMap
logDebug(s"Got earliest offsets for new partitions: $partitionOffsets")
partitionOffsets
}, fetchingEarliestOffset = true)
}
}
private def partitionsAssignedToConsumer(
body: ju.Set[TopicPartition] => Map[TopicPartition, Long],
fetchingEarliestOffset: Boolean = false)
: Map[TopicPartition, Long] = runUninterruptibly {
withRetriesWithoutInterrupt {
// Poll to get the latest assigned partitions
consumer.poll(0)
val partitions = consumer.assignment()
if (!fetchingEarliestOffset) {
// Call `position` to wait until the potential offset request triggered by `poll(0)` is
        // done. This is a workaround for KAFKA-7703, in which an async `seekToBeginning` triggered by
// `poll(0)` may reset offsets that should have been set by another request.
partitions.asScala.map(p => p -> consumer.position(p)).foreach(_ => {})
}
consumer.pause(partitions)
logDebug(s"Partitions assigned to consumer: $partitions.")
body(partitions)
}
}
/**
* This method ensures that the closure is called in an [[UninterruptibleThread]].
* This is required when communicating with the [[KafkaConsumer]]. In the case
* of streaming queries, we are already running in an [[UninterruptibleThread]],
* however for batch mode this is not the case.
*/
private def runUninterruptibly[T](body: => T): T = {
if (!Thread.currentThread.isInstanceOf[UninterruptibleThread]) {
val future = Future {
body
}(execContext)
ThreadUtils.awaitResult(future, Duration.Inf)
} else {
body
}
}
/**
* Helper function that does multiple retries on a body of code that returns offsets.
   * Retries are needed to handle transient failures. For example, race conditions between getting
* assignment and getting position while topics/partitions are deleted can cause NPEs.
*
   * This method also makes sure `body` won't be interrupted to work around a potential issue in
* `KafkaConsumer.poll`. (KAFKA-1894)
*/
private def withRetriesWithoutInterrupt(
body: => Map[TopicPartition, Long]): Map[TopicPartition, Long] = {
// Make sure `KafkaConsumer.poll` won't be interrupted (KAFKA-1894)
assert(Thread.currentThread().isInstanceOf[UninterruptibleThread])
synchronized {
var result: Option[Map[TopicPartition, Long]] = None
var attempt = 1
var lastException: Throwable = null
while (result.isEmpty && attempt <= maxOffsetFetchAttempts
&& !Thread.currentThread().isInterrupted) {
Thread.currentThread match {
case ut: UninterruptibleThread =>
// "KafkaConsumer.poll" may hang forever if the thread is interrupted (E.g., the query
// is stopped)(KAFKA-1894). Hence, we just make sure we don't interrupt it.
//
// If the broker addresses are wrong, or Kafka cluster is down, "KafkaConsumer.poll" may
// hang forever as well. This cannot be resolved in KafkaSource until Kafka fixes the
// issue.
ut.runUninterruptibly {
try {
result = Some(body)
} catch {
case NonFatal(e) =>
lastException = e
logWarning(s"Error in attempt $attempt getting Kafka offsets: ", e)
attempt += 1
Thread.sleep(offsetFetchAttemptIntervalMs)
resetConsumer()
}
}
case _ =>
throw new IllegalStateException(
"Kafka APIs must be executed on a o.a.spark.util.UninterruptibleThread")
}
}
if (Thread.interrupted()) {
throw new InterruptedException()
}
if (result.isEmpty) {
assert(attempt > maxOffsetFetchAttempts)
assert(lastException != null)
throw lastException
}
result.get
}
}
private def stopConsumer(): Unit = synchronized {
assert(Thread.currentThread().isInstanceOf[UninterruptibleThread])
if (_consumer != null) _consumer.close()
}
private def resetConsumer(): Unit = synchronized {
stopConsumer()
_consumer = null // will automatically get reinitialized again
}
}
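
/**
 * Hypothetical sketch (not part of Spark): the retry loop inside `withRetriesWithoutInterrupt`
 * reduced to its essence. The names (`OffsetRetrySketch`, `retryOnNonFatal`) are made up for
 * illustration; the real method additionally pins execution to an UninterruptibleThread and
 * resets the Kafka consumer between attempts.
 */
private[kafka010] object OffsetRetrySketch {
  import scala.util.control.NonFatal

  def retryOnNonFatal[T](maxAttempts: Int, intervalMs: Long)(body: => T): T = {
    require(maxAttempts >= 1, "maxAttempts must be at least 1")
    var result: Option[T] = None
    var lastError: Throwable = null
    var attempt = 1
    while (result.isEmpty && attempt <= maxAttempts) {
      try {
        result = Some(body)
      } catch {
        case NonFatal(e) =>
          lastError = e
          attempt += 1
          if (attempt <= maxAttempts) Thread.sleep(intervalMs)
      }
    }
    // give up with the last observed error once the attempts are exhausted
    result.getOrElse(throw lastError)
  }
}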
|
jkbradley/spark
|
external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaOffsetReader.scala
|
Scala
|
apache-2.0
| 19,511
|
package org.janzhou.native
import com.sun.jna._
trait libpmem extends Library {
def pmem_map(fd:Int):Pointer
def pmem_unmap(pmemaddr:Pointer, pmem_len:Long):Int
def pmem_is_pmem(pmemaddr:Pointer, pmem_len:Long):Int
def pmem_persist(addr:Pointer, len:Long):Unit
def pmem_msync(addr:Pointer, len:Long):Int
def pmem_flush(addr:Pointer, len:Long):Unit
def pmem_drain():Unit
def pmem_has_hw_drain():Int
def pmem_memmove_persist(pmemdest:Pointer, src:Pointer, len:Int):Pointer
def pmem_memcpy_persist(pmemdest:Pointer, src:Pointer, len:Int):Pointer
def pmem_memset_persist(pmemdest:Pointer, c:Int, len:Int):Pointer
def pmem_memmove_nodrain(pmemdest:Pointer, src:Pointer, len:Int):Pointer
def pmem_memcpy_nodrain(pmemdest:Pointer, src:Pointer, len:Int):Pointer
def pmem_memset_nodrain(pmemdest:Pointer, c:Int, len:Int):Pointer
}
object libpmem {
private var _libpmem:libpmem = null
def run():libpmem = {
if ( _libpmem == null ) {
_libpmem = Native.loadLibrary("pmem", classOf[libpmem]).asInstanceOf[libpmem]
}
_libpmem
}
def call:libpmem = run()
}
|
janzhou/scala-native
|
src/main/scala/org/janzhou/native/libpmem.scala
|
Scala
|
apache-2.0
| 1,099
|
// lchannels - session programming in Scala
// Copyright (c) 2016, Alceste Scalas and Imperial College London
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
/** @author Alceste Scalas <alceste.scalas@imperial.ac.uk> */
package lchannels.examples.sleepingbarber.customer
import scala.concurrent.duration.Duration
import com.typesafe.scalalogging.StrictLogging
import lchannels._
import lchannels.examples.sleepingbarber.barbershop
//////////////////////////////////////////////////////////////////////////////
// Session type:
// S_cust = ?Full.end & ?Seat.?Ready.S_cut
// where S_cut = !Description(String).?Haircut.!Pay.end
//////////////////////////////////////////////////////////////////////////////
sealed abstract class WaitingRoom
case class Full() extends WaitingRoom
case class Seat()(val cont: In[Ready]) extends WaitingRoom
case class Ready()(val cont: Out[Description])
case class Description(text: String)(val cont: Out[Cut])
case class Cut()(val cont: Out[Pay])
case class Pay(amount: Int)
////////////////////////////////////////////////////////////////////////////
class Customer(name: String, shop: barbershop.Shop)
(implicit d: Duration) extends Runnable with StrictLogging {
private def logTrace(msg: String) = logger.trace(f"${name}: ${msg}")
private def logDebug(msg: String) = logger.debug(f"${name}: ${msg}")
private def logInfo(msg: String) = logger.info(f"${name}: ${msg}")
private def logWarn(msg: String) = logger.warn(f"${name}: ${msg}")
private def logError(msg: String) = logger.error(f"${name}: ${msg}")
// Own thread
private val thread = { val t = new Thread(this); t.start(); t }
def join() = thread.join()
override def run(): Unit = {
logInfo("started, entering in shop")
loop()
logInfo("leaving the shop")
}
private def loop(): Unit = {
shop.enter() ? {
case Full() => {
logInfo("waiting is room full, will retry within 3 seconds")
Thread.sleep(new scala.util.Random().nextInt(30) * 100)
loop()
}
case m @ Seat() => {
logInfo("got a seat, waiting...")
m.cont ? { ready =>
logInfo("barber is ready, describing cut")
(ready.cont !! Description("Fancy hairdo")_) ? { cut =>
logInfo("cut done, paying")
cut.cont ! Pay(42)
}
}
}
}
}
}
|
scribble/scribble.github.io
|
src/main/jbake/assets/docs/lchannels/examples/src/main/scala/lchannels/examples/sleepingbarber/Customer.scala
|
Scala
|
apache-2.0
| 3,692
|
package io.taig.android.util.syntax
import android.graphics.Bitmap
import io.taig.android.util.operation
import scala.language.implicitConversions
trait bitmap {
implicit def utilBitmapSyntax(bitmap: Bitmap): operation.bitmap = {
new operation.bitmap(bitmap)
}
}
object bitmap extends bitmap
|
Taig/Toolbelt
|
util/src/main/scala/io/taig/android/util/syntax/bitmap.scala
|
Scala
|
mit
| 304
|
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer (t3l@threelights.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze.objectives
import edu.latrobe._
import edu.latrobe.blaze._
import edu.latrobe.time._
import scala.util.hashing._
final class PeriodicTrigger(override val builder: PeriodicTriggerBuilder,
override val seed: InstanceSeed)
extends BinaryTriggerObjective[PeriodicTriggerBuilder](
ObjectiveEvaluationResult.Neutral
) {
require(builder != null && seed != null)
val period
: TimeSpan = builder.period
private var clock
: Timer = Timer(period)
override protected def doEvaluate(optimizer: OptimizerLike,
runBeginIterationNo: Long,
runBeginTime: Timestamp,
runNoSamples: Long,
model: Module,
batch: Batch,
output: Tensor,
value: Real)
: Boolean = clock.resetIfElapsed(period)
// ---------------------------------------------------------------------------
// State management.
// ---------------------------------------------------------------------------
override def state
: ObjectiveState = PeriodicTriggerState(super.state, clock)
override def restoreState(state: InstanceState): Unit = {
super.restoreState(state.parent)
state match {
case state: PeriodicTriggerState =>
clock = state.clock.copy
case _ =>
throw new MatchError(state)
}
}
}
final class PeriodicTriggerBuilder
extends BinaryTriggerObjectiveBuilder[PeriodicTriggerBuilder] {
override def repr
: PeriodicTriggerBuilder = this
private var _period
: TimeSpan = TimeSpan.oneMinute
def period
: TimeSpan = _period
def period_=(value: TimeSpan): Unit = {
require(value >= TimeSpan.zero)
_period = value
}
def setPeriod(value: TimeSpan)
: PeriodicTriggerBuilder = {
period_=(value)
this
}
def setPeriod(value: Real)
: PeriodicTriggerBuilder = setPeriod(TimeSpan(value))
override protected def doToString()
: List[Any] = _period :: super.doToString()
override def hashCode()
: Int = MurmurHash3.mix(super.hashCode(), _period.hashCode())
override def canEqual(that: Any)
: Boolean = that.isInstanceOf[PeriodicTriggerBuilder]
override protected def doEquals(other: Equatable)
: Boolean = super.doEquals(other) && (other match {
case other: PeriodicTriggerBuilder =>
_period == other._period
case _ =>
false
})
override protected def doCopy()
: PeriodicTriggerBuilder = PeriodicTriggerBuilder()
override def copyTo(other: InstanceBuilder)
: Unit = {
super.copyTo(other)
other match {
case other: PeriodicTriggerBuilder =>
other._period = _period
case _ =>
}
}
override def build(seed: InstanceSeed)
: PeriodicTrigger = new PeriodicTrigger(this, seed)
}
object PeriodicTriggerBuilder {
final def apply()
: PeriodicTriggerBuilder = new PeriodicTriggerBuilder
final def apply(period: Real)
: PeriodicTriggerBuilder = apply().setPeriod(period)
final def apply(period: TimeSpan)
: PeriodicTriggerBuilder = apply().setPeriod(period)
}
final case class PeriodicTriggerState(override val parent: InstanceState,
clock: Timer)
extends ObjectiveState {
}
|
bashimao/ltudl
|
blaze/src/main/scala/edu/latrobe/blaze/objectives/PeriodicTrigger.scala
|
Scala
|
apache-2.0
| 4,164
|
object Super {
class C { def ++=(x: Int) = () }
class D extends C { override def ++=(x: Int) = super.++=(x) }
class T1 { def foo: Int = ??? }
trait T2
new T1 with T2 {
override def foo: Int = super.foo
}
trait T3 { def foo: Unit }
trait T4 extends T3 {
this: C =>
abstract override def foo: Unit = super.foo
}
}
|
mdemarne/scalahost
|
tests/src/test/resources/ScalaToMeta/Super/Original.scala
|
Scala
|
bsd-3-clause
| 343
|
package com.campudus.tableaux.database.domain
import com.campudus.tableaux.database.model.TableauxModel._
import org.vertx.scala.core.json._
case class RawRow(
id: RowId,
rowLevelAnnotations: RowLevelAnnotations,
cellLevelAnnotations: CellLevelAnnotations,
values: Seq[_]
)
case class Row(
table: Table,
id: RowId,
rowLevelAnnotations: RowLevelAnnotations,
cellLevelAnnotations: CellLevelAnnotations,
values: Seq[_]
) extends DomainObject {
override def getJson: JsonObject = {
val json = Json.obj(
"id" -> id,
"values" -> compatibilityGet(values)
)
if (rowLevelAnnotations.isDefined) {
json.mergeIn(rowLevelAnnotations.getJson)
}
if (cellLevelAnnotations.isDefined) {
json.mergeIn(cellLevelAnnotations.getJson)
}
json
}
}
case class RowSeq(rows: Seq[Row], page: Page = Page(Pagination(None, None), None)) extends DomainObject {
override def getJson: JsonObject = {
Json.obj(
"page" -> compatibilityGet(page),
"rows" -> (rows map (_.getJson))
)
}
}
case class DependentRows(table: Table, column: ColumnType[_], rows: Seq[JsonObject]) extends DomainObject {
override def getJson: JsonObject = {
Json.obj(
"table" -> table.getJson,
"column" -> compatibilityGet(column),
"rows" -> compatibilityGet(rows)
)
}
}
case class DependentRowsSeq(dependentRowsSeq: Seq[DependentRows]) extends DomainObject {
override def getJson: JsonObject = Json.obj("dependentRows" -> compatibilityGet(dependentRowsSeq))
}
|
campudus/tableaux
|
src/main/scala/com/campudus/tableaux/database/domain/row.scala
|
Scala
|
apache-2.0
| 1,558
|
package com.ignition.frame.mllib
import scala.xml.{ Elem, Node }
import org.apache.spark.mllib.stat.Statistics
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions
import org.apache.spark.sql.{ DataFrame, Row }
import org.apache.spark.sql.types.StructType
import org.json4s.JValue
import org.json4s.JsonDSL._
import org.json4s.jvalue2monadic
import com.ignition.frame.{ FrameTransformer, SparkRuntime }
import com.ignition.types.{ RichStructType, double, fieldToRichStruct, long }
import com.ignition.util.JsonUtils.RichJValue
import com.ignition.util.XmlUtils.RichNodeSeq
/**
* Calculates column-based statistics using MLLib library.
*
* @author Vlad Orzhekhovskiy
*/
case class ColumnStats(dataFields: Iterable[String], groupFields: Iterable[String] = Nil)
extends FrameTransformer with MLFunctions {
import ColumnStats._
def add(fields: String*) = copy(dataFields = dataFields ++ fields)
def %(fields: String*) = add(fields: _*)
def groupBy(fields: String*) = copy(groupFields = fields)
protected def compute(arg: DataFrame)(implicit runtime: SparkRuntime): DataFrame = {
val df = optLimit(arg, runtime.previewMode)
val rdd = toVectors(df, dataFields, groupFields)
rdd.persist
val keys = rdd.keys.distinct.collect
val rows = keys map { key =>
val slice = rdd filter (_._1 == key) values
val st = Statistics.colStats(slice)
val data = (0 until dataFields.size) flatMap { idx =>
Seq(st.max(idx), st.min(idx), st.mean(idx), st.numNonzeros(idx),
st.variance(idx), st.normL1(idx), st.normL2(idx))
}
Row.fromSeq((key.toSeq :+ st.count) ++ data)
}
val targetRDD = ctx.sparkContext.parallelize(rows)
val targetFields = ((groupFields map df.schema.apply toSeq) :+ long("count")) ++
dataFields.zipWithIndex.flatMap {
case (name, idx) => double(s"${name}_max") ~ double(s"${name}_min") ~
double(s"${name}_mean") ~ double(s"${name}_non0") ~ double(s"${name}_variance") ~
double(s"${name}_normL1") ~ double(s"${name}_normL2")
}
val schema = StructType(targetFields)
ctx.createDataFrame(targetRDD, schema)
}
def toXml: Elem =
<node>
<aggregate>
{
dataFields map { name => <field name={ name }/> }
}
</aggregate>
{
if (!groupFields.isEmpty)
<group-by>
{ groupFields map (f => <field name={ f }/>) }
</group-by>
}
</node>.copy(label = tag)
def toJson: org.json4s.JValue = {
val groupBy = if (groupFields.isEmpty) None else Some(groupFields)
val aggregate = dataFields map (_.toString)
("tag" -> tag) ~ ("groupBy" -> groupBy) ~ ("aggregate" -> aggregate)
}
}
/**
* Columns Stats companion object.
*/
object ColumnStats {
val tag = "column-stats"
def apply(dataFields: String*): ColumnStats = apply(dataFields, Nil)
def fromXml(xml: Node) = {
val dataFields = (xml \ "aggregate" \ "field") map { _ \ "@name" asString }
val groupFields = (xml \ "group-by" \ "field") map (_ \ "@name" asString)
apply(dataFields, groupFields)
}
def fromJson(json: JValue) = {
val dataFields = (json \ "aggregate" asArray) map (_ asString)
val groupFields = (json \ "groupBy" asArray) map (_ asString)
apply(dataFields, groupFields)
}
}
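
/**
 * Hypothetical usage sketch (not part of Ignition): constructing the step and round-tripping
 * its JSON form. `compute` needs a live SparkRuntime and is deliberately not exercised here.
 */
private object ColumnStatsExample {
  val step = ColumnStats("weight", "height") groupBy "gender"
  // roughly: {"tag":"column-stats","groupBy":["gender"],"aggregate":["weight","height"]}
  val json = step.toJson
  val restored: ColumnStats = ColumnStats.fromJson(json)
}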
|
uralian/ignition
|
src/main/scala/com/ignition/frame/mllib/ColumnStats.scala
|
Scala
|
apache-2.0
| 3,323
|
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.schedulers
import monix.execution.ExecutionModel
import scala.concurrent.duration.TimeUnit
import scala.concurrent.{ExecutionContext, Future}
/** The `TracingSchedulerService` is a
 * [[monix.execution.schedulers.SchedulerService SchedulerService]]
 * implementation that wraps another `SchedulerService`
 * reference, with the purpose of propagating the
* [[monix.execution.misc.Local.Context Local.Context]] on async
* execution.
*
* @param underlying the
* [[monix.execution.schedulers.SchedulerService SchedulerService]]
* in charge of the actual execution and scheduling
*/
final class TracingSchedulerService(underlying: SchedulerService)
extends TracingScheduler.Base(underlying)
with SchedulerService { self =>
override def isShutdown: Boolean =
underlying.isShutdown
override def isTerminated: Boolean =
underlying.isTerminated
override def shutdown(): Unit =
underlying.shutdown()
override def awaitTermination(timeout: Long, unit: TimeUnit, awaitOn: ExecutionContext): Future[Boolean] =
underlying.awaitTermination(timeout, unit, awaitOn)
override def withExecutionModel(em: ExecutionModel): TracingSchedulerService =
new TracingSchedulerService(underlying.withExecutionModel(em))
}
object TracingSchedulerService {
  /** Builds a [[TracingSchedulerService]] instance, wrapping the
* `underlying` scheduler given.
*/
def apply(underlying: SchedulerService): TracingSchedulerService =
new TracingSchedulerService(underlying)
}
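
/**
 * Hypothetical usage sketch (not part of Monix): wrapping a plain [[SchedulerService]] so
 * that `Local.Context` propagates across async boundaries. It assumes `Scheduler.io()`
 * returns a [[SchedulerService]], as the standard Monix builders do.
 */
private[schedulers] object TracingSchedulerServiceExample {
  import monix.execution.Scheduler

  def tracedIO(): TracingSchedulerService = {
    val underlying: SchedulerService = Scheduler.io(name = "traced-io")
    TracingSchedulerService(underlying)
  }
}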
|
Wogan/monix
|
monix-execution/shared/src/main/scala/monix/execution/schedulers/TracingSchedulerService.scala
|
Scala
|
apache-2.0
| 2,226
|
package com.thangiee.lolhangouts.ui.core
import android.content.Intent
trait TIntent {
implicit class IntentOp(i: Intent) {
def args(arguments: (String, Any)*): Intent = {
for ((k, v) ← arguments) {
v match {
case v: String => i.putExtra(k, v)
case v: Int => i.putExtra(k, v)
case v: Double => i.putExtra(k, v)
case v: Float => i.putExtra(k, v)
case v: Boolean => i.putExtra(k, v)
case v: Serializable => i.putExtra(k, v)
}
}
i
}
}
}
|
Thangiee/LoL-Hangouts
|
src/com/thangiee/lolhangouts/ui/core/TIntent.scala
|
Scala
|
apache-2.0
| 586
|
// Copyright: 2010 - 2018 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.util
import Predef.{ any2stringadd => _, _ => _ }
import scala.collection.{ Map, Set }
package object map {
implicit class RichMap[K, V](val map: Map[K, V]) extends AnyVal {
/**
* Map.mapValues is notoriously inconsistent and returns a View
* rather than a solid implementation, this is what you thought it
* did.
*/
def mapValuesEagerly[W](f: V => W): Map[K, W] = map.map {
case (k, v) => (k, f(v))
}
}
// I'm sure CanBuildFrom could make this general to all value containers
implicit class RichMultiMapSet[K, V](val map: Map[K, Set[V]]) extends AnyVal {
/**
* Treating `map` as a multimap, merge with another similarly
* structured object removing duplicate values.
*/
def merge(other: Map[K, Set[V]]): Map[K, Set[V]] = {
import collection.mutable
val builder = new mutable.HashMap[K, mutable.Set[V]]
with mutable.MultiMap[K, V]
builder ++= map.mapValuesEagerly { v =>
v.to[mutable.Set]
}
for {
(k, vs) <- other
v <- vs
} builder.addBinding(k, v)
builder.map {
case (k, vs) => (k, vs.toSet)
}(collection.breakOut)
}
}
}
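
/**
 * Hypothetical sketch (not part of the original file) showing how the two enrichments
 * above behave; the values are only illustrative.
 */
private[util] object MapSyntaxExample {
  import org.ensime.util.map._

  // mapValuesEagerly materialises the result instead of returning a lazy view
  val doubled: Map[String, Int] = Map("a" -> 1, "b" -> 2).mapValuesEagerly(_ * 2)

  // merge unions the value sets of two multimaps, deduplicating values:
  // Map("a" -> Set(1, 2, 3), "b" -> Set(4))
  val merged: Map[String, Set[Int]] =
    Map("a" -> Set(1, 2)).merge(Map("a" -> Set(2, 3), "b" -> Set(4)))
}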
|
yyadavalli/ensime-server
|
util/src/main/scala/org/ensime/util/map.scala
|
Scala
|
gpl-3.0
| 1,345
|
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail.internal
import cats.effect.Sync
import cats.syntax.all._
import monix.execution.internal.collection.ChunkedArrayStack
import monix.tail.Iterant
import monix.tail.Iterant.{Concat, Halt, Last, Next, NextBatch, NextCursor, Scope, Suspend}
import monix.tail.batches.BatchCursor
private[tail] object IterantHeadOptionL {
/**
* Implementation for `Iterant#headOption`.
*/
def apply[F[_], A](source: Iterant[F, A])
(implicit F: Sync[F]): F[Option[A]] = {
source match {
case Next(a, _) => F.pure(Some(a))
case Last(a) => F.pure(Some(a))
case _ =>
F.suspend(new Loop[F, A].apply(source))
}
}
private final class Loop[F[_], A](implicit F: Sync[F])
extends Iterant.Visitor[F, A, F[Option[A]]] {
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Used in visit(Concat)
private[this] var stackRef: ChunkedArrayStack[F[Iterant[F, A]]] = _
private def stackPush(item: F[Iterant[F, A]]): Unit = {
if (stackRef == null) stackRef = ChunkedArrayStack()
stackRef.push(item)
}
private def stackPop(): F[Iterant[F, A]] = {
if (stackRef != null) stackRef.pop()
else null.asInstanceOf[F[Iterant[F, A]]]
}
private[this] val concatContinue: (Option[A] => F[Option[A]]) = {
case None =>
stackPop() match {
case null => F.pure(None)
case xs => xs.flatMap(this)
}
case some =>
F.pure(some)
}
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
def visit(ref: Next[F, A]): F[Option[A]] =
F.pure(Some(ref.item))
def visit(ref: NextBatch[F, A]): F[Option[A]] =
processCursor(ref.batch.cursor(), ref.rest)
def visit(ref: NextCursor[F, A]): F[Option[A]] =
processCursor(ref.cursor, ref.rest)
def visit(ref: Suspend[F, A]): F[Option[A]] =
ref.rest.flatMap(this)
def visit(ref: Concat[F, A]): F[Option[A]] = {
stackPush(ref.rh)
ref.lh.flatMap(this).flatMap(concatContinue)
}
def visit[S](ref: Scope[F, S, A]): F[Option[A]] =
ref.runFold(this)
def visit(ref: Last[F, A]): F[Option[A]] =
F.pure(Some(ref.item))
def visit(ref: Halt[F, A]): F[Option[A]] =
ref.e match {
case Some(e) => F.raiseError(e)
case None => F.pure(None)
}
def fail(e: Throwable): F[Option[A]] =
F.raiseError(e)
private def processCursor(cursor: BatchCursor[A], rest: F[Iterant[F, A]]): F[Option[A]] = {
if (cursor.hasNext())
F.pure(Some(cursor.next()))
else
rest.flatMap(this)
}
}
}
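
/** Hypothetical sketch (not part of Monix): driving the loop above directly through this
  * internal entry point, using `Coeval` so no async boundary is involved. `Iterant.pure`
  * and `Iterant.empty` are the ordinary public builders.
  */
private[tail] object IterantHeadOptionLExample {
  import monix.eval.Coeval

  // Some(42): the first visited node already carries an element
  val head: Coeval[Option[Int]] = IterantHeadOptionL(Iterant.pure[Coeval, Int](42))

  // None: an empty stream ends in Halt(None)
  val none: Coeval[Option[Int]] = IterantHeadOptionL(Iterant.empty[Coeval, Int])
}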
|
Wogan/monix
|
monix-tail/shared/src/main/scala/monix/tail/internal/IterantHeadOptionL.scala
|
Scala
|
apache-2.0
| 3,302
|
package com.twitter.finagle
import com.twitter.finagle.util.LoadService
import com.twitter.util.{Closable, Future, Time}
import java.net.{InetSocketAddress, SocketAddress}
import java.util.logging.Logger
import scala.collection.mutable
/**
* Indicates that an [[com.twitter.finagle.Announcer]] was not found for the
* given `scheme`.
*
* Announcers are discovered via Finagle's [[com.twitter.finagle.util.LoadService]]
* mechanism. These exceptions typically suggest that there are no libraries
* on the classpath that define an Announcer for the given scheme.
*/
class AnnouncerNotFoundException(scheme: String)
extends Exception("Announcer not found for scheme \\"%s\\"".format(scheme))
/**
* Indicates that multiple [[com.twitter.finagle.Announcer Announcers]] were
* discovered for given `scheme`.
*
* Announcers are discovered via Finagle's [[com.twitter.finagle.util.LoadService]]
* mechanism. These exceptions typically suggest that there are multiple
* libraries on the classpath with conflicting scheme definitions.
*/
class MultipleAnnouncersPerSchemeException(announcers: Map[String, Seq[Announcer]])
extends NoStacktrace
{
override def getMessage = {
val msgs = announcers map { case (scheme, rs) =>
"%s=(%s)".format(scheme, rs.map(_.getClass.getName).mkString(", "))
} mkString(" ")
"Multiple announcers defined: %s".format(msgs)
}
}
/**
* Indicates that a forum string passed to an [[com.twitter.finagle.Announcer]]
* was invalid according to the forum grammar [1].
*
* [1] http://twitter.github.io/finagle/guide/Names.html
*/
class AnnouncerForumInvalid(forum: String)
extends Exception("Announcer forum \\"%s\\" is not valid".format(forum))
trait Announcement extends Closable {
def close(deadline: Time) = unannounce()
def unannounce(): Future[Unit]
}
trait ProxyAnnouncement extends Announcement with Proxy {
val forums: List[String]
}
trait Announcer {
val scheme: String
def announce(addr: InetSocketAddress, name: String): Future[Announcement]
}
object Announcer {
private[this] lazy val announcers = {
val announcers = LoadService[Announcer]()
val log = Logger.getLogger(getClass.getName)
val dups = announcers groupBy(_.scheme) filter { case (_, rs) => rs.size > 1 }
if (dups.size > 0) throw new MultipleAnnouncersPerSchemeException(dups)
for (r <- announcers)
log.info("Announcer[%s] = %s(%s)".format(r.scheme, r.getClass.getName, r))
announcers
}
def get[T <: Announcer](clazz: Class[T]): Option[T] =
announcers find { _.getClass isAssignableFrom clazz } map { _.asInstanceOf[T] }
private[this] val _announcements = mutable.Set[(InetSocketAddress, List[String])]()
def announcements = synchronized { _announcements.toSet }
def announce(addr: InetSocketAddress, forum: String): Future[Announcement] = {
val announcement = forum.split("!", 2) match {
case Array(scheme, name) =>
announcers.find(_.scheme == scheme) match {
case Some(announcer) => announcer.announce(addr, name)
case None => Future.exception(new AnnouncerNotFoundException(scheme))
}
case _ =>
Future.exception(new AnnouncerForumInvalid(forum))
}
announcement map { ann =>
val lastForums = ann match {
case a: ProxyAnnouncement => a.forums
case _ => Nil
}
val proxyAnnouncement = new ProxyAnnouncement {
val self = ann
def unannounce() = ann.unannounce()
val forums = forum :: lastForums
}
synchronized {
_announcements -= ((addr, lastForums))
_announcements += ((addr, proxyAnnouncement.forums))
}
proxyAnnouncement
}
}
}
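
/**
 * Hypothetical sketch (not part of Finagle): the forum string is "<scheme>!<name>", where the
 * scheme selects one of the announcers discovered by LoadService. The "zk" scheme and the name
 * below are only illustrative -- whether they resolve depends on what is on the classpath.
 */
private[finagle] object AnnouncerExample {
  import com.twitter.util.Future
  import java.net.InetSocketAddress

  def announceOrFail(): Future[Announcement] =
    Announcer.announce(new InetSocketAddress(8080), "zk!myzkhost:2181!/path/to/cluster")
}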
|
travisbrown/finagle
|
finagle-core/src/main/scala/com/twitter/finagle/Announcer.scala
|
Scala
|
apache-2.0
| 3,701
|
/**
* Copyright 2013 Gianluca Amato <gamato@unich.it>
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.targets.jvmsoot
import it.unich.jandom.targets._
import soot.SootMethod
import soot.Scene
import soot.jimple.toolkits.callgraph.CHATransformer
import soot.jimple.toolkits.callgraph.Sources
import soot.jimple.toolkits.callgraph.TopologicalOrderer
/**
* This is just a tag for interpretations which should be used in the analysis of Soot
* methods. The input in a Soot interpretation has the following format:
* - if the method is not static, the first dimension corresponds to `this`
* - each further dimension corresponds to a parameter in the order of declaration
* The output of a Soot interpretation has the same correspondence between dimensions
 * and signature, with the addition of a last parameter corresponding to the return type
* if this is different from `void`.
*/
trait SootInterpretation[Tgt <: SootCFG[Tgt,_], Params <: Parameters[Tgt]] extends Interpretation[Tgt, Params] {
}
/**
* A `TopSootInterpretation` always returns the top abstract element of the correct type for
* each input.
* @param params the parameters for the analysis
*/
class TopSootInterpretation[Tgt <: SootCFG[Tgt, _], Params <: Parameters[Tgt]](val params: Params) extends SootInterpretation[Tgt, Params] {
def apply(method: SootMethod, input: params.Property): params.Property = params.domain.top(SootCFG.outputTypes(method))
}
/**
* A `JimpleInterpretation` tries to return the semantics of a method by recursively analyzing its body.
* It does not handle recursion, so it generates an exception if recursion is detected. It should only
* be used for testing purposes. It only supports the target `JimpleMethod` for now.
* @param params the parameters for the analysis
 * @throws IllegalArgumentException if recursive definitions are detected
*/
class JimpleInterpretation[Params <: Parameters[JimpleMethod]](val params: Params) extends SootInterpretation[JimpleMethod, Params] {
/**
   * It maps each pair `(method, input)` to the pair `(output, rec)`. The first time a pair `(method, input)`
* is encountered, the value `(bottom, false)` is put in `inte`. Then, the body of the method is analyzed.
* If during the analysis the pair `(method, input)` is required again, execution stops with an exception,
* otherwise `(output, true)` is saved into the map.
*/
private val inte = scala.collection.mutable.HashMap[(SootMethod, params.Property), (params.Property, Boolean)]()
private val jmethodCache = scala.collection.mutable.HashMap[SootMethod, JimpleMethod]()
def apply(method: SootMethod, input: params.Property): params.Property = {
if (inte contains ((method, input))) inte((method, input)) match {
case (output, true) => output
case (output, false) => throw new IllegalArgumentException("Recursive")
}
else {
inte((method, input)) = (params.domain.bottom(SootCFG.outputTypes(method)), false)
val jmethod = jmethodCache.getOrElseUpdate(method, new JimpleMethod(method))
val ann = jmethod.analyzeFromInput(params)(input)
val output = jmethod.extractOutput(params)(ann)
inte((method, input)) = (output, true)
output
}
}
}
/**
* A `JimpleRecursiveInterpretation` tries to return the semantics of methods by a summary based analysis.
* The semantics of all methods is initialized to top for every possible input, then methods are analyzed
* with a work-list based approach. Each method has a single possible input context, which is top.
*/
class JimpleRecursiveInterpretation[Params <: Parameters[JimpleMethod]](scene: Scene, val params: Params) extends Interpretation[JimpleMethod, Params] {
import scala.collection.JavaConversions._
val inte = scala.collection.mutable.HashMap[SootMethod, params.Property]()
val targets = scala.collection.mutable.HashMap[SootMethod, Option[JimpleMethod]]()
def apply(method: SootMethod, input: params.Property): params.Property = {
if (inte contains method)
inte(method)
else {
val bottom = params.domain.bottom(SootCFG.outputTypes(method))
inte(method) = bottom
bottom
}
}
def compute(method: SootMethod, input: params.Property) {
val l = new java.util.LinkedList[SootMethod]()
l.add(method)
scene.setEntryPoints(l)
CHATransformer.v().transform()
val cg = scene.getCallGraph()
val tpo = new TopologicalOrderer(cg)
tpo.go()
val order = tpo.order().reverse // it is enough to get the set of all the elements
if (order.isEmpty()) order.add(method)
for (m <- order; if !(targets contains m)) {
targets(m) = if (m.isConcrete()) Some(new JimpleMethod(m)) else None
inte(m) = params.domain.bottom(SootCFG.outputTypes(m))
}
val worklist = scala.collection.mutable.Queue[SootMethod](order.toSeq: _*)
while (!worklist.isEmpty) {
val m = worklist.dequeue
val jmethod = targets(m)
val top = params.domain.top(SootCFG.inputTypes(m))
val output = jmethod match {
case None => inte(m)
case Some(jmethod) => {
val ann = jmethod.analyzeFromInput(params)(top)
jmethod.extractOutput(params)(ann)
}
}
if (!(inte(m) >= output)) {
inte(m) = inte(m) widening output
val sources = new Sources(cg.edgesInto(m)).asInstanceOf[java.util.Iterator[SootMethod]]
worklist.enqueue(sources.toSeq: _*)
}
}
}
override def toString = inte.toString
}
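
/**
 * Hypothetical sketch (not part of JANDOM): the bookkeeping described in the
 * `JimpleInterpretation` comment, reduced to a generic memo table that rejects recursion.
 * All names here are made up for illustration.
 */
private object RecursionAwareMemoSketch {
  import scala.collection.mutable

  /** `analyze` receives the input and a callback for analyzing callees. */
  def memoNonRecursive[I, O](bottom: O)(analyze: (I, I => O) => O): I => O = {
    val table = mutable.HashMap[I, (O, Boolean)]()
    def eval(input: I): O = table.get(input) match {
      case Some((output, finished)) =>
        // `finished == false` means we re-entered while still analyzing: recursion detected
        if (finished) output else throw new IllegalArgumentException("Recursive")
      case None =>
        table(input) = (bottom, false)      // mark as "in progress"
        val output = analyze(input, eval)   // the body may call back into eval for callees
        table(input) = (output, true)       // finalize
        output
    }
    eval
  }
}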
|
rubino22/JDBeta
|
core/src/main/scala/it/unich/jandom/targets/jvmsoot/SootInterpretation.scala
|
Scala
|
lgpl-3.0
| 6,191
|
package sorm.test.general
import org.scalatest.{FunSuite, Matchers}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import sorm._
import sorm.test.MultiInstanceSuite
@RunWith(classOf[JUnitRunner])
class TutorialSuite extends FunSuite with Matchers with MultiInstanceSuite {
import TutorialSuite._
def entities
= Set() +
Entity[Artist]() +
Entity[Genre]() +
Entity[Locale](unique = Set() + Seq("code"))
instancesAndIds foreach { case (db, dbId) =>
// create locales:
val ru = db.save( Locale("ru") )
val en = db.save( Locale("en") )
// create genres:
val rock = db.save( Genre( Map( en -> Seq("Rock"),
ru -> Seq("Рок") ) ) )
val hardRock = db.save( Genre( Map( en -> Seq("Hard Rock"),
ru -> Seq("Тяжёлый рок",
"Тяжелый рок") ) ) )
val metal = db.save( Genre( Map( en -> Seq("Metal"),
ru -> Seq("Метал") ) ) )
val grunge = db.save( Genre( Map( en -> Seq("Grunge"),
ru -> Seq("Грандж") ) ) )
// create artists:
db.save( Artist( Map( en -> Seq("Metallica"),
ru -> Seq("Металика", "Металлика") ),
Set( metal, rock, hardRock ) ) )
db.save( Artist( Map( en -> Seq("Nirvana"),
ru -> Seq("Нирвана") ),
Set( rock, hardRock, grunge ) ) )
db.save( Artist( Map( en -> Seq("Kino"),
ru -> Seq("Кино") ),
Set( rock ) ) )
db.save( Artist( Map( en -> Seq("The Rolling Stones",
"Rolling Stones",
"Rolling Stones, The"),
ru -> Seq("Ролинг Стоунз",
"Роллинг Стоунз",
"Роллинг Стоунс",
"Ролинг Стоунс") ),
Set( rock ) ) )
db.save( Artist( Map( en -> Seq("Dire Straits"),
ru -> Seq("Даэр Стрэйтс") ),
Set( rock ) ) )
db.save( Artist( Map( en -> Seq("Godsmack"),
ru -> Seq("Годсмэк") ),
Set( metal, hardRock, rock ) ) )
// All artists having a genre equaling to the value of the `metal` variable,
// which we've previously declared.
// The result type is `Stream[Artist with Persisted]`
val metalArtists = db.query[Artist].whereContains("genres", metal).fetch()
// All artists having a genre that contains "Hard Rock" of a locale with a
// code "en" in a list of its names.
// The result type is `Stream[Artist with Persisted]`
val hardRockArtists
= db.query[Artist]
.whereEqual("genres.item.names.value.item", "Hard Rock")
.whereEqual("genres.item.names.key.code", "en")
.fetch()
test(dbId + " - metal artists"){
metalArtists.flatMap(_.names.values.flatten) should (
contain("Metallica") and contain("Godsmack") and not contain("Kino")
)
}
test(dbId + " - hard rock artists"){
hardRockArtists.flatMap(_.names.values.flatten) should (
contain("Nirvana") and contain("Metallica") and contain("Godsmack")
and not contain("Dire Straits")
)
}
}
}
object TutorialSuite {
case class Artist
( names : Map[Locale, Seq[String]],
genres : Set[Genre] )
case class Genre
( names : Map[Locale, Seq[String]] )
case class Locale
( code : String )
}
|
pjfanning/sorm
|
src/test/scala/sorm/test/general/TutorialSuite.scala
|
Scala
|
mit
| 3,829
|
package org.locationtech.geomesa.plugin
import org.apache.wicket.behavior.SimpleAttributeModifier
import org.apache.wicket.markup.html.form.{Form, FormComponent}
import org.apache.wicket.markup.html.panel.Panel
import org.apache.wicket.model.{IModel, ResourceModel}
import org.geoserver.web.data.store.StoreEditPanel
import org.geoserver.web.data.store.panel.{ParamPanel, PasswordParamPanel, TextParamPanel}
import org.geoserver.web.util.MapModel
import org.geotools.data.DataAccessFactory.Param
/*
* Copyright 2013 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
abstract class GeoMesaStoreEditPanel (componentId: String, storeEditForm: Form[_])
extends StoreEditPanel(componentId, storeEditForm) {
def addTextPanel(paramsModel: IModel[_], param: Param): FormComponent[_] = {
val paramName = param.key
val resourceKey = getClass.getSimpleName + "." + paramName
val required = param.required
val textParamPanel =
new TextParamPanel(paramName,
new MapModel(paramsModel, paramName).asInstanceOf[IModel[_]],
new ResourceModel(resourceKey, paramName), required)
addPanel(textParamPanel, param, resourceKey)
}
def addPasswordPanel(paramsModel: IModel[_], param: Param): FormComponent[_] = {
val paramName = param.key
val resourceKey = getClass.getSimpleName + "." + paramName
val required = param.required
val passParamPanel =
new PasswordParamPanel(paramName,
new MapModel(paramsModel, paramName).asInstanceOf[IModel[_]],
new ResourceModel(resourceKey, paramName), required)
addPanel(passParamPanel, param, resourceKey)
}
def addPanel(paramPanel: Panel with ParamPanel, param: Param, resourceKey: String): FormComponent[_] = {
paramPanel.getFormComponent.setType(classOf[String])
val defaultTitle = String.valueOf(param.description)
val titleModel = new ResourceModel(resourceKey + ".title", defaultTitle)
val title = String.valueOf(titleModel.getObject)
paramPanel.add(new SimpleAttributeModifier("title", title))
add(paramPanel)
paramPanel.getFormComponent
}
}
|
jwkessi/geomesa
|
geomesa-plugin/src/main/scala/org/locationtech/geomesa/plugin/GeoMesaStoreEditPanel.scala
|
Scala
|
apache-2.0
| 2,641
|
package wdl
import better.files.File
import org.scalatest.{FlatSpec, Matchers}
class WdlWorkflowImportsSpec extends FlatSpec with Matchers {
def addAndGetFile(name: String, source: String): String = {
val tempFile = File.newTemporaryFile(s"$name", ".wdl", Option(wdlDirectory)) write source
tempFile.name
}
private val wdlDirectory = File.newTemporaryDirectory("imports_dir")
val echoHelloWdl =
"""
|task inItalian {
| String x = "ciao"
| command {
| echo "${x}"
| }
|}
|
|task inSpanish {
| String x = "hola"
| command {
| echo "${x}"
| }
|}
|
|task inFrench {
| String x = "bonjour"
| command {
| echo "${x}"
| }
|}
|
|workflow echoHello {}
""".stripMargin
val echoHelloWdlFile = addAndGetFile("echoHello", echoHelloWdl)
val basicWdl =
s"""
|import "$echoHelloWdlFile"
""".stripMargin +
"""
|
|task ls {
| command {
| ls -l
| }
| output {
| String fileList = read_string(stdout())
| }
|}
|
|task pwd {
| command {
| pwd
| }
| output {
| String current = read_string(stdout())
| }
|}
|
|workflow basic {
| call ls
| call pwd
| output {
| ls.fileList
| }
|}
""".stripMargin
val printNumsWdl =
s"""
|import "$echoHelloWdlFile" as multilingualEcho
|
""".stripMargin +
"""
|task ls {
| command {
| ls
| }
|}
|
|
|task print1 {
| Int x = 10
| command {
| for i in `seq 1 ${x}`
| do
| echo $i
| done
| }
|}
|
|task print2 {
| Int x = 20
| command {
| for i in `seq 1 ${x}`
| do
| echo $i
| done
| }
|}
|
|task print3 {
| Int x = 30
| command {
| for i in `seq 1 ${x}`
| do
| echo $i
| done
| }
|}
""".stripMargin
val snoozeWdl =
"""
|task sleep {
| command { sleep 1 }
|
|}
|task sleep2 {
| command { sleep 2 }
|}
|
|task sleep3 {
| command { sleep 3 }
|}
""".stripMargin
val threeStepWdl =
"""
|task ps {
| command {
| ps
| }
| output {
| File procs = stdout()
| }
|}
|
|task cgrep {
| String pattern
| File in_file
| command {
| grep '${pattern}' ${in_file} | wc -l
| }
| output {
| Int count = read_int(stdout())
| }
| runtime {docker: "ubuntu:latest"}
|}
|
|task wc {
| File in_file
| command {
| cat ${in_file} | wc -l
| }
| output {
| Int count = read_int(stdout())
| }
| runtime {docker: "ubuntu:latest"}
|}
|
|workflow three_step {
| call ps
| call cgrep { input: in_file=ps.procs }
| call wc { input: in_file=ps.procs }
|}
""".stripMargin
val threeStepWdlWithImports =
s"""
|import "$echoHelloWdlFile" as funEcho
|
""".stripMargin + threeStepWdl
val basicWdlImportFile = addAndGetFile("basic", basicWdl)
val threeStepWdlImportFile = addAndGetFile("threestep", threeStepWdlWithImports)
val snoozeWdlImportFile = addAndGetFile("snooze", snoozeWdl)
val printNumsWdlImportFile = addAndGetFile("printNums", printNumsWdl)
def noExtension(fileName: String) = fileName.replace(".wdl","")
val imports =
s"""
|import "$basicWdlImportFile"
|import "$threeStepWdlImportFile" as classicThreeStep
|import "$snoozeWdlImportFile" as trySleep
|import "$printNumsWdlImportFile"
|
""".stripMargin
val primaryWorkflow =
s"""
|
|task testCaseTask {
| command {
| echo "Ruchi's birthday: 01/19"
| }
|}
|
|workflow testCases {
| call ${noExtension(basicWdlImportFile)}.ls as ls1
| call ${noExtension(printNumsWdlImportFile)}.ls as ls2
|
| #call ${noExtension(basicWdlImportFile)}.pwd as soBasic
|
| #call ${noExtension(printNumsWdlImportFile)}.print1
| #call ${noExtension(printNumsWdlImportFile)}.print2 as printingFun
|
|
| call classicThreeStep.ps
| call classicThreeStep.ps as psAgain
|
| call trySleep.sleep
| call trySleep.sleep2 as sleepMore
|
| call ${noExtension(basicWdlImportFile)}.${noExtension(echoHelloWdlFile)}.inFrench
| call classicThreeStep.funEcho.inSpanish
| call classicThreeStep.funEcho.inSpanish as inPortugese
| call ${noExtension(printNumsWdlImportFile)}.multilingualEcho.inItalian
|
|}
|""".stripMargin
val wdlWithImports = imports + primaryWorkflow
val namespace = {
val resolvers: Seq[ImportResolver] = Seq(WdlNamespace.directoryResolver(wdlDirectory), WdlNamespace.fileResolver)
WdlNamespaceWithWorkflow.load(wdlWithImports, resolvers).get
}
"WDL file with imports" should "Have 1 task (remaining tasks are in separate namespaces)" in {
namespace.tasks.size shouldEqual 1
}
it should "Have 4 imported WdlNamespaces" in {
namespace.namespaces.size shouldEqual 4
}
it should "import a WDL file (with alias) and be able to reference its tasks by FQN" in {
namespace.resolve("classicThreeStep.ps").size shouldEqual 1
}
it should "import a WDL file (with no alias) and be able to reference its tasks by FQN" in {
namespace.resolve(s"${noExtension(printNumsWdlImportFile)}.print1").size shouldEqual 1
}
it should "import two WDL file (with clashing task names) and be able to reference all tasks by FQN" in {
val clashingTaskNames = Seq(namespace.resolve(s"${noExtension(basicWdlImportFile)}.ls"),
namespace.resolve(s"${noExtension(printNumsWdlImportFile)}.ls"))
clashingTaskNames.size shouldEqual 2
}
def deleteTempFiles() = wdlDirectory.delete(swallowIOExceptions = true)
deleteTempFiles()
}
|
ohsu-comp-bio/cromwell
|
wdl/src/test/scala/wdl/WdlWorkflowImportsSpec.scala
|
Scala
|
bsd-3-clause
| 6,302
|
package eu.shiftforward.icfpc2015.model
import scala.collection.mutable
case class PowerPhrase(text: List[Char]) {
val movements = text.map(Command.char)
val length = text.length
}
object PowerPhrase {
def apply(text: String): PowerPhrase = new PowerPhrase(text.toList)
val knownPhrases = List(
PowerPhrase("Ei!"), // from statement
PowerPhrase("Ia! Ia!"), // from problem 3 grid
PowerPhrase("R'lyeh"), // from problem 5 grid
PowerPhrase("Yuggoth"), // from problem 7 grid
PowerPhrase("Tsathoggua"), // https://twitter.com/ICFPContest2015/status/630300070236139520
PowerPhrase("Yoyodyne"), // https://twitter.com/ICFPContest2015/status/630393114331459588, https://en.wikipedia.org/wiki/The_Adventures_of_Buckaroo_Banzai_Across_the_8th_Dimension
PowerPhrase("Blue Hades"), // https://twitter.com/ICFPContest2015/status/629956402031624192, https://laundry-game.obsidianportal.com/wikis/blue-hades
PowerPhrase("The Laundry"),
PowerPhrase("Case Nightmare Green")) // https://twitter.com/ICFPContest2015/status/630645065304576000
def getMatchings(source: Seq[Command], powerphrases: Seq[PowerPhrase]): Map[PowerPhrase, List[Int]] = {
var matching = List[(Int, PowerPhrase)]()
var matched = Map[PowerPhrase, List[Int]]()
for (i <- source.indices) {
val command = source(i)
powerphrases.foreach { p =>
matching = matching :+ (0, p)
}
matching = matching.foldLeft(List[(Int, PowerPhrase)]()) {
case (acc, (idx, power)) =>
if (command.action == power.movements(idx).action) {
if (idx + 1 == power.movements.length) {
val startingIdx = i - power.movements.length + 1
matched = matched.updated(power, matched.getOrElse(power, List[Int]()) :+ startingIdx)
acc
} else {
acc :+ (idx + 1, power)
}
} else {
acc
}
}
}
matched
}
def flatten(sourceLength: Int, matchings: Map[PowerPhrase, List[Int]]): Map[Int, PowerPhrase] = {
val sortedMatches = matchings.toList.sortWith {
case ((powerA, idxsA), (powerB, idxsB)) =>
if (idxsA.length == idxsB.length)
powerA.movements.length > powerB.movements.length
else
idxsA.length > idxsB.length
}
val matchingsSortedNoOverlaps = sortedMatches.map {
case (power, idxs) =>
val movementLength = power.movements.length
val newIndexes = idxs.tail.foldLeft(List(idxs.head)) {
case (acc, i) =>
if (acc.last + movementLength <= i) {
acc :+ i
} else {
acc
}
}
(power, newIndexes)
}
var freePositions = List.fill(sourceLength)(true)
matchingsSortedNoOverlaps.foldLeft(Map[Int, PowerPhrase]()) {
case (acc, (power, idxs)) =>
        val successfulPlacements = mutable.ArrayBuffer[(Int, PowerPhrase)]()
        idxs foreach { i =>
          val valid = freePositions.slice(i, i + power.length - 1).reduce(_ && _)
          if (valid) {
            successfulPlacements += (i -> power)
            for (j <- 0 until power.length) {
              freePositions = freePositions.updated(i + j, false)
            }
          }
        }
        acc ++ successfulPlacements.toMap
}
}
def getBestString(source: Seq[Command], powerPhrases: Seq[PowerPhrase] = knownPhrases): String = {
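    // Walk the command sequence, emitting a matched power phrase verbatim at its recorded
    // starting index and falling back to the command's own character everywhere else.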
val matchings = getMatchings(source, powerPhrases)
val finalMatchings = flatten(source.length, matchings)
var result = ""
var i = 0
while (i < source.length) {
finalMatchings.get(i) match {
case None =>
result += source(i).ch
i += 1
case Some(power) =>
result += power.text.mkString("")
i += power.text.length
}
}
result
}
}
|
ShiftForward/icfpc2015
|
src/main/scala/eu/shiftforward/icfpc2015/model/PowerPhrase.scala
|
Scala
|
mit
| 3,873
|
package com.example
object Decoration extends App {
trait Logger {
def debug(msg: String)
def error(msg: String)
}
def log[A, B](logger: Logger)(fn: A => B): A => B = {
input =>
try {
logger.debug(s"Start. <= $input")
val start = System.nanoTime
val output = fn(input)
val time = (System.nanoTime - start) / 1000000
logger.debug(s"End $time ms. => $output")
output
} catch {
case e: Exception =>
logger.error(e.toString)
throw e
}
}
trait DB {
def begin
def commit
def rollback
def select(sql: String)
}
class TestDB extends DB {
def begin = println("[begin]")
def commit = println("[commit]")
def rollback = println("[rollback]")
def select(sql: String) = println(s"[select] $sql")
}
def transaction[T](fn: DB => T): DB => T = {
db =>
try {
db.begin
val result = fn(db)
db.commit
result
} catch {
case e: Exception =>
db.rollback
throw e
}
}
class TestLogger extends Logger {
def debug(msg: String) = println(s"[Debug] $msg")
def error(msg: String) = println(s"[Error] $msg")
}
def testLogger = new TestLogger
def testDB = new TestDB
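  // selectPerson composes the two decorators: log(testLogger) wraps the DB => String
  // function produced by transaction, so the logged timing also covers begin/commit
  // (or rollback when the block throws).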
val selectPerson = log(testLogger) {
transaction {
db =>
db.select("select * from person")
Thread.sleep(103)
//throw new RuntimeException("Error")
"Alex Smith"
}
}
selectPerson(testDB)
}
|
enpassant/miniatures
|
src/main/scala/com/example/Composition.scala
|
Scala
|
apache-2.0
| 1,539
|
package com.gx.chain.oo
/**
* Copyright 2017 josephguan
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Handler Interface
*/
abstract class RequestHandler(next: RequestHandler) {
def handleRequest(req: Request): Unit = {
if (next != null) {
next.handleRequest(req)
} else {
println("No one is responsible for this request.")
}
}
}
/**
* Developer is a concrete handler
*/
class Developer(next: RequestHandler) extends RequestHandler(next) {
override def handleRequest(req: Request): Unit = req match {
case FixBugRequest(desc) =>
println(s"I am a developer. I can fix this bug: $desc")
req.markHandled()
case _ =>
super.handleRequest(req)
}
}
/**
* Architect is a concrete handler
*/
class Architect(next: RequestHandler) extends RequestHandler(next) {
override def handleRequest(req: Request): Unit = req match {
case FeatureRequest(desc) =>
println(s"I am an architect. I can implement this feature: $desc")
req.markHandled()
case _ =>
super.handleRequest(req)
}
}
/**
* CTO is a concrete handler
*/
class CTO(next: RequestHandler) extends RequestHandler(next) {
override def handleRequest(req: Request): Unit = req match {
case ProductRequest(desc) =>
println(s"I am a CTO. I can make this product: $desc")
req.markHandled()
case _ =>
super.handleRequest(req)
}
}
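// A minimal usage sketch, not part of the original file: it assumes the Request,
// FixBugRequest, FeatureRequest and ProductRequest types referenced by the handlers
// above are defined elsewhere in this package, exactly as they are used here.
object ChainSketch {
  def demo(): Unit = {
    // Build the chain developer -> architect -> CTO; anything unhandled falls off the end.
    val chain = new Developer(new Architect(new CTO(null)))
    chain.handleRequest(FixBugRequest("NPE on login"))      // handled by Developer
    chain.handleRequest(FeatureRequest("dark mode"))        // handled by Architect
    chain.handleRequest(ProductRequest("a new mobile app")) // handled by CTO
  }
}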
|
josephguan/scala-design-patterns
|
behavioral/chain-of-responsibility/src/main/scala/com/gx/chain/oo/RequestHandler.scala
|
Scala
|
apache-2.0
| 1,946
|
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle
// scalastyle:off magic.number multiple.string.literals
import org.junit.Test
import org.scalastyle.file.CheckerTest
import org.scalastyle.file.FileLengthChecker
class CommentFilterDisabledTest extends CheckerTest {
val key = "file.size.limit"
val classUnderTest = classOf[FileLengthChecker]
@Test def testOne(): Unit = {
val source = """
// scalastyle:off
package foobar
object Foobar {
}
object Barbar {
}
"""
assertErrors(List(fileError(List("5"))), source, Map("maxFileLength" -> "5"), commentFilter = false)
}
}
|
scalastyle/scalastyle
|
src/test/scala/org/scalastyle/CommentFilterDisabledTest.scala
|
Scala
|
apache-2.0
| 1,303
|
package com.airbnb.scheduler.api
import java.util.logging.{Level, Logger}
import javax.ws.rs._
import javax.ws.rs.core.{MediaType, Response}
import javax.ws.rs.core.Response.Status
import scala.Array
import com.airbnb.scheduler.config.SchedulerConfiguration
import scala.collection.mutable.ListBuffer
import com.airbnb.scheduler.jobs._
import com.airbnb.scheduler.graph.JobGraph
import com.google.inject.Inject
import com.codahale.metrics.annotation.Timed
import com.fasterxml.jackson.databind.ObjectMapper
import scala.collection.JavaConversions._
/**
 * The REST API for the PerformanceResource component.
 * Returns a list of jobs, sorted by their percentile run times.
 *
 * @author Matt Redmond (matt.redmond@airbnb.com)
*/
@Path(PathConstants.allStatsPath)
@Produces(Array(MediaType.APPLICATION_JSON))
class StatsResource @Inject()(
val jobScheduler: JobScheduler,
val jobGraph: JobGraph,
val configuration: SchedulerConfiguration,
val jobMetrics: JobMetrics) {
private[this] val log = Logger.getLogger(getClass.getName)
@Timed
@GET
// Valid arguments are
// /scheduler/stats/99thPercentile
// /scheduler/stats/98thPercentile
// /scheduler/stats/95thPercentile
// /scheduler/stats/75thPercentile
// /scheduler/stats/median
// /scheduler/stats/mean
def getPerf(@PathParam("percentile") percentile: String): Response = {
try {
var output = ListBuffer[Map[String, Any]]()
var jobs = ListBuffer[(String, Double)]()
val mapper = new ObjectMapper()
for (jobNameString <- jobGraph.dag.vertexSet()) {
val node = mapper.readTree(jobMetrics.getJsonStats(jobNameString))
if (node.has(percentile) && node.get(percentile) != null) {
val time = node.get(percentile).asDouble()
jobs.append((jobNameString, time))
}
}
jobs = jobs.sortBy(_._2).reverse
for ( (jobNameString, time) <- jobs) {
val myMap = Map("jobNameLabel" -> jobNameString, "time" -> time / 1000.0)
output.append(myMap)
}
Response.ok(output).build
} catch {
case ex: Exception => {
log.log(Level.WARNING, "Exception while serving request", ex)
throw new WebApplicationException(Status.INTERNAL_SERVER_ERROR)
}
}
}
}
|
doronin/chronos
|
src/main/scala/com/airbnb/scheduler/api/StatsResource.scala
|
Scala
|
apache-2.0
| 2,405
|
package org.igye.jfxutils.dialog
import org.junit.{Assert, Test}
class TextFieldVarNameAutocompleteTest {
@Test
def extractPartsAndFilterTest(): Unit = {
var res = TextFieldVarNameAutocomplete.extractPartsAndFilter("text1${varname}text2", 8)
Assert.assertEquals("text1${", res.left)
Assert.assertEquals("v", res.filter)
Assert.assertEquals("}text2", res.right)
res = TextFieldVarNameAutocomplete.extractPartsAndFilter("text1${v}text2", 8)
Assert.assertEquals("text1${", res.left)
Assert.assertEquals("v", res.filter)
Assert.assertEquals("}text2", res.right)
res = TextFieldVarNameAutocomplete.extractPartsAndFilter("text1${}text2", 7)
Assert.assertEquals("text1${", res.left)
Assert.assertEquals("", res.filter)
Assert.assertEquals("}text2", res.right)
res = TextFieldVarNameAutocomplete.extractPartsAndFilter("text1$}text2", 7)
Assert.assertEquals("text1$}", res.left)
Assert.assertEquals("", res.filter)
Assert.assertEquals("text2", res.right)
res = TextFieldVarNameAutocomplete.extractPartsAndFilter("text1$}text2", 6)
Assert.assertEquals("text1$", res.left)
Assert.assertEquals("", res.filter)
Assert.assertEquals("}text2", res.right)
res = TextFieldVarNameAutocomplete.extractPartsAndFilter("text1$}text2", 5)
Assert.assertEquals("text1", res.left)
Assert.assertEquals("", res.filter)
Assert.assertEquals("$}text2", res.right)
res = TextFieldVarNameAutocomplete.extractPartsAndFilter("abcd", 1)
Assert.assertEquals("a", res.left)
Assert.assertEquals("", res.filter)
Assert.assertEquals("bcd", res.right)
}
}
|
Igorocky/jfxutils
|
src/test/scala/org/igye/jfxutils/dialog/TextFieldVarNameAutocompleteTest.scala
|
Scala
|
mit
| 1,757
|
package grammarcomp
/**
* File: GrammarComparator.scala
* Date: 20/5/2013
* Author: Mikaël Mayer
* Purpose: Compares two grammars
*/
package repair
import grammar._
import CFGrammar._
import EBNFGrammar._
import generators._
import parsing._
import equivalence._
import utils._
import scala.collection.mutable.ListBuffer
import scala.collection.mutable.MultiMap
import scala.collection.mutable.HashMap
object RepairResult {
sealed trait CNFFeedback[T]
sealed trait GrammarFeedback[T]
sealed trait BNFFeedback
case class NoRepair[T](reason: String) extends GrammarFeedback[T] with CNFFeedback[T] with BNFFeedback {
override def toString = reason
}
//CNF repair feedback
case class CNFRemove[T](r: Rule[T], repairRule : Option[Rule[T]] = None) extends CNFFeedback[T] {
override def toString = " Remove rule " + r
}
case class CNFAdd[T](l: List[Rule[T]]) extends CNFFeedback[T] {
override def toString = " Add rules " + l.mkString("\\n")
}
case class CNFSplit[T](old: Rule[T], news: List[Rule[T]], repRule: Rule[T]) extends CNFFeedback[T] {
override def toString = " Replace \\n" + old + "\\n by \\n" + news.mkString("\\n")
}
case class CNFExpand[T](old: Rule[T], news: List[Rule[T]]) extends CNFFeedback[T] {
override def toString = " Expand \\n" + old + "\\n as \\n" + news.mkString("\\n")
}
//Grammar feedback
case class AddAllRules[T](l: List[Rule[T]]) extends GrammarFeedback[T] {
override def toString = " Add rules " + rulesToStr(l)
}
case class RemoveRules[T](l: List[Rule[T]], repRules : Option[List[Rule[T]]] = None) extends GrammarFeedback[T] {
override def toString = " Remove rules " + rulesToStr(l)
}
case class RefineRules[T](olds: List[Rule[T]], news: List[Rule[T]], repRules: List[Rule[T]]) extends GrammarFeedback[T] {
override def toString = " Refine \\n" + rulesToStr(olds) + "\\n by replacing it by \\n" + rulesToStr(news)
}
case class ExpandRules[T](olds: List[Rule[T]], news: List[Rule[T]]) extends GrammarFeedback[T] {
override def toString = " Expand \\n" + rulesToStr(olds) + "\\n by replacing it by \\n" + rulesToStr(news)
}
def cnfToGrammarFeedbacks[T](oldg: Grammar[T], newg: Grammar[T], cnfFBs: List[CNFFeedback[T]]) = cnfFBs map {
case CNFAdd(rules) =>
val newrules = CNFConverter.removeCNFNonterminals(newg, rules)
AddAllRules(newrules)
case CNFRemove(rule, None) =>
val newrules = CNFConverter.removeCNFNonterminals(oldg, List(rule))
RemoveRules(newrules)
case CNFRemove(rule, Some(repRule)) =>
val newrules = CNFConverter.removeCNFNonterminals(oldg, List(rule))
val repRules = CNFConverter.removeCNFNonterminals(oldg, List(repRule))
RemoveRules(newrules, Some(repRules))
case CNFSplit(iold, inews, repRule) =>
val nolds = CNFConverter.removeCNFNonterminals(oldg, List(iold))
val nnews = CNFConverter.removeCNFNonterminals(newg, inews)
val nreps = CNFConverter.removeCNFNonterminals(newg, List(repRule))
RefineRules(nolds, nnews, nreps)
case CNFExpand(iold, inews) =>
val nolds = CNFConverter.removeCNFNonterminals(oldg, List(iold))
val nnews = CNFConverter.removeCNFNonterminals(newg, inews)
ExpandRules(nolds, nnews)
case n@NoRepair(_) => n
}
//BNF feedback
//TODO: do not know how to generate this
/*case class AddAllBNFRules(l: List[BNFRule], s: String) extends BNFFeedback {
override def toString = s + l.mkString("\\n")
}
case class RemoveBNFRules(l: List[BNFRule], s: String) extends BNFFeedback {
override def toString = s + l.mkString("\\n")
}
case class ReplaceBNFRules(old: List[BNFRule], nu: List[BNFRule], s: String) extends BNFFeedback {
override def toString = s + " replace \\n" + old.mkString("\\n") + "\\n by \\n" + nu.mkString("\\n")
}*/
/*def toBNFFeedback(feedback: List[CNFFeedback], cnfG: Grammar) = {
val genSymbols = CNFConverter.generatedSymbols
val cnfContext = CNFConverter.enclosingContext
val genRegexp = BNFConverter.generatedRegExp
val bnfContext = BNFConverter.enclosingContext
//println("Generated Regexp: "+genRegexp.mkString("\\n"))
//println("Generated Symbols: "+genSymbols.mkString("\\n"))
def cnfToGrammarRule(l: Rule[T]): Rule[T] = {
val rightSide = l.rightSide.flatMap {
case k: Nonterminal => genSymbols.getOrElse(k, List(k))
case k => List(k)
}
val leftSide = l.leftSide
cnfContext.getOrElse(leftSide, (l: List[Symbol]) => Rule[T](leftSide, rightSide))(rightSide)
}
def grammarToBNFrule(l: Rule[T]): BNFRule = {
val rightSide = RegConcat(l.rightSide.map {
case k: Symbol => {
val re = genRegexp.getOrElse[RegExp](k, RegId(k.name): RegExp)
//println("Regex generated: "+re+" contains: "+genRegexp.contains(k))
re
}
case k => RegId(k.name)
})
//println("Generated right side for rule: "+ rightSide)
val leftSide = l.leftSide
bnfContext.getOrElse(leftSide, (k: RegExp) => BNFRule(RegId(leftSide.name), k))(rightSide)
}
val finalfeedback: List[BNFFeedback] = for (f <- feedback.toList) yield {
f match {
case c @ Comment(s) => c: BNFFeedback
case AddAllRules(l, comment, d) =>
val bnfFixes = (l map cnfToGrammarRule map grammarToBNFrule).distinct
AddAllBNFRules(bnfFixes, comment, "")
case RemoveRules(bad, comment, d) =>
val bnfFixes = (bad map cnfToGrammarRule map grammarToBNFrule).distinct
RemoveBNFRules(bnfFixes, comment, "")
case ReplaceRules(old, nu, comment) =>
val bnfOld = (old map cnfToGrammarRule map grammarToBNFrule).distinct
val bnfNew = (nu map cnfToGrammarRule map grammarToBNFrule).distinct
ReplaceBNFRules(bnfOld, bnfNew, comment)
}
}
finalfeedback.toList
}*/
}
|
epfl-lara/GrammarComparison
|
src/main/scala/grammarcomp/repair/RepairResult.scala
|
Scala
|
mit
| 6,001
|
package org.knora.webapi.twirl
import org.knora.webapi.IRI
/**
* Represents a standoff datatype class of an XML tag.
*
* @param datatype the IRI of the standoff datatype class.
* @param attributeName the XML attribute that holds the typed value.
* @param mappingStandoffDataTypeClassElementIri the IRI of the standoff datatype element (to be used to create the element in the triplestore).
*/
case class MappingStandoffDatatypeClass(datatype: IRI, attributeName: String, mappingStandoffDataTypeClassElementIri: IRI)
/**
* Represents an attribute of an XML tag.
*
* @param attributeName the name of the XML attribute.
* @param namespace the namespace of the XML attribute.
* @param standoffProperty the IRI of standoff property the XML attribute is mapped to.
* @param mappingXMLAttributeElementIri the IRI of the attribute element (to be used to create the element in the triplestore).
*/
case class MappingXMLAttribute(attributeName: String, namespace: String, standoffProperty: IRI, mappingXMLAttributeElementIri: IRI)
/**
* Represents an element of an XML to standoff mapping.
*
* @param tagName the name of the XML tag.
* @param namespace the namespace of the XML tag.
* @param className the classname of the XML tag.
* @param standoffClass the IRI of the standoff class the XML tag is mapped to.
* @param attributes the attributes of the XML tag.
* @param standoffDataTypeClass the standoff data type class of the xml tag.
* @param mappingElementIri the IRI of the mapping element (to be used to create the element in the triplestore).
*/
case class MappingElement(tagName: String, namespace: String, className: String, standoffClass: IRI, attributes: Seq[MappingXMLAttribute] = Seq.empty[MappingXMLAttribute], standoffDataTypeClass: Option[MappingStandoffDatatypeClass] = None, mappingElementIri: IRI, separatorRequired: Boolean)
|
musicEnfanthen/Knora
|
webapi/src/main/scala/org/knora/webapi/twirl/Mapping.scala
|
Scala
|
agpl-3.0
| 2,050
|
package com.outr.arango
import com.outr.arango.api.model.ArangoLinkFieldProperties
import scala.concurrent.{ExecutionContext, Future}
class View[D <: Document[D]](val name: String,
val includeAllFields: Boolean,
val fields: Map[Field[_], ArangoLinkFieldProperties],
val collection: Collection[D],
val analyzers: List[Analyzer]) {
lazy val arangoView: ArangoView = collection.graph.arangoDatabase.searchView(name)
collection.graph.add(this)
protected[arango] def create(createView: Boolean)(implicit ec: ExecutionContext): Future[Unit] = for {
_ <- if (createView) {
arangoView.create()
} else {
Future.successful(())
}
_ <- arangoView.update(
includeAllFields = includeAllFields,
links = Some(List(
ViewLink(
collectionName = collection.name,
fields = fields.map {
case (f, p) => f.fieldName -> p
},
analyzers = analyzers
)
)
))
} yield {
()
}
}
|
outr/arangodb-scala
|
driver/src/main/scala/com/outr/arango/View.scala
|
Scala
|
mit
| 1,098
|
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.web
import com.twitter.app.App
import com.twitter.conversions.time._
import com.twitter.finagle.http.HttpMuxer
import com.twitter.finagle.stats.{DefaultStatsReceiver, StatsReceiver}
import com.twitter.finagle.{Http, Service, Thrift}
import com.twitter.server.TwitterServer
import com.twitter.util.{Await, Future}
import com.twitter.zipkin.common.mustache.ZipkinMustache
import com.twitter.zipkin.gen.ZipkinQuery
import java.net.InetSocketAddress
import org.jboss.netty.handler.codec.http.{HttpRequest, HttpResponse}
trait ZipkinWebFactory { self: App =>
import Handlers._
private[this] val resourceDirs = Map(
"/public/css" -> "text/css",
"/public/img" -> "image/png",
"/public/js" -> "application/javascript",
"/public/templates" -> "text/plain"
)
val webServerPort = flag("zipkin.web.port", new InetSocketAddress(8080), "Listening port for the zipkin web frontend")
val webRootUrl = flag("zipkin.web.rootUrl", "http://localhost:8080/", "Url where the service is located")
val webCacheResources = flag("zipkin.web.cacheResources", false, "cache resources (mustache, static sources, etc)")
val webPinTtl = flag("zipkin.web.pinTtl", 30.days, "Length of time pinned traces should exist")
val queryDest = flag("zipkin.web.query.dest", "127.0.0.1:9411", "Location of the query server")
def newQueryClient(): ZipkinQuery[Future] =
Thrift.newIface[ZipkinQuery[Future]]("ZipkinQuery=" + queryDest())
def newWebServer(
queryClient: ZipkinQuery[Future] = newQueryClient(),
stats: StatsReceiver = DefaultStatsReceiver.scope("zipkin-web")
): Service[HttpRequest, HttpResponse] = {
ZipkinMustache.cache = webCacheResources()
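    // Every route below is folded into a single HttpMuxer: each handler is wrapped with
    // netty<->finagle conversion, per-path stats collection, page rendering, exception
    // handling and path checking, and mounted under the static prefix of its pattern.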
Seq(
("/public/", handlePublic(resourceDirs, webCacheResources())),
("/", addLayout(webRootUrl()) andThen handleIndex(queryClient)),
("/traces/:id", addLayout(webRootUrl()) andThen handleTraces),
("/static", addLayout(webRootUrl()) andThen handleStatic),
("/aggregates", addLayout(webRootUrl()) andThen handleAggregates),
("/api/query", handleQuery(queryClient)),
("/api/services", handleServices(queryClient)),
("/api/spans", requireServiceName andThen handleSpans(queryClient)),
("/api/top_annotations", requireServiceName andThen handleTopAnnotations(queryClient)),
("/api/top_kv_annotations", requireServiceName andThen handleTopKVAnnotations(queryClient)),
("/api/dependencies", handleDependencies(queryClient)),
("/api/dependencies/?:startTime/?:endTime", handleDependencies(queryClient)),
("/api/get/:id", handleGetTrace(queryClient)),
("/api/trace/:id", handleGetTrace(queryClient)),
("/api/is_pinned/:id", handleIsPinned(queryClient)),
("/api/pin/:id/:state", handleTogglePin(queryClient, webPinTtl()))
).foldLeft(new HttpMuxer) { case (m , (p, handler)) =>
val path = p.split("/").toList
val handlePath = path.takeWhile { t => !(t.startsWith(":") || t.startsWith("?:")) }
val suffix = if (p.endsWith("/") || p.contains(":")) "/" else ""
m.withHandler(handlePath.mkString("/") + suffix,
nettyToFinagle andThen
collectStats(stats.scope(handlePath.mkString("-"))) andThen
renderPage andThen
catchExceptions andThen
checkPath(path) andThen
handler)
}
}
}
object Main extends TwitterServer with ZipkinWebFactory {
def main() {
val server = Http.serve(webServerPort(), newWebServer(stats = statsReceiver.scope("zipkin-web")))
onExit { server.close() }
Await.ready(server)
}
}
|
dmmata/zipkin
|
zipkin-web/src/main/scala/com/twitter/zipkin/web/Main.scala
|
Scala
|
apache-2.0
| 4,198
|
// src/main/scala/progscala2/traits/observer/Observer.scala
package progscala2.traits.observer
trait Observer[-State] { // <1>
def receiveUpdate(state: State): Unit
}
trait Subject[State] { // <2>
private var observers: List[Observer[State]] = Nil // <3>
def addObserver(observer:Observer[State]): Unit = // <4>
observers ::= observer // <5>
def notifyObservers(state: State): Unit = // <6>
observers foreach (_.receiveUpdate(state))
}
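// A minimal usage sketch, not part of the original file: a Subject[Int] counter paired
// with a println-based Observer registered through addObserver.
object ObserverSketch {
  class Counter extends Subject[Int] {
    private var count = 0
    def increment(): Unit = { count += 1; notifyObservers(count) }
  }
  def demo(): Unit = {
    val counter = new Counter
    counter.addObserver(new Observer[Int] {
      def receiveUpdate(state: Int): Unit = println(s"count is now $state")
    })
    counter.increment() // prints "count is now 1"
  }
}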
|
sunilrebel/programming-scala
|
examples/src/main/scala/progscala2/traits/observer/observer.scala
|
Scala
|
mpl-2.0
| 646
|
package com.sksamuel.elastic4s.requests.searches
import scala.language.implicitConversions
case class Highlight(options: HighlightOptions, fields: Iterable[HighlightField])
trait HighlightApi {
def highlightOptions(): HighlightOptions = HighlightOptions()
def highlight(field: String): HighlightField = HighlightField(field)
}
|
stringbean/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/HighlightApi.scala
|
Scala
|
apache-2.0
| 334
|
package io.skysail.api.ddd
/**
 * DDD stands for "Domain Driven Design".
*
* This sealed trait defines various elements to characterize
* these concepts.
*/
// tag::methods[]
sealed trait DddElement
// end::methods[]
/**
*
* @tparam T the type of the id attribute for this entity, e.g. Int or String
*/
trait Entity[T] extends DddElement {
def id: Option[T]
override def hashCode(): Int = id.hashCode()
override def equals(obj: scala.Any): Boolean = {
if (!obj.isInstanceOf[Entity[T]]) {
return false
}
val otherEntity = obj.asInstanceOf[Entity[T]]
if (id == null || otherEntity.id == null) {
return false
}
if (id.isEmpty || otherEntity.id.isEmpty) {
return false
}
otherEntity.id == id
}
}
trait ValueObject extends DddElement
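// Illustrative sketch, not part of the original file: Entity equality is driven purely by
// the id value, and entities with undefined ids are never equal, not even to themselves.
object DddSketch {
  class Person(val id: Option[Int]) extends Entity[Int]
  def demo(): Unit = {
    assert(new Person(Some(1)) == new Person(Some(1))) // same defined id => equal
    assert(new Person(None) != new Person(None))       // missing ids => never equal
  }
}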
|
evandor/skysail-server
|
skysail.api/src/io/skysail/api/ddd/ddd.scala
|
Scala
|
apache-2.0
| 822
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.deeplearning4j.scalnet.layers.core
import org.deeplearning4j.nn.conf.layers.{ ActivationLayer => JActivationLayer }
import org.nd4j.linalg.activations.Activation
/**
* Activation layer
*
* @author Max Pumperla
*/
class ActivationLayer(activation: Activation,
nOut: Option[List[Int]],
nIn: Option[List[Int]],
override val name: String = "")
extends Layer {
override def compile: org.deeplearning4j.nn.conf.layers.Layer =
new JActivationLayer.Builder()
.activation(activation)
.name(name)
.build()
override val outputShape: List[Int] = nOut.getOrElse(List(0))
override val inputShape: List[Int] = nIn.getOrElse(List(0))
override def reshapeInput(newIn: List[Int]): ActivationLayer =
new ActivationLayer(activation, Some(newIn), Some(newIn), name)
}
object ActivationLayer {
def apply(activation: Activation, nOut: Int = 0, nIn: Int = 0, name: String = ""): ActivationLayer =
new ActivationLayer(activation, Some(List(nOut)), Some(List(nIn)), name)
}
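// Hypothetical usage sketch, not part of the original file: build a ReLU activation layer
// through the companion apply and materialize the underlying DL4J layer configuration.
object ActivationLayerSketch {
  def demo(): org.deeplearning4j.nn.conf.layers.Layer = {
    val relu = ActivationLayer(Activation.RELU, nOut = 10, nIn = 10, name = "relu_1")
    relu.compile // builds the wrapped org.deeplearning4j ActivationLayer
  }
}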
|
deeplearning4j/deeplearning4j
|
scalnet/src/main/scala/org/deeplearning4j/scalnet/layers/core/ActivationLayer.scala
|
Scala
|
apache-2.0
| 1,859
|
package djinni
import djinni.ast._
import djinni.generatorTools._
import djinni.meta._
class JavaMarshal(spec: Spec) extends Marshal(spec) {
val javaNullableAnnotation = spec.javaNullableAnnotation.map(pkg => '@' + pkg.split("\\\\.").last)
val javaNonnullAnnotation = spec.javaNonnullAnnotation.map(pkg => '@' + pkg.split("\\\\.").last)
override def typename(tm: MExpr): String = toJavaType(tm, None)
def typename(name: String, ty: TypeDef): String = idJava.ty(name)
override def fqTypename(tm: MExpr): String = toJavaType(tm, spec.javaPackage)
def fqTypename(name: String, ty: TypeDef): String = withPackage(spec.javaPackage, idJava.ty(name))
override def paramType(tm: MExpr): String = toJavaValueType(tm, None)
override def fqParamType(tm: MExpr): String = toJavaValueType(tm, spec.javaPackage)
override def returnType(ret: Option[TypeRef]): String = ret.fold("void")(ty => toJavaValueType(ty.resolved, None))
override def fqReturnType(ret: Option[TypeRef]): String = ret.fold("void")(ty => toJavaValueType(ty.resolved, spec.javaPackage))
override def fieldType(tm: MExpr): String = toJavaValueType(tm, None)
override def fqFieldType(tm: MExpr): String = toJavaValueType(tm, spec.javaPackage)
override def toCpp(tm: MExpr, expr: String): String = throw new AssertionError("direct java to cpp conversion not possible")
override def fromCpp(tm: MExpr, expr: String): String = throw new AssertionError("direct cpp to java conversion not possible")
def references(m: Meta): Seq[SymbolReference] = m match {
case o: MOpaque =>
o match {
case MList => List(ImportRef("java.util.ArrayList"))
case MSet => List(ImportRef("java.util.HashSet"))
case MMap => List(ImportRef("java.util.HashMap"))
case MDate => List(ImportRef("java.util.Date"))
case _ => List()
}
case e if isEnumFlags(e) => List(ImportRef("java.util.EnumSet"))
case _ => List()
}
val interfaceNullityAnnotation = if (spec.cppNnType.nonEmpty) javaNonnullAnnotation else javaNullableAnnotation
def nullityAnnotation(ty: Option[TypeRef]): Option[String] = ty.map(nullityAnnotation).getOrElse(None)
def nullityAnnotation(ty: TypeRef): Option[String] = {
ty.resolved.base match {
case MOptional => javaNullableAnnotation
case p: MPrimitive => None
case m: MDef => m.defType match {
case DInterface => interfaceNullityAnnotation
case DEnum => javaNonnullAnnotation
case DRecord => javaNonnullAnnotation
}
case e: MExtern => e.defType match {
case DInterface => interfaceNullityAnnotation
case DRecord => if(e.java.reference) javaNonnullAnnotation else None
case DEnum => javaNonnullAnnotation
}
case _ => javaNonnullAnnotation
}
}
def isReference(td: TypeDecl) = td.body match {
case i: Interface => true
case r: Record => true
case e: Enum => true
}
def isEnumFlags(m: Meta): Boolean = m match {
case MDef(_, _, _, Enum(_, true)) => true
case MExtern(_, _, _, Enum(_, true), _, _, _, _, _) => true
case _ => false
}
def isEnumFlags(tm: MExpr): Boolean = tm.base match {
case MOptional => isEnumFlags(tm.args.head)
case _ => isEnumFlags(tm.base)
}
private def toJavaValueType(tm: MExpr, packageName: Option[String]): String = {
val name = toJavaType(tm, packageName)
if(isEnumFlags(tm)) s"EnumSet<$name>" else name
}
private def toJavaType(tm: MExpr, packageName: Option[String]): String = {
def args(tm: MExpr) = if (tm.args.isEmpty) "" else tm.args.map(f(_, true)).mkString("<", ", ", ">")
def f(tm: MExpr, needRef: Boolean): String = {
tm.base match {
case MOptional =>
// HACK: We use "null" for the empty optional in Java.
assert(tm.args.size == 1)
val arg = tm.args.head
arg.base match {
case p: MPrimitive => p.jBoxed
case MOptional => throw new AssertionError("nested optional?")
case m => f(arg, true)
}
case e: MExtern => (if(needRef) e.java.boxed else e.java.typename) + (if(e.java.generic) args(tm) else "")
case o =>
val base = o match {
case p: MPrimitive => if (needRef) p.jBoxed else p.jName
case MString => "String"
case MDate => "Date"
case MBinary => "byte[]"
case MOptional => throw new AssertionError("optional should have been special cased")
case MList => "ArrayList"
case MSet => "HashSet"
case MMap => "HashMap"
case d: MDef => withPackage(packageName, idJava.ty(d.name))
case e: MExtern => throw new AssertionError("unreachable")
case p: MParam => idJava.typeParam(p.name)
}
base + args(tm)
}
}
f(tm, false)
}
private def withPackage(packageName: Option[String], t: String) = packageName.fold(t)(_ + "." + t)
}
|
happybits/djinni
|
src/source/JavaMarshal.scala
|
Scala
|
apache-2.0
| 4,978
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.security.AccessController
import java.util.Properties
import javax.security.auth.callback._
import javax.security.auth.Subject
import javax.security.auth.login.AppConfigurationEntry
import scala.collection.Seq
import kafka.server.KafkaConfig
import kafka.utils.TestUtils
import kafka.utils.JaasTestUtils._
import org.apache.kafka.common.config.SaslConfigs
import org.apache.kafka.common.config.internals.BrokerSecurityConfigs
import org.apache.kafka.common.network.Mode
import org.apache.kafka.common.security.auth._
import org.apache.kafka.common.security.plain.PlainAuthenticateCallback
import org.junit.jupiter.api.Assertions.assertTrue
import org.junit.jupiter.api.Test
object SaslPlainSslEndToEndAuthorizationTest {
class TestPrincipalBuilder extends KafkaPrincipalBuilder {
override def build(context: AuthenticationContext): KafkaPrincipal = {
val saslContext = context.asInstanceOf[SaslAuthenticationContext]
// Verify that peer principal can be obtained from the SSLSession provided in the context
// since we have enabled TLS mutual authentication for the listener
val sslPrincipal = saslContext.sslSession.get.getPeerPrincipal.getName
assertTrue(sslPrincipal.endsWith(s"CN=${TestUtils.SslCertificateCn}"), s"Unexpected SSL principal $sslPrincipal")
saslContext.server.getAuthorizationID match {
case KafkaPlainAdmin =>
new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "admin")
case KafkaPlainUser =>
new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "user")
case _ =>
KafkaPrincipal.ANONYMOUS
}
}
}
object Credentials {
val allUsers = Map(KafkaPlainUser -> "user1-password",
KafkaPlainUser2 -> KafkaPlainPassword2,
KafkaPlainAdmin -> "broker-password")
}
class TestServerCallbackHandler extends AuthenticateCallbackHandler {
def configure(configs: java.util.Map[String, _], saslMechanism: String, jaasConfigEntries: java.util.List[AppConfigurationEntry]): Unit = {}
def handle(callbacks: Array[Callback]): Unit = {
var username: String = null
for (callback <- callbacks) {
if (callback.isInstanceOf[NameCallback])
username = callback.asInstanceOf[NameCallback].getDefaultName
else if (callback.isInstanceOf[PlainAuthenticateCallback]) {
val plainCallback = callback.asInstanceOf[PlainAuthenticateCallback]
plainCallback.authenticated(Credentials.allUsers(username) == new String(plainCallback.password))
} else
throw new UnsupportedCallbackException(callback)
}
}
def close(): Unit = {}
}
class TestClientCallbackHandler extends AuthenticateCallbackHandler {
def configure(configs: java.util.Map[String, _], saslMechanism: String, jaasConfigEntries: java.util.List[AppConfigurationEntry]): Unit = {}
def handle(callbacks: Array[Callback]): Unit = {
val subject = Subject.getSubject(AccessController.getContext())
val username = subject.getPublicCredentials(classOf[String]).iterator().next()
for (callback <- callbacks) {
if (callback.isInstanceOf[NameCallback])
callback.asInstanceOf[NameCallback].setName(username)
else if (callback.isInstanceOf[PasswordCallback]) {
if (username == KafkaPlainUser || username == KafkaPlainAdmin)
callback.asInstanceOf[PasswordCallback].setPassword(Credentials.allUsers(username).toCharArray)
} else
throw new UnsupportedCallbackException(callback)
}
}
def close(): Unit = {}
}
}
// This test uses SASL callback handler overrides for server connections of Kafka broker
// and client connections of Kafka producers and consumers. Client connections from Kafka brokers
// used for inter-broker communication also use custom callback handlers. The second client used in
// the multi-user test SaslEndToEndAuthorizationTest#testTwoConsumersWithDifferentSaslCredentials uses
// static JAAS configuration with default callback handlers to test those code paths as well.
class SaslPlainSslEndToEndAuthorizationTest extends SaslEndToEndAuthorizationTest {
import SaslPlainSslEndToEndAuthorizationTest._
this.serverConfig.setProperty(s"${listenerName.configPrefix}${KafkaConfig.SslClientAuthProp}", "required")
this.serverConfig.setProperty(BrokerSecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, classOf[TestPrincipalBuilder].getName)
this.serverConfig.put(KafkaConfig.SaslClientCallbackHandlerClassProp, classOf[TestClientCallbackHandler].getName)
val mechanismPrefix = listenerName.saslMechanismConfigPrefix("PLAIN")
this.serverConfig.put(s"$mechanismPrefix${KafkaConfig.SaslServerCallbackHandlerClassProp}", classOf[TestServerCallbackHandler].getName)
this.producerConfig.put(SaslConfigs.SASL_CLIENT_CALLBACK_HANDLER_CLASS, classOf[TestClientCallbackHandler].getName)
this.consumerConfig.put(SaslConfigs.SASL_CLIENT_CALLBACK_HANDLER_CLASS, classOf[TestClientCallbackHandler].getName)
this.adminClientConfig.put(SaslConfigs.SASL_CLIENT_CALLBACK_HANDLER_CLASS, classOf[TestClientCallbackHandler].getName)
private val plainLogin = s"org.apache.kafka.common.security.plain.PlainLoginModule username=$KafkaPlainUser required;"
this.producerConfig.put(SaslConfigs.SASL_JAAS_CONFIG, plainLogin)
this.consumerConfig.put(SaslConfigs.SASL_JAAS_CONFIG, plainLogin)
this.adminClientConfig.put(SaslConfigs.SASL_JAAS_CONFIG, plainLogin)
override protected def kafkaClientSaslMechanism = "PLAIN"
override protected def kafkaServerSaslMechanisms = List("PLAIN")
override val clientPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "user")
override val kafkaPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "admin")
override def jaasSections(kafkaServerSaslMechanisms: Seq[String],
kafkaClientSaslMechanism: Option[String],
mode: SaslSetupMode,
kafkaServerEntryName: String): Seq[JaasSection] = {
val brokerLogin = PlainLoginModule(KafkaPlainAdmin, "") // Password provided by callback handler
val clientLogin = PlainLoginModule(KafkaPlainUser2, KafkaPlainPassword2)
Seq(JaasSection(kafkaServerEntryName, Seq(brokerLogin)),
JaasSection(KafkaClientContextName, Seq(clientLogin))) ++ zkSections
}
// Generate SSL certificates for clients since we are enabling TLS mutual authentication
// in this test for the SASL_SSL listener.
override def clientSecurityProps(certAlias: String): Properties = {
TestUtils.securityConfigs(Mode.CLIENT, securityProtocol, trustStoreFile, certAlias, TestUtils.SslCertificateCn,
clientSaslProperties, needsClientCert = Some(true))
}
/**
* Checks that secure paths created by broker and acl paths created by AclCommand
* have expected ACLs.
*/
@Test
def testAcls(): Unit = {
TestUtils.verifySecureZkAcls(zkClient, 1)
}
}
|
Chasego/kafka
|
core/src/test/scala/integration/kafka/api/SaslPlainSslEndToEndAuthorizationTest.scala
|
Scala
|
apache-2.0
| 7,758
|
package org.apache.spark.ml.util
import breeze.linalg._
import breeze.numerics._
import org.apache.spark.util.AccumulatorV2
import scala.collection.mutable.ArrayBuffer
/**
* @author sramirez
*/
class MatrixAccumulator(val rows: Int, val cols: Int, sparse: Boolean) extends AccumulatorV2[Matrix[Double], Matrix[Double]] {
def this(m: Matrix[Double]) = {
this(m.rows, m.cols, m.isInstanceOf[CSCMatrix[Double]])
this.accMatrix = m.copy
}
def this(rows: Int, cols: Int, cooMatrix: ArrayBuffer[(Int, Int, Double)]) = {
this(rows, cols, true)
val builder = new CSCMatrix.Builder[Double](rows = rows, cols = cols)
cooMatrix.foreach{ t => builder.add(t._1, t._2, t._3) }
this.accMatrix = builder.result
}
private var accMatrix: Matrix[Double] = if (sparse) CSCMatrix.zeros(rows, cols) else Matrix.zeros(rows, cols)
private var zero: Boolean = true
def reset(): Unit = {
accMatrix = if (sparse) CSCMatrix.zeros(rows, cols) else Matrix.zeros(rows, cols)
zero = true
}
def add(v: Matrix[Double]): Unit = {
if(isZero)
zero = false
accMatrix = accMatrix match {
case bsm: CSCMatrix[Double] => bsm += v.asInstanceOf[CSCMatrix[Double]]
case dsm: DenseMatrix[Double] => dsm += v
}
}
def isZero(): Boolean = zero
def merge(other: AccumulatorV2[Matrix[Double], Matrix[Double]]): Unit = add(other.value)
def value: Matrix[Double] = accMatrix
def copy(): AccumulatorV2[Matrix[Double], Matrix[Double]] = new MatrixAccumulator(accMatrix)
}
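// Hypothetical usage sketch, not part of the original file: it assumes a live SparkContext
// is available. The accumulator is registered, each RDD element adds a 2x2 identity matrix,
// and after the foreach action the merged value holds 4.0 on the diagonal.
object MatrixAccumulatorSketch {
  def demo(sc: org.apache.spark.SparkContext): Matrix[Double] = {
    val acc = new MatrixAccumulator(rows = 2, cols = 2, sparse = false)
    sc.register(acc, "matrix-sum")
    sc.parallelize(1 to 4).foreach(_ => acc.add(DenseMatrix.eye[Double](2)))
    acc.value
  }
}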
|
sramirez/spark-RELIEFFC-fselection
|
src/main/scala/org/apache/spark/ml/util/MatrixAccumulator.scala
|
Scala
|
apache-2.0
| 1,536
|
/*
*
* * Copyright 2015 Skymind,Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*
*/
package org.nd4j.api.linalg
import org.junit.runner.RunWith
import org.nd4j.api.linalg.DSL._
import org.nd4j.linalg.factory.Nd4j
import org.scalatest.junit.JUnitRunner
import org.scalatest.{FlatSpec, Matchers}
@RunWith(classOf[JUnitRunner])
class RichNDArraySpec extends FlatSpec with Matchers {
"RichNDArray" should "use the apply method to access values" in {
// -- 2D array
val nd2 = Nd4j.create(Array[Double](1, 2, 3, 4), Array(4, 1))
nd2(0) should be(1)
nd2(3, 0) should be(4)
// -- 3D array
val nd3 = Nd4j.create(Array[Double](1, 2, 3, 4, 5, 6, 7, 8), Array(2, 2, 2))
nd3(0, 0, 0) should be(1)
nd3(1, 1, 1) should be(8)
}
it should "use transpose abbreviation" in {
val nd1 = Nd4j.create(Array[Double](1, 2, 3), Array(3, 1))
nd1.shape should equal(Array(3, 1))
val nd1t = nd1.T
nd1t.shape should equal(Array(1,3))
}
it should "add correctly" in {
val a = Nd4j.create(Array[Double](1, 2, 3, 4, 5, 6, 7, 8), Array(2, 2, 2))
val b = a + 100
a(0, 0, 0) should be(1)
b(0, 0, 0) should be(101)
a += 1
a(0, 0, 0) should be(2)
}
it should "subtract correctly" in {
val a = Nd4j.create(Array[Double](1, 2, 3, 4, 5, 6, 7, 8), Array(2, 2, 2))
val b = a - 100
a(0, 0, 0) should be(1)
b(0, 0, 0) should be(-99)
a -= 1
a(0, 0, 0) should be(0)
val c = Nd4j.create(Array[Double](1, 2))
val d = c - c
d(0) should be(0)
d(1) should be(0)
}
it should "divide correctly" in {
val a = Nd4j.create(Array[Double](1, 2, 3, 4, 5, 6, 7, 8), Array(2, 2, 2))
val b = a / a
a(1, 1, 1) should be(8)
b(1, 1, 1) should be(1)
a /= a
a(1, 1, 1) should be(1)
}
it should "element-by-element multiply correctly" in {
val a = Nd4j.create(Array[Double](1, 2, 3, 4), Array(4, 1))
val b = a * a
    a(3) should be(4) // [1.0, 2.0, 3.0, 4.0]
b(3) should be(16) // [1.0 ,4.0 ,9.0 ,16.0]
a *= 5 // [5.0 ,10.0 ,15.0 ,20.0]
a(0) should be(5)
}
it should "use the update method to mutate values" in {
val nd3 = Nd4j.create(Array[Double](1, 2, 3, 4, 5, 6, 7, 8), Array(2, 2, 2))
nd3(0) = 11
nd3(0) should be(11)
val idx = Array(1, 1, 1)
nd3(idx) = 100
nd3(idx) should be(100)
}
it should "use === for equality comparisons" in {
val a = Nd4j.create(Array[Double](1, 2))
val b = Nd4j.create(Array[Double](1, 2))
val c = a === b
c(0) should be(1)
c(1) should be(1)
val d = Nd4j.create(Array[Double](10, 20))
val e = a === d
e(0) should be(0)
e(1) should be(0)
val f = a === 1 // === from our DSL
f(0) should be(1)
f(1) should be(0)
}
it should "use > for greater than comparisons" in {
val a = Nd4j.create(Array[Double](1, 3))
val b = a > 1
b(0) should be(0)
b(1) should be(1)
val c = Nd4j.create(Array[Double](2, 2))
val d = c > a
d(0) should be(1)
d(1) should be(0)
}
it should "use < for less than comparisons" in {
val a = Nd4j.create(Array[Double](1, 3))
val b = a < 2
b(0) should be(1)
b(1) should be(0)
val c = Nd4j.create(Array[Double](2, 2))
val d = c < a
d(0) should be(0)
d(1) should be(1)
}
it should "use - prefix for negation" in {
val a = Nd4j.create(Array[Double](1, 3))
val b = -a
b(0) should be(-1)
b(1) should be(-3)
}
}
|
phvu/nd4j
|
nd4j-scala-api/src/test/scala/org/nd4j/api/linalg/RichNDArraySpec.scala
|
Scala
|
apache-2.0
| 4,047
|
/*
Copyright 2013 Tomas Tauber
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.mathematics
import cascading.pipe.Pipe
import cascading.tuple.Fields
import com.twitter.scalding.TDsl._
import com.twitter.scalding._
import com.twitter.scalding.typed.{ValuePipe, EmptyValue, LiteralValue, ComputedValue}
import com.twitter.algebird.{ Semigroup, Monoid, Ring, Group, Field }
import scala.collection.mutable.Map
import scala.collection.mutable.HashMap
import cascading.flow.FlowDef
import java.io.Serializable
/**
* This is the future Matrix API. The old one will be removed in scalding 0.10.0 (or 1.0.0).
*
* Create Matrix2 instances with methods in the Matrix2 object.
* Note that this code optimizes the order in which it evaluates matrices, and replaces equivalent
* terms to avoid recomputation. Also, this code puts the parenthesis in the optimal place in
* terms of size according to the sizeHints. For instance:
* (A*B)*C == A*(B*C) but if B is a 10 x 10^6 matrix, and C is 10^6 x 100,
* it is better to do the B*C product first in order to avoid storing as much intermediate output.
*
* NOTE THIS REQUIREMENT: for each formula, you can only have one Ring[V] in scope. If you
* evaluate part of the formula with one Ring, and another part with another, you must go through
* a TypedPipe (call toTypedPipe) or the result may not be correct.
*/
sealed trait Matrix2[R, C, V] extends Serializable {
implicit def rowOrd: Ordering[R]
implicit def colOrd: Ordering[C]
val sizeHint: SizeHint = NoClue
def +(that: Matrix2[R, C, V])(implicit mon: Monoid[V]): Matrix2[R, C, V] = Sum(this, that, mon)
def -(that: Matrix2[R, C, V])(implicit g: Group[V]): Matrix2[R, C, V] = Sum(this, that.negate, g)
def unary_-(implicit g: Group[V]): Matrix2[R, C, V] = negate
def negate(implicit g: Group[V]): Matrix2[R, C, V]
/** Represents the pointwise, or Hadamard, product of two matrices.
*/
def #*#(that: Matrix2[R, C, V])(implicit ring: Ring[V]): Matrix2[R, C, V] = HadamardProduct(this, that, ring)
// Matrix product
def *[C2](that: Matrix2[C, C2, V])(implicit ring: Ring[V], mj: MatrixJoiner2): Matrix2[R, C2, V] =
Product(this, that, ring)
def *(that: Scalar2[V])(implicit ring: Ring[V], mode: Mode, flowDef: FlowDef, mj: MatrixJoiner2): Matrix2[R, C, V] = that * this
def /(that: Scalar2[V])(implicit field: Field[V], mode: Mode, flowDef: FlowDef): Matrix2[R, C, V] =
that divMatrix this
/** Convert the current Matrix to a TypedPipe
*/
def toTypedPipe: TypedPipe[(R, C, V)]
def transpose: Matrix2[C, R, V]
/**
   * Users should never need this. This is the current Matrix2, but in its most optimized
* form. Usually, you will just do matrix operations until you eventually call write
* or toTypedPipe
*/
def optimizedSelf: Matrix2[R, C, V] =
Matrix2.optimize(this.asInstanceOf[Matrix2[Any, Any, V]])._2.asInstanceOf[Matrix2[R, C, V]]
/** equivalent to multiplying this matrix by itself, power times */
def ^(power: Int)(implicit ev: =:=[R, C], ring: Ring[V], mj: MatrixJoiner2): Matrix2[R, R, V] = {
// it can possibly be pre-computed in an optimal way as g^k = ((g*g)*(g*g)...
// but it is handled in "optimize" in general, so that (g^k)*something works
assert(power > 0, "exponent must be >= 1")
val literal = this.asInstanceOf[Matrix2[R, R, V]]
if (power == 1) {
literal
} else {
literal * (literal ^ (power - 1))
}
}
// TODO: complete the rest of the API to match the old Matrix API (many methods are effectively on the TypedPipe)
def sumColVectors(implicit ring: Ring[V], mj: MatrixJoiner2): Matrix2[R, Unit, V] =
Product(this, OneC()(colOrd), ring)
/**
   * the result is the same as treating every cell where this matrix is true as the value 1,
   * so we just sum, using only a monoid on VecV, over the positions where this Matrix is true.
   * This is useful for graph propagation of monoids, such as sketches like HyperLogLog,
* BloomFilters or CountMinSketch.
* TODO This is a special kind of product that could be optimized like Product is
*/
def propagate[C2, VecV](vec: Matrix2[C, C2, VecV])(implicit ev: =:=[V, Boolean],
mon: Monoid[VecV],
mj: MatrixJoiner2): Matrix2[R, C2, VecV] = {
//This cast will always succeed:
lazy val joinedBool = mj.join(this.asInstanceOf[Matrix2[R, C, Boolean]], vec)
implicit val ord2: Ordering[C2] = vec.colOrd
lazy val resultPipe = joinedBool.flatMap { case (key, ((row, bool), (col2, v))) =>
if (bool) Some((row, col2), v) else None // filter early
}
.group // TODO we could be lazy with this group and combine with a sum
.sum
.filter { kv => mon.isNonZero(kv._2) }
.map { case ((r, c2), v) => (r, c2, v) }
MatrixLiteral(resultPipe, this.sizeHint)
}
def propagateRow[C2](mat: Matrix2[C, C2, Boolean])(implicit ev: =:=[R, Unit], mon: Monoid[V], mj: MatrixJoiner2): Matrix2[Unit, C2, V] =
mat.transpose.propagate(this.transpose.asInstanceOf[Matrix2[C, Unit, V]]).transpose
// Binarize values, all x != 0 become 1
def binarizeAs[NewValT](implicit mon: Monoid[V], ring: Ring[NewValT]): Matrix2[R, C, NewValT] = {
lazy val newPipe = toTypedPipe.map { case (r, c, x) =>
(r, c, if (mon.isNonZero(x)) { ring.one } else { ring.zero })
}
.filter { kv => ring.isNonZero(kv._3) }
MatrixLiteral(newPipe, this.sizeHint)
}
/** Row L2 normalization (can only be called for Double)
* After this operation, the sum(|x|^2) along each row will be 1.
*/
def rowL2Normalize(implicit ev: =:=[V, Double], mj: MatrixJoiner2): Matrix2[R, C, Double] = {
val matD = this.asInstanceOf[Matrix2[R, C, Double]]
lazy val result = MatrixLiteral(matD.toTypedPipe.map { case (r, c, x) => (r, c, x * x) }, this.sizeHint)
.sumColVectors
.toTypedPipe
.map { case (r, c, x) => (r, r, 1 / scala.math.sqrt(x)) } // diagonal + inverse
MatrixLiteral(result, SizeHint.asDiagonal(this.sizeHint.setRowsToCols)) * matD
}
def getRow(index: R): Matrix2[Unit, C, V] =
MatrixLiteral(
toTypedPipe
.filter { case (r, c, v) => Ordering[R].equiv(r, index) }
.map { case (r, c, v) => ((), c, v) }, this.sizeHint.setRows(1L)
)
def getColumn(index: C): Matrix2[R, Unit, V] =
MatrixLiteral(
toTypedPipe
.filter { case (r, c, v) => Ordering[C].equiv(c, index) }
.map { case (r, c, v) => (r, (), v) }, this.sizeHint.setCols(1L)
)
/** Consider this Matrix as the r2 row of a matrix. The current matrix must be a row,
* which is to say, its row type must be Unit.
*/
def asRow[R2](r2: R2)(implicit ev: R =:= Unit, rowOrd: Ordering[R2]): Matrix2[R2, C, V] =
MatrixLiteral(toTypedPipe.map { case (r, c, v) => (r2, c, v) }, this.sizeHint)
def asCol[C2](c2: C2)(implicit ev: C =:= Unit, colOrd: Ordering[C2]): Matrix2[R, C2, V] =
MatrixLiteral(toTypedPipe.map { case (r, c, v) => (r, c2, v) }, this.sizeHint)
  // Compute the sum of the main diagonal. Only makes sense in cases where the row and col
  // types are equal.
def trace(implicit mon: Monoid[V], ev: =:=[R,C]): Scalar2[V] =
Scalar2(toTypedPipe.asInstanceOf[TypedPipe[(R, R, V)]]
.filter{case (r1, r2, _) => Ordering[R].equiv(r1, r2)}
.map{case (_,_,x) => x}
.sum(mon)
)
def write(sink: TypedSink[(R, C, V)])(implicit fd: FlowDef, m: Mode): Matrix2[R, C, V] =
MatrixLiteral(toTypedPipe.write(sink), sizeHint)
}
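// Illustrative sketch, not part of the original file: it assumes MatrixLiteral's
// (pipe, sizeHint) constructor used elsewhere in this file and algebird's implicit
// Ring[Double]. Wrapping two TypedPipes of (row, col, value) triples as literals and
// multiplying them only builds a plan; evaluation (and the parenthesization/common
// sub-expression optimization described above) happens when toTypedPipe or write is called.
object Matrix2Sketch {
  def multiply(left: TypedPipe[(Int, Int, Double)],
               right: TypedPipe[(Int, Int, Double)]): Matrix2[Int, Int, Double] = {
    val a = MatrixLiteral(left, NoClue)  // NoClue: no size hint is known for either operand
    val b = MatrixLiteral(right, NoClue)
    a * b
  }
}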
/** This trait allows users to plug in join algorithms
* where they are needed to improve products and propagations.
* The default works well in most cases, but highly skewed matrices may need some
* special handling
*/
trait MatrixJoiner2 extends java.io.Serializable {
def join[R, C, V, C2, V2](left: Matrix2[R, C, V], right: Matrix2[C, C2, V2]): TypedPipe[(C, ((R, V), (C2, V2)))]
}
object MatrixJoiner2 {
// The default if the user does not override,
  // comment this out to verify we are not hiding the user's supplied values
implicit def default: MatrixJoiner2 = new DefaultMatrixJoiner(10000L)
def join[R, C, V, C2, V2](left: Matrix2[R, C, V],
right: Matrix2[C, C2, V2])(implicit mj: MatrixJoiner2): TypedPipe[(C, ((R, V), (C2, V2)))] =
mj.join(left, right)
}
/** This uses a standard join if the matrices are of comparable size and large,
* otherwise, if one is much smaller than the other, we use a hash join
*/
class DefaultMatrixJoiner(sizeRatioThreshold: Long) extends MatrixJoiner2 {
def join[R, C, V, C2, V2](left: Matrix2[R, C, V],
right: Matrix2[C, C2, V2]): TypedPipe[(C, ((R, V), (C2, V2)))] = {
implicit val cOrd: Ordering[C] = left.colOrd
val one = left.toTypedPipe.map { case (r, c, v) => (c, (r, v)) }.group
val two = right.toTypedPipe.map { case (c, c2, v2) => (c, (c2, v2)) }.group
val sizeOne = left.sizeHint.total.getOrElse(BigInt(1L))
val sizeTwo = right.sizeHint.total.getOrElse(BigInt(1L))
def swapInner[M,N](t: TypedPipe[(C, (M, N))]): TypedPipe[(C, (N, M))] = t.mapValues { t: (M,N) => t.swap }
// TODO:
// use block join on tall skinny times skinny tall (or skewed): the result really big,
// but the direct approach can't get much parallelism.
// https://github.com/twitter/scalding/issues/629
if (sizeOne / sizeTwo > sizeRatioThreshold) {
one.hashJoin(two)
} else if (sizeTwo / sizeOne > sizeRatioThreshold) {
swapInner(two.hashJoin(one))
} else if (sizeOne > sizeTwo) {
one.join(two).toTypedPipe
} else {
swapInner(two.join(one).toTypedPipe)
}
}
}
/**
* Infinite column vector - only for intermediate computations
*/
case class OneC[R, V](implicit override val rowOrd: Ordering[R]) extends Matrix2[R, Unit, V] {
override val sizeHint: SizeHint = FiniteHint(Long.MaxValue, 1)
override def colOrd = Ordering[Unit]
def transpose = OneR()
override def negate(implicit g: Group[V]) = sys.error("Only used in intermediate computations, try (-1 * OneC)")
def toTypedPipe = sys.error("Only used in intermediate computations")
}
/**
* Infinite row vector - only for intermediate computations
*/
case class OneR[C, V](implicit override val colOrd: Ordering[C]) extends Matrix2[Unit, C, V] {
override val sizeHint: SizeHint = FiniteHint(1, Long.MaxValue)
override def rowOrd = Ordering[Unit]
def transpose = OneC()
override def negate(implicit g: Group[V]) = sys.error("Only used in intermediate computations, try (-1 * OneR)")
def toTypedPipe = sys.error("Only used in intermediate computations")
}
/**
* Class representing a matrix product
*
* @param left multiplicand
* @param right multiplier
 * @param ring the Ring used to multiply cell values and sum the partial products
* @param expressions a HashMap of common subtrees; None if possibly not optimal (did not go through optimize), Some(...) with a HashMap that was created in optimize
*/
case class Product[R, C, C2, V](left: Matrix2[R, C, V],
right: Matrix2[C, C2, V],
ring: Ring[V],
expressions: Option[Map[Matrix2[R, C2, V], TypedPipe[(R, C2, V)]]] = None)(implicit val joiner: MatrixJoiner2) extends Matrix2[R, C2, V] {
/**
* Structural, NOT mathematical equality (e.g. (A*B) * C != A * (B*C))
* Used for the Matrix2OptimizationTest (so that it doesn't care about expressions)
*/
override def equals(obj: Any): Boolean = obj match {
case Product(tl, tr, _, _) => left.equals(tl) && right.equals(tr)
case _ => false
}
override def hashCode(): Int = left.hashCode ^ right.hashCode
private lazy val optimal: Boolean = expressions.isDefined
private lazy val isSpecialCase: Boolean = right.isInstanceOf[OneC[_, _]] || left.isInstanceOf[OneR[_, _]]
private lazy val specialCase: TypedPipe[(R, C2, V)] = {
val leftMatrix = right.isInstanceOf[OneC[_, _]]
val joined = (if (leftMatrix) {
val ord: Ordering[R] = left.rowOrd
left.toTypedPipe.groupBy(x => x._1)(ord)
} else {
val ord: Ordering[C] = right.rowOrd
right.toTypedPipe.groupBy(x => x._1)(ord)
}).mapValues { _._3 }
.sum(ring)
.filter { kv => ring.isNonZero(kv._2) }
if (leftMatrix) {
joined.map { case (r, v) => (r, (), v) }.asInstanceOf[TypedPipe[(R, C2, V)]] // we know C2 is Unit
} else {
joined.map { case (c, v) => ((), c, v) }.asInstanceOf[TypedPipe[(R, C2, V)]] // we know R is Unit
}
}
// represents `\\sum_{i j} M_{i j}` where `M_{i j}` is the Matrix with exactly one element at `row=i, col = j`.
lazy val toOuterSum: TypedPipe[(R, C2, V)] = {
if (optimal) {
if (isSpecialCase) {
specialCase
} else {
implicit val ord: Ordering[C] = right.rowOrd
joiner.join(left, right)
.map { case (key, ((l1, lv), (r2, rv))) => (l1, r2, ring.times(lv, rv)) }
}
} else {
// this branch might be tricky, since not clear to me that optimizedSelf will be a Product with a known C type
// Maybe it is Product[R, _, C2, V]
optimizedSelf.asInstanceOf[Product[R, _, C2, V]].toOuterSum
}
}
private def computePipe(joined: TypedPipe[(R, C2, V)] = toOuterSum): TypedPipe[(R, C2, V)] = {
if (isSpecialCase) {
joined
} else {
val ord2: Ordering[(R, C2)] = Ordering.Tuple2(rowOrd, colOrd)
joined.groupBy(w => (w._1, w._2))(ord2).mapValues { _._3 }
.sum(ring)
.filter { kv => ring.isNonZero(kv._2) }
.map { case ((r, c), v) => (r, c, v) }
}
}
override lazy val toTypedPipe: TypedPipe[(R, C2, V)] = {
expressions match {
case Some(m) => m.get(this) match {
case Some(pipe) => pipe
case None => {
val result = computePipe()
m.put(this, result)
result
}
}
case None => optimizedSelf.toTypedPipe
}
}
override val sizeHint = left.sizeHint * right.sizeHint
implicit override val rowOrd: Ordering[R] = left.rowOrd
implicit override val colOrd: Ordering[C2] = right.colOrd
override lazy val transpose: Product[C2, C, R, V] = Product(right.transpose, left.transpose, ring)
override def negate(implicit g: Group[V]): Product[R, C, C2, V] = {
if (left.sizeHint.total.getOrElse(BigInt(0L)) > right.sizeHint.total.getOrElse(BigInt(0L))) {
Product(left, right.negate, ring, expressions)
} else {
Product(left.negate, right, ring, expressions)
}
}
/** Trace(A B) = Trace(B A) so we optimize to choose the lowest cost item
*/
override def trace(implicit mon: Monoid[V], ev1: =:=[R,C2]): Scalar2[V] = {
val (cost1, plan1) = Matrix2.optimize(this.asInstanceOf[Matrix2[Any, Any, V]])
val (cost2, plan2) = Matrix2.optimize(
Product(right.asInstanceOf[Matrix2[C,R,V]], left.asInstanceOf[Matrix2[R,C,V]], ring, None)
.asInstanceOf[Matrix2[Any, Any, V]]
)
if (cost1 > cost2) {
val product2 = plan2.asInstanceOf[Product[C, R, C, V]]
val ord = left.colOrd
val filtered = product2.toOuterSum.filter{case (c1, c2, _) => ord.equiv(c1, c2)}
Scalar2(product2.computePipe(filtered).map{case (_, _, x) => x}.sum(mon))
} else {
val product1 = plan1.asInstanceOf[Product[R, C, R, V]]
val ord = left.rowOrd
val filtered = product1.toOuterSum.filter{case (r1, r2, _) => ord.equiv(r1, r2)}
Scalar2(product1.computePipe(filtered).map{case (_, _, x) => x}.sum(mon))
}
}
}
case class Sum[R, C, V](left: Matrix2[R, C, V], right: Matrix2[R, C, V], mon: Monoid[V]) extends Matrix2[R, C, V] {
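  // Flattens a tree of nested Sums into a list of addend pipes so that the whole sum can be
  // grouped and reduced in a single pass.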
def collectAddends(sum: Sum[R, C, V]): List[TypedPipe[(R, C, V)]] = {
def getLiteral(mat: Matrix2[R, C, V]): TypedPipe[(R, C, V)] = {
mat match {
case x @ Product(_, _, _, _) => x.toOuterSum
case x @ MatrixLiteral(_, _) => x.toTypedPipe
case x @ HadamardProduct(_, _, _) => x.optimizedSelf.toTypedPipe
case _ => sys.error("Invalid addend")
}
}
sum match {
case Sum(l @ Sum(_, _, _), r @ Sum(_, _, _), _) => {
collectAddends(l) ++ collectAddends(r)
}
case Sum(l @ Sum(_, _, _), r, _) => {
collectAddends(l) ++ List(getLiteral(r))
}
case Sum(l, r @ Sum(_, _, _), _) => {
getLiteral(l) :: collectAddends(r)
}
case Sum(l, r, _) => {
List(getLiteral(l), getLiteral(r))
}
}
}
override lazy val toTypedPipe: TypedPipe[(R, C, V)] = {
if (left.equals(right)) {
left.optimizedSelf.toTypedPipe.map(v => (v._1, v._2, mon.plus(v._3, v._3)))
} else {
val ord: Ordering[(R, C)] = Ordering.Tuple2(left.rowOrd, left.colOrd)
collectAddends(this)
.reduce((x, y) => x ++ y)
.groupBy(x => (x._1, x._2))(ord).mapValues { _._3 }
.sum(mon)
.filter { kv => mon.isNonZero(kv._2) }
.map { case ((r, c), v) => (r, c, v) }
}
}
override val sizeHint = left.sizeHint + right.sizeHint
implicit override val rowOrd: Ordering[R] = left.rowOrd
implicit override val colOrd: Ordering[C] = left.colOrd
override lazy val transpose: Sum[C, R, V] = Sum(left.transpose, right.transpose, mon)
override def negate(implicit g: Group[V]): Sum[R, C, V] = Sum(left.negate, right.negate, mon)
override def sumColVectors(implicit ring: Ring[V], mj: MatrixJoiner2): Matrix2[R, Unit, V] =
Sum(left.sumColVectors, right.sumColVectors, mon)
override def trace(implicit mon: Monoid[V], ev: =:=[R,C]): Scalar2[V] =
Scalar2(collectAddends(this).map { pipe =>
pipe.asInstanceOf[TypedPipe[(R, R, V)]]
.filter { case (r, c, v) => Ordering[R].equiv(r, c) }
.map { _._3 }
}.reduce(_ ++ _).sum)
}
case class HadamardProduct[R, C, V](left: Matrix2[R, C, V],
right: Matrix2[R, C, V],
ring: Ring[V]) extends Matrix2[R, C, V] {
// TODO: optimize / combine with Sums: https://github.com/tomtau/scalding/issues/14#issuecomment-22971582
override lazy val toTypedPipe: TypedPipe[(R, C, V)] = {
if (left.equals(right)) {
left.optimizedSelf.toTypedPipe.map(v => (v._1, v._2, ring.times(v._3, v._3)))
} else {
val ord: Ordering[(R, C)] = Ordering.Tuple2(left.rowOrd, left.colOrd)
      // a boolean flag tracks whether a value was actually reduced (both matrices had an entry at that cell) or came from only one side and must be dropped
(left.optimizedSelf.toTypedPipe.map { case (r, c, v) => (r, c, (v, false)) } ++
right.optimizedSelf.toTypedPipe.map { case (r, c, v) => (r, c, (v, false)) })
.groupBy(x => (x._1, x._2))(ord)
.mapValues { _._3 }
.reduce((x, y) => (ring.times(x._1, y._1), true))
.filter { kv => kv._2._2 && ring.isNonZero(kv._2._1) }
.map { case ((r, c), v) => (r, c, v._1) }
}
}
override lazy val transpose: MatrixLiteral[C, R, V] = MatrixLiteral(toTypedPipe.map(x => (x._2, x._1, x._3)), sizeHint.transpose)(colOrd, rowOrd)
override val sizeHint = left.sizeHint #*# right.sizeHint
override def negate(implicit g: Group[V]): HadamardProduct[R, C, V] =
if (left.sizeHint.total.getOrElse(BigInt(0L)) > right.sizeHint.total.getOrElse(BigInt(0L)))
HadamardProduct(left, right.negate, ring)
else
HadamardProduct(left.negate, right, ring)
implicit override val rowOrd: Ordering[R] = left.rowOrd
implicit override val colOrd: Ordering[C] = left.colOrd
}
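// Illustrative sketch (not part of the original file): the boolean flag used in
// HadamardProduct.toTypedPipe above marks whether a cell was produced by reducing entries from
// both sides (an actual elementwise product) or came from a single side and must be dropped.
// The same trick on plain in-memory maps, with hypothetical names:
object HadamardFlagSketch {
  def hadamard[K](left: Map[K, Int], right: Map[K, Int]): Map[K, Int] =
    (left.toSeq.map { case (k, v) => (k, (v, false)) } ++
      right.toSeq.map { case (k, v) => (k, (v, false)) })
      .groupBy(_._1)
      .map { case (k, entries) => k -> entries.map(_._2).reduce((x, y) => (x._1 * y._1, true)) }
      .collect { case (k, (v, true)) if v != 0 => k -> v }
  // hadamard(Map("a" -> 2, "b" -> 3), Map("a" -> 5)) == Map("a" -> 10)
}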
case class MatrixLiteral[R, C, V](override val toTypedPipe: TypedPipe[(R, C, V)],
override val sizeHint: SizeHint)(implicit override val rowOrd: Ordering[R], override val colOrd: Ordering[C])
extends Matrix2[R, C, V] {
override lazy val transpose: MatrixLiteral[C, R, V] =
MatrixLiteral(toTypedPipe.map(x => (x._2, x._1, x._3)), sizeHint.transpose)(colOrd, rowOrd)
override def negate(implicit g: Group[V]): MatrixLiteral[R, C, V] =
MatrixLiteral(toTypedPipe.map(x => (x._1, x._2, g.negate(x._3))), sizeHint)
}
/** A representation of a scalar value that can be used with Matrices
*/
trait Scalar2[V] extends Serializable {
def value: ValuePipe[V]
def +(that: Scalar2[V])(implicit sg: Semigroup[V]): Scalar2[V] = {
(value, that.value) match {
case (EmptyValue(), _) => that
case (LiteralValue(v1), _) => that.map(sg.plus(v1, _))
case (_, EmptyValue()) => this
case (_, LiteralValue(v2)) => map(sg.plus(_, v2))
// TODO: optimize sums of scalars like sums of matrices:
// only one M/R pass for the whole Sum.
case (_, ComputedValue(v2)) => Scalar2((value ++ v2).sum(sg))
}
}
def -(that: Scalar2[V])(implicit g: Group[V]): Scalar2[V] = this + that.map(x => g.negate(x))
def *(that: Scalar2[V])(implicit ring: Ring[V]): Scalar2[V] =
Scalar2(ValuePipe.fold(value, that.value)(ring.times _))
def /(that: Scalar2[V])(implicit f: Field[V]): Scalar2[V] =
Scalar2(ValuePipe.fold(value, that.value)(f.div _))
def unary_-(implicit g: Group[V]): Scalar2[V] = map(x => g.negate(x))
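  // Scalar times matrix: push the scalar into the smaller operand of a Product or
  // HadamardProduct so the extra multiplication touches as little data as possible.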
def *[R, C](that: Matrix2[R, C, V])(implicit ring: Ring[V], mj: MatrixJoiner2): Matrix2[R, C, V] =
that match {
case p@Product(left, right, _, expressions) =>
if (left.sizeHint.total.getOrElse(BigInt(0L)) > right.sizeHint.total.getOrElse(BigInt(0L)))
Product(left, (this * right), ring, expressions)(p.joiner)
else
Product(this * left, right, ring, expressions)(p.joiner)
case HadamardProduct(left, right, _) =>
if (left.sizeHint.total.getOrElse(BigInt(0L)) > right.sizeHint.total.getOrElse(BigInt(0L)))
HadamardProduct(left, (this * right), ring)
else
HadamardProduct(this * left, right, ring)
case s @ Sum(left, right, mon) => Sum(this * left, this * right, mon)
case m @ MatrixLiteral(_, _) => timesLiteral(m) // handle literals here
case x @ OneC() =>
Product(OneC[Unit, V](), toMatrix, ring)
.asInstanceOf[Matrix2[R, C, V]]
case x @ OneR() =>
Product(toMatrix, OneR[Unit, V](), ring)
.asInstanceOf[Matrix2[R, C, V]]
}
def divMatrix[R, C](that: Matrix2[R, C, V])(implicit f: Field[V]): MatrixLiteral[R, C, V] =
MatrixLiteral(
that.toTypedPipe
.mapWithValue(value) { case ((r, c, v), optV) =>
(r, c, f.div(v, optV.getOrElse(f.zero)))
},
that.sizeHint
)(that.rowOrd, that.colOrd)
def timesLiteral[R, C](that: Matrix2[R, C, V])(implicit ring: Ring[V]): MatrixLiteral[R, C, V] =
MatrixLiteral(
that.toTypedPipe
.mapWithValue(value) { case ((r, c, v), optV) =>
(r, c, ring.times(optV.getOrElse(ring.zero), v))
},
that.sizeHint
)(that.rowOrd, that.colOrd)
def map[U](fn: V => U): Scalar2[U] = Scalar2(value.map(fn))
def toMatrix: Matrix2[Unit, Unit, V] =
MatrixLiteral(value.toTypedPipe.map(v => ((), (), v)), FiniteHint(1, 1))
  // TODO: FunctionMatrix[R,C,V](fn: (R,C) => V) and a Literal scalar is just: FunctionMatrix[Unit, Unit, V]({ (_, _) => v })
}
case class ValuePipeScalar[V](override val value: ValuePipe[V]) extends Scalar2[V]
object Scalar2 {
// implicits cannot share names
implicit def from[V](v: ValuePipe[V]): Scalar2[V] = ValuePipeScalar(v)
def apply[V](v: ValuePipe[V]): Scalar2[V] = ValuePipeScalar(v)
// implicits can't share names, but we want the implicit
implicit def const[V](v: V)(implicit fd: FlowDef, m: Mode): Scalar2[V] =
from(LiteralValue(v))
def apply[V](v: V)(implicit fd: FlowDef, m: Mode): Scalar2[V] =
from(LiteralValue(v))
}
object Matrix2 {
def apply[R:Ordering, C: Ordering, V](t: TypedPipe[(R, C, V)], hint: SizeHint): Matrix2[R, C, V] =
MatrixLiteral(t, hint)
def read[R, C, V](t: TypedSource[(R, C, V)],
hint: SizeHint)(implicit ordr: Ordering[R],
ordc: Ordering[C], fd: FlowDef, m: Mode): Matrix2[R, C, V] =
MatrixLiteral(TypedPipe.from(t), hint)
def J[R, C, V](implicit ordR: Ordering[R], ordC: Ordering[C], ring: Ring[V], mj: MatrixJoiner2) =
Product(OneC[R, V]()(ordR), OneR[C, V]()(ordC), ring)
  /**
   * The original prototype that employs the standard O(n^3) dynamic programming
   * procedure to optimize a matrix chain factorization.
   *
   * It now also "prefers" more spread out / bushy / less deep factorizations,
   * which better reflect the Map/Reduce execution model.
   */
def optimizeProductChain[V](p: IndexedSeq[Matrix2[Any, Any, V]], product: Option[(Ring[V], MatrixJoiner2)]): (BigInt, Matrix2[Any, Any, V]) = {
val subchainCosts = HashMap.empty[(Int, Int), BigInt]
val splitMarkers = HashMap.empty[(Int, Int), Int]
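    // Memo tables: the best cost of multiplying the subchain p(i..j), and the split index k
    // at which that best cost is achieved.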
def computeCosts(p: IndexedSeq[Matrix2[Any, Any, V]], i: Int, j: Int): BigInt = {
if (subchainCosts.contains((i, j))) subchainCosts((i, j))
if (i == j) subchainCosts.put((i, j), 0)
else {
subchainCosts.put((i, j), Long.MaxValue)
for (k <- i to (j - 1)) {
          // unlike the original DP, the subchain costs are weighted by (k - i) and (j - k - 1), which favors spread out (bushy) trees
val cost = (k - i) * computeCosts(p, i, k) + (j - k - 1) * computeCosts(p, k + 1, j) +
(p(i).sizeHint * (p(k).sizeHint * p(j).sizeHint)).total.getOrElse(BigInt(0L))
if (cost < subchainCosts((i, j))) {
subchainCosts.put((i, j), cost)
splitMarkers.put((i, j), k)
}
}
}
subchainCosts((i, j))
}
val sharedMap = HashMap.empty[Matrix2[Any, Any, V], TypedPipe[(Any, Any, V)]]
def generatePlan(i: Int, j: Int): Matrix2[Any, Any, V] = {
if (i == j) p(i)
else {
val k = splitMarkers((i, j))
val left = generatePlan(i, k)
val right = generatePlan(k + 1, j)
val (ring, joiner) = product.get
Product(left, right, ring, Some(sharedMap))(joiner)
}
}
val best = computeCosts(p, 0, p.length - 1)
(best, generatePlan(0, p.length - 1))
}
/**
* This function walks the input tree, finds basic blocks to optimize,
* i.e. matrix product chains that are not interrupted by summations.
* One example:
* A*B*C*(D+E)*(F*G) => "basic blocks" are ABC, D, E, and FG
*
   * It now also does "global" optimization, i.e. it optimizes across basic blocks.
   * In the above example, we'd treat (D+E) as a temporary matrix T and optimize the whole chain ABCTFG.
   *
   * It is not clear whether using distributivity to generate more variants would be worthwhile.
* In the above example, we could also generate ABCDFG + ABCEFG and have basic blocks: ABCDFG, and ABCEFG.
* But this would be almost twice as much work with the current cost estimation.
*/
def optimize[V](mf: Matrix2[Any, Any, V]): (BigInt, Matrix2[Any, Any, V]) = {
def pair[X,Y](x: Option[X], y: Option[Y]): Option[(X,Y)] =
for { xi <- x; yi <- y } yield (xi, yi)
/**
     * Recursive helper: returns a flattened product chain and optimizes the product chains found under sums
*/
def optimizeBasicBlocks(mf: Matrix2[Any, Any, V]): (List[Matrix2[Any, Any, V]], BigInt, Option[Ring[V]], Option[MatrixJoiner2]) = {
mf match {
// basic block of one matrix
case element @ MatrixLiteral(_, _) => (List(element), 0, None, None)
// two potential basic blocks connected by a sum
case Sum(left, right, mon) => {
val (lastLChain, lastCost1, ringL, joinerL) = optimizeBasicBlocks(left)
val (lastRChain, lastCost2, ringR, joinerR) = optimizeBasicBlocks(right)
val (cost1, newLeft) = optimizeProductChain(lastLChain.toIndexedSeq, pair(ringL, joinerL))
val (cost2, newRight) = optimizeProductChain(lastRChain.toIndexedSeq, pair(ringR, joinerR))
(List(Sum(newLeft, newRight, mon)),
lastCost1 + lastCost2 + cost1 + cost2,
ringL.orElse(ringR),
joinerL.orElse(joinerR))
}
case HadamardProduct(left, right, ring) => {
val (lastLChain, lastCost1, ringL, joinerL) = optimizeBasicBlocks(left)
val (lastRChain, lastCost2, ringR, joinerR) = optimizeBasicBlocks(right)
val (cost1, newLeft) = optimizeProductChain(lastLChain.toIndexedSeq, pair(ringL, joinerL))
val (cost2, newRight) = optimizeProductChain(lastRChain.toIndexedSeq, pair(ringR, joinerR))
(List(HadamardProduct(newLeft, newRight, ring)),
lastCost1 + lastCost2 + cost1 + cost2,
ringL.orElse(ringR),
joinerL.orElse(joinerR))
}
// chain (...something...)*(...something...)
case p@Product(left, right, ring, _) => {
val (lastLChain, lastCost1, ringL, joinerL) = optimizeBasicBlocks(left)
val (lastRChain, lastCost2, ringR, joinerR) = optimizeBasicBlocks(right)
(lastLChain ++ lastRChain, lastCost1 + lastCost2, Some(ring), Some(p.joiner))
}
// OneC, OneR and potentially other intermediate matrices
case el => (List(el), 0, None, None)
}
}
val (lastChain, lastCost, ring, joiner) = optimizeBasicBlocks(mf)
val (potentialCost, finalResult) = optimizeProductChain(lastChain.toIndexedSeq, pair(ring, joiner))
(lastCost + potentialCost, finalResult)
}
}
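// Illustrative sketch (not part of the original file): a minimal, standalone version of the
// classic O(n^3) matrix-chain dynamic program that optimizeProductChain above is based on.
// It uses a plain rows * cols * cols cost model, so it ignores scalding's SizeHint-based costs
// and the bushy-tree weighting; the object and method names below are hypothetical.
object MatrixChainCostSketch {
  /** dims(i) = (rows, cols) of the i-th matrix; returns the minimal number of scalar
   *  multiplications needed to compute the full chain product. */
  def minCost(dims: IndexedSeq[(Long, Long)]): Long = {
    val n = dims.length
    val cost = Array.fill(n, n)(0L)
    for (len <- 2 to n; i <- 0 to n - len) {
      val j = i + len - 1
      cost(i)(j) = Long.MaxValue
      for (k <- i until j) {
        // cost of the two subchains plus the cost of multiplying their results
        val c = cost(i)(k) + cost(k + 1)(j) + dims(i)._1 * dims(k)._2 * dims(j)._2
        if (c < cost(i)(j)) cost(i)(j) = c
      }
    }
    cost(0)(n - 1)
  }
  // Example: a (10x30) * (30x5) * (5x60) chain costs 4500 when parenthesized as ((A*B)*C).
  // minCost(Vector((10L, 30L), (30L, 5L), (5L, 60L))) == 4500L
}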
|
vidyar/twitterscalding
|
scalding-core/src/main/scala/com/twitter/scalding/mathematics/Matrix2.scala
|
Scala
|
apache-2.0
| 29,370
|
package org.apache.spark.streaming.flumedemo
import com.wallace.common.LogSupport
import org.apache.spark.SparkContext
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.flume.{FlumeUtils, SparkFlumeEvent}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import scala.util.control.NonFatal
/**
* Created by Wallace on 2017/3/30.
*/
object SparkStreamingFlumeDemo extends LogSupport {
private val DEFAULT_BATCH_DURATION: Int = 20
private val DEFAULT_PART_NUM: Int = 30
def main(args: Array[String]): Unit = {
if (args.length < 2) {
      log.warn("Please enter host and port")
System.exit(1)
}
//val sc = new SparkContext("spark://centos.host1:7077", "Spark Streaming Flume Integration")
val sc = new SparkContext("local[*]", "Spark Streaming Flume Integration")
    // Create a StreamingContext with a 20-second batch interval
val ssc = new StreamingContext(sc, Seconds(DEFAULT_BATCH_DURATION))
val hostname = args(0)
val port = args(1).toInt
val storageLevel = StorageLevel.MEMORY_ONLY
val flumeStream: ReceiverInputDStream[SparkFlumeEvent] = FlumeUtils.createStream(ssc, hostname, port, storageLevel)
val flumePollingStream: ReceiverInputDStream[SparkFlumeEvent] = FlumeUtils.createPollingStream(ssc, hostname, port, storageLevel)
flumeStream.count().map(cnt => "Received " + cnt + " flume events.").print()
flumePollingStream.count().map(cnt => "Received " + cnt + " flume events.").print()
flumeStream.foreachRDD {
rdd =>
rdd.coalesce(DEFAULT_PART_NUM).saveAsTextFile("/")
}
    // Start the streaming computation
ssc.start()
    // Wait for the computation to terminate, then exit
try {
ssc.awaitTermination()
} catch {
case NonFatal(e) =>
log.error(s"[SparkStreamingFlume] Catch NonFatal Exception: ${e.getMessage}.")
ssc.stop(stopSparkContext = true, stopGracefully = true)
} finally {
sc.stop()
}
}
}
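// Illustrative sketch (not part of the original file): SparkFlumeEvent wraps an Avro event,
// so a common next step is decoding the event body before saving it. A minimal helper,
// assuming UTF-8 payloads; the object name is hypothetical.
object FlumeEventDecodeSketch {
  import java.nio.charset.StandardCharsets
  def body(e: SparkFlumeEvent): String = {
    val buf = e.event.getBody
    val bytes = new Array[Byte](buf.remaining())
    buf.duplicate().get(bytes) // duplicate so the original buffer's position is untouched
    new String(bytes, StandardCharsets.UTF_8)
  }
}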
|
BiyuHuang/CodePrototypesDemo
|
demo/SparkDemo/src/main/scala/org/apache/spark/streaming/flumedemo/SparkStreamingFlumeDemo.scala
|
Scala
|
apache-2.0
| 2,056
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.batch.table
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.util.CollectionDataSets
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.api.internal.TableEnvironmentInternal
import org.apache.flink.table.runtime.utils.TableProgramsTestBase.TableConfigMode
import org.apache.flink.table.runtime.utils.{TableProgramsCollectionTestBase, TableProgramsTestBase}
import org.apache.flink.table.utils.MemoryTableSourceSinkUtil
import org.apache.flink.test.util.TestBaseUtils
import org.apache.flink.types.Row
import org.junit.Assert.assertEquals
import org.junit._
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import java.util
import scala.collection.JavaConverters._
@RunWith(classOf[Parameterized])
class TableEnvironmentITCase(
configMode: TableConfigMode)
extends TableProgramsCollectionTestBase(configMode) {
@Test
def testSimpleRegister(): Unit = {
val tableName = "MyTable"
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val ds = CollectionDataSets.get3TupleDataSet(env)
tEnv.createTemporaryView(tableName, ds)
val t = tEnv.scan(tableName).select('_1, '_2, '_3)
    val expected = "1,1,Hi\n" + "2,2,Hello\n" + "3,2,Hello world\n" +
      "4,3,Hello world, how are you?\n" + "5,3,I am fine.\n" + "6,3,Luke Skywalker\n" +
      "7,4,Comment#1\n" + "8,4,Comment#2\n" + "9,4,Comment#3\n" + "10,4,Comment#4\n" +
      "11,5,Comment#5\n" + "12,5,Comment#6\n" + "13,5,Comment#7\n" + "14,5,Comment#8\n" +
      "15,5,Comment#9\n" + "16,6,Comment#10\n" + "17,6,Comment#11\n" + "18,6,Comment#12\n" +
      "19,6,Comment#13\n" + "20,6,Comment#14\n" + "21,6,Comment#15\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testRegisterWithFieldsByPosition(): Unit = {
val tableName = "MyTable"
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val ds = CollectionDataSets.get3TupleDataSet(env)
tEnv.createTemporaryView(tableName, ds, 'a, 'b, 'c) // new alias
val t = tEnv.scan(tableName).select('a, 'b)
    val expected = "1,1\n" + "2,2\n" + "3,2\n" + "4,3\n" + "5,3\n" + "6,3\n" +
      "7,4\n" + "8,4\n" + "9,4\n" + "10,4\n" + "11,5\n" + "12,5\n" + "13,5\n" + "14,5\n" +
      "15,5\n" + "16,6\n" + "17,6\n" + "18,6\n" + "19,6\n" + "20,6\n" + "21,6\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testRegisterWithFieldsByName(): Unit = {
val tableName = "MyTable"
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val ds = CollectionDataSets.get3TupleDataSet(env)
tEnv.createTemporaryView(tableName, ds, '_3, '_1, '_2) // new order
val t = tEnv.scan(tableName).select('_1, '_2)
    val expected = "1,1\n" + "2,2\n" + "3,2\n" + "4,3\n" + "5,3\n" + "6,3\n" +
      "7,4\n" + "8,4\n" + "9,4\n" + "10,4\n" + "11,5\n" + "12,5\n" + "13,5\n" + "14,5\n" +
      "15,5\n" + "16,6\n" + "17,6\n" + "18,6\n" + "19,6\n" + "20,6\n" + "21,6\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testTableRegister(): Unit = {
val tableName = "MyTable"
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable(tableName, t)
val regT = tEnv.scan(tableName).select('a, 'b).filter('a > 8)
    val expected = "9,4\n" + "10,4\n" +
      "11,5\n" + "12,5\n" + "13,5\n" + "14,5\n" +
      "15,5\n" + "16,6\n" + "17,6\n" + "18,6\n" +
      "19,6\n" + "20,6\n" + "21,6\n"
val results = regT.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testToTable(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val t = CollectionDataSets.get3TupleDataSet(env)
.toTable(tEnv, 'a, 'b, 'c)
.select('a, 'b, 'c)
    val expected = "1,1,Hi\n" + "2,2,Hello\n" + "3,2,Hello world\n" +
      "4,3,Hello world, how are you?\n" + "5,3,I am fine.\n" + "6,3,Luke Skywalker\n" +
      "7,4,Comment#1\n" + "8,4,Comment#2\n" + "9,4,Comment#3\n" + "10,4,Comment#4\n" +
      "11,5,Comment#5\n" + "12,5,Comment#6\n" + "13,5,Comment#7\n" + "14,5,Comment#8\n" +
      "15,5,Comment#9\n" + "16,6,Comment#10\n" + "17,6,Comment#11\n" + "18,6,Comment#12\n" +
      "19,6,Comment#13\n" + "20,6,Comment#14\n" + "21,6,Comment#15\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testToTableFromCaseClass(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val data = List(
SomeCaseClass("Peter", 28, 4000.00, "Sales"),
SomeCaseClass("Anna", 56, 10000.00, "Engineering"),
SomeCaseClass("Lucy", 42, 6000.00, "HR"))
val t = env.fromCollection(data)
.toTable(tEnv, 'a, 'b, 'c, 'd)
.select('a, 'b, 'c, 'd)
    val expected: String =
      "Peter,28,4000.0,Sales\n" +
      "Anna,56,10000.0,Engineering\n" +
      "Lucy,42,6000.0,HR\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testToTableFromAndToCaseClass(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env, config)
val data = List(
SomeCaseClass("Peter", 28, 4000.00, "Sales"),
SomeCaseClass("Anna", 56, 10000.00, "Engineering"),
SomeCaseClass("Lucy", 42, 6000.00, "HR"))
val t = env.fromCollection(data)
.toTable(tEnv, 'a, 'b, 'c, 'd)
.select('a, 'b, 'c, 'd)
    val expected: String =
      "SomeCaseClass(Peter,28,4000.0,Sales)\n" +
      "SomeCaseClass(Anna,56,10000.0,Engineering)\n" +
      "SomeCaseClass(Lucy,42,6000.0,HR)\n"
val results = t.toDataSet[SomeCaseClass].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testInsertIntoMemoryTable(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = BatchTableEnvironment.create(env)
MemoryTableSourceSinkUtil.clear()
val t = CollectionDataSets.getSmall3TupleDataSet(env).toTable(tEnv).as("a", "b", "c")
tEnv.registerTable("sourceTable", t)
val fieldNames = Array("d", "e", "f")
val fieldTypes = tEnv.scan("sourceTable").getSchema.getFieldTypes
val sink = new MemoryTableSourceSinkUtil.UnsafeMemoryAppendTableSink
tEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"targetTable", sink.configure(fieldNames, fieldTypes))
tEnv.scan("sourceTable")
.select('a, 'b, 'c)
.insertInto("targetTable")
tEnv.execute("job name")
val expected = List("1,1,Hi", "2,2,Hello", "3,2,Hello world")
assertEquals(expected.sorted, MemoryTableSourceSinkUtil.tableDataStrings.sorted)
}
}
object TableEnvironmentITCase {
@Parameterized.Parameters(name = "Table config = {0}")
def parameters(): util.Collection[Array[java.lang.Object]] = {
Seq[Array[AnyRef]](
Array(TableProgramsTestBase.DEFAULT)
).asJava
}
}
case class SomeCaseClass(name: String, age: Int, salary: Double, department: String) {
def this() { this("", 0, 0.0, "") }
}
|
tzulitai/flink
|
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/runtime/batch/table/TableEnvironmentITCase.scala
|
Scala
|
apache-2.0
| 8,526
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources.v2
import java.io.File
import java.util
import java.util.OptionalLong
import scala.collection.JavaConverters._
import test.org.apache.spark.sql.sources.v2._
import org.apache.spark.SparkException
import org.apache.spark.sql.{DataFrame, QueryTest, Row}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources.v2.{BatchScanExec, DataSourceV2Relation}
import org.apache.spark.sql.execution.exchange.{Exchange, ShuffleExchangeExec}
import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector
import org.apache.spark.sql.functions._
import org.apache.spark.sql.sources.{Filter, GreaterThan}
import org.apache.spark.sql.sources.v2.TableCapability._
import org.apache.spark.sql.sources.v2.reader._
import org.apache.spark.sql.sources.v2.reader.partitioning.{ClusteredDistribution, Distribution, Partitioning}
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.{IntegerType, StructType}
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.sql.vectorized.ColumnarBatch
class DataSourceV2Suite extends QueryTest with SharedSQLContext {
import testImplicits._
private def getBatch(query: DataFrame): AdvancedBatch = {
query.queryExecution.executedPlan.collect {
case d: BatchScanExec =>
d.batch.asInstanceOf[AdvancedBatch]
}.head
}
private def getJavaBatch(query: DataFrame): JavaAdvancedDataSourceV2.AdvancedBatch = {
query.queryExecution.executedPlan.collect {
case d: BatchScanExec =>
d.batch.asInstanceOf[JavaAdvancedDataSourceV2.AdvancedBatch]
}.head
}
test("simplest implementation") {
Seq(classOf[SimpleDataSourceV2], classOf[JavaSimpleDataSourceV2]).foreach { cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
checkAnswer(df, (0 until 10).map(i => Row(i, -i)))
checkAnswer(df.select('j), (0 until 10).map(i => Row(-i)))
checkAnswer(df.filter('i > 5), (6 until 10).map(i => Row(i, -i)))
}
}
}
test("advanced implementation") {
Seq(classOf[AdvancedDataSourceV2], classOf[JavaAdvancedDataSourceV2]).foreach { cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
checkAnswer(df, (0 until 10).map(i => Row(i, -i)))
val q1 = df.select('j)
checkAnswer(q1, (0 until 10).map(i => Row(-i)))
if (cls == classOf[AdvancedDataSourceV2]) {
val batch = getBatch(q1)
assert(batch.filters.isEmpty)
assert(batch.requiredSchema.fieldNames === Seq("j"))
} else {
val batch = getJavaBatch(q1)
assert(batch.filters.isEmpty)
assert(batch.requiredSchema.fieldNames === Seq("j"))
}
val q2 = df.filter('i > 3)
checkAnswer(q2, (4 until 10).map(i => Row(i, -i)))
if (cls == classOf[AdvancedDataSourceV2]) {
val batch = getBatch(q2)
assert(batch.filters.flatMap(_.references).toSet == Set("i"))
assert(batch.requiredSchema.fieldNames === Seq("i", "j"))
} else {
val batch = getJavaBatch(q2)
assert(batch.filters.flatMap(_.references).toSet == Set("i"))
assert(batch.requiredSchema.fieldNames === Seq("i", "j"))
}
val q3 = df.select('i).filter('i > 6)
checkAnswer(q3, (7 until 10).map(i => Row(i)))
if (cls == classOf[AdvancedDataSourceV2]) {
val batch = getBatch(q3)
assert(batch.filters.flatMap(_.references).toSet == Set("i"))
assert(batch.requiredSchema.fieldNames === Seq("i"))
} else {
val batch = getJavaBatch(q3)
assert(batch.filters.flatMap(_.references).toSet == Set("i"))
assert(batch.requiredSchema.fieldNames === Seq("i"))
}
val q4 = df.select('j).filter('j < -10)
checkAnswer(q4, Nil)
if (cls == classOf[AdvancedDataSourceV2]) {
val batch = getBatch(q4)
          // 'j < -10 is not supported by the testing data source.
assert(batch.filters.isEmpty)
assert(batch.requiredSchema.fieldNames === Seq("j"))
} else {
val batch = getJavaBatch(q4)
          // 'j < -10 is not supported by the testing data source.
assert(batch.filters.isEmpty)
assert(batch.requiredSchema.fieldNames === Seq("j"))
}
}
}
}
test("columnar batch scan implementation") {
Seq(classOf[ColumnarDataSourceV2], classOf[JavaColumnarDataSourceV2]).foreach { cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
checkAnswer(df, (0 until 90).map(i => Row(i, -i)))
checkAnswer(df.select('j), (0 until 90).map(i => Row(-i)))
checkAnswer(df.filter('i > 50), (51 until 90).map(i => Row(i, -i)))
}
}
}
test("schema required data source") {
Seq(classOf[SchemaRequiredDataSource], classOf[JavaSchemaRequiredDataSource]).foreach { cls =>
withClue(cls.getName) {
val e = intercept[IllegalArgumentException](spark.read.format(cls.getName).load())
assert(e.getMessage.contains("requires a user-supplied schema"))
val schema = new StructType().add("i", "int").add("s", "string")
val df = spark.read.format(cls.getName).schema(schema).load()
assert(df.schema == schema)
assert(df.collect().isEmpty)
}
}
}
test("partitioning reporting") {
import org.apache.spark.sql.functions.{count, sum}
Seq(classOf[PartitionAwareDataSource], classOf[JavaPartitionAwareDataSource]).foreach { cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
checkAnswer(df, Seq(Row(1, 4), Row(1, 4), Row(3, 6), Row(2, 6), Row(4, 2), Row(4, 2)))
val groupByColA = df.groupBy('i).agg(sum('j))
checkAnswer(groupByColA, Seq(Row(1, 8), Row(2, 6), Row(3, 6), Row(4, 4)))
assert(groupByColA.queryExecution.executedPlan.collectFirst {
case e: ShuffleExchangeExec => e
}.isEmpty)
val groupByColAB = df.groupBy('i, 'j).agg(count("*"))
checkAnswer(groupByColAB, Seq(Row(1, 4, 2), Row(2, 6, 1), Row(3, 6, 1), Row(4, 2, 2)))
assert(groupByColAB.queryExecution.executedPlan.collectFirst {
case e: ShuffleExchangeExec => e
}.isEmpty)
val groupByColB = df.groupBy('j).agg(sum('i))
checkAnswer(groupByColB, Seq(Row(2, 8), Row(4, 2), Row(6, 5)))
assert(groupByColB.queryExecution.executedPlan.collectFirst {
case e: ShuffleExchangeExec => e
}.isDefined)
val groupByAPlusB = df.groupBy('i + 'j).agg(count("*"))
checkAnswer(groupByAPlusB, Seq(Row(5, 2), Row(6, 2), Row(8, 1), Row(9, 1)))
assert(groupByAPlusB.queryExecution.executedPlan.collectFirst {
case e: ShuffleExchangeExec => e
}.isDefined)
}
}
}
test ("statistics report data source") {
Seq(classOf[ReportStatisticsDataSource], classOf[JavaReportStatisticsDataSource]).foreach {
cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
val logical = df.queryExecution.optimizedPlan.collect {
case d: DataSourceV2Relation => d
}.head
          val statistics = logical.computeStats()
          assert(statistics.rowCount.isDefined && statistics.rowCount.get === 10,
            "Row count statistics should be reported by data source")
          assert(statistics.sizeInBytes === 80,
            "Size in bytes statistics should be reported by data source")
}
}
}
test("SPARK-23574: no shuffle exchange with single partition") {
val df = spark.read.format(classOf[SimpleSinglePartitionSource].getName).load().agg(count("*"))
assert(df.queryExecution.executedPlan.collect { case e: Exchange => e }.isEmpty)
}
test("simple writable data source") {
// TODO: java implementation.
Seq(classOf[SimpleWritableDataSource]).foreach { cls =>
withTempPath { file =>
val path = file.getCanonicalPath
assert(spark.read.format(cls.getName).option("path", path).load().collect().isEmpty)
spark.range(10).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).save()
checkAnswer(
spark.read.format(cls.getName).option("path", path).load(),
spark.range(10).select('id, -'id))
// test with different save modes
spark.range(10).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).mode("append").save()
checkAnswer(
spark.read.format(cls.getName).option("path", path).load(),
spark.range(10).union(spark.range(10)).select('id, -'id))
spark.range(5).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).mode("overwrite").save()
checkAnswer(
spark.read.format(cls.getName).option("path", path).load(),
spark.range(5).select('id, -'id))
spark.range(5).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).mode("ignore").save()
checkAnswer(
spark.read.format(cls.getName).option("path", path).load(),
spark.range(5).select('id, -'id))
val e = intercept[Exception] {
spark.range(5).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).mode("error").save()
}
assert(e.getMessage.contains("data already exists"))
// test transaction
val failingUdf = org.apache.spark.sql.functions.udf {
var count = 0
(id: Long) => {
if (count > 5) {
throw new RuntimeException("testing error")
}
count += 1
id
}
}
        // this input data will fail midway through the read.
val input = spark.range(10).select(failingUdf('id).as('i)).select('i, -'i as 'j)
val e2 = intercept[SparkException] {
input.write.format(cls.getName).option("path", path).mode("overwrite").save()
}
assert(e2.getMessage.contains("Writing job aborted"))
// make sure we don't have partial data.
assert(spark.read.format(cls.getName).option("path", path).load().collect().isEmpty)
}
}
}
test("simple counter in writer with onDataWriterCommit") {
Seq(classOf[SimpleWritableDataSource]).foreach { cls =>
withTempPath { file =>
val path = file.getCanonicalPath
assert(spark.read.format(cls.getName).option("path", path).load().collect().isEmpty)
val numPartition = 6
spark.range(0, 10, 1, numPartition).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).save()
checkAnswer(
spark.read.format(cls.getName).option("path", path).load(),
spark.range(10).select('id, -'id))
assert(SimpleCounter.getCounter == numPartition,
"method onDataWriterCommit should be called as many as the number of partitions")
}
}
}
test("SPARK-23293: data source v2 self join") {
val df = spark.read.format(classOf[SimpleDataSourceV2].getName).load()
val df2 = df.select(($"i" + 1).as("k"), $"j")
checkAnswer(df.join(df2, "j"), (0 until 10).map(i => Row(-i, i, i + 1)))
}
test("SPARK-23301: column pruning with arbitrary expressions") {
val df = spark.read.format(classOf[AdvancedDataSourceV2].getName).load()
val q1 = df.select('i + 1)
checkAnswer(q1, (1 until 11).map(i => Row(i)))
val batch1 = getBatch(q1)
assert(batch1.requiredSchema.fieldNames === Seq("i"))
val q2 = df.select(lit(1))
checkAnswer(q2, (0 until 10).map(i => Row(1)))
val batch2 = getBatch(q2)
assert(batch2.requiredSchema.isEmpty)
    // 'j === -1 can't be pushed down, but we should still be able to do column pruning
val q3 = df.filter('j === -1).select('j * 2)
checkAnswer(q3, Row(-2))
val batch3 = getBatch(q3)
assert(batch3.filters.isEmpty)
assert(batch3.requiredSchema.fieldNames === Seq("j"))
// column pruning should work with other operators.
val q4 = df.sort('i).limit(1).select('i + 1)
checkAnswer(q4, Row(1))
val batch4 = getBatch(q4)
assert(batch4.requiredSchema.fieldNames === Seq("i"))
}
test("SPARK-23315: get output from canonicalized data source v2 related plans") {
def checkCanonicalizedOutput(
df: DataFrame, logicalNumOutput: Int, physicalNumOutput: Int): Unit = {
val logical = df.queryExecution.optimizedPlan.collect {
case d: DataSourceV2Relation => d
}.head
assert(logical.canonicalized.output.length == logicalNumOutput)
val physical = df.queryExecution.executedPlan.collect {
case d: BatchScanExec => d
}.head
assert(physical.canonicalized.output.length == physicalNumOutput)
}
val df = spark.read.format(classOf[AdvancedDataSourceV2].getName).load()
checkCanonicalizedOutput(df, 2, 2)
checkCanonicalizedOutput(df.select('i), 2, 1)
}
test("SPARK-25425: extra options should override sessions options during reading") {
val prefix = "spark.datasource.userDefinedDataSource."
val optionName = "optionA"
withSQLConf(prefix + optionName -> "true") {
val df = spark
.read
.option(optionName, false)
.format(classOf[DataSourceV2WithSessionConfig].getName).load()
val options = df.queryExecution.optimizedPlan.collectFirst {
case d: DataSourceV2Relation => d.options
}.get
assert(options.get(optionName) === "false")
}
}
test("SPARK-25425: extra options should override sessions options during writing") {
withTempPath { path =>
val sessionPath = path.getCanonicalPath
withSQLConf("spark.datasource.simpleWritableDataSource.path" -> sessionPath) {
withTempPath { file =>
val optionPath = file.getCanonicalPath
val format = classOf[SimpleWritableDataSource].getName
val df = Seq((1L, 2L)).toDF("i", "j")
df.write.format(format).option("path", optionPath).save()
assert(!new File(sessionPath).exists)
checkAnswer(spark.read.format(format).option("path", optionPath).load(), df)
}
}
}
}
test("SPARK-25700: do not read schema when writing in other modes except append and overwrite") {
withTempPath { file =>
val cls = classOf[SimpleWriteOnlyDataSource]
val path = file.getCanonicalPath
val df = spark.range(5).select('id as 'i, -'id as 'j)
// non-append mode should not throw exception, as they don't access schema.
df.write.format(cls.getName).option("path", path).mode("error").save()
df.write.format(cls.getName).option("path", path).mode("ignore").save()
// append and overwrite modes will access the schema and should throw exception.
intercept[SchemaReadAttemptException] {
df.write.format(cls.getName).option("path", path).mode("append").save()
}
intercept[SchemaReadAttemptException] {
df.write.format(cls.getName).option("path", path).mode("overwrite").save()
}
}
}
}
case class RangeInputPartition(start: Int, end: Int) extends InputPartition
object SimpleReaderFactory extends PartitionReaderFactory {
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
val RangeInputPartition(start, end) = partition
new PartitionReader[InternalRow] {
private var current = start - 1
override def next(): Boolean = {
current += 1
current < end
}
override def get(): InternalRow = InternalRow(current, -current)
override def close(): Unit = {}
}
}
}
abstract class SimpleBatchTable extends Table with SupportsRead {
override def schema(): StructType = new StructType().add("i", "int").add("j", "int")
override def name(): String = this.getClass.toString
override def capabilities(): util.Set[TableCapability] = Set(BATCH_READ).asJava
}
abstract class SimpleScanBuilder extends ScanBuilder
with Batch with Scan {
override def build(): Scan = this
override def toBatch: Batch = this
override def readSchema(): StructType = new StructType().add("i", "int").add("j", "int")
override def createReaderFactory(): PartitionReaderFactory = SimpleReaderFactory
}
class SimpleSinglePartitionSource extends TableProvider {
class MyScanBuilder extends SimpleScanBuilder {
override def planInputPartitions(): Array[InputPartition] = {
Array(RangeInputPartition(0, 5))
}
}
override def getTable(options: CaseInsensitiveStringMap): Table = new SimpleBatchTable {
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new MyScanBuilder()
}
}
}
// This class is used by pyspark tests. If this class is modified/moved, make sure pyspark
// tests still pass.
class SimpleDataSourceV2 extends TableProvider {
class MyScanBuilder extends SimpleScanBuilder {
override def planInputPartitions(): Array[InputPartition] = {
Array(RangeInputPartition(0, 5), RangeInputPartition(5, 10))
}
}
override def getTable(options: CaseInsensitiveStringMap): Table = new SimpleBatchTable {
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new MyScanBuilder()
}
}
}
class AdvancedDataSourceV2 extends TableProvider {
override def getTable(options: CaseInsensitiveStringMap): Table = new SimpleBatchTable {
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new AdvancedScanBuilder()
}
}
}
class AdvancedScanBuilder extends ScanBuilder
with Scan with SupportsPushDownFilters with SupportsPushDownRequiredColumns {
var requiredSchema = new StructType().add("i", "int").add("j", "int")
var filters = Array.empty[Filter]
override def pruneColumns(requiredSchema: StructType): Unit = {
this.requiredSchema = requiredSchema
}
override def readSchema(): StructType = requiredSchema
override def pushFilters(filters: Array[Filter]): Array[Filter] = {
val (supported, unsupported) = filters.partition {
case GreaterThan("i", _: Int) => true
case _ => false
}
this.filters = supported
unsupported
}
override def pushedFilters(): Array[Filter] = filters
override def build(): Scan = this
override def toBatch: Batch = new AdvancedBatch(filters, requiredSchema)
}
class AdvancedBatch(val filters: Array[Filter], val requiredSchema: StructType) extends Batch {
override def planInputPartitions(): Array[InputPartition] = {
val lowerBound = filters.collectFirst {
case GreaterThan("i", v: Int) => v
}
val res = scala.collection.mutable.ArrayBuffer.empty[InputPartition]
if (lowerBound.isEmpty) {
res.append(RangeInputPartition(0, 5))
res.append(RangeInputPartition(5, 10))
} else if (lowerBound.get < 4) {
res.append(RangeInputPartition(lowerBound.get + 1, 5))
res.append(RangeInputPartition(5, 10))
} else if (lowerBound.get < 9) {
res.append(RangeInputPartition(lowerBound.get + 1, 10))
}
res.toArray
}
override def createReaderFactory(): PartitionReaderFactory = {
new AdvancedReaderFactory(requiredSchema)
}
}
class AdvancedReaderFactory(requiredSchema: StructType) extends PartitionReaderFactory {
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
val RangeInputPartition(start, end) = partition
new PartitionReader[InternalRow] {
private var current = start - 1
override def next(): Boolean = {
current += 1
current < end
}
override def get(): InternalRow = {
val values = requiredSchema.map(_.name).map {
case "i" => current
case "j" => -current
}
InternalRow.fromSeq(values)
}
override def close(): Unit = {}
}
}
}
class SchemaRequiredDataSource extends TableProvider {
class MyScanBuilder(schema: StructType) extends SimpleScanBuilder {
override def planInputPartitions(): Array[InputPartition] = Array.empty
override def readSchema(): StructType = schema
}
override def getTable(options: CaseInsensitiveStringMap): Table = {
throw new IllegalArgumentException("requires a user-supplied schema")
}
override def getTable(options: CaseInsensitiveStringMap, schema: StructType): Table = {
val userGivenSchema = schema
new SimpleBatchTable {
override def schema(): StructType = userGivenSchema
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new MyScanBuilder(userGivenSchema)
}
}
}
}
class ColumnarDataSourceV2 extends TableProvider {
class MyScanBuilder extends SimpleScanBuilder {
override def planInputPartitions(): Array[InputPartition] = {
Array(RangeInputPartition(0, 50), RangeInputPartition(50, 90))
}
override def createReaderFactory(): PartitionReaderFactory = {
ColumnarReaderFactory
}
}
override def getTable(options: CaseInsensitiveStringMap): Table = new SimpleBatchTable {
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new MyScanBuilder()
}
}
}
object ColumnarReaderFactory extends PartitionReaderFactory {
private final val BATCH_SIZE = 20
override def supportColumnarReads(partition: InputPartition): Boolean = true
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
throw new UnsupportedOperationException
}
override def createColumnarReader(partition: InputPartition): PartitionReader[ColumnarBatch] = {
val RangeInputPartition(start, end) = partition
new PartitionReader[ColumnarBatch] {
private lazy val i = new OnHeapColumnVector(BATCH_SIZE, IntegerType)
private lazy val j = new OnHeapColumnVector(BATCH_SIZE, IntegerType)
private lazy val batch = new ColumnarBatch(Array(i, j))
private var current = start
override def next(): Boolean = {
i.reset()
j.reset()
var count = 0
while (current < end && count < BATCH_SIZE) {
i.putInt(count, current)
j.putInt(count, -current)
current += 1
count += 1
}
if (count == 0) {
false
} else {
batch.setNumRows(count)
true
}
}
override def get(): ColumnarBatch = batch
override def close(): Unit = batch.close()
}
}
}
class PartitionAwareDataSource extends TableProvider {
class MyScanBuilder extends SimpleScanBuilder
with SupportsReportPartitioning{
override def planInputPartitions(): Array[InputPartition] = {
      // Note that no value of column `i` is shared across partitions, i.e. the data is clustered by `i`.
Array(
SpecificInputPartition(Array(1, 1, 3), Array(4, 4, 6)),
SpecificInputPartition(Array(2, 4, 4), Array(6, 2, 2)))
}
override def createReaderFactory(): PartitionReaderFactory = {
SpecificReaderFactory
}
override def outputPartitioning(): Partitioning = new MyPartitioning
}
override def getTable(options: CaseInsensitiveStringMap): Table = new SimpleBatchTable {
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new MyScanBuilder()
}
}
class MyPartitioning extends Partitioning {
override def numPartitions(): Int = 2
override def satisfy(distribution: Distribution): Boolean = distribution match {
case c: ClusteredDistribution => c.clusteredColumns.contains("i")
case _ => false
}
}
}
case class SpecificInputPartition(i: Array[Int], j: Array[Int]) extends InputPartition
object SpecificReaderFactory extends PartitionReaderFactory {
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
val p = partition.asInstanceOf[SpecificInputPartition]
new PartitionReader[InternalRow] {
private var current = -1
override def next(): Boolean = {
current += 1
current < p.i.length
}
override def get(): InternalRow = InternalRow(p.i(current), p.j(current))
override def close(): Unit = {}
}
}
}
class SchemaReadAttemptException(m: String) extends RuntimeException(m)
class SimpleWriteOnlyDataSource extends SimpleWritableDataSource {
override def getTable(options: CaseInsensitiveStringMap): Table = {
new MyTable(options) {
override def schema(): StructType = {
throw new SchemaReadAttemptException("schema should not be read.")
}
}
}
}
class ReportStatisticsDataSource extends TableProvider {
class MyScanBuilder extends SimpleScanBuilder
with SupportsReportStatistics {
override def estimateStatistics(): Statistics = {
new Statistics {
override def sizeInBytes(): OptionalLong = OptionalLong.of(80)
override def numRows(): OptionalLong = OptionalLong.of(10)
}
}
override def planInputPartitions(): Array[InputPartition] = {
Array(RangeInputPartition(0, 5), RangeInputPartition(5, 10))
}
}
override def getTable(options: CaseInsensitiveStringMap): Table = {
new SimpleBatchTable {
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new MyScanBuilder
}
}
}
}
|
Aegeaner/spark
|
sql/core/src/test/scala/org/apache/spark/sql/sources/v2/DataSourceV2Suite.scala
|
Scala
|
apache-2.0
| 26,296
|
package scala.slick.driver
import java.util.UUID
import java.sql.{PreparedStatement, ResultSet}
import scala.slick.lifted._
import scala.slick.ast.{SequenceNode, Library, FieldSymbol, Node}
import scala.slick.util.MacroSupport.macroSupportInterpolation
import scala.slick.compiler.CompilerState
import scala.slick.jdbc.meta.MTable
import scala.slick.jdbc.{Invoker, JdbcType}
/** Slick driver for PostgreSQL.
*
* This driver implements all capabilities of [[scala.slick.driver.JdbcProfile]].
*
* Notes:
*
* <ul>
* <li>[[scala.slick.profile.RelationalProfile.capabilities.typeBlob]]:
* The default implementation of the <code>Blob</code> type uses the
* database type <code>lo</code> and the stored procedure
* <code>lo_manage</code>, both of which are provided by the "lo"
* extension in PostgreSQL.</li>
* </ul>
*/
trait MyPostgresDriver extends JdbcDriver { driver =>
override def getTables: Invoker[MTable] = MTable.getTables(None, None, None, Some(Seq("TABLE")))
override val columnTypes = new JdbcTypes
override def createQueryBuilder(n: Node, state: CompilerState): QueryBuilder = new QueryBuilder(n, state)
override def createTableDDLBuilder(table: Table[_]): TableDDLBuilder = new TableDDLBuilder(table)
override def createColumnDDLBuilder(column: FieldSymbol, table: Table[_]): ColumnDDLBuilder = new ColumnDDLBuilder(column)
override def defaultSqlTypeName(tmd: JdbcType[_]): String = tmd.sqlType match {
case java.sql.Types.BLOB => "lo"
case java.sql.Types.DOUBLE => "DOUBLE PRECISION"
/* PostgreSQL does not have a TINYINT type, so we use SMALLINT instead. */
case java.sql.Types.TINYINT => "SMALLINT"
case _ => super.defaultSqlTypeName(tmd)
}
class QueryBuilder(tree: Node, state: CompilerState) extends super.QueryBuilder(tree, state) {
override protected val concatOperator = Some("||")
override protected val supportsEmptyJoinConditions = false
override protected def buildFetchOffsetClause(fetch: Option[Long], offset: Option[Long]) = (fetch, offset) match {
case (Some(t), Some(d)) => b" limit $t offset $d"
case (Some(t), None ) => b" limit $t"
case (None, Some(d)) => b" offset $d"
case _ =>
}
override def expr(n: Node, skipParens: Boolean = false) = n match {
case Library.NextValue(SequenceNode(name)) => b"nextval('$name')"
case Library.CurrentValue(SequenceNode(name)) => b"currval('$name')"
case _ => super.expr(n, skipParens)
}
}
class TableDDLBuilder(table: Table[_]) extends super.TableDDLBuilder(table) {
override def createPhase1 = super.createPhase1 ++ columns.flatMap {
case cb: ColumnDDLBuilder => cb.createLobTrigger(table.tableName)
}
override def dropPhase1 = {
val dropLobs = columns.flatMap {
case cb: ColumnDDLBuilder => cb.dropLobTrigger(table.tableName)
}
if(dropLobs.isEmpty) super.dropPhase1
else Seq("delete from "+quoteIdentifier(table.tableName)) ++ dropLobs ++ super.dropPhase1
}
}
class ColumnDDLBuilder(column: FieldSymbol) extends super.ColumnDDLBuilder(column) {
override def appendColumn(sb: StringBuilder) {
sb append quoteIdentifier(column.name) append ' '
if(autoIncrement && !customSqlType) sb append "SERIAL"
else sb append sqlType
autoIncrement = false
appendOptions(sb)
}
def lobTrigger(tname: String) =
quoteIdentifier(tname+"__"+quoteIdentifier(column.name)+"_lob")
def createLobTrigger(tname: String): Option[String] =
if(sqlType == "lo") Some(
"create trigger "+lobTrigger(tname)+" before update or delete on "+
quoteIdentifier(tname)+" for each row execute procedure lo_manage("+quoteIdentifier(column.name)+")"
) else None
def dropLobTrigger(tname: String): Option[String] =
if(sqlType == "lo") Some(
"drop trigger "+lobTrigger(tname)+" on "+quoteIdentifier(tname)
) else None
}
class JdbcTypes extends super.JdbcTypes {
override val byteArrayJdbcType = new ByteArrayJdbcType
override val uuidJdbcType = new UUIDJdbcType
class ByteArrayJdbcType extends super.ByteArrayJdbcType {
override val sqlType = java.sql.Types.BINARY
override val sqlTypeName = "BYTEA"
}
class UUIDJdbcType extends super.UUIDJdbcType {
override def sqlTypeName = "UUID"
override def setValue(v: UUID, p: PreparedStatement, idx: Int) = p.setObject(idx, v, sqlType)
override def getValue(r: ResultSet, idx: Int) = r.getObject(idx).asInstanceOf[UUID]
override def updateValue(v: UUID, r: ResultSet, idx: Int) = r.updateObject(idx, v)
override def valueToSQLLiteral(value: UUID) = "'" + value + "'"
override def hasLiteralForm = true
}
}
}
object MyPostgresDriver extends MyPostgresDriver
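// Illustrative sketch (not part of the original file): how a table definition might pick up
// this driver through Slick 2.x's lifted embedding. The table and column names are made up;
// the UUID and Array[Byte] columns map to the UUID and BYTEA types defined in JdbcTypes above.
object MyPostgresDriverUsageSketch {
  import MyPostgresDriver.simple._
  class Documents(tag: Tag) extends Table[(java.util.UUID, Array[Byte])](tag, "documents") {
    def id = column[java.util.UUID]("id", O.PrimaryKey)
    def payload = column[Array[Byte]]("payload")
    def * = (id, payload)
  }
  val documents = TableQuery[Documents]
}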
|
leithaus/strategies
|
slicktrix/src/main/scala/com/synereo/MyPostgresDriver.scala
|
Scala
|
cc0-1.0
| 4,827
|
import scala.tools.partest.JavapTest
object Test extends JavapTest {
def code = """
|case class Betty(i: Int) { def next = Betty(i+1) }
|:javap Betty
""".stripMargin
override def yah(res: Seq[String]) = {
def filtered = res filter (_ contains "public class Betty")
1 == filtered.size
}
}
|
yusuke2255/dotty
|
tests/pending/run/repl-javap.scala
|
Scala
|
bsd-3-clause
| 314
|
package org.denigma.nlp.pages
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.{Directives, Route}
import scalacss.Defaults._
class Head extends Directives
{
lazy val webjarsPrefix = "lib"
lazy val resourcePrefix = "resources"
def mystyles = path("styles" / "mystyles.css"){
complete {
HttpResponse( entity = HttpEntity(MediaTypes.`text/css`.withCharset(HttpCharsets.`UTF-8`), MyStyles.render )) }
}
def loadResources = pathPrefix(resourcePrefix ~ Slash) {
getFromResourceDirectory("")
}
def webjars =pathPrefix(webjarsPrefix ~ Slash) { getFromResourceDirectory(webjarsPrefix) }
def routes: Route = mystyles ~ webjars ~ loadResources
}
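// Illustrative sketch (not part of the original file): one way to serve these routes with
// akka-http. The system name, host and port below are placeholders.
object HeadServerSketch {
  import akka.actor.ActorSystem
  import akka.http.scaladsl.Http
  import akka.stream.ActorMaterializer
  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem("nlp-pages")
    implicit val materializer = ActorMaterializer()
    Http().bindAndHandle(new Head().routes, "localhost", 8080)
  }
}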
|
antonkulaga/bio-nlp
|
app/jvm/src/main/scala/org/denigma/nlp/pages/Head.scala
|
Scala
|
mpl-2.0
| 706
|
package chandu0101.scalajs.react.components
package semanticui
import chandu0101.macros.tojs.JSMacro
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.VdomNode
import scala.scalajs.js
import scala.scalajs.js.`|`
/**
 * This file is generated - submit issues instead of PRs against it
*/
case class SuiListContent(
ref: js.UndefOr[String] = js.undefined,
description: js.UndefOr[VdomNode] = js.undefined,
floated: js.UndefOr[SemanticFLOATS] = js.undefined,
content: js.UndefOr[VdomNode] = js.undefined,
header: js.UndefOr[VdomNode] = js.undefined,
key: js.UndefOr[String] = js.undefined,
className: js.UndefOr[String] = js.undefined,
verticalAlign: js.UndefOr[SemanticVERTICALALIGNMENTS] = js.undefined,
as: js.UndefOr[String | js.Function] = js.undefined
) {
def apply(children: VdomNode*) = {
val props = JSMacro[SuiListContent](this)
val component = JsComponent[js.Object, Children.Varargs, Null](Sui.ListContent)
component(props)(children: _*)
}
}
|
rleibman/scalajs-react-components
|
core/src/main/scala/chandu0101/scalajs/react/components/semanticui/SuiListContent.scala
|
Scala
|
apache-2.0
| 1,028
|
package filodb.akkabootstrapper
import akka.actor.{ActorSystem, AddressFromURIString}
import akka.cluster.Cluster
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest._
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
class SeedNodeHeadDiscoverySpec extends BaseSeedNodeDiscoverySpec(AbstractTestKit.head) {
"ExplicitListClusterSeedDiscovery" must {
"discover if selfNode is head of list" in {
seeds.headOption shouldEqual Some(selfAddress)
val discovery = new ExplicitListClusterSeedDiscovery(cluster, settings)
      // The discovery removes the self node unless it is at the head of the sorted list.
discovery.discoverClusterSeeds.contains(cluster.selfAddress) shouldBe true
discovery.discoverClusterSeeds.size shouldEqual settings.seedsExplicitlyListed.size
}
}
}
class SeedNodeLastDiscoverySpec extends BaseSeedNodeDiscoverySpec(AbstractTestKit.last) {
"ExplicitListClusterSeedDiscovery" must {
"discover if selfNode is last of list" in {
seeds.last shouldEqual selfAddress
val discovery = new ExplicitListClusterSeedDiscovery(cluster, settings)
      // The discovery removes the self node unless it is at the head of the sorted list.
discovery.discoverClusterSeeds.contains(cluster.selfAddress) shouldBe false
discovery.discoverClusterSeeds.size shouldEqual settings.seedsExplicitlyListed.size - 1
}
}
}
abstract class BaseSeedNodeDiscoverySpec(config: Config)
extends AbstractTestKit(config) with AnyWordSpecLike {
protected val cluster = Cluster(system)
protected val selfAddress = cluster.selfAddress
protected val settings = new AkkaBootstrapperSettings(system.settings.config)
protected val seeds = settings.seedsExplicitlyListed.map(AddressFromURIString(_))
"ExplicitListClusterSeedDiscovery" must {
"include the self node in seeds if first not malformed and first is self" in {
seeds.contains(selfAddress) shouldBe true
}
}
}
object AbstractTestKit {
val name = "seed-test"
val host = "127.0.0.1"
val port = 2552
val rootConfig: Config =
ConfigFactory.parseString(
s"""
|akka-bootstrapper {
| seed-discovery.timeout = 1 minute
| seed-discovery.class = "filodb.akkabootstrapper.ExplicitListClusterSeedDiscovery"
| http-seeds.base-url = "http://$host:8080/"
|}
|akka.remote.netty.tcp.port = $port
|akka.remote.netty.tcp.hostname = $host
|akka.jvm-exit-on-fatal-error = off
|akka.loggers = ["akka.testkit.TestEventListener"]
|akka.actor.provider = "cluster"
"""
.stripMargin)
.withFallback(ConfigFactory.load("application_test.conf"))
def head: Config =
ConfigFactory.parseString(
s"""
|akka-bootstrapper{
| explicit-list.seeds = [
| "akka.tcp://$name@$host:$port",
| "akka.tcp://$name@$host:2553",
| "akka.tcp://$name@$host:2554"
| ]
|}
""".stripMargin).withFallback(rootConfig)
def last: Config =
ConfigFactory.parseString(
s"""
|akka-bootstrapper{
| explicit-list.seeds = [
| "akka.tcp://$name@$host:2553",
| "akka.tcp://$name@$host:2554",
| "akka.tcp://$name@$host:$port" ]
|}
""".stripMargin).withFallback(rootConfig)
}
abstract class AbstractTestKit(config: Config)
extends TestKit(ActorSystem(AbstractTestKit.name, config))
with Suite with Matchers
with BeforeAndAfterAll with BeforeAndAfter
with ImplicitSender {
override def afterAll(): Unit = {
super.afterAll()
TestKit.shutdownActorSystem(system)
}
}
|
filodb/FiloDB
|
akka-bootstrapper/src/test/scala/filodb/akkabootstrapper/SeedNodeDiscoverySpec.scala
|
Scala
|
apache-2.0
| 3,724
|
package com.github.mdr.mash.ns.view
import com.github.mdr.mash.functions.{ BoundParams, MashFunction, Parameter, ParameterModel }
import com.github.mdr.mash.runtime.MashObject
object PrintFunction extends MashFunction("view.print") {
object Params {
val Data = Parameter(
nameOpt = Some("data"),
summaryOpt = Some("Data to print"))
}
import Params._
val params = ParameterModel(Data)
def call(boundParams: BoundParams): MashObject = {
val data = boundParams(Data)
ViewClass.build(data, print = true)
}
override def typeInferenceStrategy = ViewClass
override def summaryOpt = Some("Print the data")
}
|
mdr/mash
|
src/main/scala/com/github/mdr/mash/ns/view/PrintFunction.scala
|
Scala
|
mit
| 648
|
/*
* Copyright (c) 2016 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the
* Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the Apache License Version 2.0 for the specific
* language governing permissions and limitations there under.
*/
import sbt._
import Keys._
object Json4sBuildSettings {
import BuildSettings._
// Settings specific for Iglu Core Json4s implementation
lazy val json4sBuildSettings = commonSettings ++ Seq[Setting[_]](
description := "Iglu Core type classes instances for Json4s"
) ++ mavenCentralExtras ++ publishSettings
}
|
snowplow/iglu
|
0-common/scala-core/project/Json4sBuildSettings.scala
|
Scala
|
apache-2.0
| 1,032
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
* Author(s) :
* - David Courtinot
* - Xiaowen Ji
*/
/**
* This package contains all the classes needed to model the Clang AST (well, only a small
* part of it but it is sufficient to play with purely imperative C++).
*/
package object ast {
/**
* Enables to split the 'data' field of the ConcreteASTNode(s)
* */
implicit class DataProcessor(data: String) {
val dataList = DataProcessor.splitReg.findAllIn(data).map(_.replaceAll("'", "")).toSeq
}
/**
* Separates a data string into several fragments
* */
implicit object DataProcessor {
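    // The pattern recognises four token shapes in a node's data string:
    // angle-bracketed spans (<...>), parenthesised spans ((...)),
    // double-quoted spans ("...") and single-quoted or bare tokens.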
    private val splitReg = "<[^>]+>+|\\(.+\\)|(\"[^\"]+\")|('[^']+'|[^\\s']+)+".r
}
/**
* Enables negative indexes for look-up on a Seq
* */
implicit class SeqFetcher[T](seq: Seq[T]) {
def get(idx: Int) = if (idx >= 0) seq(idx) else seq(seq.length + idx)
}
}
|
jxw1102/Projet-merou
|
ModelChecker/src/ast/package.scala
|
Scala
|
apache-2.0
| 1,712
|
/*
* OpenVC, an open source VHDL compiler/simulator
* Copyright (C) 2010 Christian Reisinger
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package at.jku.ssw.openvc.util
/**
* Represents a position in a source file
*
* @author <a href="mailto:chr_reisinger@yahoo.de">Christian Reisinger</a>
* @see [[at.jku.ssw.openvc.parser.VHDLParser.toPosition]]
*/
sealed abstract class Position extends Ordered[Position] {
/**
* Is this position not NoPosition?
* If isDefined is true, line, column, start and end are defined.
*/
val isDefined = true
/**Is this position a range position? */
val isRange = false
/**
* returns a new position with a character offset
* @param characterOffset the offset
* @return the new position
*/
def addCharacterOffset(characterOffset: Int): Position = sys.error("Position.addCharacterOffset")
/**
* returns a new position with a line offset
* @param lineOffset the offset
* @return the new position
*/
def addLineOffset(lineOffset: Int): Position = sys.error("Position.addLineOffset")
/**the line in the source file */
def line: Int
/**column the character position in the line */
def column: Int
/**The start of the position, either of the token or range */
def start: Int
/**The end of the position, either of the token or range */
def end: Int
/**Result of comparing <code>this</code> with operand <code>that</code>.
* returns <code>x</code> where
* <code>x < 0</code> iff <code>this < that</code>
* <code>x == 0</code> iff <code>this == that</code>
* <code>x > 0</code> iff <code>this > that</code>
*/
override def compare(that: Position): Int =
if (this == NoPosition) -1
else if (that == NoPosition) 1
else if (this.start < that.start) -1
else if (this.start == that.start) this.end - that.end
else 1
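  // Example: OffsetPosition(1, 0, 0, 3) compares as less than OffsetPosition(1, 4, 4, 7) because its start (0) is smaller.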
}
/**
* Represents a point in a source file
*
* @author <a href="mailto:chr_reisinger@yahoo.de">Christian Reisinger</a>
* @see [[at.jku.ssw.openvc.util.SourceFile]]
* @param line the line in the source file
* @param column the character position in the line
* @param start the index of the first character of the token in the content array of a source file
* @param end the index of the last character of the token in the content array of a source file
*/
final case class OffsetPosition(line: Int, column: Int, start: Int, end: Int) extends Position {
override def addLineOffset(lineOffset: Int) = new OffsetPosition(this.line + lineOffset, this.column, -1, -1)
override def addCharacterOffset(characterOffset: Int) = new OffsetPosition(this.line, this.column + characterOffset, -1, -1)
}
/**
* Represents a range in a source file, it is used for syntax errors
*
* @author <a href="mailto:chr_reisinger@yahoo.de">Christian Reisinger</a>
* @see [[at.jku.ssw.openvc.util.SourceFile]]
* @param point the point where the range starts
* @param start the index of the first character of the range in the content array of a source file
* @param end the index of the last character of the range in the content array of a source file
*/
final case class RangePosition(point: OffsetPosition, start: Int, end: Int) extends Position {
override val isRange = true
val line = point.line
val column = point.column
}
/**
 * NoPosition is a dummy position that always throws an exception for `line`, `column`, `start` and `end`.
* NoPosition is used when there is no real position to avoid null values.
* @author <a href="mailto:chr_reisinger@yahoo.de">Christian Reisinger</a>
*/
object NoPosition extends Position {
override val isDefined = false
def line = sys.error("NoPosition.line")
def column = sys.error("NoPosition.column")
def start = sys.error("NoPosition.start")
def end = sys.error("NoPosition.end")
}
|
chrreisinger/OpenVC
|
src/main/scala/at/jku/ssw/openvc/util/Position.scala
|
Scala
|
gpl-3.0
| 4,486
|
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.validation
import org.mockito.Mockito.when
import uk.gov.hmrc.ct.accounts.retriever.AccountsBoxRetriever
import uk.gov.hmrc.ct.accounts.{AC402, AC404}
import uk.gov.hmrc.ct.box.CtValidation
import uk.gov.hmrc.ct.utils.UnitSpec
class AC402Spec extends UnitSpec {
"AC402 validation" should {
val boxRetriever = mock[AccountsBoxRetriever]
"not show error messages where AC402 is within limit" in {
when(boxRetriever.ac404()).thenReturn(AC404(Some(0)))
AC402(Some(0)).validate(boxRetriever) shouldBe Set.empty[CtValidation]
AC402(Some(999999)).validate(boxRetriever) shouldBe Set.empty[CtValidation]
}
"show correct error messages where AC402 is outside limit" in {
when(boxRetriever.ac404()).thenReturn(AC404(Some(0)))
AC402(Some(-1)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC402"),"error.AC402.mustBeZeroOrPositive",None))
AC402(Some(-0)).validate(boxRetriever) shouldBe Set.empty[CtValidation]
AC402(Some(1000000)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC402"),"error.AC402.exceeds.max",Some(List("999999"))))
}
"show correct error message when AC402 doesn't need to be present" in{
when(boxRetriever.ac404()).thenReturn(AC404(Some(1)))
AC402(None).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC402"),"error.AC402.required",None))
}
"don't show error message when AC402 should be present" in{
when(boxRetriever.ac404()).thenReturn(AC404(None))
AC402(None).validate(boxRetriever) shouldBe Set.empty[CtValidation]
}
}
}
|
hmrc/ct-calculations
|
src/test/scala/uk/gov/hmrc/ct/accounts/validation/AC402Spec.scala
|
Scala
|
apache-2.0
| 2,216
|
/*
* Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see http://www.gnu.org/licenses/agpl.html.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package generated.scala
/* StreamRowImpl wraps a DenseVectorView that represents a Stream row.
*
* author: Arvind Sujeeth (asujeeth@stanford.edu)
* created: 3/15/11
*
* Pervasive Parallelism Laboratory (PPL)
* Stanford University
*
*/
class IntStreamRow(chunkRow: Int, offset: Int, stream: IntStream, x: Array[Int])
extends IntDenseVectorView(x, chunkRow*stream.numCols, 1, stream.numCols, true) {
// absolute row index in the stream
val index = offset*stream.chunkSize + chunkRow
}
|
TiarkRompf/lancet
|
src/main/scala/generated/scala/IntStreamRow.scala
|
Scala
|
agpl-3.0
| 1,492
|
package io.github.dmitrib.elasticsearch.cli
import java.io.{InputStreamReader, BufferedReader, FileInputStream}
import java.util
import java.util.concurrent.{TimeUnit, ArrayBlockingQueue}
import com.beust.jcommander.{Parameter, Parameters}
import org.elasticsearch.action.ActionListener
import org.elasticsearch.action.get.{MultiGetRequest, MultiGetResponse}
import org.elasticsearch.search.fetch.source.FetchSourceContext
import scala.collection.JavaConverters._
@Parameters(commandDescription = "Retrieve documents by its key")
object MultiGetCommand extends Runnable {
import EsTool._
@Parameter(
names = Array("--batch-size"),
description = "Number of params to supply in each search request")
var batchSize = 100
@Parameter(
names = Array("--file"),
description = "A file to read newline-separated search attributes, system input will be used if no file is specified")
var file: String = _
@Parameter(
names = Array("--exclude"),
description = "A wildcard pattern for fields to exclude from source, can be specified multiple times")
val excludeFields: util.List[String] = new util.ArrayList[String]
@Parameter(
names = Array("--include"),
description = "A wildcard pattern for fields to include in source, can be specified multiple times")
val includeFields: util.List[String] = new util.ArrayList[String]
@Parameter(
names = Array("--src-only"),
description = "print only source JSON")
val srcOnly = false
@Parameter(
names = Array("--src-id-tsv"),
description = "print ID and source separated by TAB")
val srcIdTsv = false
@Parameter(
names = Array("--max-jobs"),
description = "number of requests to execute in parallel")
val maxJobs = 1
def run() {
val stream = Option(file).fold(System.in)(new FileInputStream(_))
val reader = new BufferedReader(new InputStreamReader(stream))
val it = Iterator.continually(reader.readLine).takeWhile(_ != null).grouped(batchSize)
val respQueue = new ArrayBlockingQueue[Either[MultiGetResponse, Throwable]](maxJobs)
def executeBatch(batch: Seq[String]) {
val req = client.prepareMultiGet()
batch.foreach { id =>
val item = new MultiGetRequest.Item(
index,
Option(kind).getOrElse(throw new IllegalStateException("type is not set")),
id
)
if (!excludeFields.isEmpty || !includeFields.isEmpty) {
item.fetchSourceContext(
new FetchSourceContext(includeFields.asScala.toArray, excludeFields.asScala.toArray)
)
}
req.add(item)
}
req.execute(new ActionListener[MultiGetResponse] {
override def onFailure(e: Throwable) {
respQueue.put(Right(e))
}
override def onResponse(response: MultiGetResponse) {
respQueue.put(Left(response))
}
})
}
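    // Keep at most maxJobs multi-get requests in flight; block on the response
    // queue and print each document (or its failure) as batches complete.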
var activeJobs = 0
    var pollTimeoutCount = 0
while (activeJobs > 0 || it.hasNext) {
while (activeJobs < maxJobs && it.hasNext) {
executeBatch(it.next())
activeJobs = activeJobs + 1
}
val res = respQueue.poll(requestTimeoutMins*60+10, TimeUnit.SECONDS)
res match {
case Left(hits) =>
activeJobs = activeJobs - 1
hits.getResponses.foreach { hit =>
if (hit.isFailed) {
println(hit.getFailure.getMessage)
} else if (hit.getResponse.isExists) {
println(hitToString(hit.getId, hit.getResponse.getSourceAsString, srcOnly, srcIdTsv))
}
}
case Right(e) =>
throw e
case null =>
throw new RuntimeException("timeout on waiting for response")
}
}
}
}
|
Digsolab/elasticsearch-cli
|
src/main/scala/io/github/dmitrib/elasticsearch/cli/MultiGetCommand.scala
|
Scala
|
apache-2.0
| 3,727
|
package com.karasiq.shadowcloud.webapp.components.common
import com.karasiq.bootstrap.Bootstrap.default._
import com.karasiq.shadowcloud.webapp.context.AppContext
import org.scalajs.dom
import rx.Var
import scalaTags.all._
import scala.scalajs.js
import scala.scalajs.js.annotation.JSGlobal
@js.native
@JSGlobal("pell")
object PellJS extends js.Object {
def init(options: js.Dynamic): Element = js.native
}
@js.native
@JSGlobal
class TurndownService(options: js.Dynamic) extends js.Object {
def turndown(html: String): String = js.native
}
object Pell {
abstract class Editor(implicit ac: AppContext) extends BootstrapHtmlComponent {
val submitting = Var(false)
val html = Var("")
def onSubmit(): Unit
override def renderTag(md: ModifierT*): TagT = {
val parent = div(height := 400.px).render
val container = div("pell".addClass).render
parent.appendChild(container)
val options = js.Dynamic.literal(
element = container,
onChange = (s: String) => html() = s
)
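      // Defer pell initialisation to the next tick, then keep the editor's
      // innerHTML in sync with the `html` Var whenever it changes.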
dom.window.setTimeout(() => {
PellJS.init(options)
val editor = container.asInstanceOf[js.Dynamic].content.asInstanceOf[dom.Element]
html.trigger {
if (editor.innerHTML != html.now)
editor.innerHTML = html.now
}
}, 0)
div(
parent,
Button(ButtonStyle.success, block = true)(
ac.locale.submit,
onclick := Callback.onClick(_ => if (!submitting.now) onSubmit()),
"disabled".classIf(submitting)
)
)
}
}
def apply(f: Editor => Unit)(implicit ac: AppContext): Editor = new Editor {
override def onSubmit(): Unit = f(this)
}
def toMarkdown(html: String): String =
new TurndownService(js.Dynamic.literal()).turndown(html)
}
|
Karasiq/shadowcloud
|
server/webapp/src/main/scala/com/karasiq/shadowcloud/webapp/components/common/Pell.scala
|
Scala
|
apache-2.0
| 1,812
|
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger, Input}
case class CP19(value: Option[Int]) extends CtBoxIdentifier(name = "Accountancy and audit") with CtOptionalInteger with Input
object CP19 {
def apply(int: Int): CP19 = CP19(Some(int))
}
|
scottcutts/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/computations/CP19.scala
|
Scala
|
apache-2.0
| 904
|
package org.zouzias.spark.lucenerdd.examples.wikipedia
import org.apache.spark.sql.SparkSession
import org.apache.spark.SparkConf
import org.zouzias.spark.lucenerdd.LuceneRDD
import org.zouzias.spark.lucenerdd._
import org.zouzias.spark.lucenerdd.logging.Logging
/**
* Example that demonstrates how to search on a list of capital names using [[LuceneRDD]]
*
* Search over all capitals for a specific capital
*/
object CapitalsSearchExample extends Logging {
def main(args: Array[String]) {
// initialise spark context
val conf = new SparkConf().setAppName("CapitalsSearchExample")
val k = 10
implicit val spark: SparkSession = SparkSession.builder.config(conf).getOrCreate()
val start = System.currentTimeMillis()
// Load DataFrame and instantiate LuceneRDD
val capitals = spark.read.parquet("data/spatial/capitals.parquet").select("name", "country")
val luceneRDD = LuceneRDD(capitals)
// Perform a term query
val result = luceneRDD.termQuery("name", "ottawa", k)
val end = System.currentTimeMillis()
logInfo("=" * 40)
logInfo(s"Elapsed time: ${(end - start) / 1000.0} seconds")
logInfo("=" * 40)
logInfo(result.take(k).mkString("\n"))
// terminate spark context
spark.stop()
}
}
|
zouzias/spark-lucenerdd-examples
|
src/main/scala/org/zouzias/spark/lucenerdd/examples/wikipedia/CapitalsSearchExample.scala
|
Scala
|
apache-2.0
| 1,268
|