code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
import org.springframework.context.ApplicationContext
import org.springframework.context.annotation.AnnotationConfigApplicationContext
import util.{Try, Success, Failure}
import play.api._
import play.api.mvc._
import play.api.mvc.Results._
import play.api.Play.current
import play.api.libs.concurrent.Akka
import play.api.libs.concurrent.Execution.Implicits._
import models._
import misc.{Constants, SpringConfiguration}
object Global extends WithFilters(AccessLog) with GlobalSettings {

  /** Logs application startup. */
  override def onStart(app: Application) {
    Logger.info("Starting up play-eventsourced")
  }

  /** Logs shutdown and stops the Akka actor system so its threads do not leak. */
  override def onStop(app: Application) = {
    Logger.info("Shutting down play-eventsourced")
    Akka.system.shutdown()
  }

  // The Spring application context is created once and never reassigned, so it
  // is a val (was a var, which falsely suggested later mutation).
  private val ctx: ApplicationContext = new AnnotationConfigApplicationContext(classOf[SpringConfiguration])

  /** Resolves Play controller instances from the Spring context (dependency injection).
    * Expression body replaces the redundant `return` — the last expression is the result. */
  override def getControllerInstance[A](clazz: Class[A]): A = ctx.getBean(clazz)
}
object AccessLog extends Filter {
  import play.api.libs.concurrent.Execution.Implicits._
  import org.springframework.util.StopWatch

  /** Times every request and logs "METHOD uri took N ms and returned STATUS",
    * also exposing the elapsed time in a "Request-Time" response header.
    * Async results are timed when they complete, via `transform`. */
  def apply(next: (RequestHeader) => Result)(requestHeader: RequestHeader) = {
    // Use the imported StopWatch name; the fully-qualified instantiation made
    // the import on the line above dead.
    val stopWatch = new StopWatch
    stopWatch.start()

    // Stops the watch, logs the request, and tags the response with its duration.
    def logTime(result: PlainResult): Result = {
      stopWatch.stop()
      val time = stopWatch.getTotalTimeMillis()
      val status = result.header.status
      Logger.debug(String.format("%s %s took %s ms and returned %s", requestHeader.method, requestHeader.uri, time.toString, status.toString))
      result.withHeaders("Request-Time" -> time.toString)
    }

    next(requestHeader) match {
      case plain: PlainResult =>
        Logger.debug("requestHeader has plain result")
        logTime(plain)
      case async: AsyncResult =>
        Logger.debug("requestHeader has async result")
        async.transform(logTime)
    }
  }
}
| alanktwong/play-eventsourced | app/Global.scala | Scala | mit | 1,822 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.scalding
import com.twitter.algebird.{ MapAlgebra, Monoid, Group, Interval, Last }
import com.twitter.algebird.monad._
import com.twitter.summingbird.{ Producer, TimeExtractor, TestGraphs }
import com.twitter.summingbird.batch._
import com.twitter.summingbird.batch.state.HDFSState
import java.util.TimeZone
import java.io.File
import com.twitter.scalding.{ Source => ScaldingSource, Test => TestMode, _ }
import com.twitter.scalding.typed.TypedSink
import org.scalacheck._
import org.scalacheck.Prop._
import org.scalacheck.Properties
import org.apache.hadoop.conf.Configuration
import scala.collection.JavaConverters._
import scala.collection.mutable.{ ArrayBuffer, Buffer, HashMap => MutableHashMap, Map => MutableMap, SynchronizedBuffer, SynchronizedMap }
import cascading.scheme.local.{ TextDelimited => CLTextDelimited }
import cascading.tuple.{ Tuple, Fields, TupleEntry }
import cascading.flow.FlowDef
import cascading.tap.Tap
import cascading.scheme.NullScheme
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapred.RecordReader
import org.apache.hadoop.mapred.OutputCollector
import org.specs2.mutable._
object VersionBatchLaws extends Properties("VersionBatchLaws") {

  /** Round-tripping a version through BatchID must not move it forward in time. */
  property("version -> BatchID -> version") = forAll { (l: Long) =>
    (l == Long.MinValue) || {
      // This law is only true for numbers greater than MinValue
      val vbs = new store.VersionedBatchStore[Int, Int, Array[Byte], Array[Byte]](null,
        0, Batcher.ofHours(1))(null)(null)
      val b = vbs.versionToBatchID(l)
      vbs.batchIDToVersion(b) <= l
    }
  }

  /** BatchID -> version -> BatchID must be the identity. */
  property("BatchID -> version -> BatchID") = forAll { (bint: Int) =>
    val b = BatchID(bint)
    val vbs = new store.VersionedBatchStore[Int, Int, Array[Byte], Array[Byte]](null,
      0, Batcher.ofHours(1))(null)(null)
    val v = vbs.batchIDToVersion(b)
    vbs.versionToBatchID(v) == b
  }

  property("version is an upperbound on time") = forAll { (lBig: Long) =>
    val l = lBig / 1000L
    val batcher = Batcher.ofHours(1)
    val vbs = new store.VersionedBatchStore[Int, Int, Array[Byte], Array[Byte]](null,
      0, batcher)(null)(null)
    val b = vbs.versionToBatchID(l)
    // BUG FIX: the first two comparisons used to be a standalone expression whose
    // boolean value was silently discarded (the `&&` chain was broken after the
    // second line), so ScalaCheck only ever verified the last condition. All
    // three are now chained; if this property starts failing, the law itself was
    // previously vacuous and needs to be revisited, not re-silenced.
    (batcher.earliestTimeOf(b.next).milliSinceEpoch <= l) &&
      (batcher.earliestTimeOf(b).milliSinceEpoch < l) &&
      (batcher.earliestTimeOf(b.next.next).milliSinceEpoch > l)
  }
}
| nvoron23/summingbird | summingbird-scalding-test/src/test/scala/com/twitter/summingbird/scalding/VersionBatchLaws.scala | Scala | apache-2.0 | 2,950 |
package openreveal.service
import openreveal.model._
import org.joda.time.DateTime
/**
* Created by Paul Lysak on 02.06.15.
*/
/** Persistence abstraction for facts and the entities they describe. */
trait FactStorage {

  // Time source used to stamp newly reported entities.
  val clock: Clock

  /** Persists a single fact. */
  def saveFact(fact: Fact)

  /** Registers an entity definition and returns the defined entity. */
  def defineEntity[T <: Entity](entityDef: EntityDefinition[T]): T

  /** Produces a fresh identifier for a fact. */
  def generateFactId(): String

  /** Creates a user entity that is recorded as having reported itself,
    * stamped with the current clock time. */
  def createUser(id: String, email: String): User = {
    val newUser = User(id, email)
    val definition = EntityDefinition(reportedBy = newUser, reportedAt = clock.now(), entity = newUser)
    defineEntity(definition)
  }
}
| paul-lysak/OpenReveal | src/main/scala/openreveal/service/FactStorage.scala | Scala | apache-2.0 | 485 |
package vggames.shared.email
import javax.servlet.{ ServletContextEvent, ServletContextListener }
/** Servlet lifecycle hook that emails a fixed notification when the web app starts. */
class Notifier extends ServletContextListener {

  override def contextInitialized(event : ServletContextEvent) {
    // Build the message first, then fire it — behavior identical to the inlined form.
    val startupMail = Mail(
      to = "jonas@vidageek.net",
      from = "games@vidageek.net",
      subject = "Server just got up!",
      message = "Either a deploy or Watchdog reboot")
    startupMail.send
  }

  // Nothing to clean up on shutdown.
  override def contextDestroyed(event : ServletContextEvent) {
  }
}
package org.penny_craal.atlatl
import java.time.{Duration, LocalDateTime, LocalTime}
/**
* @author Ville Jokela
*/
object Helpers {

  /** Enrichment rendering a minute count as a short human-readable duration. */
  implicit class DoubleHelper(minutes: Double) {
    /** Whole hours above 120 min, whole minutes above 2 min, otherwise whole seconds. */
    def minutesToTimeString: String = {
      val magnitude = Math.abs(minutes)
      if (magnitude > 120) f"${(minutes / 60).floor}%1.0f h"
      else if (magnitude > 2) f"${minutes.floor}%1.0f m"
      else f"${(minutes * 60).floor}%1.0f s"
    }
  }

  /** Enrichment for LocalTime: fractional-minute arithmetic and range length. */
  implicit class LocalTimeHelper(lt: LocalTime) {
    def plusMinutes(minutes: Double): LocalTime = {
      val millis = (minutes * 60 * 1000).toLong
      lt.plus(Duration.ofMillis(millis))
    }

    // Length in minutes of the range from this time to `that`.
    def until(that: LocalTime): Double = TimeRange(lt, that).lengthMinutes
  }

  /** Enrichment for LocalDateTime: fractional-minute arithmetic. */
  implicit class LocalDateTimeHelper(ldt: LocalDateTime) {
    def plusMinutes(minutes: Double): LocalDateTime = {
      val millis = (minutes * 60 * 1000).toLong
      ldt.plus(Duration.ofMillis(millis))
    }
  }
}
| razalhague/atlatl | src/main/scala/org/penny_craal/atlatl/Helpers.scala | Scala | gpl-3.0 | 831 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.enhancement
/** Marker trait identifying the Air Savant line within the Sorcerer
  * enhancement tree (see [[SorcererEnhancement]]). Carries no members of its
  * own; presumably used for type-level grouping of enhancements — confirm
  * against the enhancement registry. */
trait AirSavant extends SorcererEnhancement
| adarro/ddo-calc | subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/enhancement/AirSavant.scala | Scala | apache-2.0 | 754 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.jobs
import java.net.URLEncoder
import java.util.Date
import javax.servlet.http.HttpServletRequest
import scala.collection.JavaConverters._
import scala.collection.mutable.{HashMap, ListBuffer}
import scala.xml._
import org.apache.commons.lang3.StringEscapeUtils
import org.apache.spark.JobExecutionStatus
import org.apache.spark.scheduler._
import org.apache.spark.ui._
import org.apache.spark.ui.jobs.UIData.{JobUIData, StageUIData}
import org.apache.spark.util.Utils
/** Page showing list of all ongoing and recently finished jobs */
private[ui] class AllJobsPage(parent: JobsTab) extends WebUIPage("") {

  // Static SVG legend (succeeded/failed/running) rendered once, with newlines
  // stripped so it can be embedded inside a timeline group label string.
  private val JOBS_LEGEND =
    <div class="legend-area"><svg width="150px" height="85px">
      <rect class="succeeded-job-legend"
        x="5px" y="5px" width="20px" height="15px" rx="2px" ry="2px"></rect>
      <text x="35px" y="17px">Succeeded</text>
      <rect class="failed-job-legend"
        x="5px" y="30px" width="20px" height="15px" rx="2px" ry="2px"></rect>
      <text x="35px" y="42px">Failed</text>
      <rect class="running-job-legend"
        x="5px" y="55px" width="20px" height="15px" rx="2px" ry="2px"></rect>
      <text x="35px" y="67px">Running</text>
    </svg></div>.toString.filter(_ != '\\n')

  // Static SVG legend for executor added/removed timeline events.
  private val EXECUTORS_LEGEND =
    <div class="legend-area"><svg width="150px" height="55px">
      <rect class="executor-added-legend"
        x="5px" y="5px" width="20px" height="15px" rx="2px" ry="2px"></rect>
      <text x="35px" y="17px">Added</text>
      <rect class="executor-removed-legend"
        x="5px" y="30px" width="20px" height="15px" rx="2px" ry="2px"></rect>
      <text x="35px" y="42px">Removed</text>
    </svg></div>.toString.filter(_ != '\\n')

  // Name/description of the job's last stage (highest stage id), used as the
  // job's display name on the timeline and in tooltips.
  private def getLastStageNameAndDescription(job: JobUIData): (String, String) = {
    val lastStageInfo = Option(job.stageIds)
      .filter(_.nonEmpty)
      .flatMap { ids => parent.jobProgresslistener.stageIdToInfo.get(ids.max)}
    val lastStageData = lastStageInfo.flatMap { s =>
      parent.jobProgresslistener.stageIdToData.get((s.stageId, s.attemptId))
    }
    val name = lastStageInfo.map(_.name).getOrElse("(Unknown Stage Name)")
    val description = lastStageData.flatMap(_.description).getOrElse("")
    (name, description)
  }

  // One JS object literal (as a string) per job for the vis timeline.
  // Jobs with UNKNOWN status or no submission time are skipped; running jobs
  // use "now" as their provisional end time.
  private def makeJobEvent(jobUIDatas: Seq[JobUIData]): Seq[String] = {
    jobUIDatas.filter { jobUIData =>
      jobUIData.status != JobExecutionStatus.UNKNOWN && jobUIData.submissionTime.isDefined
    }.map { jobUIData =>
      val jobId = jobUIData.jobId
      val status = jobUIData.status
      val (jobName, jobDescription) = getLastStageNameAndDescription(jobUIData)
      val displayJobDescription =
        if (jobDescription.isEmpty) {
          jobName
        } else {
          UIUtils.makeDescription(jobDescription, "", plainText = true).text
        }
      val submissionTime = jobUIData.submissionTime.get
      val completionTimeOpt = jobUIData.completionTime
      val completionTime = completionTimeOpt.getOrElse(System.currentTimeMillis())
      val classNameByStatus = status match {
        case JobExecutionStatus.SUCCEEDED => "succeeded"
        case JobExecutionStatus.FAILED => "failed"
        case JobExecutionStatus.RUNNING => "running"
        case JobExecutionStatus.UNKNOWN => "unknown"
      }

      // The timeline library treats contents as HTML, so we have to escape them. We need to add
      // extra layers of escaping in order to embed this in a Javascript string literal.
      val escapedDesc = Utility.escape(displayJobDescription)
      val jsEscapedDesc = StringEscapeUtils.escapeEcmaScript(escapedDesc)
      val jobEventJsonAsStr =
        s"""
           |{
           | 'className': 'job application-timeline-object ${classNameByStatus}',
           | 'group': 'jobs',
           | 'start': new Date(${submissionTime}),
           | 'end': new Date(${completionTime}),
           | 'content': '<div class="application-timeline-content"' +
           | 'data-html="true" data-placement="top" data-toggle="tooltip"' +
           | 'data-title="${jsEscapedDesc} (Job ${jobId})<br>' +
           | 'Status: ${status}<br>' +
           | 'Submitted: ${UIUtils.formatDate(new Date(submissionTime))}' +
           | '${
             if (status != JobExecutionStatus.RUNNING) {
               s"""<br>Completed: ${UIUtils.formatDate(new Date(completionTime))}"""
             } else {
               ""
             }
           }">' +
           | '${jsEscapedDesc} (Job ${jobId})</div>'
           |}
         """.stripMargin
      jobEventJsonAsStr
    }
  }

  // One timeline event string per executor add/remove listener event.
  private def makeExecutorEvent(executorUIDatas: Seq[SparkListenerEvent]):
      Seq[String] = {
    val events = ListBuffer[String]()
    executorUIDatas.foreach {
      case a: SparkListenerExecutorAdded =>
        val addedEvent =
          s"""
             |{
             | 'className': 'executor added',
             | 'group': 'executors',
             | 'start': new Date(${a.time}),
             | 'content': '<div class="executor-event-content"' +
             | 'data-toggle="tooltip" data-placement="bottom"' +
             | 'data-title="Executor ${a.executorId}<br>' +
             | 'Added at ${UIUtils.formatDate(new Date(a.time))}"' +
             | 'data-html="true">Executor ${a.executorId} added</div>'
             |}
           """.stripMargin
        events += addedEvent
      case e: SparkListenerExecutorRemoved =>
        val removedEvent =
          s"""
             |{
             | 'className': 'executor removed',
             | 'group': 'executors',
             | 'start': new Date(${e.time}),
             | 'content': '<div class="executor-event-content"' +
             | 'data-toggle="tooltip" data-placement="bottom"' +
             | 'data-title="Executor ${e.executorId}<br>' +
             | 'Removed at ${UIUtils.formatDate(new Date(e.time))}' +
             | '${
               if (e.reason != null) {
                 s"""<br>Reason: ${e.reason.replace("\\n", " ")}"""
               } else {
                 ""
               }
             }"' +
             | 'data-html="true">Executor ${e.executorId} removed</div>'
             |}
           """.stripMargin
        events += removedEvent
    }
    events.toSeq
  }

  // Assembles the collapsible "Event Timeline" section: the expand control,
  // the timeline container, and the script call that draws job + executor
  // events into the two timeline groups.
  private def makeTimeline(
      jobs: Seq[JobUIData],
      executors: Seq[SparkListenerEvent],
      startTime: Long): Seq[Node] = {

    val jobEventJsonAsStrSeq = makeJobEvent(jobs)
    val executorEventJsonAsStrSeq = makeExecutorEvent(executors)

    val groupJsonArrayAsStr =
      s"""
         |[
         | {
         | 'id': 'executors',
         | 'content': '<div>Executors</div>${EXECUTORS_LEGEND}',
         | },
         | {
         | 'id': 'jobs',
         | 'content': '<div>Jobs</div>${JOBS_LEGEND}',
         | }
         |]
       """.stripMargin

    val eventArrayAsStr =
      (jobEventJsonAsStrSeq ++ executorEventJsonAsStrSeq).mkString("[", ",", "]")

    <span class="expand-application-timeline">
      <span class="expand-application-timeline-arrow arrow-closed"></span>
      <a data-toggle="tooltip" title={ToolTips.JOB_TIMELINE} data-placement="right">
        Event Timeline
      </a>
    </span> ++
    <div id="application-timeline" class="collapsed">
      <div class="control-panel">
        <div id="application-timeline-zoom-lock">
          <input type="checkbox"></input>
          <span>Enable zooming</span>
        </div>
      </div>
    </div> ++
    <script type="text/javascript">
      {Unparsed(s"drawApplicationTimeline(${groupJsonArrayAsStr}," +
        s"${eventArrayAsStr}, ${startTime}, ${UIUtils.getTimeZoneOffset()});")}
    </script>
  }

  // Renders one paged jobs table. Pagination/sort state is decoded from
  // request parameters namespaced by `jobTag` (e.g. "activeJob.page") so the
  // three tables on this page do not clobber each other's state.
  private def jobsTable(
      request: HttpServletRequest,
      tableHeaderId: String,
      jobTag: String,
      jobs: Seq[JobUIData],
      killEnabled: Boolean): Seq[Node] = {
    val allParameters = request.getParameterMap.asScala.toMap
    // Parameters belonging to the *other* tables must be carried through links
    // so their page/sort state survives navigation on this table.
    val parameterOtherTable = allParameters.filterNot(_._1.startsWith(jobTag))
      .map(para => para._1 + "=" + para._2(0))

    val someJobHasJobGroup = jobs.exists(_.jobGroup.isDefined)
    val jobIdTitle = if (someJobHasJobGroup) "Job Id (Job Group)" else "Job Id"

    val parameterJobPage = request.getParameter(jobTag + ".page")
    val parameterJobSortColumn = request.getParameter(jobTag + ".sort")
    val parameterJobSortDesc = request.getParameter(jobTag + ".desc")
    val parameterJobPageSize = request.getParameter(jobTag + ".pageSize")
    val parameterJobPrevPageSize = request.getParameter(jobTag + ".prevPageSize")

    val jobPage = Option(parameterJobPage).map(_.toInt).getOrElse(1)
    val jobSortColumn = Option(parameterJobSortColumn).map { sortColumn =>
      UIUtils.decodeURLParameter(sortColumn)
    }.getOrElse(jobIdTitle)
    val jobSortDesc = Option(parameterJobSortDesc).map(_.toBoolean).getOrElse(
      // New jobs should be shown above old jobs by default.
      if (jobSortColumn == jobIdTitle) true else false
    )
    val jobPageSize = Option(parameterJobPageSize).map(_.toInt).getOrElse(100)
    val jobPrevPageSize = Option(parameterJobPrevPageSize).map(_.toInt).getOrElse(jobPageSize)

    val page: Int = {
      // If the user has changed to a larger page size, then go to page 1 in order to avoid
      // IndexOutOfBoundsException.
      if (jobPageSize <= jobPrevPageSize) {
        jobPage
      } else {
        1
      }
    }
    val currentTime = System.currentTimeMillis()

    try {
      new JobPagedTable(
        jobs,
        tableHeaderId,
        jobTag,
        UIUtils.prependBaseUri(parent.basePath),
        "jobs", // subPath
        parameterOtherTable,
        parent.jobProgresslistener.stageIdToInfo,
        parent.jobProgresslistener.stageIdToData,
        killEnabled,
        currentTime,
        jobIdTitle,
        pageSize = jobPageSize,
        sortColumn = jobSortColumn,
        desc = jobSortDesc
      ).table(page)
    } catch {
      // Bad page/sort parameters from the URL are rendered as an inline error
      // rather than failing the whole page.
      case e @ (_ : IllegalArgumentException | _ : IndexOutOfBoundsException) =>
        <div class="alert alert-error">
          <p>Error while rendering job table:</p>
          <pre>
            {Utils.exceptionString(e)}
          </pre>
        </div>
    }
  }

  // Top-level render: summary list, event timeline, then the three tables
  // (active/completed/failed). Reads listener state under its lock so the
  // page is a consistent snapshot.
  def render(request: HttpServletRequest): Seq[Node] = {
    val listener = parent.jobProgresslistener
    listener.synchronized {
      val startTime = listener.startTime
      val endTime = listener.endTime
      val activeJobs = listener.activeJobs.values.toSeq
      val completedJobs = listener.completedJobs.reverse.toSeq
      val failedJobs = listener.failedJobs.reverse.toSeq

      val activeJobsTable =
        jobsTable(request, "active", "activeJob", activeJobs, killEnabled = parent.killEnabled)
      val completedJobsTable =
        jobsTable(request, "completed", "completedJob", completedJobs, killEnabled = false)
      val failedJobsTable =
        jobsTable(request, "failed", "failedJob", failedJobs, killEnabled = false)

      val shouldShowActiveJobs = activeJobs.nonEmpty
      val shouldShowCompletedJobs = completedJobs.nonEmpty
      val shouldShowFailedJobs = failedJobs.nonEmpty

      // The listener may retain fewer jobs than it has counted (retention
      // limit), in which case the count is qualified with how many are shown.
      val completedJobNumStr = if (completedJobs.size == listener.numCompletedJobs) {
        s"${completedJobs.size}"
      } else {
        s"${listener.numCompletedJobs}, only showing ${completedJobs.size}"
      }

      val summary: NodeSeq =
        <div>
          <ul class="unstyled">
            <li>
              <strong>User:</strong>
              {parent.getSparkUser}
            </li>
            <li>
              <strong>Total Uptime:</strong>
              {
                if (endTime < 0 && parent.sc.isDefined) {
                  UIUtils.formatDuration(System.currentTimeMillis() - startTime)
                } else if (endTime > 0) {
                  UIUtils.formatDuration(endTime - startTime)
                }
              }
            </li>
            <li>
              <strong>Scheduling Mode: </strong>
              {listener.schedulingMode.map(_.toString).getOrElse("Unknown")}
            </li>
            {
              if (shouldShowActiveJobs) {
                <li>
                  <a href="#active"><strong>Active Jobs:</strong></a>
                  {activeJobs.size}
                </li>
              }
            }
            {
              if (shouldShowCompletedJobs) {
                <li id="completed-summary">
                  <a href="#completed"><strong>Completed Jobs:</strong></a>
                  {completedJobNumStr}
                </li>
              }
            }
            {
              if (shouldShowFailedJobs) {
                <li>
                  <a href="#failed"><strong>Failed Jobs:</strong></a>
                  {listener.numFailedJobs}
                </li>
              }
            }
          </ul>
        </div>

      var content = summary
      val executorListener = parent.executorListener
      content ++= makeTimeline(activeJobs ++ completedJobs ++ failedJobs,
        executorListener.executorEvents, startTime)

      if (shouldShowActiveJobs) {
        content ++= <h4 id="active">Active Jobs ({activeJobs.size})</h4> ++
          activeJobsTable
      }
      if (shouldShowCompletedJobs) {
        content ++= <h4 id="completed">Completed Jobs ({completedJobNumStr})</h4> ++
          completedJobsTable
      }
      if (shouldShowFailedJobs) {
        content ++= <h4 id ="failed">Failed Jobs ({failedJobs.size})</h4> ++
          failedJobsTable
      }

      val helpText = """A job is triggered by an action, like count() or saveAsTextFile().""" +
        " Click on a job to see information about the stages of tasks inside it."

      UIUtils.headerSparkPage("Jobs", content, parent, helpText = Some(helpText))
    }
  }
}
// Immutable row model for the jobs table. All display strings are precomputed
// once per job so sorting comparisons do not re-derive them.
// `duration` and `submissionTime` are -1 when unknown (see JobDataSource.jobRow).
private[ui] class JobTableRowData(
    val jobData: JobUIData,
    val lastStageName: String,
    val lastStageDescription: String,
    val duration: Long,
    val formattedDuration: String,
    val submissionTime: Long,
    val formattedSubmissionTime: String,
    val jobDescription: NodeSeq,
    val detailUrl: String)
// Paged data source backing JobPagedTable: converts JobUIData into
// presorted JobTableRowData rows and serves page-sized slices of them.
private[ui] class JobDataSource(
    jobs: Seq[JobUIData],
    stageIdToInfo: HashMap[Int, StageInfo],
    stageIdToData: HashMap[(Int, Int), StageUIData],
    basePath: String,
    currentTime: Long,
    pageSize: Int,
    sortColumn: String,
    desc: Boolean) extends PagedDataSource[JobTableRowData](pageSize) {

  // Convert JobUIData to JobTableRowData which contains the final contents to show in the table
  // so that we can avoid creating duplicate contents during sorting the data
  private val data = jobs.map(jobRow).sorted(ordering(sortColumn, desc))

  // Job ids of the most recently served slice (null until sliceData is called).
  private var _slicedJobIds: Set[Int] = null

  override def dataSize: Int = data.size

  override def sliceData(from: Int, to: Int): Seq[JobTableRowData] = {
    val r = data.slice(from, to)
    _slicedJobIds = r.map(_.jobData.jobId).toSet
    r
  }

  // Same lookup as AllJobsPage.getLastStageNameAndDescription, but against the
  // maps passed to this class instead of going through the parent tab.
  private def getLastStageNameAndDescription(job: JobUIData): (String, String) = {
    val lastStageInfo = Option(job.stageIds)
      .filter(_.nonEmpty)
      .flatMap { ids => stageIdToInfo.get(ids.max)}
    val lastStageData = lastStageInfo.flatMap { s =>
      stageIdToData.get((s.stageId, s.attemptId))
    }
    val name = lastStageInfo.map(_.name).getOrElse("(Unknown Stage Name)")
    val description = lastStageData.flatMap(_.description).getOrElse("")
    (name, description)
  }

  // Precomputes one table row; missing duration/submission time become -1 so
  // the row still sorts deterministically.
  private def jobRow(jobData: JobUIData): JobTableRowData = {
    val (lastStageName, lastStageDescription) = getLastStageNameAndDescription(jobData)
    val duration: Option[Long] = {
      jobData.submissionTime.map { start =>
        val end = jobData.completionTime.getOrElse(System.currentTimeMillis())
        end - start
      }
    }
    val formattedDuration = duration.map(d => UIUtils.formatDuration(d)).getOrElse("Unknown")
    val submissionTime = jobData.submissionTime
    val formattedSubmissionTime = submissionTime.map(UIUtils.formatDate).getOrElse("Unknown")
    val jobDescription = UIUtils.makeDescription(lastStageDescription, basePath, plainText = false)

    val detailUrl = "%s/jobs/job?id=%s".format(basePath, jobData.jobId)

    new JobTableRowData (
      jobData,
      lastStageName,
      lastStageDescription,
      duration.getOrElse(-1),
      formattedDuration,
      submissionTime.getOrElse(-1),
      formattedSubmissionTime,
      jobDescription,
      detailUrl
    )
  }

  /**
   * Return Ordering according to sortColumn and desc
   */
  private def ordering(sortColumn: String, desc: Boolean): Ordering[JobTableRowData] = {
    val ordering: Ordering[JobTableRowData] = sortColumn match {
      case "Job Id" | "Job Id (Job Group)" => Ordering.by(_.jobData.jobId)
      case "Description" => Ordering.by(x => (x.lastStageDescription, x.lastStageName))
      case "Submitted" => Ordering.by(_.submissionTime)
      case "Duration" => Ordering.by(_.duration)
      // Progress columns are aggregates without a stable scalar key, so they
      // are deliberately unsortable.
      case "Stages: Succeeded/Total" | "Tasks (for all stages): Succeeded/Total" =>
        throw new IllegalArgumentException(s"Unsortable column: $sortColumn")
      case unknownColumn => throw new IllegalArgumentException(s"Unknown column: $unknownColumn")
    }
    if (desc) {
      ordering.reverse
    } else {
      ordering
    }
  }
}
// Paged HTML table of jobs. URL state (page, sort column, direction, page
// size) is namespaced by `jobTag`; `parameterOtherTable` preserves the other
// tables' state when building links.
private[ui] class JobPagedTable(
    data: Seq[JobUIData],
    tableHeaderId: String,
    jobTag: String,
    basePath: String,
    subPath: String,
    parameterOtherTable: Iterable[String],
    stageIdToInfo: HashMap[Int, StageInfo],
    stageIdToData: HashMap[(Int, Int), StageUIData],
    killEnabled: Boolean,
    currentTime: Long,
    jobIdTitle: String,
    pageSize: Int,
    sortColumn: String,
    desc: Boolean
  ) extends PagedTable[JobTableRowData] {

  // Base URL carrying the other tables' parameters; this table's own
  // parameters are appended per link.
  val parameterPath = basePath + s"/$subPath/?" + parameterOtherTable.mkString("&")

  override def tableId: String = jobTag + "-table"

  override def tableCssClass: String =
    "table table-bordered table-condensed table-striped " +
      "table-head-clickable table-cell-width-limited"

  override def pageSizeFormField: String = jobTag + ".pageSize"

  override def prevPageSizeFormField: String = jobTag + ".prevPageSize"

  override def pageNumberFormField: String = jobTag + ".page"

  override val dataSource = new JobDataSource(
    data,
    stageIdToInfo,
    stageIdToData,
    basePath,
    currentTime,
    pageSize,
    sortColumn,
    desc)

  // Link to a given page, preserving sort state; the fragment jumps back to
  // this table's header.
  override def pageLink(page: Int): String = {
    val encodedSortColumn = URLEncoder.encode(sortColumn, "UTF-8")
    parameterPath +
      s"&$pageNumberFormField=$page" +
      s"&$jobTag.sort=$encodedSortColumn" +
      s"&$jobTag.desc=$desc" +
      s"&$pageSizeFormField=$pageSize" +
      s"#$tableHeaderId"
  }

  // Action URL for the page-size "Go" form.
  override def goButtonFormPath: String = {
    val encodedSortColumn = URLEncoder.encode(sortColumn, "UTF-8")
    s"$parameterPath&$jobTag.sort=$encodedSortColumn&$jobTag.desc=$desc#$tableHeaderId"
  }

  // Header row; sortable columns link back to this page with the sort
  // parameters flipped/updated.
  override def headers: Seq[Node] = {
    // Information for each header: title, cssClass, and sortable
    val jobHeadersAndCssClasses: Seq[(String, String, Boolean)] =
      Seq(
        (jobIdTitle, "", true),
        ("Description", "", true), ("Submitted", "", true), ("Duration", "", true),
        ("Stages: Succeeded/Total", "", false),
        ("Tasks (for all stages): Succeeded/Total", "", false)
      )

    if (!jobHeadersAndCssClasses.filter(_._3).map(_._1).contains(sortColumn)) {
      throw new IllegalArgumentException(s"Unknown column: $sortColumn")
    }

    val headerRow: Seq[Node] = {
      jobHeadersAndCssClasses.map { case (header, cssClass, sortable) =>
        if (header == sortColumn) {
          // Currently sorted column: clicking it reverses the direction.
          val headerLink = Unparsed(
            parameterPath +
              s"&$jobTag.sort=${URLEncoder.encode(header, "UTF-8")}" +
              s"&$jobTag.desc=${!desc}" +
              s"&$jobTag.pageSize=$pageSize" +
              s"#$tableHeaderId")
          val arrow = if (desc) "&#x25BE;" else "&#x25B4;" // UP or DOWN

          <th class={cssClass}>
            <a href={headerLink}>
              {header}<span>
              {Unparsed(arrow)}
            </span>
            </a>
          </th>
        } else {
          if (sortable) {
            val headerLink = Unparsed(
              parameterPath +
                s"&$jobTag.sort=${URLEncoder.encode(header, "UTF-8")}" +
                s"&$jobTag.pageSize=$pageSize" +
                s"#$tableHeaderId")

            <th class={cssClass}>
              <a href={headerLink}>
                {header}
              </a>
            </th>
          } else {
            <th class={cssClass}>
              {header}
            </th>
          }
        }
      }
    }
    <thead>{headerRow}</thead>
  }

  // Renders one job row, including the optional kill link and stage/task
  // progress cells.
  override def row(jobTableRow: JobTableRowData): Seq[Node] = {
    val job = jobTableRow.jobData

    val killLink = if (killEnabled) {
      val confirm =
        s"if (window.confirm('Are you sure you want to kill job ${job.jobId} ?')) " +
          "{ this.parentNode.submit(); return true; } else { return false; }"
      // SPARK-6846 this should be POST-only but YARN AM won't proxy POST
      /*
      val killLinkUri = s"$basePathUri/jobs/job/kill/"
      <form action={killLinkUri} method="POST" style="display:inline">
        <input type="hidden" name="id" value={job.jobId.toString}/>
        <a href="#" onclick={confirm} class="kill-link">(kill)</a>
      </form>
       */
      val killLinkUri = s"$basePath/jobs/job/kill/?id=${job.jobId}"
      <a href={killLinkUri} onclick={confirm} class="kill-link">(kill)</a>
    } else {
      Seq.empty
    }

    <tr id={"job-" + job.jobId}>
      <td>
        {job.jobId} {job.jobGroup.map(id => s"($id)").getOrElse("")}
      </td>
      <td>
        {jobTableRow.jobDescription} {killLink}
        <a href={jobTableRow.detailUrl} class="name-link">{jobTableRow.lastStageName}</a>
      </td>
      <td>
        {jobTableRow.formattedSubmissionTime}
      </td>
      <td>{jobTableRow.formattedDuration}</td>
      <td class="stage-progress-cell">
        {job.completedStageIndices.size}/{job.stageIds.size - job.numSkippedStages}
        {if (job.numFailedStages > 0) s"(${job.numFailedStages} failed)"}
        {if (job.numSkippedStages > 0) s"(${job.numSkippedStages} skipped)"}
      </td>
      <td class="progress-cell">
        {UIUtils.makeProgressBar(started = job.numActiveTasks, completed = job.numCompletedTasks,
        failed = job.numFailedTasks, skipped = job.numSkippedTasks, killed = job.numKilledTasks,
        total = job.numTasks - job.numSkippedTasks)}
      </td>
    </tr>
  }
}
| SnappyDataInc/spark | core/src/main/scala/org/apache/spark/ui/jobs/AllJobsPage.scala | Scala | apache-2.0 | 23,520 |
package com.github.j5ik2o.forseti.domain.client
/** OAuth client types: confidential clients can keep a secret, public clients cannot. */
object ClientType extends Enumeration {
  val Confidential: Value = Value
  val Public: Value = Value
}
| j5ik2o/forseti | domain/src/main/scala/com/github/j5ik2o/forseti/domain/client/ClientType.scala | Scala | mit | 126 |
package commentator
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.typesafe.config.Config
import commentator.actions.{SendScheduledTweet, TrackFriends, TrackTrendingTags}
import commentator.campaign.CampaignRunner
import commentator.twitter.redis.Repository
import commentator.twitter.resources.{Friends, HomeTimeline}
import commentator.twitter.scheduler.TweetsQueue
import twitter4j.conf.Configuration
/** Wires up the Twitter-bot actors and starts the comment campaign. */
class Commentator(commentatorConf: Config, twitterConf: Configuration)(implicit system: ActorSystem) {

  def startCommenting(): Unit = {
    implicit val materializer: ActorMaterializer = ActorMaterializer()
    implicit val conf: Configuration = twitterConf

    // Spin up the actors first, then deliver their kick-off messages.
    val scheduledTweets = system.actorOf(TweetsQueue.props())
    val friendsTracker = system.actorOf(Friends.props())
    val timelineTracker = system.actorOf(HomeTimeline.props())

    friendsTracker ! TrackFriends(commentatorConf.getString("name"))
    timelineTracker ! TrackTrendingTags(commentatorConf.getString("name"))
    scheduledTweets ! SendScheduledTweet

    new CampaignRunner(new Repository(), scheduledTweets)
      .runCampaign(commentatorConf.getString("comments"), commentatorConf.getString("name"))
  }
}
| emaillenin/commentator | src/main/scala/commentator/Commentator.scala | Scala | apache-2.0 | 1,122 |
package net.categoricaldata
package object ontology {
  /** Placeholder for unimplemented members; always throws.
    * The result type is annotated as `Nothing` explicitly (it was inferred
    * before) so the "fits anywhere" contract is visible at the definition.
    * Note this shadows `Predef.???` within this package, throwing
    * `NoSuchMethodException` instead of `NotImplementedError`. */
  def ??? : Nothing = throw new NoSuchMethodException
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.batch.sql
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.planner.utils.TableTestBase
import org.junit.Test
class RankTest extends TableTestBase {
private val util = batchTestUtil()
util.addTableSource[(Int, String, Long)]("MyTable", 'a, 'b, 'c)
@Test(expected = classOf[RuntimeException])
def testRowNumberWithoutOrderBy(): Unit = {
val sqlQuery =
"""
|SELECT ROW_NUMBER() over (partition by a) FROM MyTable
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test(expected = classOf[RuntimeException])
def testRowNumberWithMultiGroups(): Unit = {
val sqlQuery =
"""
|SELECT ROW_NUMBER() over (partition by a order by b) as a,
| ROW_NUMBER() over (partition by b) as b
| FROM MyTable
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test(expected = classOf[ValidationException])
def testRankWithoutOrderBy(): Unit = {
val sqlQuery =
"""
|SELECT RANK() over (partition by a) FROM MyTable
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test(expected = classOf[ValidationException])
def testRankWithMultiGroups(): Unit = {
val sqlQuery =
"""
|SELECT RANK() over (partition by a order by b) as a,
| RANK() over (partition by b) as b
| FROM MyTable
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test(expected = classOf[ValidationException])
def testDenseRankWithoutOrderBy(): Unit = {
val sqlQuery =
"""
|SELECT dense_rank() over (partition by a) FROM MyTable
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test(expected = classOf[ValidationException])
def testDenseRankWithMultiGroups(): Unit = {
val sqlQuery =
"""
|SELECT DENSE_RANK() over (partition by a order by b) as a,
| DENSE_RANK() over (partition by b) as b
| FROM MyTable
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test
def testRankValueFilterWithUpperValue(): Unit = {
val sqlQuery =
"""
|SELECT * FROM (
| SELECT a, b, RANK() OVER (PARTITION BY b ORDER BY a) rk FROM MyTable) t
|WHERE rk <= 2 AND a > 10
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test
def testRankValueFilterWithRange(): Unit = {
val sqlQuery =
"""
|SELECT * FROM (
| SELECT a, b, RANK() OVER (PARTITION BY b, c ORDER BY a) rk FROM MyTable) t
|WHERE rk <= 2 AND rk > -2
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test
def testRankValueFilterWithEquals(): Unit = {
val sqlQuery =
"""
|SELECT * FROM (
| SELECT a, b, RANK() OVER (PARTITION BY b ORDER BY a, c) rk FROM MyTable) t
|WHERE rk = 2
""".stripMargin
util.verifyPlan(sqlQuery)
}
// Plan for a rank query whose OVER clause has no PARTITION BY (global ranking).
@Test
def testWithoutPartitionBy(): Unit = {
  val sqlQuery =
    """
      |SELECT * FROM (
      |  SELECT a, b, RANK() OVER (ORDER BY a) rk FROM MyTable) t
      |WHERE rk < 10
    """.stripMargin
  util.verifyPlan(sqlQuery)
}
// Two identical RANK calls over the same window should still plan successfully
// (unlike the differing-window cases above, which are rejected).
@Test
def testMultiSameRankFunctionsWithSameGroup(): Unit = {
  val sqlQuery =
    """
      |SELECT * FROM (
      |  SELECT a, b,
      |      RANK() OVER (PARTITION BY b ORDER BY a) rk1,
      |      RANK() OVER (PARTITION BY b ORDER BY a) rk2 FROM MyTable) t
      |WHERE rk1 < 10
    """.stripMargin
  util.verifyPlan(sqlQuery)
}
// Registers a source table that already has a column named 'rk' to verify the planner
// disambiguates it from the generated rank alias.
@Test
def testDuplicateRankFunctionColumnName(): Unit = {
  util.addTableSource[(Int, Long, String)]("MyTable2", 'a, 'b, 'rk)
  val sqlQuery =
    """
      |SELECT * FROM (
      |  SELECT a, b, RANK() OVER (PARTITION BY b ORDER BY a) rk FROM MyTable2) t
      |WHERE rk < 10
    """.stripMargin
  util.verifyPlan(sqlQuery)
}
// The rank column appears between other projected columns rather than last;
// verifies the plan handles a non-trailing rank position.
@Test
def testRankFunctionInMiddle(): Unit = {
  val sqlQuery =
    """
      |SELECT * FROM (
      |  SELECT a, RANK() OVER (PARTITION BY a ORDER BY a) rk, b, c FROM MyTable) t
      |WHERE rk < 10
    """.stripMargin
  util.verifyPlan(sqlQuery)
}
}
| jinglining/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/batch/sql/RankTest.scala | Scala | apache-2.0 | 4,947 |
package com.github.mtailor.srtplayground.analysis
import com.github.mtailor.srtdissector.Vocabulary.{SubtitleBlock, Time, Srt}
import com.github.mtailor.srtplayground.analysis.SrtFullComparisonHelper._
import com.github.mtailor.srtplayground.analysis.SrtsTextualMatchingHelper.TextualMatchingParameters
import Math._
object SrtFullComparisonHelper {

  /**
   * Tuning knobs for a full (text + timing) comparison of two .srt files.
   *
   * @param textualMatchingParams      parameters forwarded to the textual matcher
   * @param minimumMatchingBlocksRate  minimum ratio of matched blocks below which the
   *                                   files are considered [[Unrelated]]
   * @param timingsShiftApproximation  tolerance (in the Time unit) when verifying a shift
   */
  case class FullComparisonParameters(
    textualMatchingParams: TextualMatchingParameters,
    minimumMatchingBlocksRate: Double,
    timingsShiftApproximation: Time
  )

  /** Affine mapping between two timelines: otherTime = baseTime * factor + delay. */
  case class TimingShift(delay: Int, factor: Double)

  /**
   * The identity shift (no delay, no stretching).
   * Defined as a `val` rather than the original `object ZeroShift extends TimingShift(0, 1)`:
   * inheriting from a case class is deprecated in Scala and breaks the case class
   * equality/hashing contract.
   */
  val ZeroShift: TimingShift = TimingShift(0, 1.0)

  /** Outcome of comparing two .srt files. */
  sealed trait SrtsFullComparisonResult
  // the texts are different, they must come from two different transcriptions
  // (or are not even for the same media)
  case object Unrelated extends SrtsFullComparisonResult
  // the texts are near identical
  sealed trait SameText extends SrtsFullComparisonResult
  // the texts are near identical but the timings have nothing in common (should not be common)
  case object SameTextUnrelatedTimings extends SameText
  // the texts are near identical and the timings are similar with a shift (which may be the zero shift)
  case class SameTextShiftedTimings(shift: TimingShift) extends SameText
}
class SrtFullComparisonHelper(val textualComparisonHelper: SrtsTextualMatchingHelper) {

  /**
   * Compares two .srt files
   * both on their texts and their timings
   *
   */
  def compare(base: Srt, other: Srt)(implicit params: FullComparisonParameters): SrtsFullComparisonResult = {
    // pairs of (base block, other block) whose texts matched
    val matchingBlocks = textualComparisonHelper.computeMatches(base, other)(params.textualMatchingParams)
    // best case: every block up to the configured prefix length matches
    val idealNbOfMatchingBlocks = min(max(base.size, other.size), params.textualMatchingParams.blocksToConsiderFromBeginning)
    // fewer than 2 matches also means we cannot derive a shift from two anchor points below
    if (matchingBlocks.size < 2 || matchingBlocks.size.toDouble/idealNbOfMatchingBlocks < params.minimumMatchingBlocksRate)
      Unrelated
    else {
      //use the first block and the last to determine the shift
      val shift = computeShift(
        matchingBlocks.head._1.start,
        matchingBlocks.last._1.end,
        matchingBlocks.head._2.start,
        matchingBlocks.last._2.end
      )
      //check it on all blocks
      if(verifyShift(matchingBlocks, shift, params.timingsShiftApproximation))
        SameTextShiftedTimings(shift)
      else
        SameTextUnrelatedTimings
    }
  }

  // Derives the affine shift mapping base timings onto other timings from two anchor
  // points (first start, last end).
  // NOTE(review): divides by (baseEnd - baseStart); assumes the anchors are distinct
  // timestamps — TODO confirm this cannot be zero for degenerate inputs.
  private def computeShift(baseStart: Time, baseEnd: Time, otherStart: Time, otherEnd: Time): TimingShift = {
    val factor: Double = (otherEnd - otherStart).toDouble / (baseEnd - baseStart)
    val delay: Int = (otherEnd - baseEnd * factor).toInt
    TimingShift(delay, factor)
  }

  // True when every matched pair of blocks agrees with the shift, at both block
  // boundaries, within the given tolerance.
  private def verifyShift(
    matchingBlocks: Seq[(SubtitleBlock, SubtitleBlock)],
    shift: TimingShift,
    tolerance: Int
  ): Boolean =
    matchingBlocks.forall { case (baseBlock, otherBlock) =>
      matchesWithShift(baseBlock.start, otherBlock.start, shift, tolerance) &&
        matchesWithShift(baseBlock.end, otherBlock.end, shift, tolerance)
    }

  // Single-timestamp check: |shift(base) - other| <= tolerance.
  private def matchesWithShift(base: Time, other: Time, shift: TimingShift, tolerance: Int) =
    abs(applyShift(base, shift) - other) <= tolerance

  // Applies the affine shift; truncates to Int (Time is integral here).
  private def applyShift(time: Time, shift: TimingShift): Time =
    (time * shift.factor + shift.delay).toInt
}
| implicitdef/srt-playground | src/main/scala/com/github/mtailor/srtplayground/analysis/SrtFullComparisonHelper.scala | Scala | apache-2.0 | 3,295 |
package org.scalatra
package oauth2
package service
import org.json4s._
import OAuth2Imports._
object PathReading {
/** Delimiter pair used to split a lookup path into segments (e.g. "a.b" or "a[b]"). */
trait Separator {
  /** Text that introduces a path segment, e.g. "." or "[". */
  def beginning: String
  /** Text that closes a segment; empty for most separators, "]" for bracketed paths. */
  def end: String
}
/** The built-in separator styles. */
object Separator {
  object Dotted extends Separator {
    val beginning = "."
    val end = ""
  }
  object Dollar extends Separator {
    val beginning = "$"
    val end = ""
  }
  object Slash extends Separator {
    val beginning = "/"
    val end = ""
  }
  // Bug fix: BackSlash previously declared beginning = "$", an apparent copy-paste of
  // Dollar. A backslash separator must actually use the backslash character.
  object BackSlash extends Separator {
    val beginning = "\\"
    val end = ""
  }
  object Bracketed extends Separator {
    val beginning = "["
    val end = "]"
  }
}

/** Separator used when none is specified explicitly. */
val DefaultSeparator = Separator.Dotted
/**
 * Resolves separator-delimited paths (e.g. "a.b.c") against a subject value by
 * repeatedly splitting off the first segment and delegating single-segment lookups
 * to the concrete `get` implementation.
 */
abstract class PathReader[Subject: Manifest] {

  /** Separator style used to split a path into segments. */
  def separator: Separator

  /** Default subject that paths are resolved against. */
  def obj: Subject

  /** Looks up a single path segment `key` directly on `subj`. */
  protected def get[TResult](key: String, subj: Subject)(implicit mf: Manifest[TResult]): Option[TResult]

  /**
   * Like [[read]], but unwraps the result; throws `NoSuchElementException`
   * when the path cannot be resolved.
   */
  def apply[TResult: Manifest](path: String, subj: Subject = obj): TResult =
    read[TResult](path, subj).get

  /** Resolves `path` against `subj`, returning None when any segment is missing. */
  final def read[TResult: Manifest](path: String, subj: Subject = obj): Option[TResult] = {
    val partIndex = path.indexOf(separator.beginning)
    // reuse the index computed above instead of scanning the path a second time
    val (part, rest) = if (partIndex > -1) path.splitAt(partIndex) else (path, "")
    // strip the leading separator from the remainder; when the separator has a closing
    // token (e.g. brackets) one extra character is dropped.
    // NOTE(review): for bracketed paths this drops two characters from the *front* of
    // rest, which leaves the closing "]" in place — verify against the expected path
    // format for end-delimited separators.
    val realRest = if (rest.nonEmpty) {
      if (separator.end.nonBlank) {
        if (rest.size > 1) rest.substring(2) else rest.substring(1)
      } else rest.substring(1)
    } else rest
    if (realRest.isEmpty) {
      // last segment: do the actual typed lookup
      get[TResult](part, subj)
    } else {
      // intermediate segment: descend into the sub-subject and recurse
      get[Subject](part, subj) flatMap (read[TResult](realRest, _))
    }
  }
}
/**
 * A [[PathReader]] over json4s `JValue`s: each segment is looked up recursively in the
 * JSON tree, and leaf values are extracted to the requested type.
 */
class JValuePathReader(val obj: JValue, val separator: Separator = DefaultSeparator)(implicit formats: Formats, subjMf: Manifest[JValue]) extends PathReader[JValue] {
  protected def get[TResult](key: String, subj: JValue)(implicit mf: Manifest[TResult]) = {
    // Bug fix: the original tested `key.contains(separator)`, which compares the
    // string's *characters* against the Separator object and can therefore never be
    // true. Compare against the separator's textual form so a composite key is
    // re-resolved as a path, as intended.
    if (key.contains(separator.beginning)) read[TResult](key, subj) else {
      val jv = (subj \\ key)
      if (subjMf <:< mf)
        // caller asked for a JValue (or supertype): return the raw node, mapping
        // JSON null/absence to None
        jv match {
          case JNull | JNothing ⇒ None
          case _ ⇒ Some(jv.asInstanceOf[TResult])
        }
      else
        // otherwise extract to the requested Scala type via the implicit formats
        jv.extractOpt[TResult]
    }
  }
}
def apply(subj: JValue)(implicit formats: Formats): JValuePathReader = new JValuePathReader(subj)
} | scalatra/oauth2-server | src/main/scala/org/scalatra/oauth2/service/PathReader.scala | Scala | mit | 2,359 |
package com.cognism.controllers
import javax.inject.{Inject, Singleton}
import play.api.libs.json.Json
import play.api.mvc._
import com.cognism.common.controllers.Secured
@Singleton
class Application @Inject() extends InjectedController with Secured {

  /** Health-check endpoint: always responds 200 with the JSON body {"status": "OK"}. */
  def status = Action { _ =>
    Ok(Json.obj("status" -> "OK"))
  }

  /** Echoes the supplied message back as the response body, behind the API guard. */
  def echo(msg: String) = ApiAction { _ =>
    Ok(msg)
  }
}
| Cognism/cognism-template-play | app/com/cognism/controllers/Application.scala | Scala | mit | 431 |
package spire
package syntax
import spire.NoImplicit
import spire.algebra._
import spire.algebra.lattice._
import spire.algebra.partial._
import spire.math._
import spire.macros.Syntax
import spire.syntax.std._
// ---------------------------------------------------------------------------
// Comparison syntax: each trait contributes one implicit conversion that wraps
// a value in the corresponding Ops class when the matching type class instance
// is available. Traits extend one another to mirror the type class hierarchy.
// ---------------------------------------------------------------------------

/** Enriches any A with Eq-based operators when an Eq[A] is in scope. */
trait EqSyntax {
  implicit def eqOps[A:Eq](a:A): EqOps[A] = new EqOps(a)
}

trait PartialOrderSyntax extends EqSyntax {
  implicit def partialOrderOps[A:PartialOrder](a:A): PartialOrderOps[A] = new PartialOrderOps(a)
}

/** Also enriches Int/Long/Double literals so they can appear on the left-hand side. */
trait OrderSyntax extends PartialOrderSyntax {
  implicit def orderOps[A:Order](a:A): OrderOps[A] = new OrderOps(a)
  implicit def literalIntOrderOps(lhs: Int): LiteralIntOrderOps = new LiteralIntOrderOps(lhs)
  implicit def literalLongOrderOps(lhs: Long): LiteralLongOrderOps = new LiteralLongOrderOps(lhs)
  implicit def literalDoubleOrderOps(lhs: Double): LiteralDoubleOrderOps = new LiteralDoubleOrderOps(lhs)
}

trait SignedSyntax extends OrderSyntax {
  implicit def signedOps[A: Signed](a: A): SignedOps[A] = new SignedOps(a)
}

trait TruncatedDivisionSyntax extends SignedSyntax {
  implicit def truncatedDivisionOps[A:TruncatedDivision](a: A): TruncatedDivisionOps[A] = new TruncatedDivisionOps(a)
  implicit def literalIntTruncatedDivisionOps(lhs:Int): LiteralIntTruncatedDivisionOps = new LiteralIntTruncatedDivisionOps(lhs)
  implicit def literalLongTruncatedDivisionOps(lhs:Long): LiteralLongTruncatedDivisionOps = new LiteralLongTruncatedDivisionOps(lhs)
  implicit def literalDoubleTruncatedDivisionOps(lhs:Double): LiteralDoubleTruncatedDivisionOps = new LiteralDoubleTruncatedDivisionOps(lhs)
}

trait InvolutionSyntax {
  implicit def involutionOps[A:Involution](lhs: A): InvolutionOps[A] = new InvolutionOps(lhs)
}

trait IsRealSyntax extends SignedSyntax {
  implicit def isRealOps[A:IsReal](a:A): IsRealOps[A] = new IsRealOps(a)
}
// ---------------------------------------------------------------------------
// Partial and total group-like syntax (semigroupoid/groupoid, semigroup/monoid/group).
// ---------------------------------------------------------------------------

trait SemigroupoidSyntax {
  implicit def semigroupoidOps[A:Semigroupoid](a:A): SemigroupoidOps[A] = new SemigroupoidOps[A](a)
}

trait GroupoidSyntax extends SemigroupoidSyntax {
  // the NoImplicit guard keeps this conversion from clashing with Monoid-based syntax
  implicit def groupoidCommonOps[A](a:A)(implicit ev: Groupoid[A], ni: NoImplicit[Monoid[A]]): GroupoidCommonOps[A] = new GroupoidCommonOps[A](a)(ev)
  implicit def groupoidOps[A](a:A)(implicit ev: Groupoid[A]): GroupoidOps[A] = new GroupoidOps[A](a)
}

trait SemigroupSyntax {
  implicit def semigroupOps[A:Semigroup](a:A): SemigroupOps[A] = new SemigroupOps(a)
}

trait MonoidSyntax extends SemigroupSyntax {
  implicit def monoidOps[A](a:A)(implicit ev: Monoid[A]): MonoidOps[A] = new MonoidOps(a)
}

trait GroupSyntax extends MonoidSyntax {
  implicit def groupOps[A:Group](a:A): GroupOps[A] = new GroupOps(a)
}
// ---------------------------------------------------------------------------
// Additive / multiplicative structure syntax, combined into semiring-to-field
// hierarchies below. Literal ops allow Int/Long/Double on the left-hand side.
// ---------------------------------------------------------------------------

trait AdditiveSemigroupSyntax {
  implicit def additiveSemigroupOps[A:AdditiveSemigroup](a:A): AdditiveSemigroupOps[A] = new AdditiveSemigroupOps(a)
  implicit def literalIntAdditiveSemigroupOps(lhs:Int): LiteralIntAdditiveSemigroupOps = new LiteralIntAdditiveSemigroupOps(lhs)
  implicit def literalLongAdditiveSemigroupOps(lhs:Long): LiteralLongAdditiveSemigroupOps = new LiteralLongAdditiveSemigroupOps(lhs)
  implicit def literalDoubleAdditiveSemigroupOps(lhs:Double): LiteralDoubleAdditiveSemigroupOps = new LiteralDoubleAdditiveSemigroupOps(lhs)
}

trait AdditiveMonoidSyntax extends AdditiveSemigroupSyntax {
  implicit def additiveMonoidOps[A](a:A)(implicit ev: AdditiveMonoid[A]): AdditiveMonoidOps[A] = new AdditiveMonoidOps(a)
}

trait AdditiveGroupSyntax extends AdditiveMonoidSyntax {
  implicit def additiveGroupOps[A:AdditiveGroup](a:A): AdditiveGroupOps[A] = new AdditiveGroupOps(a)
  implicit def literalIntAdditiveGroupOps(lhs:Int): LiteralIntAdditiveGroupOps = new LiteralIntAdditiveGroupOps(lhs)
  implicit def literalLongAdditiveGroupOps(lhs:Long): LiteralLongAdditiveGroupOps = new LiteralLongAdditiveGroupOps(lhs)
  implicit def literalDoubleAdditiveGroupOps(lhs:Double): LiteralDoubleAdditiveGroupOps = new LiteralDoubleAdditiveGroupOps(lhs)
}

trait MultiplicativeSemigroupSyntax {
  implicit def multiplicativeSemigroupOps[A:MultiplicativeSemigroup](a:A): MultiplicativeSemigroupOps[A] = new MultiplicativeSemigroupOps(a)
  implicit def literalIntMultiplicativeSemigroupOps(lhs:Int): LiteralIntMultiplicativeSemigroupOps = new LiteralIntMultiplicativeSemigroupOps(lhs)
  implicit def literalLongMultiplicativeSemigroupOps(lhs:Long): LiteralLongMultiplicativeSemigroupOps = new LiteralLongMultiplicativeSemigroupOps(lhs)
  implicit def literalDoubleMultiplicativeSemigroupOps(lhs:Double): LiteralDoubleMultiplicativeSemigroupOps = new LiteralDoubleMultiplicativeSemigroupOps(lhs)
}

trait MultiplicativeMonoidSyntax extends MultiplicativeSemigroupSyntax {
  implicit def multiplicativeMonoidOps[A](a:A)(implicit ev: MultiplicativeMonoid[A]): MultiplicativeMonoidOps[A] =
    new MultiplicativeMonoidOps(a)
}

trait MultiplicativeGroupSyntax extends MultiplicativeMonoidSyntax {
  implicit def multiplicativeGroupOps[A:MultiplicativeGroup](a:A): MultiplicativeGroupOps[A] = new MultiplicativeGroupOps(a)
  implicit def literalIntMultiplicativeGroupOps(lhs:Int): LiteralIntMultiplicativeGroupOps = new LiteralIntMultiplicativeGroupOps(lhs)
  implicit def literalLongMultiplicativeGroupOps(lhs:Long): LiteralLongMultiplicativeGroupOps = new LiteralLongMultiplicativeGroupOps(lhs)
  implicit def literalDoubleMultiplicativeGroupOps(lhs:Double): LiteralDoubleMultiplicativeGroupOps = new LiteralDoubleMultiplicativeGroupOps(lhs)
}

// Ring-like hierarchies assembled from the additive and multiplicative pieces.
trait SemiringSyntax extends AdditiveSemigroupSyntax with MultiplicativeSemigroupSyntax {
  implicit def semiringOps[A:Semiring](a:A): SemiringOps[A] = new SemiringOps(a)
}

trait RigSyntax extends SemiringSyntax

trait RngSyntax extends SemiringSyntax with AdditiveGroupSyntax

trait RingSyntax extends RngSyntax with RigSyntax

trait GCDRingSyntax extends RingSyntax {
  implicit def gcdRingOps[A: GCDRing](a:A): GCDRingOps[A] = new GCDRingOps(a)
}

trait EuclideanRingSyntax extends GCDRingSyntax {
  implicit def euclideanRingOps[A:EuclideanRing](a:A): EuclideanRingOps[A] = new EuclideanRingOps(a)
  implicit def literalIntEuclideanRingOps(lhs:Int): LiteralIntEuclideanRingOps = new LiteralIntEuclideanRingOps(lhs)
  implicit def literalLongEuclideanRingOps(lhs:Long): LiteralLongEuclideanRingOps = new LiteralLongEuclideanRingOps(lhs)
  implicit def literalDoubleEuclideanRingOps(lhs:Double): LiteralDoubleEuclideanRingOps = new LiteralDoubleEuclideanRingOps(lhs)
}

trait FieldSyntax extends EuclideanRingSyntax with MultiplicativeGroupSyntax
// ---------------------------------------------------------------------------
// Roots, modules and vector-space syntax.
// ---------------------------------------------------------------------------

trait NRootSyntax {
  implicit def nrootOps[A: NRoot](a: A): NRootOps[A] = new NRootOps(a)
}

trait LeftModuleSyntax extends RingSyntax {
  implicit def leftModuleOps[V](v:V): LeftModuleOps[V] = new LeftModuleOps[V](v)
}

trait RightModuleSyntax extends RingSyntax {
  implicit def rightModuleOps[V](v:V): RightModuleOps[V] = new RightModuleOps[V](v)
}

trait CModuleSyntax extends LeftModuleSyntax with RightModuleSyntax

trait VectorSpaceSyntax extends CModuleSyntax with FieldSyntax {
  implicit def vectorSpaceOps[V](v:V): VectorSpaceOps[V] = new VectorSpaceOps[V](v)
}

trait MetricSpaceSyntax extends VectorSpaceSyntax {
  implicit def metricSpaceOps[V](v:V): MetricSpaceOps[V] = new MetricSpaceOps[V](v)
}

trait NormedVectorSpaceSyntax extends MetricSpaceSyntax {
  implicit def normedVectorSpaceOps[V](v:V): NormedVectorSpaceOps[V] = new NormedVectorSpaceOps[V](v)
}

trait InnerProductSpaceSyntax extends VectorSpaceSyntax {
  implicit def innerProductSpaceOps[V](v:V): InnerProductSpaceOps[V] = new InnerProductSpaceOps[V](v)
}

trait CoordinateSpaceSyntax extends InnerProductSpaceSyntax {
  implicit def coordinateSpaceOps[V](v:V): CoordinateSpaceOps[V] = new CoordinateSpaceOps[V](v)
}

trait TrigSyntax {
  implicit def trigOps[A:Trig](a: A): TrigOps[A] = new TrigOps(a)
}
// ---------------------------------------------------------------------------
// Lattice/logic syntax, bit strings, group actions and torsors.
// ---------------------------------------------------------------------------

trait LatticeSyntax {
  implicit def meetOps[A: MeetSemilattice](a: A): MeetOps[A] = new MeetOps(a)
  implicit def joinOps[A: JoinSemilattice](a: A): JoinOps[A] = new JoinOps(a)
}

trait HeytingSyntax {
  implicit def heytingOps[A: Heyting](a: A): HeytingOps[A] = new HeytingOps(a)
}

trait BoolSyntax extends HeytingSyntax {
  implicit def boolOps[A: Bool](a: A): BoolOps[A] = new BoolOps(a)
}

trait BitStringSyntax {
  implicit def bitStringOps[A: BitString](a: A): BitStringOps[A] = new BitStringOps(a)
}

trait PartialActionSyntax {
  implicit def leftPartialActionOps[G](g: G): LeftPartialActionOps[G] = new LeftPartialActionOps(g)
  implicit def rightPartialActionOps[P](p: P): RightPartialActionOps[P] = new RightPartialActionOps(p)
}

trait ActionSyntax {
  implicit def leftActionOps[G](g: G): LeftActionOps[G] = new LeftActionOps(g)
  implicit def rightActionOps[P](p: P): RightActionOps[P] = new RightActionOps(p)
}

trait IntervalSyntax {
  implicit def groupActionGroupOps[A: Order: AdditiveGroup](a: A): IntervalPointOps[A] =
    new IntervalPointOps(a)
}

// Ops whose scalar/point type parameter is left unbound and resolved from the
// available algebra instance rather than from the enriched value itself.
trait UnboundSyntax {
  implicit def moduleUnboundOps[F](f: F)(implicit ev: CModule[_, F]): ModuleUnboundOps[F] =
    new ModuleUnboundOps(f)

  implicit def vectorSpaceUnboundOps[F](f: F)(implicit ev: VectorSpace[_, F]): VectorSpaceUnboundOps[F] =
    new VectorSpaceUnboundOps(f)

  implicit def groupActionUnboundOps[G](g: G)(implicit ev: Action[_, G]): ActionUnboundOps[G] =
    new ActionUnboundOps(g)
  implicit def additiveActionUnboundOps[G](g: G)(implicit ev: AdditiveAction[_, G]): AdditiveActionUnboundOps[G] =
    new AdditiveActionUnboundOps(g)
  implicit def multiplicativeActionUnboundOps[G](g: G)(implicit ev: MultiplicativeAction[_, G]): MultiplicativeActionUnboundOps[G] =
    new MultiplicativeActionUnboundOps(g)
}

trait TorsorSyntax {
  implicit def torsorPointOps[P](p: P): TorsorPointOps[P] = new TorsorPointOps(p)
}
// ---------------------------------------------------------------------------
// Numeric bundles, cfor macros and literal string interpolators.
// ---------------------------------------------------------------------------

trait IntegralSyntax extends
    EuclideanRingSyntax with
    ConvertableFromSyntax with
    OrderSyntax with
    SignedSyntax {
  implicit def integralOps[A: Integral](a: A): IntegralOps[A] = new IntegralOps(a)
}

trait FractionalSyntax extends
    FieldSyntax with
    NRootSyntax with
    ConvertableFromSyntax with
    OrderSyntax with
    SignedSyntax

trait NumericSyntax extends
    FieldSyntax with
    NRootSyntax with
    ConvertableFromSyntax with
    OrderSyntax with
    SignedSyntax

trait ConvertableFromSyntax {
  implicit def convertableOps[A:ConvertableFrom](a:A): ConvertableFromOps[A] = new ConvertableFromOps(a)
}

/** Macro-based C-style loops that compile down to while loops. */
trait CforSyntax {
  def cfor[A](init:A)(test:A => Boolean, next:A => A)(body:A => Unit): Unit =
    macro Syntax.cforMacro[A]
  def cforRange(r: Range)(body: Int => Unit): Unit =
    macro Syntax.cforRangeMacro
  def cforRange2(r1: Range, r2: Range)(body: (Int, Int) => Unit): Unit =
    macro Syntax.cforRange2Macro
}

/** String interpolators for numeric literals (radix, SI/US/EU digit grouping). */
trait LiteralsSyntax {
  implicit def literals(s:StringContext): Literals = new Literals(s)

  object radix { implicit def radix(s:StringContext): Radix = new Radix(s) }
  object si { implicit def siLiterals(s:StringContext): SiLiterals = new SiLiterals(s) }
  object us { implicit def usLiterals(s:StringContext): UsLiterals = new UsLiterals(s) }
  object eu { implicit def euLiterals(s:StringContext): EuLiterals = new EuLiterals(s) }
}
/** Aggregates every syntax trait in this file; mixed in by `spire.implicits`. */
trait AllSyntax extends
    LiteralsSyntax with
    CforSyntax with
    EqSyntax with
    PartialOrderSyntax with
    OrderSyntax with
    SignedSyntax with
    TruncatedDivisionSyntax with
    InvolutionSyntax with
    IsRealSyntax with
    ConvertableFromSyntax with
    SemigroupoidSyntax with
    GroupoidSyntax with
    SemigroupSyntax with
    MonoidSyntax with
    GroupSyntax with
    AdditiveSemigroupSyntax with
    AdditiveMonoidSyntax with
    AdditiveGroupSyntax with
    MultiplicativeSemigroupSyntax with
    MultiplicativeMonoidSyntax with
    MultiplicativeGroupSyntax with
    SemiringSyntax with
    RigSyntax with
    RngSyntax with
    RingSyntax with
    GCDRingSyntax with
    EuclideanRingSyntax with
    FieldSyntax with
    NRootSyntax with
    TrigSyntax with
    IntervalSyntax with
    LeftModuleSyntax with
    RightModuleSyntax with
    CModuleSyntax with
    VectorSpaceSyntax with
    NormedVectorSpaceSyntax with
    InnerProductSpaceSyntax with
    CoordinateSpaceSyntax with
    LatticeSyntax with
    HeytingSyntax with
    BoolSyntax with
    BitStringSyntax with
    PartialActionSyntax with
    ActionSyntax with
    TorsorSyntax with
    IntegralSyntax with
    FractionalSyntax with
    NumericSyntax with
    IntSyntax with
    LongSyntax with
    DoubleSyntax with
    BigIntSyntax with
    ArraySyntax with
    SeqSyntax
| adampingel/spire | core/src/main/scala/spire/syntax/Syntax.scala | Scala | mit | 12,355 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.planning
import java.io.ByteArrayOutputStream
import java.nio.charset.StandardCharsets
import java.util.Date
import com.vividsolutions.jts.geom.Envelope
import org.geotools.data.Query
import org.geotools.factory.Hints
import org.locationtech.geomesa.arrow.io.records.RecordBatchUnloader
import org.locationtech.geomesa.arrow.io.{DeltaWriter, DictionaryBuildingWriter}
import org.locationtech.geomesa.arrow.vector.SimpleFeatureVector
import org.locationtech.geomesa.arrow.vector.SimpleFeatureVector.SimpleFeatureEncoding
import org.locationtech.geomesa.features.{ScalaSimpleFeature, TransformSimpleFeature}
import org.locationtech.geomesa.filter.factory.FastFilterFactory
import org.locationtech.geomesa.index.iterators.{ArrowScan, DensityScan, StatsScan}
import org.locationtech.geomesa.index.stats.GeoMesaStats
import org.locationtech.geomesa.index.utils.{Explainer, Reprojection}
import org.locationtech.geomesa.security.{AuthorizationsProvider, SecurityUtils, VisibilityEvaluator}
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder.EncodingOptions
import org.locationtech.geomesa.utils.collection.CloseableIterator
import org.locationtech.geomesa.utils.geotools.{GeometryUtils, GridSnap, SimpleFeatureOrdering}
import org.locationtech.geomesa.utils.stats.{Stat, TopK}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
/**
 * Base query runner for stores that hold simple features in memory. Subclasses supply
 * the raw feature iterator; this class layers on visibility filtering, query transforms
 * (bin/arrow/density/stats/projection), sorting and reprojection.
 *
 * @param stats        stats implementation (used e.g. to look up cached arrow dictionaries)
 * @param authProvider authorizations of the current user, when visibilities are in use
 */
abstract class InMemoryQueryRunner(stats: GeoMesaStats, authProvider: Option[AuthorizationsProvider])
    extends QueryRunner {

  import InMemoryQueryRunner.{authVisibilityCheck, noAuthVisibilityCheck, transform}
  import org.locationtech.geomesa.index.conf.QueryHints.RichHints

  // without an auth provider we can use the cheaper check that only accepts
  // features carrying no visibility marking at all
  private val isVisible: (SimpleFeature, Seq[Array[Byte]]) => Boolean =
    if (authProvider.isDefined) { authVisibilityCheck } else { noAuthVisibilityCheck }

  /** Name of the concrete runner, used to scope explain logging. */
  protected def name: String

  /**
   * Return features for the given schema and filter. Does not need to account for visibility
   *
   * @param sft simple feature type
   * @param filter filter (will not be Filter.INCLUDE), if any
   * @return
   */
  protected def features(sft: SimpleFeatureType, filter: Option[Filter]): CloseableIterator[SimpleFeature]

  override def runQuery(sft: SimpleFeatureType, original: Query, explain: Explainer): CloseableIterator[SimpleFeature] = {
    // NOTE(review): scala.collection.JavaConversions is deprecated; JavaConverters
    // with explicit .asScala would be preferable
    import scala.collection.JavaConversions._
    // resolve the caller's authorizations once per query
    val auths = authProvider.map(_.getAuthorizations.map(_.getBytes(StandardCharsets.UTF_8))).getOrElse(Seq.empty)
    val query = configureQuery(sft, original)
    optimizeFilter(sft, query)
    explain.pushLevel(s"$name query: '${sft.getTypeName}' ${org.locationtech.geomesa.filter.filterToString(query.getFilter)}")
    explain(s"bin[${query.getHints.isBinQuery}] arrow[${query.getHints.isArrowQuery}] " +
        s"density[${query.getHints.isDensityQuery}] stats[${query.getHints.isStatsQuery}]")
    explain(s"Transforms: ${query.getHints.getTransformDefinition.getOrElse("None")}")
    explain(s"Sort: ${query.getHints.getSortReadableString}")
    explain.popLevel()
    // Filter.INCLUDE is normalized to None so subclasses never see a no-op filter
    val filter = Option(query.getFilter).filter(_ != Filter.INCLUDE)
    val iter = features(sft, filter).filter(isVisible(_, auths))
    val result = CloseableIterator(transform(iter, sft, stats, query.getHints, filter))
    // reproject only when the query requests a different CRS
    Reprojection(query) match {
      case None    => result
      case Some(r) => result.map(r.reproject)
    }
  }

  /** Rewrites the filter into an optimized, fast-evaluating form for this schema. */
  override protected def optimizeFilter(sft: SimpleFeatureType, filter: Filter): Filter =
    FastFilterFactory.optimize(sft, filter)

  /** Returns the fixed result schema for bin/arrow/density/stats queries, else delegates. */
  override protected [geomesa] def getReturnSft(sft: SimpleFeatureType, hints: Hints): SimpleFeatureType = {
    if (hints.isBinQuery) {
      BinaryOutputEncoder.BinEncodedSft
    } else if (hints.isArrowQuery) {
      org.locationtech.geomesa.arrow.ArrowEncodedSft
    } else if (hints.isDensityQuery) {
      DensityScan.DensitySft
    } else if (hints.isStatsQuery) {
      StatsScan.StatsSft
    } else {
      super.getReturnSft(sft, hints)
    }
  }
}
object InMemoryQueryRunner {

  import org.locationtech.geomesa.index.conf.QueryHints.RichHints

  /**
   * Dispatches to the transform implied by the query hints: bin encoding, arrow
   * encoding, density, stats, plain projection, or pass-through with optional sorting.
   */
  def transform(features: Iterator[SimpleFeature],
                sft: SimpleFeatureType,
                stats: GeoMesaStats,
                hints: Hints,
                filter: Option[Filter]): Iterator[SimpleFeature] = {
    if (hints.isBinQuery) {
      // "id" means use the feature id itself, so no attribute index is needed
      val trackId = Option(hints.getBinTrackIdField).filter(_ != "id").map(sft.indexOf)
      val geom = hints.getBinGeomField.map(sft.indexOf)
      val dtg = hints.getBinDtgField.map(sft.indexOf)
      binTransform(features, sft, trackId, geom, dtg, hints.getBinLabelField.map(sft.indexOf), hints.isBinSorting)
    } else if (hints.isArrowQuery) {
      arrowTransform(features, sft, stats, hints, filter)
    } else if (hints.isDensityQuery) {
      val Some(envelope) = hints.getDensityEnvelope
      val Some((width, height)) = hints.getDensityBounds
      densityTransform(features, sft, envelope, width, height, hints.getDensityWeight)
    } else if (hints.isStatsQuery) {
      statsTransform(features, sft, hints.getTransform, hints.getStatsQuery, hints.isStatsEncode || hints.isSkipReduce)
    } else {
      hints.getTransform match {
        case None =>
          val sort = hints.getSortFields.map(SimpleFeatureOrdering(sft, _))
          noTransform(sft, features, sort)
        case Some((defs, tsft)) =>
          val sort = hints.getSortFields.map(SimpleFeatureOrdering(tsft, _))
          projectionTransform(features, sft, tsft, defs, sort)
      }
    }
  }

  // Encodes each feature into the compact BIN format, optionally sorted by date.
  // Note the single result feature `sf` is mutated and re-emitted for every input
  // feature - consumers must not cache references across iterations.
  private def binTransform(features: Iterator[SimpleFeature],
                           sft: SimpleFeatureType,
                           trackId: Option[Int],
                           geom: Option[Int],
                           dtg: Option[Int],
                           label: Option[Int],
                           sorting: Boolean): Iterator[SimpleFeature] = {
    import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
    val encoder = BinaryOutputEncoder(sft, EncodingOptions(geom, dtg, trackId, label))
    val sf = new ScalaSimpleFeature(BinaryOutputEncoder.BinEncodedSft, "", Array(null, GeometryUtils.zeroPoint))
    // sorting requires materializing the whole iterator in memory
    val sorted = if (!sorting) { features } else {
      val i = dtg.orElse(sft.getDtgIndex).getOrElse(throw new IllegalArgumentException("Can't sort BIN features by date"))
      features.toList.sortBy(_.getAttribute(i).asInstanceOf[Date]).iterator
    }
    sorted.map { feature =>
      sf.setAttribute(BinaryOutputEncoder.BIN_ATTRIBUTE_INDEX, encoder.encode(feature))
      sf
    }
  }

  // Encodes features into Arrow IPC batches. Three strategies, chosen from the hints:
  //  1. all dictionary values known (or double-pass allowed) -> fixed dictionaries
  //  2. multi-file -> dictionaries built on the fly, one logical file per batch
  //  3. otherwise -> delta batches with per-batch dictionary deltas
  private def arrowTransform(original: Iterator[SimpleFeature],
                             sft: SimpleFeatureType,
                             stats: GeoMesaStats,
                             hints: Hints,
                             filter: Option[Filter]): Iterator[SimpleFeature] = {

    import org.locationtech.geomesa.arrow.allocator

    val sort = hints.getArrowSort
    val batchSize = ArrowScan.getBatchSize(hints)
    val encoding = SimpleFeatureEncoding.min(hints.isArrowIncludeFid, hints.isArrowProxyFid)

    // apply any projection/sort before encoding
    val (features, arrowSft) = hints.getTransform match {
      case None =>
        val sorting = sort.map { case (field, reverse) =>
          if (reverse) { SimpleFeatureOrdering(sft, field).reverse } else { SimpleFeatureOrdering(sft, field) }
        }
        (noTransform(sft, original, sorting), sft)
      case Some((definitions, tsft)) =>
        val sorting = sort.map { case (field, reverse) =>
          if (reverse) { SimpleFeatureOrdering(tsft, field).reverse } else { SimpleFeatureOrdering(tsft, field) }
        }
        (projectionTransform(original, sft, tsft, definitions, sorting), tsft)
    }

    val dictionaryFields = hints.getArrowDictionaryFields
    val providedDictionaries = hints.getArrowDictionaryEncodedValues(sft)
    // top-k stats can stand in for a full dictionary scan when allowed by the hints
    val cachedDictionaries: Map[String, TopK[AnyRef]] = if (!hints.isArrowCachedDictionaries) { Map.empty } else {
      val toLookup = dictionaryFields.filterNot(providedDictionaries.contains)
      stats.getStats[TopK[AnyRef]](sft, toLookup).map(k => k.property -> k).toMap
    }

    if (hints.isArrowDoublePass ||
        dictionaryFields.forall(f => providedDictionaries.contains(f) || cachedDictionaries.contains(f))) {
      // we have all the dictionary values, or we will run a query to determine them up front
      val dictionaries = ArrowScan.createDictionaries(stats, sft, filter, dictionaryFields,
        providedDictionaries, cachedDictionaries)

      val vector = SimpleFeatureVector.create(arrowSft, dictionaries, encoding)
      val batchWriter = new RecordBatchUnloader(vector)

      val sf = ArrowScan.resultFeature()

      // each emitted feature wraps one record batch of up to batchSize features
      val arrows = new Iterator[SimpleFeature] {
        override def hasNext: Boolean = features.hasNext
        override def next(): SimpleFeature = {
          var index = 0
          vector.clear()
          while (index < batchSize && features.hasNext) {
            vector.writer.set(index, features.next)
            index += 1
          }
          sf.setAttribute(0, batchWriter.unload(index))
          sf
        }
      }
      if (hints.isSkipReduce) { arrows } else {
        ArrowScan.mergeBatches(arrowSft, dictionaries, encoding, batchSize, sort)(arrows)
      }
    } else if (hints.isArrowMultiFile) {
      val writer = DictionaryBuildingWriter.create(arrowSft, dictionaryFields, encoding)
      val os = new ByteArrayOutputStream()

      val sf = ArrowScan.resultFeature()

      // each emitted feature wraps one complete arrow file with its own dictionaries
      val arrows = new Iterator[SimpleFeature] {
        override def hasNext: Boolean = features.hasNext
        override def next(): SimpleFeature = {
          writer.clear()
          os.reset()
          var index = 0
          while (index < batchSize && features.hasNext) {
            writer.add(features.next)
            index += 1
          }
          writer.encode(os)
          sf.setAttribute(0, os.toByteArray)
          sf
        }
      }
      if (hints.isSkipReduce) { arrows } else {
        ArrowScan.mergeFiles(arrowSft, dictionaryFields, encoding, sort)(arrows)
      }
    } else {
      val writer = new DeltaWriter(arrowSft, dictionaryFields, encoding, None, batchSize)
      val array = Array.ofDim[SimpleFeature](batchSize)

      val sf = ArrowScan.resultFeature()

      // each emitted feature wraps one delta batch; dictionaries are merged downstream
      val arrows = new Iterator[SimpleFeature] {
        override def hasNext: Boolean = features.hasNext
        override def next(): SimpleFeature = {
          var index = 0
          while (index < batchSize && features.hasNext) {
            array(index) = features.next
            index += 1
          }
          sf.setAttribute(0, writer.encode(array, index))
          sf
        }
      }
      if (hints.isSkipReduce) { arrows } else {
        ArrowScan.mergeDeltas(arrowSft, dictionaryFields, encoding, batchSize, sort)(arrows)
      }
    }
  }

  // Accumulates weighted counts per grid cell and emits a single feature whose
  // user data carries the encoded density map.
  private def densityTransform(features: Iterator[SimpleFeature],
                               sft: SimpleFeatureType,
                               envelope: Envelope,
                               width: Int,
                               height: Int,
                               weight: Option[String]): Iterator[SimpleFeature] = {
    val grid = new GridSnap(envelope, width, height)
    val result = scala.collection.mutable.Map.empty[(Int, Int), Double]
    val getWeight = DensityScan.getWeight(sft, weight)
    val writeGeom = DensityScan.writeGeometry(sft, grid)
    features.foreach(f => writeGeom(f, getWeight(f), result))

    val sf = new ScalaSimpleFeature(DensityScan.DensitySft, "", Array(GeometryUtils.zeroPoint))
    // Return value in user data so it's preserved when passed through a RetypingFeatureCollection
    sf.getUserData.put(DensityScan.DensityValueKey, DensityScan.encodeResult(result))
    Iterator(sf)
  }

  // Observes every (optionally transformed) feature with the requested Stat and
  // emits a single feature holding the result, either encoded or as JSON.
  private def statsTransform(features: Iterator[SimpleFeature],
                             sft: SimpleFeatureType,
                             transform: Option[(String, SimpleFeatureType)],
                             query: String,
                             encode: Boolean): Iterator[SimpleFeature] = {
    val stat = Stat(sft, query)
    val toObserve = transform match {
      case None                => features
      case Some((tdefs, tsft)) => projectionTransform(features, sft, tsft, tdefs, None)
    }
    toObserve.foreach(stat.observe)
    val encoded = if (encode) { StatsScan.encodeStat(sft)(stat) } else { stat.toJson }
    Iterator(new ScalaSimpleFeature(StatsScan.StatsSft, "stat", Array(encoded, GeometryUtils.zeroPoint)))
  }

  // Projects each feature onto the transform schema. Without a sort a single mutable
  // feature is reused for every row; with a sort the results must be materialized,
  // so a fresh feature is allocated per row.
  private def projectionTransform(features: Iterator[SimpleFeature],
                                  sft: SimpleFeatureType,
                                  transform: SimpleFeatureType,
                                  definitions: String,
                                  ordering: Option[Ordering[SimpleFeature]]): Iterator[SimpleFeature] = {
    val attributes = TransformSimpleFeature.attributes(sft, transform, definitions)

    def setValues(from: SimpleFeature, to: ScalaSimpleFeature): ScalaSimpleFeature = {
      var i = 0
      while (i < attributes.length) {
        to.setAttributeNoConvert(i, attributes(i).apply(from))
        i += 1
      }
      to.setId(from.getID)
      to
    }

    ordering match {
      case None    => val reusableSf = new ScalaSimpleFeature(transform, ""); features.map(setValues(_, reusableSf))
      case Some(o) => features.map(setValues(_, new ScalaSimpleFeature(transform, ""))).toList.sorted(o).iterator
    }
  }

  // Pass-through, optionally sorted (sorting materializes the iterator).
  private def noTransform(sft: SimpleFeatureType,
                          features: Iterator[SimpleFeature],
                          ordering: Option[Ordering[SimpleFeature]]): Iterator[SimpleFeature] = {
    ordering match {
      case None    => features
      case Some(o) => features.toList.sorted(o).iterator
    }
  }

  /**
   * Used when we don't have an auth provider - any visibilities in the feature will
   * cause the check to fail, so we can skip parsing
   *
   * @param f simple feature to check
   * @param ignored not used
   * @return true if feature is visible without any authorizations, otherwise false
   */
  private def noAuthVisibilityCheck(f: SimpleFeature, ignored: Seq[Array[Byte]]): Boolean = {
    val vis = SecurityUtils.getVisibility(f)
    vis == null || vis.isEmpty
  }

  /**
   * Parses any visibilities in the feature and compares with the user's authorizations
   *
   * @param f simple feature to check
   * @param auths authorizations for the current user
   * @return true if feature is visible to the current user, otherwise false
   */
  private def authVisibilityCheck(f: SimpleFeature, auths: Seq[Array[Byte]]): Boolean = {
    val vis = SecurityUtils.getVisibility(f)
    vis == null || VisibilityEvaluator.parse(vis).evaluate(auths)
  }
}
| ddseapy/geomesa | geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/planning/InMemoryQueryRunner.scala | Scala | apache-2.0 | 15,377 |
package com.twitter.finagle
import com.twitter.finagle.client.{DefaultPool, StackClient, StdStackClient, Transporter}
import com.twitter.finagle.decoder.LengthFieldFramer
import com.twitter.finagle.mysql._
import com.twitter.finagle.mysql.transport.Packet
import com.twitter.finagle.netty4.Netty4Transporter
import com.twitter.finagle.param.{ExceptionStatsHandler => _, Monitor => _, ResponseClassifier => _, Tracer => _, _}
import com.twitter.finagle.service.{ResponseClassifier, RetryBudget}
import com.twitter.finagle.stats.{ExceptionStatsHandler, NullStatsReceiver, StatsReceiver}
import com.twitter.finagle.tracing._
import com.twitter.finagle.transport.{Transport, TransportContext}
import com.twitter.io.Buf
import com.twitter.util.{Duration, Future, Monitor}
import java.net.SocketAddress
/**
* Supplements a [[com.twitter.finagle.Client]] with convenient
* builder methods for constructing a mysql client.
*/
trait MysqlRichClient { self: com.twitter.finagle.Client[Request, Result] =>
  /**
   * Whether the client supports unsigned integer fields
   */
  protected val supportUnsigned: Boolean
  // Stats receiver handed to rich clients created by `newRichClient`;
  // defaults to a no-op receiver unless overridden by the concrete client.
  def richClientStatsReceiver: StatsReceiver = NullStatsReceiver
  /**
   * Creates a new `RichClient` connected to the logical
   * destination described by `dest` with the assigned
   * `label`. The `label` is used to scope client stats.
   */
  def newRichClient(
    dest: Name,
    label: String
  ): mysql.Client with mysql.Transactions =
    mysql.Client(newClient(dest, label), richClientStatsReceiver, supportUnsigned)
  /**
   * Creates a new `RichClient` connected to the logical
   * destination described by `dest`.
   */
  def newRichClient(dest: String): mysql.Client with mysql.Transactions =
    mysql.Client(newClient(dest), richClientStatsReceiver, supportUnsigned)
}
object MySqlClientTracingFilter {
  /** Stack module that installs MySQL-aware tracing at the standard client-tracing role. */
  object Stackable extends Stack.Module1[param.Label, ServiceFactory[Request, Result]] {
    val role: Stack.Role = ClientTracingFilter.role
    val description: String = "Add MySql client specific annotations to the trace"
    def make(
      _label: param.Label,
      next: ServiceFactory[Request, Result]
    ): ServiceFactory[Request, Result] = {
      // TODO(jeff): should be able to get this directly from ClientTracingFilter
      val sendRecvAnnotations = new AnnotatingTracingFilter[Request, Result](
        _label.label,
        Annotation.ClientSend(),
        Annotation.ClientRecv()
      )
      sendRecvAnnotations.andThen(TracingFilter).andThen(next)
    }
  }
  /** Records the SQL text (or statement id) of each request into the active trace. */
  object TracingFilter extends SimpleFilter[Request, Result] {
    def apply(request: Request, service: Service[Request, Result]): Future[Result] = {
      if (Trace.isActivelyTracing) record(request)
      service(request)
    }
    private[this] def record(request: Request): Unit = request match {
      case QueryRequest(sqlStatement) => Trace.recordBinary("mysql.query", sqlStatement)
      case PrepareRequest(sqlStatement) => Trace.recordBinary("mysql.prepare", sqlStatement)
      // TODO: save the prepared statement and put it in the executed request trace
      case ExecuteRequest(id, _, _, _) => Trace.recordBinary("mysql.execute", id)
      case _ => Trace.record("mysql." + request.getClass.getSimpleName.replace("$", ""))
    }
  }
}
/**
* @example {{{
* val client = Mysql.client
* .withCredentials("<username>", "<password>")
* .withDatabase("<db>")
* .newRichClient("inet!localhost:3306")
* }}}
*/
object Mysql extends com.twitter.finagle.Client[Request, Result] with MysqlRichClient {
  // Default unsigned-column support is taken from the UnsignedColumns stack param's default.
  protected val supportUnsigned: Boolean = param.UnsignedColumns.param.default.supported
object param {
/**
* A class eligible for configuring the maximum number of prepare
* statements. After creating `num` prepare statements, we'll start purging
* old ones.
*/
case class MaxConcurrentPrepareStatements(num: Int) {
assert(num <= Int.MaxValue, s"$num is not <= Int.MaxValue bytes")
assert(num > 0, s"$num must be positive")
def mk(): (MaxConcurrentPrepareStatements, Stack.Param[MaxConcurrentPrepareStatements]) =
(this, MaxConcurrentPrepareStatements.param)
}
object MaxConcurrentPrepareStatements {
implicit val param = Stack.Param(MaxConcurrentPrepareStatements(20))
}
/**
* Configure whether to support unsigned integer fields should be considered when
* returning elements of a [[Row]]. If not supported, unsigned fields will be decoded
* as if they were signed, potentially resulting in corruption in the case of overflowing
* the signed representation. Because Java doesn't support unsigned integer types
* widening may be necessary to support the unsigned variants. For example, an unsigned
* Int is represented as a Long.
*
* `Value` representations of unsigned columns which are widened when enabled:
* `ByteValue` -> `ShortValue``
* `ShortValue` -> IntValue`
* `LongValue` -> `LongLongValue`
* `LongLongValue` -> `BigIntValue`
*/
case class UnsignedColumns(supported: Boolean)
object UnsignedColumns {
implicit val param = Stack.Param(UnsignedColumns(false))
}
}
  object Client {
    // Stack module wrapping the pool so that a pooled connection can be closed
    // ("poisoned") on demand instead of being returned to the pool.
    private object PoisonConnection {
      val Role: Stack.Role = Stack.Role("PoisonConnection")
      def module: Stackable[ServiceFactory[Request, Result]] =
        new Stack.Module0[ServiceFactory[Request, Result]] {
          def role: Stack.Role = Role
          def description: String = "Allows the connection to be poisoned and recycled"
          def make(next: ServiceFactory[Request, Result]): ServiceFactory[Request, Result] =
            new PoisonConnection(next)
        }
    }
    /**
     * This is a workaround for connection pooling that allows us to close a connection.
     */
    private class PoisonConnection(underlying: ServiceFactory[Request, Result])
      extends ServiceFactoryProxy(underlying) {
      // Wraps each checked-out service: a PoisonConnectionRequest closes the underlying
      // factory (instead of being dispatched) and answers with the poisoned-result sentinel.
      override def apply(conn: ClientConnection): Future[Service[Request, Result]] = {
        super.apply(conn).map { svc =>
          new ServiceProxy[Request, Result](svc) {
            override def apply(request: Request): Future[Result] = {
              if (request eq PoisonConnectionRequest) {
                underlying.close().before {
                  Future.value(PoisonedConnectionResult)
                }
              } else {
                super.apply(request)
              }
            }
          }
        }
      }
    }
    // Pool at most one connection per endpoint (high = 1), with no minimum and
    // an unbounded wait queue.
    private val params: Stack.Params = StackClient.defaultParams +
      ProtocolLibrary("mysql") +
      DefaultPool.Param(
        low = 0,
        high = 1,
        bufferSize = 0,
        idleTime = Duration.Top,
        maxWaiters = Int.MaxValue
      )
    private val stack: Stack[ServiceFactory[Request, Result]] = StackClient.newStack
      .replace(ClientTracingFilter.role, MySqlClientTracingFilter.Stackable)
      // Note: there is a stack overflow in insertAfter using CanStackFrom, thus the module.
      .insertAfter(DefaultPool.Role, PoisonConnection.module)
  }
/**
* Implements a mysql client in terms of a
* [[com.twitter.finagle.client.StackClient]]. The client inherits a wealth
* of features from finagle including connection pooling and load
* balancing.
*
* Additionally, this class provides methods for constructing a rich
* client which exposes a rich mysql api.
*/
  case class Client(
    stack: Stack[ServiceFactory[Request, Result]] = Client.stack,
    params: Stack.Params = Client.params
  ) extends StdStackClient[Request, Result, Client]
    with WithSessionPool[Client]
    with WithDefaultLoadBalancer[Client]
    with MysqlRichClient {
    protected val supportUnsigned: Boolean = params[param.UnsignedColumns].supported
    protected def copy1(
      stack: Stack[ServiceFactory[Request, Result]] = this.stack,
      params: Stack.Params = this.params
    ): Client = copy(stack, params)
    protected type In = Buf
    protected type Out = Buf
    protected type Context = TransportContext
    // Frames the byte stream on the MySQL packet boundary: a 3-byte
    // little-endian length prefix that excludes the header itself.
    protected def newTransporter(addr: SocketAddress): Transporter[In, Out, Context] = {
      val framerFactory = () => {
        new LengthFieldFramer(
          lengthFieldBegin = 0,
          lengthFieldLength = 3,
          lengthAdjust = Packet.HeaderSize, // Packet size field doesn't include the header size.
          maxFrameLength = Packet.HeaderSize + Packet.MaxBodySize,
          bigEndian = false
        )
      }
      Netty4Transporter.framedBuf(Some(framerFactory), addr, params)
    }
    // Builds the dispatcher that performs the handshake and multiplexes
    // requests over a single framed transport.
    protected def newDispatcher(transport: Transport[Buf, Buf] {
      type Context <: Client.this.Context
    }): Service[Request, Result] = {
      val param.MaxConcurrentPrepareStatements(num) = params[param.MaxConcurrentPrepareStatements]
      mysql.ClientDispatcher(
        transport.map(_.toBuf, Packet.fromBuf),
        Handshake(params),
        num,
        supportUnsigned
      )
    }
    /**
     * The maximum number of concurrent prepare statements.
     */
    def withMaxConcurrentPrepareStatements(num: Int): Client =
      configured(param.MaxConcurrentPrepareStatements(num))
    /**
     * The credentials to use when authenticating a new session.
     */
    def withCredentials(u: String, p: String): Client =
      configured(Handshake.Credentials(Option(u), Option(p)))
    /**
     * Database to use when this client establishes a new session.
     */
    def withDatabase(db: String): Client =
      configured(Handshake.Database(Option(db)))
    /**
     * The default character set used when establishing a new session.
     */
    def withCharset(charset: Short): Client =
      configured(Handshake.Charset(charset))
    /**
     * Don't set the CLIENT_FOUND_ROWS flag when establishing a new
     * session. This will make "INSERT ... ON DUPLICATE KEY UPDATE"
     * statements return the "correct" update count.
     *
     * See https://dev.mysql.com/doc/refman/5.7/en/information-functions.html#function_row-count
     */
    def withAffectedRows(): Client =
      configured(Handshake.FoundRows(false))
    // Java-friendly forwarders
    // See https://issues.scala-lang.org/browse/SI-8905
    override val withSessionPool: SessionPoolingParams[Client] =
      new SessionPoolingParams(this)
    override val withLoadBalancer: DefaultLoadBalancingParams[Client] =
      new DefaultLoadBalancingParams(this)
    override val withTransport: ClientTransportParams[Client] =
      new ClientTransportParams(this)
    override val withSession: ClientSessionParams[Client] =
      new ClientSessionParams(this)
    override val withSessionQualifier: SessionQualificationParams[Client] =
      new SessionQualificationParams(this)
    override val withAdmissionControl: ClientAdmissionControlParams[Client] =
      new ClientAdmissionControlParams(this)
    override def withLabel(label: String): Client = super.withLabel(label)
    override def withStatsReceiver(statsReceiver: StatsReceiver): Client =
      super.withStatsReceiver(statsReceiver)
    override def withMonitor(monitor: Monitor): Client = super.withMonitor(monitor)
    override def withTracer(tracer: Tracer): Client = super.withTracer(tracer)
    override def withExceptionStatsHandler(exceptionStatsHandler: ExceptionStatsHandler): Client =
      super.withExceptionStatsHandler(exceptionStatsHandler)
    override def withRequestTimeout(timeout: Duration): Client =
      super.withRequestTimeout(timeout)
    override def withResponseClassifier(responseClassifier: ResponseClassifier): Client =
      super.withResponseClassifier(responseClassifier)
    override def withRetryBudget(budget: RetryBudget): Client = super.withRetryBudget(budget)
    override def withRetryBackoff(backoff: Stream[Duration]): Client =
      super.withRetryBackoff(backoff)
    override def withStack(stack: Stack[ServiceFactory[Request, Result]]): Client =
      super.withStack(stack)
    override def configured[P](psp: (P, Stack.Param[P])): Client = super.configured(psp)
    override def filtered(filter: Filter[Request, Result, Request, Result]): Client =
      super.filtered(filter)
    // Rich clients share this client's configured stats receiver.
    override def richClientStatsReceiver: StatsReceiver = params[Stats].statsReceiver
  }
  /** A default [[Client]] instance, suitable for further configuration via the `with*` methods. */
  def client: Mysql.Client = Client()
  // Forwarders so `Mysql` itself satisfies com.twitter.finagle.Client using the default client.
  def newClient(dest: Name, label: String): ServiceFactory[Request, Result] =
    client.newClient(dest, label)
  def newService(dest: Name, label: String): Service[Request, Result] =
    client.newService(dest, label)
}
| mkhq/finagle | finagle-mysql/src/main/scala/com/twitter/finagle/Mysql.scala | Scala | apache-2.0 | 12,453 |
package com.nthportal.shell.async
import scala.concurrent.Future
/**
 * Something which transmits input between a source and an [[AsyncShell]].
 * Implementations are responsible for delivering submitted actions to the
 * shell for asynchronous execution.
 */
trait InputChannel extends InputProvider {
  /**
   * Sends an action to be executed asynchronously by an [[AsyncShell]].
   *
   * Returns a [[Future]] which will contain the result of the action.
   * The Future returned SHALL be equivalent to the one returned by
   * invoking the [[InputAction.future future]] method of the given
   * action.
   *
   * @param action the action to be executed
   * @tparam T the type of the result of the action to be executed
   * @return a Future which will contain the result of the action
   */
  def sendAction[T](action: InputAction[T]): Future[T]
}
| NthPortal/app-shell | src/main/scala/com/nthportal/shell/async/InputChannel.scala | Scala | apache-2.0 | 756 |
/*
* ******************************************************************************
* * Copyright (C) 2013 Christopher Harris (Itszuvalex)
* * Itszuvalex@gmail.com
* *
* * This program is free software; you can redistribute it and/or
* * modify it under the terms of the GNU General Public License
* * as published by the Free Software Foundation; either version 2
* * of the License, or (at your option) any later version.
* *
* * This program is distributed in the hope that it will be useful,
* * but WITHOUT ANY WARRANTY; without even the implied warranty of
* * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* * GNU General Public License for more details.
* *
* * You should have received a copy of the GNU General Public License
* * along with this program; if not, write to the Free Software
* * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* *****************************************************************************
*/
package com.itszuvalex.femtocraft.power.tiles
import com.itszuvalex.femtocraft.FemtocraftGuiConstants
import com.itszuvalex.femtocraft.core.tiles.TileEntityBase
import com.itszuvalex.femtocraft.core.traits.tile.MultiBlockComponent
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.inventory.IInventory
import net.minecraft.item.ItemStack
import net.minecraft.tileentity.TileEntity
import net.minecraftforge.common.util.ForgeDirection
import net.minecraftforge.fluids.{Fluid, FluidStack, FluidTankInfo, IFluidHandler}
/**
 * Housing block of the nano fission reactor multiblock. All fluid-handler and
 * inventory operations are delegated to the multiblock's controller tile entity
 * (located at `info.x/y/z`) when this housing is part of a valid multiblock;
 * otherwise neutral defaults (0 / null / false / empty) are returned.
 */
class TileEntityNanoFissionReactorHousing extends TileEntityBase with MultiBlockComponent with IFluidHandler with IInventory {
  // Looks up the multiblock controller tile entity, if this housing currently
  // belongs to a valid multiblock.
  private def coreTile: Option[TileEntity] =
    if (isValidMultiBlock) Option(worldObj.getTileEntity(info.x, info.y, info.z)) else None

  // Controller viewed as a fluid handler. A controller of an unexpected type is
  // treated the same as a missing one (the original unconditional cast would
  // have thrown a ClassCastException in that case).
  private def fluidCore: Option[IFluidHandler] = coreTile.collect { case h: IFluidHandler => h }

  // Controller viewed as an inventory; same unexpected-type handling as above.
  private def inventoryCore: Option[IInventory] = coreTile.collect { case i: IInventory => i }

  /** Opens the reactor GUI at the controller's position when the multiblock is valid. */
  override def onSideActivate(par5EntityPlayer: EntityPlayer, side: Int): Boolean = {
    if (isValidMultiBlock && worldObj.getTileEntity(info.x, info.y, info.z) != null) {
      par5EntityPlayer.openGui(getMod, getGuiID, worldObj, info.x, info.y, info.z)
      true
    } else {
      false
    }
  }

  override def getGuiID: Int = FemtocraftGuiConstants.NanoFissionReactorGuiID

  override def hasGUI: Boolean = isValidMultiBlock

  def fill(from: ForgeDirection, resource: FluidStack, doFill: Boolean): Int =
    fluidCore.fold(0) { core =>
      val result = core.fill(from, resource, doFill)
      if (result > 0) setModified()
      result
    }

  def drain(from: ForgeDirection, resource: FluidStack, doDrain: Boolean): FluidStack =
    fluidCore.fold(null: FluidStack) { core =>
      val result = core.drain(from, resource, doDrain)
      if (result != null) setModified()
      result
    }

  def drain(from: ForgeDirection, maxDrain: Int, doDrain: Boolean): FluidStack =
    fluidCore.fold(null: FluidStack) { core =>
      val result = core.drain(from, maxDrain, doDrain)
      if (result != null) setModified()
      result
    }

  // Originally checked `info.isValidMultiBlock` while every other method used
  // `isValidMultiBlock`; normalized to the common form.
  def canFill(from: ForgeDirection, fluid: Fluid): Boolean =
    fluidCore.exists(_.canFill(from, fluid))

  def canDrain(from: ForgeDirection, fluid: Fluid): Boolean =
    fluidCore.exists(_.canDrain(from, fluid))

  // Originally returned null when the core was missing but an empty array when
  // the multiblock was invalid; normalized to always return an empty array so
  // callers never need a null check.
  def getTankInfo(from: ForgeDirection): Array[FluidTankInfo] =
    fluidCore.fold(Array.empty[FluidTankInfo])(_.getTankInfo(from))

  def getSizeInventory: Int = inventoryCore.fold(0)(_.getSizeInventory)

  def getStackInSlot(i: Int): ItemStack =
    inventoryCore.fold(null: ItemStack) { core =>
      val result = core.getStackInSlot(i)
      // NOTE(review): the original marked the multiblock modified/dirty on a
      // plain read whenever the slot was non-empty; preserved as-is, but this
      // looks unintentional - confirm.
      if (result != null) {
        setModified()
        markDirty()
      }
      result
    }

  def decrStackSize(i: Int, j: Int): ItemStack =
    inventoryCore.fold(null: ItemStack) { core =>
      val result = core.decrStackSize(i, j)
      if (result != null) {
        setModified()
        markDirty()
      }
      result
    }

  def getStackInSlotOnClosing(i: Int): ItemStack =
    inventoryCore.fold(null: ItemStack)(_.getStackInSlotOnClosing(i))

  def setInventorySlotContents(i: Int, itemstack: ItemStack) {
    inventoryCore.foreach { core =>
      core.setInventorySlotContents(i, itemstack)
      setModified()
      markDirty()
    }
  }

  def getInventoryName: String = inventoryCore.fold(null: String)(_.getInventoryName)

  def hasCustomInventoryName: Boolean = inventoryCore.exists(_.hasCustomInventoryName)

  def getInventoryStackLimit: Int = inventoryCore.fold(0)(_.getInventoryStackLimit)

  def isUseableByPlayer(entityplayer: EntityPlayer): Boolean =
    inventoryCore.exists(_.isUseableByPlayer(entityplayer))

  def openInventory() {
    inventoryCore.foreach(_.openInventory())
  }

  def closeInventory() {
    inventoryCore.foreach(_.closeInventory())
  }

  def isItemValidForSlot(i: Int, itemstack: ItemStack): Boolean =
    inventoryCore.exists(_.isItemValidForSlot(i, itemstack))
}
| Itszuvalex/Femtocraft-alpha-1 | src/main/java/com/itszuvalex/femtocraft/power/tiles/TileEntityNanoFissionReactorHousing.scala | Scala | gpl-2.0 | 7,707 |
/*
* Original implementation (C) 2009-2011 Debasish Ghosh
* Adapted and extended in 2011 by Mathias Doenitz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spray.json
import annotation.implicitNotFound
/**
 * Provides the JSON deserialization for type T.
 *
 * @tparam T the type deserialized from JSON
 */
@implicitNotFound(msg = "Cannot find JsonReader or JsonFormat type class for ${T}")
trait JsonReader[T] {
  def read(json: JsValue): T
}
object JsonReader {
  // Allows a plain `JsValue => T` function literal to be used wherever a JsonReader is expected.
  implicit def func2Reader[T](f: JsValue => T): JsonReader[T] = new JsonReader[T] {
    def read(json: JsValue) = f(json)
  }
}
/**
 * Provides the JSON serialization for type T.
 *
 * @tparam T the type serialized to JSON
 */
@implicitNotFound(msg = "Cannot find JsonWriter or JsonFormat type class for ${T}")
trait JsonWriter[T] {
  def write(obj: T): JsValue
}
object JsonWriter {
  // Allows a plain `T => JsValue` function literal to be used wherever a JsonWriter is expected.
  implicit def func2Writer[T](f: T => JsValue): JsonWriter[T] = new JsonWriter[T] {
    def write(obj: T) = f(obj)
  }
}
/**
 * Provides the JSON deserialization and serialization for type T.
 */
trait JsonFormat[T] extends JsonReader[T] with JsonWriter[T]
/**
 * A special JsonReader capable of reading a legal JSON root object, i.e. either a JSON array or a JSON object.
 */
@implicitNotFound(msg = "Cannot find RootJsonReader or RootJsonFormat type class for ${T}")
trait RootJsonReader[T] extends JsonReader[T]
/**
 * A special JsonWriter capable of writing a legal JSON root object, i.e. either a JSON array or a JSON object.
 */
@implicitNotFound(msg = "Cannot find RootJsonWriter or RootJsonFormat type class for ${T}")
trait RootJsonWriter[T] extends JsonWriter[T]
/**
 * A special JsonFormat signaling that the format produces a legal JSON root object, i.e. either a JSON array
 * or a JSON object. Combines [[RootJsonReader]] and [[RootJsonWriter]].
 */
trait RootJsonFormat[T] extends JsonFormat[T] with RootJsonReader[T] with RootJsonWriter[T]
package mesosphere.marathon
package core.election
import akka.stream.scaladsl.{Keep, Sink, SinkQueue, Source}
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.Executors
import mesosphere.AkkaUnitTest
import mesosphere.marathon.core.base.LifecycleState
import mesosphere.marathon.core.storage.store.impl.zk.NoRetryPolicy
import mesosphere.marathon.metrics.dummy.DummyMetrics
import mesosphere.marathon.storage.StorageConfig
import mesosphere.marathon.stream.EnrichedFlow
import mesosphere.marathon.test.ThrowExceptionAndDontCrashStrategy
import mesosphere.marathon.util.{LifeCycledCloseable, ZookeeperServerTest}
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.framework.recipes.leader.LeaderLatch
import org.rogach.scallop.ScallopConf
import org.scalatest.Inside
import org.scalatest.concurrent.Eventually
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration._
class CuratorElectionStreamTest extends AkkaUnitTest with Inside with ZookeeperServerTest with Eventually {
  // Monotonic counter so each fixture gets a unique leader path in ZooKeeper.
  val prefixId = new AtomicInteger(0)
  case class Fixture(prefix: String = "curator") {
    val metrics = DummyMetrics
    val leaderPath = s"/curator-${prefixId.getAndIncrement}"
    // Creates a fresh Curator client and blocks until it is connected.
    def newClient() = {
      val c = CuratorFrameworkFactory.newClient(zkServer.connectUri, NoRetryPolicy)
      c.start()
      c.blockUntilConnected()
      c
    }
    val client = new LifeCycledCloseable(newClient())
    val client2 = new LifeCycledCloseable(newClient())
    // Election callbacks run on a dedicated single-threaded executor.
    val electionExecutor = Executors.newSingleThreadExecutor()
    val electionEC = ExecutionContext.fromExecutor(electionExecutor)
    lazy val dummyLatch = new LeaderLatch(client.closeable, leaderPath + "-curator", "dummy")
  }
  // Runs the test body with a fresh fixture, closing both clients and shutting
  // down the election executor afterwards.
  def withFixture(fn: Fixture => Unit): Unit = {
    val f = Fixture()
    try fn(f)
    finally {
      f.client.close()
      f.client2.close()
      f.electionExecutor.shutdown()
    }
  }
  // Pulls events from the queue, skipping the transient Standby(None) state,
  // and returns the first "known" leadership state (or None if the stream ended).
  private def nextKnownState(sinkQueue: SinkQueue[LeadershipState]): Option[LeadershipState] = {
    val next = sinkQueue.pull().futureValue
    if (next == Some(LeadershipState.Standby(None)))
      nextKnownState(sinkQueue)
    else
      next
  }
  "CuratorElectionStream.newCuratorConnection" should {
    "throw an exception when given an unresolvable hostname" in {
      val conf = new ScallopConf(args = List("--zk", "zk://unresolvable:8080/marathon/leader")) with ZookeeperConf
      conf.verify()
      a[Throwable] shouldBe thrownBy {
        StorageConfig.curatorFramework(conf, ThrowExceptionAndDontCrashStrategy, LifecycleState.Ignore).client
      }
    }
  }
  "Yields an event that it is the leader on connection" in withFixture { f =>
    val (cancellable, leader) = CuratorElectionStream(
      f.metrics, f.client, f.leaderPath, 5000.millis, "host:8080", f.electionEC)
      .toMat(Sink.queue())(Keep.both)
      .run
    nextKnownState(leader) shouldBe Some(LeadershipState.ElectedAsLeader)
    cancellable.cancel()
    leader.pull().futureValue shouldBe Some(LeadershipState.Standby(None))
    leader.pull().futureValue shouldBe None
  }
  "Abdicates leadership immediately when the client is closed" in withFixture { f =>
    // implicit val patienceConfig = PatienceConfig(30.seconds, 10.millis)
    val (cancellable1, leader1) = CuratorElectionStream(
      f.metrics, f.client, f.leaderPath, 15000.millis, "host:1", f.electionEC)
      .toMat(Sink.queue())(Keep.both)
      .run
    nextKnownState(leader1) shouldBe Some(LeadershipState.ElectedAsLeader)
    val (cancellable2, leader2) = CuratorElectionStream(
      f.metrics, f.client2, f.leaderPath, 15000.millis, "host:2", f.electionEC)
      .toMat(Sink.queue())(Keep.both)
      .run
    nextKnownState(leader2) shouldBe Some(LeadershipState.Standby(Some("host:1")))
    f.client.close() // simulate a connection close for the first client
    nextKnownState(leader2) shouldBe Some(LeadershipState.ElectedAsLeader)
    cancellable1.cancel()
    cancellable2.cancel()
  }
  "Monitors leadership changes" in withFixture { f =>
    val (cancellable1, leader1) = CuratorElectionStream(
      f.metrics, f.client, f.leaderPath, 15000.millis, "changehost:1", f.electionEC)
      .toMat(Sink.queue())(Keep.both)
      .run
    nextKnownState(leader1) shouldBe Some(LeadershipState.ElectedAsLeader)
    val (cancellable2, leader2) = CuratorElectionStream(
      f.metrics, f.client, f.leaderPath, 15000.millis, "changehost:2", f.electionEC)
      .toMat(Sink.queue())(Keep.both)
      .run
    nextKnownState(leader2) shouldBe Some(LeadershipState.Standby(Some("changehost:1")))
    eventually { f.dummyLatch.getParticipants.size() shouldBe 2 } // wait for leader2 to register its leadership record
    val (cancellable3, leader3) = CuratorElectionStream(
      f.metrics, f.client, f.leaderPath, 15000.millis, "changehost:3", f.electionEC)
      .toMat(Sink.queue())(Keep.both)
      .run
    nextKnownState(leader3) shouldBe Some(LeadershipState.Standby(Some("changehost:1")))
    cancellable1.cancel()
    nextKnownState(leader2) shouldBe Some(LeadershipState.ElectedAsLeader)
    nextKnownState(leader3) shouldBe Some(LeadershipState.Standby(Some("changehost:2")))
    cancellable2.cancel()
    cancellable3.cancel()
  }
  "It cleans up after itself when the stream completes due to an exception" in withFixture { f =>
    val killSwitch = Promise[Unit]
    val (cancellable, events) = CuratorElectionStream(
      f.metrics, f.client, f.leaderPath, 15000.millis, "exceptionhost:1", f.electionEC)
      .via(EnrichedFlow.stopOnFirst(Source.fromFuture(killSwitch.future)))
      .toMat(Sink.queue())(Keep.both)
      .run
    eventually { f.client.beforeCloseHooksLength shouldBe 1 }
    nextKnownState(events) shouldBe Some(LeadershipState.ElectedAsLeader)
    killSwitch.success(())
    events.pull().futureValue shouldBe None
    eventually { f.client.beforeCloseHooksLength shouldBe 0 }
  }
  "It fails at least one of the streams if multiple participants register with the same id" in withFixture { f =>
    /*
     * It's not possible to predict which of the streams will crash; it's inherently racy. Participant 2 could connect,
     * detect the duplicate, crash, and remove its leader record before the participant 1 has a chance to see it.
     *
     * Conversely, participant 2 could connect, and already connected participant 1 could spot the illegal state and
     * remove its own participant record before participant 2 first sees any of the participant records.
     *
     * Or, both could see spot the illegal state, and both could crash.
     */
    val futures = Stream.continually {
      CuratorElectionStream(f.metrics, f.client, f.leaderPath, 15000.millis, "duplicate-host", f.electionEC)
        .runWith(Sink.last)
    }.take(2)
    val failure = Future.firstCompletedOf(futures.map(_.failed)).futureValue
    inside(failure) {
      case ex: IllegalStateException =>
        ex.getMessage shouldBe "Multiple election participants have the same id: duplicate-host. This is not allowed."
    }
  }
  "CuratorElectionStream quickly emits uncertainty about current leader during connection troubles" in withFixture { f =>
    val (cancellable, leader) = CuratorElectionStream(
      f.metrics, f.client, f.leaderPath, 5000.millis, "host:8080", f.electionEC)
      .toMat(Sink.queue())(Keep.both)
      .run
    Given("an elected leader")
    nextKnownState(leader) shouldBe Some(LeadershipState.ElectedAsLeader)
    When("we stop the Zookeeper server")
    zkServer.stop()
    val serverStopped = System.currentTimeMillis()
    Then("The stream should emit uncertainty about leadership within 5 seconds")
    leader.pull().futureValue shouldBe Some(LeadershipState.Standby(None))
    val uncertaintyDetermined = System.currentTimeMillis()
    (uncertaintyDetermined - serverStopped).millis should be < 5.seconds
    When("we start the Zookeeper server again")
    zkServer.start()
    Then("The stream should emit the current leadership state again")
    nextKnownState(leader) shouldBe Some(LeadershipState.ElectedAsLeader)
    cancellable.cancel()
    leader.pull().futureValue shouldBe Some(LeadershipState.Standby(None))
    leader.pull().futureValue shouldBe None
  }
}
| gsantovena/marathon | tests/integration/src/test/scala/mesosphere/marathon/core/election/CuratorElectionStreamTest.scala | Scala | apache-2.0 | 8,268 |
package toolkit.neuralnetwork
import cogx.platform.opencl.OpenCLPlatform
import libcog._
import toolkit.neuralnetwork.examples.networks.AlexNet
import toolkit.neuralnetwork.operator.fourierProjectMAC
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/** A regression test of AlexNet across batchSizes and available devices.
*
* Performance can often be increased by taking active control of clocks and fans speeds.
*
* The access to fan control is through nvidia-settings, which is tied to the /etc/X11/xorg.conf
* file. The GPUs permitting fan control must be listed in the xorg.conf file as driving
* a screen and a monitor (if only a virtual one). The following are some sample additions to xorg.conf
* to permit fan control on an NVIDIA 1080 described as 'Device 0' that had no monitor attached:
*
* # Dummy monitor description
* Section "Monitor"
* Identifier "Monitor2"
* VendorName "Unknown"
* ModelName "CRT-0"
* HorizSync 0.0 - 0.0
* VertRefresh 0.0
* Option "DPMS"
* EndSection
*
* Section "Screen"
* Identifier "Screen1"
* Device "Device0"
* Monitor "Monitor2"
* DefaultDepth 24
* Option "ConnectedMonitor" "CRT"
* Option "Coolbits" "12"
* Option "nvidiaXineramaInfoOrder" "CRT-0"
* Option "Stereo" "0"
* Option "metamodes" "nvidia-auto-select +0+0"
* Option "SLI" "Off"
* Option "MultiGPU" "Off"
* Option "BaseMosaic" "off"
* SubSection "Display"
* Depth 24
* EndSubSection
* EndSection
*
* Also, insert the following line into the xorg.conf "ServerLayout" section after the "Screen 0" line:
*
* Screen 1 "Screen1" RightOf "Screen0"
*
* Note the "Coolbits" line, a requirement for enabling fan control. If your GPUs are already
* driving a monitor, you can get by with only adding the "Coolbits" lines above. Finally do an
* internet search for nvidiafanspeed.py: a python script for controlling the fans.
* We have a derivative version in-house that we haven't made externally available.
*
* By our experience, active fan control yields a modest 3-4% performance boost on the 1080, and a 5-8%
* improvement on the TitanX. For the TitanX only, an additional 2% boost was achieved
* by actively setting clocks and power via (for a brick listed as GPU 1):
*
* nvidia-smi --persistence-mode=1
* nvidia-smi -i 1 --auto-boost-default=0
* nvidia-smi -i 1 --application-clocks=3505,1392
* nvidia-smi -i 1 --power-limit=275
*
* Bottom line, you might get as much as 10% added performance on a TitanX by worrying about its configuration.
*
* We haven't played with clock offsets or over-voltages. Those techniques, as well as anything mentioned here,
* you are doing at your own risk. Feel free to do your own research.
*
* @author Dick Carter
*/
object AlexNetTest extends App {
  // Some constants affecting the testing
  // When a device is first used, how long to run the model to potentially invoke thermal throttling before taking
  // measurements. On subsequent tests, a lesser time can be used, since the GPU is only idle during the compile.
  val firstWarmUpSeconds = 120
  val notFirstWarmUpSeconds = 60
  // Time for actual measurements
  val testSeconds = 60
  // If instead you're interested in peak (pre-throttled) performance, you might want to spec some cool down time.
  val coolDownSeconds = 0
  val deviceNum = -1 // If -1 then all devices
  // Human-readable descriptors of all OpenCL devices; the platform handle is
  // released as soon as the descriptors are captured.
  val deviceDescriptors = {
    val platform = OpenCLPlatform()
    try
      platform.devices.map(device => device.toString)
    finally
      platform.release()
  }
  // Number of OpenCL devices available for testing.
  def numDevices = deviceDescriptors.length
  // Indices of the devices to test: every device, or just `deviceNum` when it is set.
  val deviceRange =
    if (deviceNum == -1)
      0 to numDevices - 1
    else
      deviceNum to deviceNum
/** Core routine that builds and times a single AlexNet model. */
def timeAlexNet(batchSize: Int, device: Int, warmUpSeconds: Float, testSeconds: Float, coolDownSeconds: Float) = {
val cg = new ComputeGraph(device = Some(device)) {
new AlexNet(batchSize = batchSize, enableNormalization = true, useRandomData = false)
}
try {
cg.reset
if (warmUpSeconds > 0) {
print(s"Beginning warm-up phase of $warmUpSeconds seconds...")
cg.run
Thread.sleep((warmUpSeconds*1000).toLong)
cg.stop
println("done.")
}
println(s"Beginning testing phase of $testSeconds seconds...")
val testStartSimTick = cg.stop
val start = System.nanoTime()
cg.run
Thread.sleep((testSeconds*1000).toLong)
val testEndSimTick = cg.stop
val durationMsec = (System.nanoTime() - start)/1000000.0
val testSteps = testEndSimTick - testStartSimTick
val stepTimeMsec = durationMsec/testSteps
val stepfreq = 1000.0/stepTimeMsec
println(f"Step time = $stepTimeMsec%.3f msec. as measured over $testSteps steps (freq = $stepfreq%.3f Hz)")
if (coolDownSeconds > 0) {
print(s"Sleeping for $coolDownSeconds seconds to let the GPU cool...")
Thread.sleep((coolDownSeconds*1000).toLong)
println("done.\n")
}
stepTimeMsec
}
finally
cg.release
}
val batchSizes = Seq(32, 64, 128, 256, 512)
println(s"AlexNet regression over batchsizes $batchSizes and devices $deviceRange")
// Loop over selected devices. Then perform an inner test loop over the batchSizes.
for (device <- deviceRange) {
var warmUpSeconds = firstWarmUpSeconds
for (batchSize <- batchSizes) {
println(s"\n*************** Beginning batchSize $batchSize test " +
s"on device $device (${deviceDescriptors(device)}) ******************\n")
val stepTimeMsec = timeAlexNet(batchSize, device, warmUpSeconds, testSeconds, coolDownSeconds)
val trainingRate = batchSize * (1000.0 / stepTimeMsec)
println(f"\nFor batchsize ${batchSize}%d, training rate = ${trainingRate}%.2f images/sec.\n")
warmUpSeconds = notFirstWarmUpSeconds
}
}
}
| hpe-cct/cct-nn | src/test/scala/toolkit/neuralnetwork/performance/AlexNetTest.scala | Scala | apache-2.0 | 6,129 |
package slack.models
/** A Slack team member.
  *
  * Most fields are optional because the Slack API omits them depending on
  * the caller's permissions and the user's state.
  *
  * @param id      Slack-assigned user ID
  * @param name    the user's handle
  * @param deleted whether the account has been deactivated
  * @param color   color used to display the user in some clients
  * @param profile detailed profile information, see [[UserProfile]]
  */
case class User (
  id: String,
  name: String,
  deleted: Option[Boolean],
  color: Option[String],
  profile: Option[UserProfile],
  is_admin: Option[Boolean],
  is_owner: Option[Boolean],
  is_primary_owner: Option[Boolean],
  is_restricted: Option[Boolean],
  is_ultra_restricted: Option[Boolean],
  has_2fa: Option[Boolean],
  has_files: Option[Boolean]
)
/** Profile details attached to a [[User]].
  *
  * The `image_NN` fields are avatar URLs at the given pixel size; these are
  * always present, while the personal fields may be absent.
  */
case class UserProfile (
  first_name: Option[String],
  last_name: Option[String],
  real_name: Option[String],
  email: Option[String],
  skype: Option[String],
  phone: Option[String],
  image_24: String,
  image_32: String,
  image_48: String,
  image_72: String,
  image_192: String
)
package com.twitter.zipkin.storage.redis
import com.google.common.base.Charsets.UTF_8
import com.twitter.util.Await._
import com.twitter.util.Duration
import com.twitter.zipkin.storage.IndexedTraceId
import org.jboss.netty.buffer.ChannelBuffers.copiedBuffer
/** Exercises [[TraceIndex]] against a Redis instance provided by
  * [[RedisSpecification]]. Each test builds a fresh index whose keys are
  * namespaced with a "foo:" prefix to avoid collisions.
  */
class TraceIndexSpec extends RedisSpecification {
  // Fresh index per call (def, not val) with no TTL; keys prefixed "foo:".
  def index = new TraceIndex[String](_client, None) {
    override def encodeKey(key: String) = copiedBuffer("foo:" + key, UTF_8)
  }
  test("create/read") {
    result(index.add("key", 1, 1234))
    result(index.list("key", 1, 1)) should be(Seq(IndexedTraceId(1234, 1)))
  }
  // Results come back most-recent-first (4567 has the later timestamp).
  test("list with multiple trace ids") {
    result(index.add("key", 1, 1234))
    result(index.add("key", 2, 4567))
    result(index.list("key", 100, 100)) should be(Seq(
      IndexedTraceId(4567, 2),
      IndexedTraceId(1234, 1))
    )
  }
  test("list respects limit") {
    result(index.add("key", 1, 1234))
    result(index.add("key", 2, 4567))
    result(index.list("key", 1, 1)) should be(Seq(IndexedTraceId(1234, 1)))
  }
  // Re-adding the same trace id keeps one entry with the newest timestamp.
  test("list is unique on trace id") {
    result(index.add("key", 1, 1234))
    result(index.add("key", 2, 1234))
    result(index.list("key", 100, 100)) should be(Seq(IndexedTraceId(1234, 2)))
  }
  test("list respects ttl") {
    // Using seconds granularity as ttl literally expires entries
    val indexWithTtl = new TraceIndex[String](_client, Some(Duration.fromSeconds(10))) {
      override def encodeKey(key: String) = copiedBuffer("ttl:" + key, UTF_8)
    }
    // Timestamps are in microseconds; only the entry within the 10s TTL
    // window of the query time (35s) should survive.
    result(indexWithTtl.add("key", 10 * 1000000, 1234))
    result(indexWithTtl.add("key", 20 * 1000000, 4567))
    result(indexWithTtl.add("key", 30 * 1000000, 8910))
    result(indexWithTtl.add("key", 40 * 1000000, 1112))
    result(indexWithTtl.list("key", 35 * 1000000, 100)) should be(Seq(
      IndexedTraceId(8910, 30 * 1000000))
    )
  }
}
| jstanier/zipkin | zipkin-redis/src/test/scala/com/twitter/zipkin/storage/redis/TraceIndexSpec.scala | Scala | apache-2.0 | 1,842 |
package akka.persistence.jdbc.integration
import akka.persistence.jdbc.query.{ EventsByTagTest, MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }
// Runs the shared EventsByTagTest suite against each supported database,
// pairing the database-specific config file with its cleanup mixin.
class PostgresScalaEventsByTagTest extends EventsByTagTest("postgres-application.conf") with PostgresCleaner
class MySQLScalaEventByTagTest extends EventsByTagTest("mysql-application.conf") with MysqlCleaner
class OracleScalaEventByTagTest extends EventsByTagTest("oracle-application.conf") with OracleCleaner {
  // Oracle runs slower in CI, so stretch the suite's timeouts.
  override def timeoutMultiplier: Int = 4
}
class SqlServerScalaEventByTagTest extends EventsByTagTest("sqlserver-application.conf") with SqlServerCleaner
| dnvriend/akka-persistence-jdbc | core/src/it/scala/akka/persistence/jdbc/integration/EventsByTagTest.scala | Scala | apache-2.0 | 633 |
package com.yammer.dropwizard.examples
import com.yammer.dropwizard.config.{Bootstrap, Environment}
import com.yammer.dropwizard.{Logging, ScalaService}
/** Entry point for the example Dropwizard service: registers its CLI
  * commands at bootstrap time and wires up resources, a health check and a
  * managed object when the service runs.
  */
object ExampleService extends ScalaService[ExampleConfiguration] with Logging {
  // Called once at startup, before configuration is parsed.
  def initialize(bootstrap: Bootstrap[ExampleConfiguration]) {
    bootstrap.addCommand(new SayCommand)
    bootstrap.addCommand(new SplodyCommand)
  }
  // Called with the parsed configuration to assemble the running service.
  def run(configuration: ExampleConfiguration, environment: Environment) {
    environment.addResource(new HelloWorldResource(configuration.saying))
    environment.addResource(new UploadResource)
    environment.addResource(new SplodyResource)
    environment.addHealthCheck(new DumbHealthCheck)
    environment.manage(new StartableObject(configuration.saying))
  }
}
| ericholscher/dropwizard | dropwizard-scala_2.9.1/src/test/scala/com/yammer/dropwizard/examples/ExampleService.scala | Scala | apache-2.0 | 757 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.api.scala
import org.apache.flink.annotation.{Internal, Public, PublicEvolving}
import org.apache.flink.api.common.ExecutionConfig
import org.apache.flink.api.common.functions.{FilterFunction, FlatMapFunction, MapFunction, Partitioner}
import org.apache.flink.api.common.io.OutputFormat
import org.apache.flink.api.common.operators.ResourceSpec
import org.apache.flink.api.common.serialization.SerializationSchema
import org.apache.flink.api.common.state.MapStateDescriptor
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.functions.KeySelector
import org.apache.flink.api.java.tuple.{Tuple => JavaTuple}
import org.apache.flink.api.java.typeutils.ResultTypeQueryable
import org.apache.flink.api.scala.operators.ScalaCsvOutputFormat
import org.apache.flink.core.fs.{FileSystem, Path}
import org.apache.flink.streaming.api.collector.selector.OutputSelector
import org.apache.flink.streaming.api.datastream.{AllWindowedStream => JavaAllWindowedStream, DataStream => JavaStream, KeyedStream => JavaKeyedStream, _}
import org.apache.flink.streaming.api.functions.sink.SinkFunction
import org.apache.flink.streaming.api.functions.timestamps.{AscendingTimestampExtractor, BoundedOutOfOrdernessTimestampExtractor}
import org.apache.flink.streaming.api.functions.{AssignerWithPeriodicWatermarks, AssignerWithPunctuatedWatermarks, ProcessFunction, TimestampExtractor}
import org.apache.flink.streaming.api.operators.OneInputStreamOperator
import org.apache.flink.streaming.api.windowing.assigners._
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.{GlobalWindow, TimeWindow, Window}
import org.apache.flink.util.Collector
import scala.collection.JavaConverters._
@Public
class DataStream[T](stream: JavaStream[T]) {
  /**
   * Returns the [[StreamExecutionEnvironment]] associated with the current [[DataStream]].
   *
   * @return associated execution environment
   * @deprecated Use [[executionEnvironment]] instead
   */
  @deprecated
  @PublicEvolving
  def getExecutionEnvironment: StreamExecutionEnvironment =
    new StreamExecutionEnvironment(stream.getExecutionEnvironment)
  /**
   * Returns the TypeInformation for the elements of this DataStream.
   *
   * @return type information of the stream's elements
   * @deprecated Use [[dataType]] instead.
   */
  @deprecated
  @PublicEvolving
  def getType(): TypeInformation[T] = stream.getType()
  /**
   * Returns the parallelism of this operation.
   *
   * @deprecated Use [[parallelism]] instead.
   */
  @deprecated
  @PublicEvolving
  def getParallelism = stream.getParallelism
  /**
   * Returns the execution config.
   *
   * @deprecated Use [[executionConfig]] instead.
   */
  @deprecated
  @PublicEvolving
  def getExecutionConfig = stream.getExecutionConfig
  /**
   * Returns the ID of the DataStream.
   */
  @Internal
  private[flink] def getId = stream.getId()
  // --------------------------------------------------------------------------
  //  Scala-esque accessors (non-deprecated counterparts of the getters above)
  // --------------------------------------------------------------------------
  /**
   * Gets the underlying java DataStream object.
   */
  def javaStream: JavaStream[T] = stream
  /**
   * Returns the TypeInformation for the elements of this DataStream.
   */
  def dataType: TypeInformation[T] = stream.getType()
  /**
   * Returns the execution config.
   */
  def executionConfig: ExecutionConfig = stream.getExecutionConfig()
  /**
   * Returns the [[StreamExecutionEnvironment]] associated with this data stream.
   */
  def executionEnvironment: StreamExecutionEnvironment =
    new StreamExecutionEnvironment(stream.getExecutionEnvironment())
  /**
   * Returns the parallelism of this operation.
   */
  def parallelism: Int = stream.getParallelism()
  /**
   * Sets the parallelism of this operation. This must be at least 1.
   *
   * Only supported when the underlying stream is an operator; other streams
   * (e.g. pure partitionings) reject the call.
   *
   * @param parallelism the new parallelism, at least 1.
   * @return this stream, to allow call chaining.
   */
  def setParallelism(parallelism: Int): DataStream[T] = {
    stream match {
      case ds: SingleOutputStreamOperator[T] => ds.setParallelism(parallelism)
      case _ =>
        throw new UnsupportedOperationException(
          "Operator " + stream + " cannot set the parallelism.")
    }
    this
  }
def setMaxParallelism(maxParallelism: Int): DataStream[T] = {
stream match {
case ds: SingleOutputStreamOperator[T] => ds.setMaxParallelism(maxParallelism)
case _ =>
throw new UnsupportedOperationException("Operator " + stream + " cannot set the maximum" +
"paralllelism")
}
this
}
  /**
   * Returns the minimum resources of this operation.
   */
  @PublicEvolving
  def minResources: ResourceSpec = stream.getMinResources()
  /**
   * Returns the preferred resources of this operation.
   */
  @PublicEvolving
  def preferredResources: ResourceSpec = stream.getPreferredResources()
// ---------------------------------------------------------------------------
// Fine-grained resource profiles are an incomplete work-in-progress feature
// The setters are hence commented out at this point.
// ---------------------------------------------------------------------------
// /**
// * Sets the minimum and preferred resources of this operation.
// */
// @PublicEvolving
// def resources(minResources: ResourceSpec, preferredResources: ResourceSpec) : DataStream[T] =
// stream match {
// case stream : SingleOutputStreamOperator[T] => asScalaStream(
// stream.setResources(minResources, preferredResources))
// case _ =>
// throw new UnsupportedOperationException("Operator does not support " +
// "configuring custom resources specs.")
// this
// }
//
// /**
// * Sets the resource of this operation.
// */
// @PublicEvolving
// def resources(resources: ResourceSpec) : Unit = {
// this.resources(resources, resources)
// }
  /**
   * Gets the name of the current data stream. This name is
   * used by the visualization and logging during runtime.
   *
   * Only supported when the underlying stream is an operator.
   *
   * @return Name of the stream.
   */
  def name: String = stream match {
    case stream : SingleOutputStreamOperator[T] => stream.getName
    case _ => throw new
      UnsupportedOperationException("Only supported for operators.")
  }
  // --------------------------------------------------------------------------
  /**
   * Gets the name of the current data stream. This name is
   * used by the visualization and logging during runtime.
   *
   * @return Name of the stream.
   * @deprecated Use [[name]] instead
   */
  @deprecated
  @PublicEvolving
  def getName : String = name
/**
* Sets the name of the current data stream. This name is
* used by the visualization and logging during runtime.
*
* @return The named operator
*/
def name(name: String) : DataStream[T] = stream match {
case stream : SingleOutputStreamOperator[T] => asScalaStream(stream.name(name))
case _ => throw new UnsupportedOperationException("Only supported for operators.")
this
}
/**
* Sets an ID for this operator.
*
* The specified ID is used to assign the same operator ID across job
* submissions (for example when starting a job from a savepoint).
*
* <strong>Important</strong>: this ID needs to be unique per
* transformation and job. Otherwise, job submission will fail.
*
* @param uid The unique user-specified ID of this transformation.
* @return The operator with the specified ID.
*/
@PublicEvolving
def uid(uid: String) : DataStream[T] = javaStream match {
case stream : SingleOutputStreamOperator[T] => asScalaStream(stream.uid(uid))
case _ => throw new UnsupportedOperationException("Only supported for operators.")
this
}
  /**
   * Gets the side output identified by the given [[OutputTag]] from this
   * operator's stream.
   *
   * NOTE(review): this match has no default case, so a non-operator stream
   * would fail with a MatchError rather than the UnsupportedOperationException
   * thrown by the sibling methods — confirm whether that is intended.
   */
  @PublicEvolving
  def getSideOutput[X: TypeInformation](tag: OutputTag[X]): DataStream[X] = javaStream match {
    case stream : SingleOutputStreamOperator[X] =>
      asScalaStream(stream.getSideOutput(tag: OutputTag[X]))
  }
/**
* Sets an user provided hash for this operator. This will be used AS IS the create
* the JobVertexID.
* <p/>
* <p>The user provided hash is an alternative to the generated hashes, that is
* considered when identifying an operator through the default hash mechanics fails
* (e.g. because of changes between Flink versions).
* <p/>
* <p><strong>Important</strong>: this should be used as a workaround or for trouble
* shooting. The provided hash needs to be unique per transformation and job. Otherwise,
* job submission will fail. Furthermore, you cannot assign user-specified hash to
* intermediate nodes in an operator chain and trying so will let your job fail.
*
* @param hash the user provided hash for this operator.
* @return The operator with the user provided hash.
*/
@PublicEvolving
def setUidHash(hash: String) : DataStream[T] = javaStream match {
case stream : SingleOutputStreamOperator[T] =>
asScalaStream(stream.setUidHash(hash))
case _ => throw new UnsupportedOperationException("Only supported for operators.")
this
}
  /**
   * Turns off chaining for this operator so thread co-location will not be
   * used as an optimization. Chaining can be turned off for the whole
   * job by [[StreamExecutionEnvironment.disableOperatorChaining()]]
   * however it is not advised for performance considerations.
   *
   * @return this stream, to allow call chaining.
   */
  @PublicEvolving
  def disableChaining(): DataStream[T] = {
    stream match {
      case ds: SingleOutputStreamOperator[T] => ds.disableChaining()
      case _ =>
        throw new UnsupportedOperationException("Only supported for operators.")
    }
    this
  }
  /**
   * Starts a new task chain beginning at this operator. This operator will
   * not be chained (thread co-located for increased performance) to any
   * previous tasks even if possible.
   *
   * @return this stream, to allow call chaining.
   */
  @PublicEvolving
  def startNewChain(): DataStream[T] = {
    stream match {
      case ds: SingleOutputStreamOperator[T] => ds.startNewChain()
      case _ =>
        throw new UnsupportedOperationException("Only supported for operators.")
    }
    this
  }
  /**
   * Sets the slot sharing group of this operation. Parallel instances of
   * operations that are in the same slot sharing group will be co-located in the same
   * TaskManager slot, if possible.
   *
   * Operations inherit the slot sharing group of input operations if all input operations
   * are in the same slot sharing group and no slot sharing group was explicitly specified.
   *
   * Initially an operation is in the default slot sharing group. An operation can be put into
   * the default group explicitly by setting the slot sharing group to `"default"`.
   *
   * @param slotSharingGroup The slot sharing group name.
   * @return this stream, to allow call chaining.
   */
  @PublicEvolving
  def slotSharingGroup(slotSharingGroup: String): DataStream[T] = {
    stream match {
      case ds: SingleOutputStreamOperator[T] => ds.slotSharingGroup(slotSharingGroup)
      case _ =>
        throw new UnsupportedOperationException("Only supported for operators.")
    }
    this
  }
  /**
   * Sets the maximum time frequency (ms) for the flushing of the output
   * buffer. By default the output buffers flush only when they are full.
   *
   * @param timeoutMillis
   * The maximum time between two output flushes.
   * @return The operator with buffer timeout set.
   */
  def setBufferTimeout(timeoutMillis: Long): DataStream[T] = {
    stream match {
      case ds: SingleOutputStreamOperator[T] => ds.setBufferTimeout(timeoutMillis)
      case _ =>
        throw new UnsupportedOperationException("Only supported for operators.")
    }
    this
  }
  // --------------------------------------------------------------------------
  //  Stream Transformations
  // --------------------------------------------------------------------------
  /**
   * Creates a new DataStream by merging DataStream outputs of
   * the same type with each other. The DataStreams merged using this operator
   * will be transformed simultaneously.
   */
  def union(dataStreams: DataStream[T]*): DataStream[T] =
    asScalaStream(stream.union(dataStreams.map(_.javaStream): _*))
  /**
   * Creates a new ConnectedStreams by connecting
   * DataStream outputs of different type with each other. The
   * DataStreams connected using this operator can be used with CoFunctions.
   */
  def connect[T2](dataStream: DataStream[T2]): ConnectedStreams[T, T2] =
    asScalaStream(stream.connect(dataStream.javaStream))
  /**
   * Creates a new [[BroadcastConnectedStream]] by connecting the current
   * [[DataStream]] or [[KeyedStream]] with a [[BroadcastStream]].
   *
   * The latter can be created using the [[broadcast(MapStateDescriptor[])]] method.
   *
   * The resulting stream can be further processed using the
   * ``broadcastConnectedStream.process(myFunction)``
   * method, where ``myFunction`` can be either a
   * [[org.apache.flink.streaming.api.functions.co.KeyedBroadcastProcessFunction]]
   * or a [[org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction]]
   * depending on the current stream being a [[KeyedStream]] or not.
   *
   * @param broadcastStream The broadcast stream with the broadcast state to be
   *                        connected with this stream.
   * @return The [[BroadcastConnectedStream]].
   */
  @PublicEvolving
  def connect[R](broadcastStream: BroadcastStream[R]): BroadcastConnectedStream[T, R] =
    asScalaStream(stream.connect(broadcastStream))
  /**
   * Groups the elements of a DataStream by the given key positions (for tuple/array types) to
   * be used with grouped operators like grouped reduce or grouped aggregations.
   */
  def keyBy(fields: Int*): KeyedStream[T, JavaTuple] = asScalaStream(stream.keyBy(fields: _*))
  /**
   * Groups the elements of a DataStream by the given field expressions to
   * be used with grouped operators like grouped reduce or grouped aggregations.
   */
  def keyBy(firstField: String, otherFields: String*): KeyedStream[T, JavaTuple] =
    asScalaStream(stream.keyBy(firstField +: otherFields.toArray: _*))
/**
* Groups the elements of a DataStream by the given K key to
* be used with grouped operators like grouped reduce or grouped aggregations.
*/
def keyBy[K: TypeInformation](fun: T => K): KeyedStream[T, K] = {
val cleanFun = clean(fun)
val keyType: TypeInformation[K] = implicitly[TypeInformation[K]]
val keyExtractor = new KeySelector[T, K] with ResultTypeQueryable[K] {
def getKey(in: T) = cleanFun(in)
override def getProducedType: TypeInformation[K] = keyType
}
asScalaStream(new JavaKeyedStream(stream, keyExtractor, keyType))
}
  /**
   * Partitions a tuple DataStream on the specified key fields using a custom partitioner.
   * This method takes the key position to partition on, and a partitioner that accepts the key
   * type.
   *
   * Note: This method works only on single field keys.
   */
  def partitionCustom[K: TypeInformation](partitioner: Partitioner[K], field: Int) : DataStream[T] =
    asScalaStream(stream.partitionCustom(partitioner, field))
  /**
   * Partitions a POJO DataStream on the specified key fields using a custom partitioner.
   * This method takes the key expression to partition on, and a partitioner that accepts the key
   * type.
   *
   * Note: This method works only on single field keys.
   */
  def partitionCustom[K: TypeInformation](partitioner: Partitioner[K], field: String)
    : DataStream[T] =
    asScalaStream(stream.partitionCustom(partitioner, field))
  /**
   * Partitions a DataStream on the key returned by the selector, using a custom partitioner.
   * This method takes the key selector to get the key to partition on, and a partitioner that
   * accepts the key type.
   *
   * Note: This method works only on single field keys, i.e. the selector cannot return tuples
   * of fields.
   */
  def partitionCustom[K: TypeInformation](partitioner: Partitioner[K], fun: T => K)
    : DataStream[T] = {
    val keyType = implicitly[TypeInformation[K]]
    val cleanFun = clean(fun)
    val keyExtractor = new KeySelector[T, K] with ResultTypeQueryable[K] {
      def getKey(in: T) = cleanFun(in)
      override def getProducedType(): TypeInformation[K] = keyType
    }
    asScalaStream(stream.partitionCustom(partitioner, keyExtractor))
  }
  /**
   * Sets the partitioning of the DataStream so that the output tuples
   * are broadcast to every parallel instance of the next component.
   */
  def broadcast: DataStream[T] = asScalaStream(stream.broadcast())
  /**
   * Sets the partitioning of the [[DataStream]] so that the output elements
   * are broadcasted to every parallel instance of the next operation. In addition,
   * it implicitly creates as many
   * [[org.apache.flink.api.common.state.BroadcastState broadcast states]]
   * as the specified descriptors which can be used to store the element of the stream.
   *
   * @param broadcastStateDescriptors the descriptors of the broadcast states to create.
   * @return A [[BroadcastStream]] which can be used in the
   *         [[DataStream.connect(BroadcastStream)]] to create a
   *         [[BroadcastConnectedStream]] for further processing of the elements.
   */
  @PublicEvolving
  def broadcast(broadcastStateDescriptors: MapStateDescriptor[_, _]*): BroadcastStream[T] = {
    if (broadcastStateDescriptors == null) {
      throw new NullPointerException("State Descriptors must not be null.")
    }
    javaStream.broadcast(broadcastStateDescriptors: _*)
  }
  /**
   * Sets the partitioning of the DataStream so that the output values all go to
   * the first instance of the next processing operator. Use this setting with care
   * since it might cause a serious performance bottleneck in the application.
   */
  @PublicEvolving
  def global: DataStream[T] = asScalaStream(stream.global())
  /**
   * Sets the partitioning of the DataStream so that the output tuples
   * are shuffled to the next component.
   */
  @PublicEvolving
  def shuffle: DataStream[T] = asScalaStream(stream.shuffle())
  /**
   * Sets the partitioning of the DataStream so that the output tuples
   * are forwarded to the local subtask of the next component (whenever
   * possible).
   */
  def forward: DataStream[T] = asScalaStream(stream.forward())
  /**
   * Sets the partitioning of the DataStream so that the output tuples
   * are distributed evenly to the next component.
   */
  def rebalance: DataStream[T] = asScalaStream(stream.rebalance())
  /**
   * Sets the partitioning of the [[DataStream]] so that the output tuples
   * are distributed evenly to a subset of instances of the downstream operation.
   *
   * The subset of downstream operations to which the upstream operation sends
   * elements depends on the degree of parallelism of both the upstream and downstream operation.
   * For example, if the upstream operation has parallelism 2 and the downstream operation
   * has parallelism 4, then one upstream operation would distribute elements to two
   * downstream operations while the other upstream operation would distribute to the other
   * two downstream operations. If, on the other hand, the downstream operation has parallelism
   * 2 while the upstream operation has parallelism 4 then two upstream operations will
   * distribute to one downstream operation while the other two upstream operations will
   * distribute to the other downstream operations.
   *
   * In cases where the different parallelisms are not multiples of each other one or several
   * downstream operations will have a differing number of inputs from upstream operations.
   */
  @PublicEvolving
  def rescale: DataStream[T] = asScalaStream(stream.rescale())
  /**
   * Initiates an iterative part of the program that creates a loop by feeding
   * back data streams. To create a streaming iteration the user needs to define
   * a transformation that creates two DataStreams. The first one is the output
   * that will be fed back to the start of the iteration and the second is the output
   * stream of the iterative part.
   *
   * stepfunction: initialStream => (feedback, output)
   *
   * A common pattern is to use output splitting to create feedback and output DataStream.
   * Please refer to the [[split]] method of the DataStream
   *
   * By default a DataStream with iteration will never terminate, but the user
   * can use the maxWaitTime parameter to set a max waiting time for the iteration head.
   * If no data received in the set time the stream terminates.
   *
   * Parallelism of the feedback stream must match the parallelism of the original stream.
   * Please refer to the [[setParallelism]] method for parallelism modification
   */
  @PublicEvolving
  def iterate[R](stepFunction: DataStream[T] => (DataStream[T], DataStream[R]),
        maxWaitTimeMillis:Long = 0) : DataStream[R] = {
    val iterativeStream = stream.iterate(maxWaitTimeMillis)
    val (feedback, output) = stepFunction(new DataStream[T](iterativeStream))
    iterativeStream.closeWith(feedback.javaStream)
    output
  }
  /**
   * Initiates an iterative part of the program that creates a loop by feeding
   * back data streams. To create a streaming iteration the user needs to define
   * a transformation that creates two DataStreams. The first one is the output
   * that will be fed back to the start of the iteration and the second is the output
   * stream of the iterative part.
   *
   * The input stream of the iterate operator and the feedback stream will be treated
   * as a ConnectedStreams where the input is connected with the feedback stream.
   *
   * This allows the user to distinguish standard input from feedback inputs.
   *
   * stepfunction: initialStream => (feedback, output)
   *
   * The user must set the max waiting time for the iteration head.
   * If no data received in the set time the stream terminates. If this parameter is set
   * to 0 then the iteration sources will run indefinitely, so the job must be killed to stop.
   */
  @PublicEvolving
  def iterate[R, F: TypeInformation](
        stepFunction: ConnectedStreams[T, F] => (DataStream[F], DataStream[R]),
        maxWaitTimeMillis:Long): DataStream[R] = {
    val feedbackType: TypeInformation[F] = implicitly[TypeInformation[F]]
    val connectedIterativeStream = stream.iterate(maxWaitTimeMillis).
      withFeedbackType(feedbackType)
    val (feedback, output) = stepFunction(asScalaStream(connectedIterativeStream))
    connectedIterativeStream.closeWith(feedback.javaStream)
    output
  }
/**
* Creates a new DataStream by applying the given function to every element of this DataStream.
*/
def map[R: TypeInformation](fun: T => R): DataStream[R] = {
if (fun == null) {
throw new NullPointerException("Map function must not be null.")
}
val cleanFun = clean(fun)
val mapper = new MapFunction[T, R] {
def map(in: T): R = cleanFun(in)
}
map(mapper)
}
  /**
   * Creates a new DataStream by applying the given function to every element of this DataStream.
   */
  def map[R: TypeInformation](mapper: MapFunction[T, R]): DataStream[R] = {
    if (mapper == null) {
      throw new NullPointerException("Map function must not be null.")
    }
    // Propagate the Scala-side type information explicitly to the Java API.
    val outType : TypeInformation[R] = implicitly[TypeInformation[R]]
    asScalaStream(stream.map(mapper).returns(outType).asInstanceOf[JavaStream[R]])
  }
  /**
   * Creates a new DataStream by applying the given function to every element and flattening
   * the results.
   */
  def flatMap[R: TypeInformation](flatMapper: FlatMapFunction[T, R]): DataStream[R] = {
    if (flatMapper == null) {
      throw new NullPointerException("FlatMap function must not be null.")
    }
    // Propagate the Scala-side type information explicitly to the Java API.
    val outType : TypeInformation[R] = implicitly[TypeInformation[R]]
    asScalaStream(stream.flatMap(flatMapper).returns(outType).asInstanceOf[JavaStream[R]])
  }
  /**
   * Creates a new DataStream by applying the given function to every element and flattening
   * the results.
   */
  def flatMap[R: TypeInformation](fun: (T, Collector[R]) => Unit): DataStream[R] = {
    if (fun == null) {
      throw new NullPointerException("FlatMap function must not be null.")
    }
    val cleanFun = clean(fun)
    val flatMapper = new FlatMapFunction[T, R] {
      def flatMap(in: T, out: Collector[R]) { cleanFun(in, out) }
    }
    flatMap(flatMapper)
  }
/**
* Creates a new DataStream by applying the given function to every element and flattening
* the results.
*/
def flatMap[R: TypeInformation](fun: T => TraversableOnce[R]): DataStream[R] = {
if (fun == null) {
throw new NullPointerException("FlatMap function must not be null.")
}
val cleanFun = clean(fun)
val flatMapper = new FlatMapFunction[T, R] {
def flatMap(in: T, out: Collector[R]) { cleanFun(in) foreach out.collect }
}
flatMap(flatMapper)
}
  /**
   * Applies the given [[ProcessFunction]] on the input stream, thereby
   * creating a transformed output stream.
   *
   * The function will be called for every element in the stream and can produce
   * zero or more output.
   *
   * @param processFunction The [[ProcessFunction]] that is called for each element
   *                        in the stream.
   */
  @PublicEvolving
  def process[R: TypeInformation](
      processFunction: ProcessFunction[T, R]): DataStream[R] = {
    if (processFunction == null) {
      throw new NullPointerException("ProcessFunction must not be null.")
    }
    asScalaStream(javaStream.process(processFunction, implicitly[TypeInformation[R]]))
  }
  /**
   * Creates a new DataStream that contains only the elements satisfying the given filter predicate.
   */
  def filter(filter: FilterFunction[T]): DataStream[T] = {
    if (filter == null) {
      throw new NullPointerException("Filter function must not be null.")
    }
    asScalaStream(stream.filter(filter))
  }
/**
* Creates a new DataStream that contains only the elements satisfying the given filter predicate.
*/
def filter(fun: T => Boolean): DataStream[T] = {
if (fun == null) {
throw new NullPointerException("Filter function must not be null.")
}
val cleanFun = clean(fun)
val filterFun = new FilterFunction[T] {
def filter(in: T) = cleanFun(in)
}
filter(filterFun)
}
  /**
   * Windows this DataStream into tumbling time windows.
   *
   * This is a shortcut for either `.window(TumblingEventTimeWindows.of(size))` or
   * `.window(TumblingProcessingTimeWindows.of(size))` depending on the time characteristic
   * set using
   * [[StreamExecutionEnvironment.setStreamTimeCharacteristic]].
   *
   * Note: This operation can be inherently non-parallel since all elements have to pass through
   * the same operator instance. (Only for special cases, such as aligned time windows is
   * it possible to perform this operation in parallel).
   *
   * @param size The size of the window.
   * @return The windowed stream.
   */
  def timeWindowAll(size: Time): AllWindowedStream[T, TimeWindow] = {
    new AllWindowedStream(javaStream.timeWindowAll(size))
  }
  /**
   * Windows this DataStream into sliding time windows.
   *
   * This is a shortcut for either `.window(SlidingEventTimeWindows.of(size, slide))` or
   * `.window(SlidingProcessingTimeWindows.of(size, slide))` depending on the time characteristic
   * set using
   * [[StreamExecutionEnvironment.setStreamTimeCharacteristic]].
   *
   * Note: This operation can be inherently non-parallel since all elements have to pass through
   * the same operator instance. (Only for special cases, such as aligned time windows is
   * it possible to perform this operation in parallel).
   *
   * @param size The size of the window.
   * @param slide The slide interval of the window.
   * @return The windowed stream.
   */
  def timeWindowAll(size: Time, slide: Time): AllWindowedStream[T, TimeWindow] = {
    new AllWindowedStream(javaStream.timeWindowAll(size, slide))
  }
  /**
   * Windows this [[DataStream]] into sliding count windows.
   *
   * Note: This operation can be inherently non-parallel since all elements have to pass through
   * the same operator instance. (Only for special cases, such as aligned time windows is
   * it possible to perform this operation in parallel).
   *
   * @param size The size of the windows in number of elements.
   * @param slide The slide interval in number of elements.
   * @return The windowed stream.
   */
  def countWindowAll(size: Long, slide: Long): AllWindowedStream[T, GlobalWindow] = {
    new AllWindowedStream(stream.countWindowAll(size, slide))
  }
  /**
   * Windows this [[DataStream]] into tumbling count windows.
   *
   * Note: This operation can be inherently non-parallel since all elements have to pass through
   * the same operator instance. (Only for special cases, such as aligned time windows is
   * it possible to perform this operation in parallel).
   *
   * @param size The size of the windows in number of elements.
   * @return The windowed stream.
   */
  def countWindowAll(size: Long): AllWindowedStream[T, GlobalWindow] = {
    new AllWindowedStream(stream.countWindowAll(size))
  }
  /**
   * Windows this data stream to a [[AllWindowedStream]], which evaluates windows
   * over a non key grouped stream. Elements are put into windows by a [[WindowAssigner]]. The
   * grouping of elements is done by window.
   *
   * A [[org.apache.flink.streaming.api.windowing.triggers.Trigger]] can be defined to specify
   * when windows are evaluated. However, `WindowAssigner` have a default `Trigger`
   * that is used if a `Trigger` is not specified.
   *
   * Note: This operation can be inherently non-parallel since all elements have to pass through
   * the same operator instance. (Only for special cases, such as aligned time windows is
   * it possible to perform this operation in parallel).
   *
   * @param assigner The `WindowAssigner` that assigns elements to windows.
   * @return The trigger windows data stream.
   */
  @PublicEvolving
  def windowAll[W <: Window](assigner: WindowAssigner[_ >: T, W]): AllWindowedStream[T, W] = {
    new AllWindowedStream[T, W](new JavaAllWindowedStream[T, W](stream, assigner))
  }
/**
* Extracts a timestamp from an element and assigns it as the internal timestamp of that element.
* The internal timestamps are, for example, used to to event-time window operations.
*
* If you know that the timestamps are strictly increasing you can use an
* [[AscendingTimestampExtractor]]. Otherwise,
* you should provide a [[TimestampExtractor]] that also implements
* [[TimestampExtractor#getCurrentWatermark]] to keep track of watermarks.
*
* @see org.apache.flink.streaming.api.watermark.Watermark
*/
@deprecated
def assignTimestamps(extractor: TimestampExtractor[T]): DataStream[T] = {
asScalaStream(stream.assignTimestamps(clean(extractor)))
}
  /**
   * Assigns timestamps to the elements in the data stream and periodically creates
   * watermarks to signal event time progress.
   *
   * This method creates watermarks periodically (for example every second), based
   * on the watermarks indicated by the given watermark generator. Even when no new elements
   * in the stream arrive, the given watermark generator will be periodically checked for
   * new watermarks. The interval in which watermarks are generated is defined in
   * [[org.apache.flink.api.common.ExecutionConfig#setAutoWatermarkInterval(long)]].
   *
   * Use this method for the common cases, where some characteristic over all elements
   * should generate the watermarks, or where watermarks are simply trailing behind the
   * wall clock time by a certain amount.
   *
   * For the second case and when the watermarks are required to lag behind the maximum
   * timestamp seen so far in the elements of the stream by a fixed amount of time, and this
   * amount is known in advance, use the
   * [[BoundedOutOfOrdernessTimestampExtractor]].
   *
   * For cases where watermarks should be created in an irregular fashion, for example
   * based on certain markers that some element carry, use the
   * [[AssignerWithPunctuatedWatermarks]].
   *
   * @param assigner The periodic timestamp/watermark assigner.
   * @return The stream with timestamps assigned and watermarks generated.
   * @see AssignerWithPeriodicWatermarks
   * @see AssignerWithPunctuatedWatermarks
   * @see #assignTimestampsAndWatermarks(AssignerWithPunctuatedWatermarks)
   */
  @PublicEvolving
  def assignTimestampsAndWatermarks(assigner: AssignerWithPeriodicWatermarks[T]): DataStream[T] = {
    asScalaStream(stream.assignTimestampsAndWatermarks(assigner))
  }
  /**
   * Assigns timestamps to the elements in the data stream and periodically creates
   * watermarks to signal event time progress.
   *
   * This method creates watermarks based purely on stream elements. For each element
   * that is handled via [[AssignerWithPunctuatedWatermarks#extractTimestamp(Object, long)]],
   * the [[AssignerWithPunctuatedWatermarks#checkAndGetNextWatermark()]] method is called,
   * and a new watermark is emitted, if the returned watermark value is larger than the previous
   * watermark.
   *
   * This method is useful when the data stream embeds watermark elements, or certain elements
   * carry a marker that can be used to determine the current event time watermark.
   * This operation gives the programmer full control over the watermark generation. Users
   * should be aware that too aggressive watermark generation (i.e., generating hundreds of
   * watermarks every second) can cost some performance.
   *
   * For cases where watermarks should be created in a regular fashion, for example
   * every x milliseconds, use the [[AssignerWithPeriodicWatermarks]].
   *
   * @param assigner The punctuated timestamp/watermark assigner.
   * @return The stream with timestamps assigned and watermarks generated.
   * @see AssignerWithPunctuatedWatermarks
   * @see AssignerWithPeriodicWatermarks
   * @see #assignTimestampsAndWatermarks(AssignerWithPeriodicWatermarks)
   */
  @PublicEvolving
  def assignTimestampsAndWatermarks(assigner: AssignerWithPunctuatedWatermarks[T])
      : DataStream[T] = {
    asScalaStream(stream.assignTimestampsAndWatermarks(assigner))
  }
/**
* Assigns timestamps to the elements in the data stream and periodically creates
* watermarks to signal event time progress.
*
* This method is a shortcut for data streams where the element timestamp are known
* to be monotonously ascending within each parallel stream.
* In that case, the system can generate watermarks automatically and perfectly
* by tracking the ascending timestamps.
*
* For cases where the timestamps are not monotonously increasing, use the more
* general methods [[assignTimestampsAndWatermarks(AssignerWithPeriodicWatermarks)]]
* and [[assignTimestampsAndWatermarks(AssignerWithPunctuatedWatermarks)]].
*/
@PublicEvolving
def assignAscendingTimestamps(extractor: T => Long): DataStream[T] = {
val cleanExtractor = clean(extractor)
val extractorFunction = new AscendingTimestampExtractor[T] {
def extractAscendingTimestamp(element: T): Long = {
cleanExtractor(element)
}
}
asScalaStream(stream.assignTimestampsAndWatermarks(extractorFunction))
}
  /**
   *
   * Operator used for directing tuples to specific named outputs using an
   * OutputSelector. Calling this method on an operator creates a new
   * [[SplitStream]].
   *
   * @param selector The [[OutputSelector]] naming the outputs each element belongs to.
   * @return The new [[SplitStream]].
   */
  def split(selector: OutputSelector[T]): SplitStream[T] = asScalaStream(stream.split(selector))
/**
* Creates a new [[SplitStream]] that contains only the elements satisfying the
* given output selector predicate.
*/
def split(fun: T => TraversableOnce[String]): SplitStream[T] = {
if (fun == null) {
throw new NullPointerException("OutputSelector must not be null.")
}
val cleanFun = clean(fun)
val selector = new OutputSelector[T] {
def select(in: T): java.lang.Iterable[String] = {
cleanFun(in).toIterable.asJava
}
}
split(selector)
}
  /**
   * Creates a co-group operation. See [[CoGroupedStreams]] for an example of how the keys
   * and window can be specified.
   *
   * @param otherStream The stream to co-group with.
   */
  def coGroup[T2](otherStream: DataStream[T2]): CoGroupedStreams[T, T2] = {
    new CoGroupedStreams(this, otherStream)
  }
  /**
   * Creates a join operation. See [[JoinedStreams]] for an example of how the keys
   * and window can be specified.
   *
   * @param otherStream The stream to join with.
   */
  def join[T2](otherStream: DataStream[T2]): JoinedStreams[T, T2] = {
    new JoinedStreams(this, otherStream)
  }
  /**
   * Writes a DataStream to the standard output stream (stdout). For each
   * element of the DataStream the result of .toString is
   * written.
   *
   * @return The closed DataStream.
   */
  @PublicEvolving
  def print(): DataStreamSink[T] = stream.print()
/**
* Writes a DataStream to the standard output stream (stderr).
*
* For each element of the DataStream the result of
* [[AnyRef.toString()]] is written.
*
* @return The closed DataStream.
*/
@PublicEvolving
def printToErr() = stream.printToErr()
  /**
   * Writes a DataStream to the file specified by path in text format. For
   * every element of the DataStream the result of .toString is written.
   *
   * For control over the behavior for existing files, use the
   * [[writeAsText(String, FileSystem.WriteMode)]] overload.
   *
   * @param path The path pointing to the location the text file is written to
   * @return The closed DataStream
   */
  @PublicEvolving
  def writeAsText(path: String): DataStreamSink[T] =
    stream.writeAsText(path)
/**
* Writes a DataStream to the file specified by path in text format. For
* every element of the DataStream the result of .toString is written.
*
* @param path The path pointing to the location the text file is written to
* @param writeMode Controls the behavior for existing files. Options are NO_OVERWRITE and
* OVERWRITE.
* @return The closed DataStream
*/
@PublicEvolving
def writeAsText(path: String, writeMode: FileSystem.WriteMode): DataStreamSink[T] = {
if (writeMode != null) {
stream.writeAsText(path, writeMode)
} else {
stream.writeAsText(path)
}
}
  /**
   * Writes the DataStream in CSV format to the file specified by the path parameter,
   * using the default line and field delimiters.
   *
   * @param path Path to the location of the CSV file
   * @return The closed DataStream
   */
  @PublicEvolving
  def writeAsCsv(path: String): DataStreamSink[T] = {
    writeAsCsv(
      path,
      null,
      ScalaCsvOutputFormat.DEFAULT_LINE_DELIMITER,
      ScalaCsvOutputFormat.DEFAULT_FIELD_DELIMITER)
  }
  /**
   * Writes the DataStream in CSV format to the file specified by the path parameter,
   * using the default line and field delimiters.
   *
   * @param path Path to the location of the CSV file
   * @param writeMode Controls whether an existing file is overwritten or not
   * @return The closed DataStream
   */
  @PublicEvolving
  def writeAsCsv(path: String, writeMode: FileSystem.WriteMode): DataStreamSink[T] = {
    writeAsCsv(
      path,
      writeMode,
      ScalaCsvOutputFormat.DEFAULT_LINE_DELIMITER,
      ScalaCsvOutputFormat.DEFAULT_FIELD_DELIMITER)
  }
/**
* Writes the DataStream in CSV format to the file specified by the path parameter. The writing
* is performed periodically every millis milliseconds.
*
* @param path Path to the location of the CSV file
* @param writeMode Controls whether an existing file is overwritten or not
* @param rowDelimiter Delimiter for consecutive rows
* @param fieldDelimiter Delimiter for consecutive fields
* @return The closed DataStream
*/
@PublicEvolving
def writeAsCsv(
path: String,
writeMode: FileSystem.WriteMode,
rowDelimiter: String,
fieldDelimiter: String)
: DataStreamSink[T] = {
require(stream.getType.isTupleType, "CSV output can only be used with Tuple DataSets.")
val of = new ScalaCsvOutputFormat[Product](new Path(path), rowDelimiter, fieldDelimiter)
if (writeMode != null) {
of.setWriteMode(writeMode)
}
stream.writeUsingOutputFormat(of.asInstanceOf[OutputFormat[T]])
}
  /**
   * Writes a DataStream using the given [[OutputFormat]].
   *
   * @param format The [[OutputFormat]] that writes each element.
   * @return The closed DataStream.
   */
  @PublicEvolving
  def writeUsingOutputFormat(format: OutputFormat[T]): DataStreamSink[T] = {
    stream.writeUsingOutputFormat(format)
  }
  /**
   * Writes the DataStream to a socket as a byte array. The format of the output is
   * specified by a [[SerializationSchema]].
   *
   * @param hostname Host of the target socket.
   * @param port Port of the target socket.
   * @param schema Serializes each element to the bytes sent over the wire.
   * @return The closed DataStream.
   */
  @PublicEvolving
  def writeToSocket(
      hostname: String,
      port: Integer,
      schema: SerializationSchema[T]): DataStreamSink[T] = {
    stream.writeToSocket(hostname, port, schema)
  }
  /**
   * Adds the given sink to this DataStream. Only streams with sinks added
   * will be executed once the StreamExecutionEnvironment.execute(...)
   * method is called.
   *
   * @param sinkFunction The [[SinkFunction]] invoked for every element of the stream.
   * @return The closed DataStream.
   */
  def addSink(sinkFunction: SinkFunction[T]): DataStreamSink[T] =
    stream.addSink(sinkFunction)
/**
* Adds the given sink to this DataStream. Only streams with sinks added
* will be executed once the StreamExecutionEnvironment.execute(...)
* method is called.
*
*/
def addSink(fun: T => Unit): DataStreamSink[T] = {
if (fun == null) {
throw new NullPointerException("Sink function must not be null.")
}
val cleanFun = clean(fun)
val sinkFunction = new SinkFunction[T] {
def invoke(in: T) = cleanFun(in)
}
this.addSink(sinkFunction)
}
  /**
   * Returns a "closure-cleaned" version of the given function. Cleans only if closure cleaning
   * is not disabled in the [[org.apache.flink.api.common.ExecutionConfig]].
   *
   * @param f The closure to clean so that it can be serialized and shipped to the cluster.
   */
  private[flink] def clean[F <: AnyRef](f: F): F = {
    new StreamExecutionEnvironment(stream.getExecutionEnvironment).scalaClean(f)
  }
  /**
   * Transforms the [[DataStream]] by using a custom [[OneInputStreamOperator]].
   *
   * @param operatorName name of the operator, for logging purposes
   * @param operator the object containing the transformation logic
   * @tparam R the type of elements emitted by the operator
   * @return The transformed [[DataStream]].
   */
  @PublicEvolving
  def transform[R: TypeInformation](
      operatorName: String,
      operator: OneInputStreamOperator[T, R]): DataStream[R] = {
    asScalaStream(stream.transform(operatorName, implicitly[TypeInformation[R]], operator))
  }
}
| zhangminglei/flink | flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/DataStream.scala | Scala | apache-2.0 | 42,987 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package java.util.function
/** Represents a supplier of `Int`-valued results (Scala.js port of `java.util.function.IntSupplier`). */
@FunctionalInterface
trait IntSupplier {
  /** Returns an `Int` result. */
  def getAsInt(): Int
}
| scala-js/scala-js | javalib/src/main/scala/java/util/function/IntSupplier.scala | Scala | apache-2.0 | 367 |
package org.scalatra
package auth
import servlet.ServletBase
import javax.servlet.http.HttpSession
object ScentryAuthStore {

  /** Where Scentry persists the authentication token between requests. */
  trait ScentryAuthStore {
    def get: String
    def set(value: String): Unit
    def invalidate: Unit
  }

  /**
   * Cookie-backed store that additionally marks the cookie as HTTP-only by
   * writing the `Set-Cookie` header directly.
   */
  class HttpOnlyCookieAuthStore(app: => (ServletBase with CookieSupport), secureOnly: Boolean = false)
      extends CookieAuthStore(app.cookies, secureOnly) {

    private val SetCookieHeader = "Set-Cookie".intern

    override def set(value: String): Unit = {
      //TODO: Make use of servlet 3.0 cookie implementation
      val cookie =
        Cookie(Scentry.scentryAuthKey, value)(CookieOptions(secure = secureOnly, httpOnly = true))
      app.response.addHeader(SetCookieHeader, cookie.toCookieString)
    }
  }

  /** Stores the token in a cookie, optionally restricted to secure (HTTPS) connections. */
  class CookieAuthStore(cookies: => SweetCookies, secureOnly: Boolean = false) extends ScentryAuthStore {

    def get: String =
      cookies.get(Scentry.scentryAuthKey).getOrElse("")

    def set(value: String): Unit =
      cookies.set(Scentry.scentryAuthKey, value)(CookieOptions(secure = secureOnly))

    def invalidate: Unit = {
      cookies -= Scentry.scentryAuthKey
    }
  }

  /** Stores the token as an attribute of the HTTP session. */
  class SessionAuthStore(session: => HttpSession) extends ScentryAuthStore {

    def get: String =
      session.getAttribute(Scentry.scentryAuthKey).asInstanceOf[String]

    def set(value: String): Unit =
      session.setAttribute(Scentry.scentryAuthKey, value)

    def invalidate: Unit =
      session.invalidate()
  }
}
| louk/scalatra | auth/src/main/scala/org/scalatra/auth/ScentryAuthStore.scala | Scala | bsd-2-clause | 1,437 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate.util
import java.io._
import java.util.zip.{ ZipEntry, ZipInputStream }
import java.net.URL
import scala.util.parsing.input.{ Position, OffsetPosition }
import scala.language.implicitConversions
object IOUtil {
  val log = Log(getClass); import log._

  /** Raised when a precompile directive such as `@@include` is malformed. */
  class InvalidDirectiveException(directive: String, pos: Position) extends RuntimeException(directive + " at " + pos, null)

  /**
   * Allows a File to be converted to a FileResource which also provides a Rich API for files
   */
  implicit def toResource(file: File) = new FileResource(file, file.getPath)

  implicit def toFile(resource: FileResource): File = resource.asFile

  /**
   * Creates any parent directories of the given path if they do not exist
   */
  def makeParentDirs(fileName: String): Unit = makeParentDirs(new File(fileName))

  /**
   * Creates any parent directories of the given directory if they do not exist
   */
  def makeParentDirs(file: File): Unit = {
    val parent = file.getParentFile
    if (parent != null) {
      parent.mkdirs
    }
  }

  /**
   * Recursively deletes a file and all of its children files if it's a directory.
   */
  def recursiveDelete(file: File): Boolean = {
    if (file.isDirectory) {
      val children = file.listFiles
      // listFiles returns null on I/O errors or when the path is not a directory
      if (children != null) {
        for (child <- children) {
          recursiveDelete(child)
        }
      }
    }
    return file.delete
  }

  val includeRegEx = """@@include\\(\\"(.+)\\"\\)""".r

  /**
   * TODO: maybe we want other precompile directives at some point,
   * so this may need to be made more flexible
   */
  def mergeIncludes(sourceCode: String, encoding: String = "UTF-8"): String = {
    val matches = includeRegEx.findAllIn(sourceCode)
    if (!matches.hasNext) sourceCode
    else {
      matches.foldLeft(sourceCode) { (result, include) =>
        val includeSource: String = try {
          val includeRegEx(fileName) = include
          loadTextFile(new java.io.File(fileName), encoding)
        } catch {
          case m: MatchError =>
            throw new InvalidDirectiveException("include", OffsetPosition(include, 0))
          case n: FileNotFoundException => throw n
        }
        result.replace(include, includeSource)
      }
    }
  }

  /** Reads the stream fully as text (closing it) and expands `@@include` directives. */
  def loadText(in: InputStream, encoding: String = "UTF-8"): String = {
    val sourceCode = new String(loadBytes(in), encoding)
    mergeIncludes(sourceCode, encoding)
  }

  /** Reads the file fully as text and expands `@@include` directives. */
  def loadTextFile(path: File, encoding: String = "UTF-8") = {
    val sourceCode = new String(loadBinaryFile(path), encoding)
    mergeIncludes(sourceCode, encoding)
  }

  /** Reads the whole file into a byte array, always closing the input. */
  def loadBinaryFile(path: File): Array[Byte] = {
    val baos = new ByteArrayOutputStream
    val in = new FileInputStream(path)
    try {
      copy(in, baos)
    } finally {
      in.close
    }

    baos.toByteArray
  }

  /** Reads the whole stream into a byte array, always closing the input. */
  def loadBytes(in: InputStream): Array[Byte] = {
    val baos = new ByteArrayOutputStream
    try {
      copy(in, baos)
    } finally {
      in.close
    }

    baos.toByteArray
  }

  def writeText(path: String, text: String): Unit = writeText(new File(path), text)

  def writeText(path: File, text: String): Unit = writeText(new FileWriter(path), text)

  def writeText(stream: OutputStream, text: String): Unit = writeText(new OutputStreamWriter(stream), text)

  /** Writes the text and closes the writer, even on failure. */
  def writeText(out: Writer, text: String): Unit = {
    try {
      out.write(text)
    } finally {
      out.close
    }
  }

  def writeBinaryFile(path: String, contents: Array[Byte]): Unit = writeBinaryFile(new File(path), contents)

  def writeBinaryFile(path: File, contents: Array[Byte]): Unit = {
    val out = new FileOutputStream(path)
    try {
      out.write(contents)
    } finally {
      out.close
    }
  }

  /** Copies `in` to `out`, creating `out`'s parent directories first. */
  def copy(in: File, out: File): Long = {
    out.getParentFile.mkdirs
    copy(new FileInputStream(in), new FileOutputStream(out))
  }

  def copy(file: File, out: OutputStream): Long = copy(new BufferedInputStream(new FileInputStream(file)), out)

  def copy(in: InputStream, file: File): Long = {
    val out = new FileOutputStream(file)
    try {
      copy(in, out)
    } finally {
      out.close
    }
  }

  def copy(url: URL, file: File): Long = {
    val in = url.openStream
    try {
      copy(in, file)
    } finally {
      in.close
    }
  }

  // For ARM
  def using[R, C <: Closeable](c: C)(func: (C) => R): R = {
    try {
      func(c)
    } finally {
      try {
        c.close
      } catch {
        case _: Exception => // ignore
      }
    }
  }

  /** Copies all bytes from `in` to `out`, returning the byte count; neither stream is closed. */
  def copy(in: InputStream, out: OutputStream): Long = {
    var bytesCopied: Long = 0
    val buffer = new Array[Byte](8192)

    var bytes = in.read(buffer)
    while (bytes >= 0) {
      out.write(buffer, 0, bytes)
      bytesCopied += bytes
      bytes = in.read(buffer)
    }

    bytesCopied
  }

  /** Copies all chars from `in` to `out`, returning the char count; neither stream is closed. */
  def copy(in: Reader, out: Writer): Long = {
    var charsCopied: Long = 0
    val buffer = new Array[Char](8192)

    var chars = in.read(buffer)
    while (chars >= 0) {
      out.write(buffer, 0, chars)
      charsCopied += chars
      chars = in.read(buffer)
    }

    charsCopied
  }

  /**
   * Unjars the given stream for entries which match the optional filter to the given directory
   */
  def unjar(outputDir: File, input: InputStream, filter: ZipEntry => Boolean = allZipEntries): Unit = {
    val zip = new ZipInputStream(input)
    try {
      val buffer = new Array[Byte](64 * 1024)
      var ok = true
      while (ok) {
        val entry = zip.getNextEntry
        if (entry == null) {
          ok = false
        } else {
          val name = entry.getName
          if (!entry.isDirectory && filter(entry)) {
            debug("processing resource: %s", name)

            val file = new File(outputDir.getCanonicalPath + "/" + name)
            file.getParentFile.mkdirs
            val bos = new FileOutputStream(file)
            try {
              var bytes = 1
              while (bytes > 0) {
                bytes = zip.read(buffer)
                if (bytes > 0) {
                  bos.write(buffer, 0, bytes)
                }
              }
            } finally {
              bos.close
            }
          }
          zip.closeEntry
        }
      }
    } finally {
      zip.close
    }
  }

  /**
   * Recursively deletes the directory and all its children which match the optional filter
   */
  def recursiveDelete(file: File, filter: File => Boolean = allFiles): Unit = {
    if (file.exists) {
      if (file.isDirectory) {
        val children = file.listFiles
        // guard against the null listFiles returns on I/O errors, matching the overload above
        if (children != null) {
          for (c <- children) {
            // propagate the filter: previously the unfiltered overload was invoked here,
            // so children were deleted even when they did not match the filter
            recursiveDelete(c, filter)
          }
        }
      }
      if (filter(file)) {
        file.delete
      }
    }
  }

  protected def allZipEntries(entry: ZipEntry): Boolean = true

  protected def allFiles(file: File): Boolean = true
}
| maslovalex/scalate | scalate-util/src/main/scala/org/fusesource/scalate/util/IOUtil.scala | Scala | apache-2.0 | 7,472 |
trait I { // provides the concrete f implementation required by T's abstract override
  def f = {}
}
trait T extends I { // stackable trait: its f both overrides and requires a super f
  abstract override def f = {}
}
class C extends T { // resolve-test fixture: the /* line: N */ markers encode expected resolution targets
  override def f = {}
  println(/* line: 10 */f)
  println(this./* line: 10 */f)
  println(super./* line: 6 */f)
}
/*
* Copyright 2013 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.vertx.scala.core.file
import org.vertx.java.core.file.{ FileProps => JFileProps }
import java.util.Date
/**
 * Represents properties of a file on the file system<p>
 * Instances of FileProps are thread-safe<p>
 *
 * Implemented as a value class delegating to the Java [[JFileProps]], so the
 * wrapper normally incurs no allocation.
 *
 * @author <a href="http://tfox.org">Tim Fox</a>
 * @author <a href="http://www.campudus.com/">Joern Bernhardt</a>
 * @author Galder Zamarreño
 */
final class FileProps private[scala] (val asJava: JFileProps) extends AnyVal {

  /**
   * The date the file was created
   */
  def creationTime(): Date = asJava.creationTime()

  /**
   * The date the file was last accessed
   */
  def lastAccessTime(): Date = asJava.lastAccessTime()

  /**
   * The date the file was last modified
   */
  def lastModifiedTime(): Date = asJava.lastModifiedTime()

  /**
   * Is the file a directory?
   */
  def isDirectory: Boolean = asJava.isDirectory

  /**
   * Is the file some other type? (I.e. not a directory, regular file or symbolic link)
   */
  def isOther: Boolean = asJava.isOther

  /**
   * Is the file a regular file?
   */
  def isRegularFile: Boolean = asJava.isRegularFile

  /**
   * Is the file a symbolic link?
   */
  def isSymbolicLink: Boolean = asJava.isSymbolicLink

  /**
   * The size of the file, in bytes
   */
  def size(): Long = asJava.size()
}
/** Factory for [[org.vertx.scala.core.file.FileProps]] instances. */
object FileProps {
  /** Wraps the given Java [[JFileProps]] delegate. */
  def apply(internal: JFileProps) = new FileProps(internal)
}
| vert-x/mod-lang-scala | src/main/scala/org/vertx/scala/core/file/FileProps.scala | Scala | apache-2.0 | 2,059 |
import leon.lang._
import leon.annotation._
import leon.lang.synthesis._
import leon.collection._
// Leon synthesis/verification testcase: a small grid game where monster moves are
// specified with `choose` and player moves come from an Oracle (keyboard or random).
object Test {
  // A cell on the rectangular grid; provides unit moves and a squared-distance metric.
  case class Pos(x: Int, y: Int) {
    def up = Pos(x, y-1)
    def down = Pos(x, y+1)
    def left = Pos(x-1, y)
    def right = Pos(x+1, y)

    // Simple over aproximation of the distance
    def distance(o: Pos) = {
      val dx = o.x-x
      val dy = o.y-y
      (dx*dx + dy*dy)
    }

    // Applies a single action to this position (Noop keeps it in place).
    def update(a: Action) = a match {
      case MoveUp =>
        up
      case MoveDown =>
        down
      case MoveLeft =>
        left
      case MoveRight =>
        right
      case Noop =>
        this
    }

    // True if this position equals any position in the list.
    def isAtAny(lp: List[Pos]): Boolean = lp match {
      case Cons(h,t) => (h == this) || isAtAny(t)
      case _ => false
    }
  }

  // The level: a list of wall cells plus the exclusive board bounds.
  case class Map(walls: List[Pos], size: Pos) {
    // Returns whether a certain position is a valid position to be in given
    // the map
    def isValidPos(p: Pos): Boolean = {
      p.x >= 0 && p.y >= 0 &&
      p.x < size.x && p.y < size.y &&
      !p.isAtAny(walls)
    }

    // True if every position in the list is valid on this map.
    def allValidPos(lp: List[Pos]): Boolean = lp match {
      case Cons(h, t) => isValidPos(h) && allValidPos(t)
      case Nil() => true
    }
  }

  // The five possible moves, including standing still.
  abstract class Action;
  case object MoveUp extends Action
  case object MoveDown extends Action
  case object MoveLeft extends Action
  case object MoveRight extends Action
  case object Noop extends Action

  // Full game state: the player, the monsters and the map.
  case class Game(player: Pos,
                  monsters: List[Pos],
                  map: Map) {

    // An action is valid from `pos` if the resulting cell is on the board and not a wall.
    def isValidAction(pos: Pos, action: Action) = {
      val np = pos.update(action)
      map.isValidPos(np)
    }

    def isDead = {
      player.isAtAny(monsters)
    }

    def isValid = {
      map.isValidPos(player) &&
      map.allValidPos(monsters)
    }

    def isRunning = !isDead
  }

  // One game turn: the player moves first, then every monster takes a step.
  def step(g: Game)(implicit o: Oracle[Action]): Game = {
    if (g.isDead) {
      g
    } else {
      val g1 = stepPlayer(g)
      Game(g1.player, stepMonsters(g1, g1.monsters), g1.map)
    }
  }

  // Runs up to `b` turns, stopping early if the player dies.
  def steps(g: Game, b: Int)(implicit o: Oracle[Action]): Game = {
    if (b <= 0 || g.isDead) {
      g
    } else {
      steps(step(g)(o.left), b-1)(o.right)
    }
  }

  // Renders the board, then advances the game by one turn.
  def gameStep(g: Game)(implicit o: Oracle[Action]): Game = {
    val u = display(g)
    //withOracle { implicit o: Oracle[Action] => {
    steps(g, 1)
    //step(step(g))
    // step(step(g)(o.left))(o.right)
    //} ensuring { g => !g.isDead }}
  }

  // Main loop: keeps stepping (rendering each turn) until the player dies.
  def play(g: Game)(implicit o: Oracle[Action]): Game = {
    val r = gameStep(g)(o.left)
    if (r.isDead) {
      r
    } else {
      play(r)(o.left)
    }
  }

  // Picks (via `choose`) an action satisfying the bestTowards specification.
  def stepMonster(g: Game, oldPos: Pos): Pos = {
    val a = choose { a: Action =>
      bestTowards(g, oldPos, a, g.player)
    }

    oldPos.update(a)
  }

  // Specification: `action` is valid from `old` and no other valid action yields a
  // position closer to `target` under the squared-distance metric.
  def bestTowards(g: Game, old: Pos, action: Action, target: Pos): Boolean = {
    def metric(a: Action): Int = {
      old.update(a).distance(target)
    }

    def betterThan(a: Action, other: Action): Boolean = {
      !g.isValidAction(old, other) || (metric(a) <= metric(other))
    }

    g.isValidAction(old, action) &&
    betterThan(action, MoveUp) &&
    betterThan(action, MoveDown) &&
    betterThan(action, MoveLeft) &&
    betterThan(action, MoveRight) &&
    betterThan(action, Noop)
  }

  // Moves every monster one step towards the player.
  def stepMonsters(g: Game, lp: List[Pos]): List[Pos] = lp match {
    case Cons(h,t) => Cons(stepMonster(g, h), stepMonsters(g, t))
    case Nil() => Nil()
  }

  // The player's action is supplied by the oracle; invalid moves leave the player in place.
  def stepPlayer(g: Game)(implicit o: Oracle[Action]) = {
    val action: Action = ???

    val np = g.player.update(action)

    Game(if (g.map.isValidPos(np)) np else g.player, g.monsters, g.map)
  }

  def stepPlayerTmp(g: Game) = {
    val np = withOracle{ implicit o: Oracle[Action] => {
      val action: Action = ???
      g.player.update(action)
    } ensuring { g.map.isValidPos(_) }}

    Game(if (g.map.isValidPos(np)) np else g.player, g.monsters, g.map)
  }

  // Renders the board to the terminal with ANSI escapes (excluded from verification).
  @extern
  def display(g: Game): Int = {
    print("\\033[2J\\033[1;1H")

    print(" ")
    for (x <- 0 until g.map.size.x) {
      print(x)
    }
    println

    print(" ╔")
    for (x <- 0 until g.map.size.x) {
      print('═')
    }
    println('╗')

    for (y <- 0 until g.map.size.y) {
      print(y+" ║")
      for (x <- 0 until g.map.size.x) {
        val c = Pos(x,y)
        if (c.isAtAny(g.map.walls)) {
          print('▒')
        } else if (g.player == c) {
          if (g.isDead) {
            print(Console.RED+Console.BOLD+"☠"+Console.RESET)
          } else {
            print(Console.GREEN+"☺"+Console.RESET)
          }
        } else if (c.isAtAny(g.monsters)) {
          print(Console.RED+"X"+Console.RESET)
        } else {
          print(" ")
        }
      }
      println('║')
    }

    print(" ╚")
    for (x <- 0 until g.map.size.x) {
      print('═')
    }
    println('╝')

    42
  }

  @ignore
  def foreach[A](l: List[A], f: A => Unit): Unit = l match {
    case Cons(h, t) => f(h); foreach(t, f)
    case Nil() =>
  }

  // A pull-based oracle backed by an effectful source; head/left/right are memoized.
  @extern
  abstract class OracleSource[T] extends Oracle[T] {
    def branch: OracleSource[T]
    def value: T

    lazy val v: T = value
    lazy val l: OracleSource[T] = branch
    lazy val r: OracleSource[T] = branch

    override def head: T = v
    override def left: Oracle[T] = l
    override def right: Oracle[T] = r
  }

  // Oracle reading player actions from the keyboard (virtual key codes via jline).
  @extern
  class Keyboard extends OracleSource[Action] {
    def branch = new Keyboard
    def value = {
      import scala.tools.jline._

      var askAgain = false
      var action: Action = Noop
      val t = new UnixTerminal()

      try {
        t.init()

        do {
          if (askAgain) println("?")
          askAgain = false
          t.readVirtualKey(System.in) match {
            case 16 =>
              action = MoveUp
            case 14 =>
              action = MoveDown
            case 6 =>
              action = MoveRight
            case 2 =>
              action = MoveLeft
            case a =>
              println("Got "+a)
              askAgain = true
          }
        } while(askAgain)
      } finally {
        t.restore()
      }

      action
    }
  }

  // Oracle producing uniformly random moves.
  @extern
  class Random extends OracleSource[Action] {
    def value = {
      scala.util.Random.nextInt(4) match {
        case 0 =>
          MoveUp
        case 1 =>
          MoveDown
        case 2 =>
          MoveLeft
        case 3 =>
          MoveRight
        case _ =>
          MoveUp
      }
    }
    def branch = new Random
  }

  @extern
  def getOracle(): Oracle[Action] = {
    new Keyboard
  }

  @extern
  def pause(): Unit = {
    readLine
  }

  // Entry point: builds a small level and plays it with the keyboard oracle.
  def start() = {
    val map = Map(Cons(Pos(2,2), Cons(Pos(2,3), Nil())), Pos(10,10))
    val monsters = Cons(Pos(8,5), Cons(Pos(6,2), Nil()))
    val init = Game(Pos(0,0), monsters, map)

    val res = play(init)(getOracle())
    val tmp = display(res)
    res
  }
}
| epfl-lara/leon | testcases/extern/GameNoSet.scala | Scala | gpl-3.0 | 6,857 |
/*
* Copyright (c) 2014-2015 by its authors. Some rights reserved.
* See the project homepage at: http://www.monifu.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monifu.reactive.internals.operators
import monifu.concurrent.cancelables.BooleanCancelable
import monifu.reactive.Ack.{Cancel, Continue}
import monifu.reactive.observers.SynchronousSubscriber
import monifu.reactive.{Ack, Observable, Observer, Subscriber}
import scala.concurrent.Future
import scala.concurrent.duration._
private[reactive] object sample {
  /**
   * Implementation for `Observable.sample(initialDelay, delay)`.
   *
   * Samples the source at a fixed rate by delegating to the sampler-based
   * variant with an `intervalAtFixedRate` tick stream as the sampler.
   */
  def once[T](source: Observable[T], initialDelay: FiniteDuration, delay: FiniteDuration): Observable[T] =
    once(source, Observable.intervalAtFixedRate(initialDelay, delay))
/**
* Implementation for `Observable.sample(sampler)`.
*/
def once[T,U](source: Observable[T], sampler: Observable[U]): Observable[T] =
Observable.create { subscriber =>
source.onSubscribe(new SampleObserver(
subscriber, sampler, shouldRepeatOnSilence = false))
}
/**
* Implementation for `Observable.sampleRepeated(sampler)`.
*/
def repeated[T,U](source: Observable[T], sampler: Observable[U]): Observable[T] =
Observable.create { subscriber =>
source.onSubscribe(new SampleObserver(
subscriber, sampler, shouldRepeatOnSilence = true))
}
/**
* Implementation for `Observable.sampleRepeated(initialDelay, delay)`.
*/
def repeated[T](source: Observable[T], initialDelay: FiniteDuration, delay: FiniteDuration): Observable[T] =
repeated(source, Observable.intervalAtFixedRate(initialDelay, delay))
private class SampleObserver[T,U]
(downstream: Subscriber[T], sampler: Observable[U], shouldRepeatOnSilence: Boolean)
extends SynchronousSubscriber[T] {
implicit val scheduler = downstream.scheduler
@volatile private[this] var hasValue = false
// MUST BE written before `hasValue = true`
private[this] var lastValue: T = _
// to be written in onComplete/onError, to be read from tick
@volatile private[this] var upstreamIsDone = false
// MUST BE written to before `upstreamIsDone = true`
private[this] var upstreamError: Throwable = null
// MUST BE canceled by the sampler
private[this] val downstreamConnection = BooleanCancelable()
def onNext(elem: T): Ack =
if (downstreamConnection.isCanceled) Cancel else {
lastValue = elem
hasValue = true
Continue
}
def onError(ex: Throwable): Unit = {
upstreamError = ex
upstreamIsDone = true
}
def onComplete(): Unit = {
upstreamIsDone = true
}
sampler.onSubscribe(new Observer[U] {
private[this] var samplerIsDone = false
def onNext(elem: U): Future[Ack] = {
if (samplerIsDone) Cancel else {
if (upstreamIsDone)
signalComplete(upstreamError)
else if (!hasValue)
Continue
else {
hasValue = shouldRepeatOnSilence
val ack = downstream.onNext(lastValue)
notifyUpstreamOnCancel(ack, downstreamConnection)
ack
}
}
}
def onError(ex: Throwable): Unit = {
signalComplete(ex)
}
def onComplete(): Unit = {
signalComplete()
}
private def signalComplete(ex: Throwable = null): Cancel = {
if (!samplerIsDone) {
samplerIsDone = true
if (ex != null) downstream.onError(ex) else
downstream.onComplete()
}
Cancel
}
private def notifyUpstreamOnCancel(ack: Future[Ack], c: BooleanCancelable): Unit = {
if (ack.isCompleted) {
if (ack != Continue && ack.value.get != Continue.IsSuccess)
c.cancel()
}
else ack.onComplete {
case Continue.IsSuccess => ()
case _ => c.cancel()
}
}
})
}
}
| sergius/monifu | monifu/shared/src/main/scala/monifu/reactive/internals/operators/sample.scala | Scala | apache-2.0 | 4,472 |
package support
import scala.io.Source
import scala.reflect.runtime.universe._
import scala.tools.reflect.ToolBox
/**
 * Helpers for grading/inspecting student Scala sources: parse a file into an
 * AST with the runtime-reflection toolbox and query it for methods, vals,
 * vars, ifs and method calls.
 */
object TreeTraversals {

  /** Renders a tree in raw (constructor) form — useful for debugging. */
  def raw(tree: Tree) = showRaw(tree)

  /**
   * Parses a Scala source file into an AST.
   *
   * The toolbox parser does not accept `package` declarations, so the word
   * "package" is stripped from the source before parsing. Note this removes
   * the word anywhere it appears (comments, identifiers) — a pre-existing
   * limitation kept for behavior compatibility.
   */
  def asTree(file: String): Tree = {
    val toolbox = runtimeMirror(getClass.getClassLoader).mkToolBox()
    // Fix: close the Source once read; previously the file handle leaked.
    val src = Source.fromFile(file)
    val text =
      try src.getLines.mkString("\n")
      finally src.close()
    toolbox.parse("package".r.replaceAllIn(text, ""))
  }

  def getMethods(file: String): List[DefDef] =
    getMethods(asTree(file))

  /** All method definitions (`DefDef`) found anywhere in the tree. */
  def getMethods(tree: Tree): List[DefDef] = {
    val traverser = new MethodTraverser
    traverser.traverse(tree)
    traverser.defdefs
  }

  /** The first method whose name equals `name`, if any. */
  def getMethod(tree: Tree, name: String): Option[DefDef] =
    getMethods(tree).find(_.name == TermName(name))

  def getMethod(file: String, name: String): Option[DefDef] =
    getMethod(asTree(file), name)

  def getVals(file: String): List[ValDef] =
    getVals(asTree(file))

  /** All val/var definitions (excluding method parameters). */
  def getVals(tree: Tree): List[ValDef] = {
    val traverser = new ValTraverser
    traverser.traverse(tree)
    traverser.valdefs
  }

  /** The first val/var whose name equals `name`, if any. */
  def getVal(tree: Tree, name: String): Option[ValDef] =
    getVals(tree).find(_.name == TermName(name))

  /** Only the mutable definitions (`var`s) among the vals. */
  def getVars(tree: Tree): List[ValDef] =
    getVals(tree).filter(_.mods.hasFlag(Flag.MUTABLE))

  def getVars(file: String): List[ValDef] =
    getVars(asTree(file))

  def getTerms(file: String): List[TermName] =
    getTerms(asTree(file))

  /** Names of all member selections and label definitions in the tree. */
  def getTerms(tree: Tree): List[TermName] = {
    val traverser = new MethodCallTraverser
    traverser.traverse(tree)
    traverser.calls
  }

  /** All `if` expressions in the tree. */
  def getIfs(tree: Tree): List[If] = {
    val traverser = new IfExprTraverser
    traverser.traverse(tree)
    traverser.ifs
  }

  def getIfs(file: String): List[If] =
    getIfs(asTree(file))

  def hasVarDef(tree: Tree): Boolean =
    getVars(tree).nonEmpty

  def hasValDef(tree: Tree): Boolean =
    getVals(tree).nonEmpty

  // String forms of every call/label name; shared by the has* predicates.
  private def callNames(tree: Tree): List[String] =
    getTerms(tree).map(_.toString)

  /** True when the tree contains a while/do-while loop (label names). */
  def hasWhileLoop(tree: Tree): Boolean =
    callNames(tree).exists(_.matches(".*while.*|.*doWhile.*"))

  /** True when the tree calls `foreach` somewhere. */
  def hasForEach(tree: Tree): Boolean =
    callNames(tree).exists(_.matches("foreach"))

  // NOTE(review): a for-comprehension with `yield` desugars to `map`, so any
  // plain `map` call also satisfies this predicate.
  def hasForComp(tree: Tree): Boolean =
    callNames(tree).exists(_.matches("map"))

  // NOTE(review): this requires *all three* loop forms to be present at once;
  // if the intent is "contains any loop", the condition should use `||`.
  // Kept as `&&` to preserve existing behavior.
  def hasLoops(tree: Tree): Boolean =
    hasWhileLoop(tree) &&
    hasForEach(tree) &&
    hasForComp(tree)

  def hasIfs(tree: Tree): Boolean =
    getIfs(tree).nonEmpty

  def doesNotHaveIfs(tree: Tree): Boolean =
    getIfs(tree).isEmpty

  /** Collects every DefDef encountered during traversal. */
  class MethodTraverser extends Traverser {
    var defdefs = List[DefDef]()
    override def traverse(tree: Tree): Unit = tree match {
      case defdef: DefDef =>
        defdefs = defdef :: defdefs
        super.traverse(tree)
      case _ => super.traverse(tree)
    }
  }

  /** Collects every ValDef that is not a method parameter. */
  class ValTraverser extends Traverser {
    var valdefs = List[ValDef]()
    override def traverse(tree: Tree): Unit = tree match {
      case valdef @ ValDef(mods, _, _, _) =>
        if (!mods.hasFlag(Flag.PARAM))
          valdefs = valdef :: valdefs
        super.traverse(tree)
      case _ => super.traverse(tree)
    }
  }

  /** Collects the names of member selections and label definitions. */
  class MethodCallTraverser extends Traverser {
    var calls = List[TermName]()
    override def traverse(tree: Tree): Unit = tree match {
      case Select(obj, name @ TermName(_)) =>
        calls = name :: calls
        super.traverse(tree)
      case LabelDef(name, _, _) =>
        // while/do-while loops desugar to LabelDefs with names like
        // `while$1`, which hasWhileLoop matches on.
        calls = name :: calls
        super.traverse(tree)
      case _ => super.traverse(tree)
    }
  }

  /** Collects every `if` expression encountered during traversal. */
  class IfExprTraverser extends Traverser {
    var ifs = List[If]()
    override def traverse(tree: Tree): Unit = tree match {
      case ife @ If(c, ifb, elb) =>
        ifs = ife :: ifs
        super.traverse(tree)
      case _ => super.traverse(tree)
    }
  }
}
| BBK-PiJ-2015-67/sdp-portfolio | exercises/week03/src/test/scala/support/TreeTraversals.scala | Scala | unlicense | 4,504 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import java.io.File
import java.nio.file.Files
import java.util
import java.util.Properties
import kafka.server.KafkaConfig
import kafka.utils.Exit
import org.apache.kafka.common.config.ConfigException
import org.apache.kafka.common.config.internals.BrokerSecurityConfigs
import org.apache.kafka.common.config.types.Password
import org.apache.kafka.common.internals.FatalExitError
import org.junit.jupiter.api.{AfterEach, BeforeEach, Test}
import org.junit.jupiter.api.Assertions._
import scala.jdk.CollectionConverters._
/**
 * Tests for `Kafka.getPropsFromArgs` combined with `KafkaConfig.fromProps`:
 * command-line `--override` parsing, KRaft process-role/node.id validation,
 * SSL password masking, and the precedence between ZooKeeper-client system
 * properties and the corresponding Kafka configs.
 */
class KafkaTest {

  // Make Exit.exit throw FatalExitError instead of terminating the JVM so the
  // "bad arguments" paths can be asserted with assertThrows.
  @BeforeEach
  def setUp(): Unit = Exit.setExitProcedure((status, _) => throw new FatalExitError(status))

  @AfterEach
  def tearDown(): Unit = Exit.resetExitProcedure()

  @Test
  def testGetKafkaConfigFromArgs(): Unit = {
    val propertiesFile = prepareDefaultConfig()

    // We should load configuration file without any arguments
    val config1 = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile)))
    assertEquals(1, config1.brokerId)

    // We should be able to override given property on command line
    val config2 = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", "broker.id=2")))
    assertEquals(2, config2.brokerId)

    // We should be also able to set completely new property
    val config3 = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", "log.cleanup.policy=compact")))
    assertEquals(1, config3.brokerId)
    assertEquals(util.Arrays.asList("compact"), config3.logCleanupPolicy)

    // We should be also able to set several properties
    val config4 = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", "log.cleanup.policy=compact,delete", "--override", "broker.id=2")))
    assertEquals(2, config4.brokerId)
    assertEquals(util.Arrays.asList("compact","delete"), config4.logCleanupPolicy)
  }

  // Overrides must be introduced by "--override"; bare key=value args fail.
  @Test
  def testGetKafkaConfigFromArgsNonArgsAtTheEnd(): Unit = {
    val propertiesFile = prepareDefaultConfig()
    assertThrows(classOf[FatalExitError], () => KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", "broker.id=1", "broker.id=2"))))
  }

  @Test
  def testGetKafkaConfigFromArgsNonArgsOnly(): Unit = {
    val propertiesFile = prepareDefaultConfig()
    assertThrows(classOf[FatalExitError], () => KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "broker.id=1", "broker.id=2"))))
  }

  @Test
  def testGetKafkaConfigFromArgsNonArgsAtTheBegging(): Unit = {
    val propertiesFile = prepareDefaultConfig()
    assertThrows(classOf[FatalExitError], () => KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "broker.id=1", "--override", "broker.id=2"))))
  }

  @Test
  def testBrokerRoleNodeIdValidation(): Unit = {
    // Ensure that validation is happening at startup to check that brokers do not use their node.id as a voter in controller.quorum.voters
    val propertiesFile = new Properties
    propertiesFile.setProperty(KafkaConfig.ProcessRolesProp, "broker")
    propertiesFile.setProperty(KafkaConfig.NodeIdProp, "1")
    propertiesFile.setProperty(KafkaConfig.QuorumVotersProp, "1@localhost:9092")
    setListenerProps(propertiesFile)
    assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(propertiesFile))

    // Ensure that with a valid config no exception is thrown
    propertiesFile.setProperty(KafkaConfig.NodeIdProp, "2")
    KafkaConfig.fromProps(propertiesFile)
  }

  @Test
  def testControllerRoleNodeIdValidation(): Unit = {
    // Ensure that validation is happening at startup to check that controllers use their node.id as a voter in controller.quorum.voters
    val propertiesFile = new Properties
    propertiesFile.setProperty(KafkaConfig.ProcessRolesProp, "controller")
    propertiesFile.setProperty(KafkaConfig.NodeIdProp, "1")
    propertiesFile.setProperty(KafkaConfig.QuorumVotersProp, "2@localhost:9092")
    setListenerProps(propertiesFile)
    assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(propertiesFile))

    // Ensure that with a valid config no exception is thrown
    propertiesFile.setProperty(KafkaConfig.NodeIdProp, "2")
    KafkaConfig.fromProps(propertiesFile)
  }

  @Test
  def testColocatedRoleNodeIdValidation(): Unit = {
    // Ensure that validation is happening at startup to check that colocated processes use their node.id as a voter in controller.quorum.voters
    val propertiesFile = new Properties
    propertiesFile.setProperty(KafkaConfig.ProcessRolesProp, "controller,broker")
    propertiesFile.setProperty(KafkaConfig.NodeIdProp, "1")
    propertiesFile.setProperty(KafkaConfig.QuorumVotersProp, "2@localhost:9092")
    setListenerProps(propertiesFile)
    assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(propertiesFile))

    // Ensure that with a valid config no exception is thrown
    propertiesFile.setProperty(KafkaConfig.NodeIdProp, "2")
    KafkaConfig.fromProps(propertiesFile)
  }

  @Test
  def testMustContainQuorumVotersIfUsingProcessRoles(): Unit = {
    // Ensure that validation is happening at startup to check that if process.roles is set controller.quorum.voters is not empty
    val propertiesFile = new Properties
    propertiesFile.setProperty(KafkaConfig.ProcessRolesProp, "controller,broker")
    propertiesFile.setProperty(KafkaConfig.NodeIdProp, "1")
    propertiesFile.setProperty(KafkaConfig.QuorumVotersProp, "")
    setListenerProps(propertiesFile)
    assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(propertiesFile))

    // Ensure that if neither process.roles nor controller.quorum.voters is populated, then an exception is thrown if zookeeper.connect is not defined
    propertiesFile.setProperty(KafkaConfig.ProcessRolesProp, "")
    assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(propertiesFile))

    // Ensure that no exception is thrown once zookeeper.connect is defined
    propertiesFile.setProperty(KafkaConfig.ZkConnectProp, "localhost:2181")
    KafkaConfig.fromProps(propertiesFile)
  }

  // Shared listener setup for the KRaft validation tests above; brokers
  // additionally need inter-broker and advertised listeners.
  private def setListenerProps(props: Properties): Unit = {
    props.setProperty(KafkaConfig.ListenersProp, "PLAINTEXT://localhost:9093")
    props.setProperty(KafkaConfig.ControllerListenerNamesProp, "PLAINTEXT")
    if (props.getProperty(KafkaConfig.ProcessRolesProp).contains("broker")) {
      props.setProperty(KafkaConfig.InterBrokerListenerNameProp, "PLAINTEXT")
      props.setProperty(KafkaConfig.AdvertisedListenersProp, "PLAINTEXT://localhost:9092")
    }
  }

  // Password-typed configs must print as Password.HIDDEN but still expose
  // their real value via Password.value.
  @Test
  def testKafkaSslPasswords(): Unit = {
    val propertiesFile = prepareDefaultConfig()
    val config = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", "ssl.keystore.password=keystore_password",
                                                                                    "--override", "ssl.key.password=key_password",
                                                                                    "--override", "ssl.truststore.password=truststore_password",
                                                                                    "--override", "ssl.keystore.certificate.chain=certificate_chain",
                                                                                    "--override", "ssl.keystore.key=private_key",
                                                                                    "--override", "ssl.truststore.certificates=truststore_certificates")))
    assertEquals(Password.HIDDEN, config.getPassword(KafkaConfig.SslKeyPasswordProp).toString)
    assertEquals(Password.HIDDEN, config.getPassword(KafkaConfig.SslKeystorePasswordProp).toString)
    assertEquals(Password.HIDDEN, config.getPassword(KafkaConfig.SslTruststorePasswordProp).toString)
    assertEquals(Password.HIDDEN, config.getPassword(KafkaConfig.SslKeystoreKeyProp).toString)
    assertEquals(Password.HIDDEN, config.getPassword(KafkaConfig.SslKeystoreCertificateChainProp).toString)
    assertEquals(Password.HIDDEN, config.getPassword(KafkaConfig.SslTruststoreCertificatesProp).toString)

    assertEquals("key_password", config.getPassword(KafkaConfig.SslKeyPasswordProp).value)
    assertEquals("keystore_password", config.getPassword(KafkaConfig.SslKeystorePasswordProp).value)
    assertEquals("truststore_password", config.getPassword(KafkaConfig.SslTruststorePasswordProp).value)
    assertEquals("private_key", config.getPassword(KafkaConfig.SslKeystoreKeyProp).value)
    assertEquals("certificate_chain", config.getPassword(KafkaConfig.SslKeystoreCertificateChainProp).value)
    assertEquals("truststore_certificates", config.getPassword(KafkaConfig.SslTruststoreCertificatesProp).value)
  }

  // Passwords containing shell/property special characters must survive
  // --override parsing intact.
  @Test
  def testKafkaSslPasswordsWithSymbols(): Unit = {
    val password = "=!#-+!?*/\\"\\'^%$=\\\\.,@:;="
    val propertiesFile = prepareDefaultConfig()
    val config = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile,
      "--override", "ssl.keystore.password=" + password,
      "--override", "ssl.key.password=" + password,
      "--override", "ssl.truststore.password=" + password)))
    assertEquals(Password.HIDDEN, config.getPassword(KafkaConfig.SslKeyPasswordProp).toString)
    assertEquals(Password.HIDDEN, config.getPassword(KafkaConfig.SslKeystorePasswordProp).toString)
    assertEquals(Password.HIDDEN, config.getPassword(KafkaConfig.SslTruststorePasswordProp).toString)

    assertEquals(password, config.getPassword(KafkaConfig.SslKeystorePasswordProp).value)
    assertEquals(password, config.getPassword(KafkaConfig.SslKeyPasswordProp).value)
    assertEquals(password, config.getPassword(KafkaConfig.SslTruststorePasswordProp).value)
  }

  // Shared fixture values for the testZkConfig-driven tests below.
  private val booleanPropValueToSet = true
  private val stringPropValueToSet = "foo"
  private val passwordPropValueToSet = "ThePa$$word!"
  private val listPropValueToSet = List("A", "B")

  @Test
  def testZkSslClientEnable(): Unit = {
    testZkConfig(KafkaConfig.ZkSslClientEnableProp, "zookeeper.ssl.client.enable",
      "zookeeper.client.secure", booleanPropValueToSet, config => Some(config.zkSslClientEnable), booleanPropValueToSet, Some(false))
  }

  @Test
  def testZkSslKeyStoreLocation(): Unit = {
    testZkConfig(KafkaConfig.ZkSslKeyStoreLocationProp, "zookeeper.ssl.keystore.location",
      "zookeeper.ssl.keyStore.location", stringPropValueToSet, config => config.zkSslKeyStoreLocation, stringPropValueToSet)
  }

  @Test
  def testZkSslTrustStoreLocation(): Unit = {
    testZkConfig(KafkaConfig.ZkSslTrustStoreLocationProp, "zookeeper.ssl.truststore.location",
      "zookeeper.ssl.trustStore.location", stringPropValueToSet, config => config.zkSslTrustStoreLocation, stringPropValueToSet)
  }

  @Test
  def testZookeeperKeyStorePassword(): Unit = {
    testZkConfig(KafkaConfig.ZkSslKeyStorePasswordProp, "zookeeper.ssl.keystore.password",
      "zookeeper.ssl.keyStore.password", passwordPropValueToSet, config => config.zkSslKeyStorePassword, new Password(passwordPropValueToSet))
  }

  @Test
  def testZookeeperTrustStorePassword(): Unit = {
    testZkConfig(KafkaConfig.ZkSslTrustStorePasswordProp, "zookeeper.ssl.truststore.password",
      "zookeeper.ssl.trustStore.password", passwordPropValueToSet, config => config.zkSslTrustStorePassword, new Password(passwordPropValueToSet))
  }

  @Test
  def testZkSslKeyStoreType(): Unit = {
    testZkConfig(KafkaConfig.ZkSslKeyStoreTypeProp, "zookeeper.ssl.keystore.type",
      "zookeeper.ssl.keyStore.type", stringPropValueToSet, config => config.zkSslKeyStoreType, stringPropValueToSet)
  }

  @Test
  def testZkSslTrustStoreType(): Unit = {
    testZkConfig(KafkaConfig.ZkSslTrustStoreTypeProp, "zookeeper.ssl.truststore.type",
      "zookeeper.ssl.trustStore.type", stringPropValueToSet, config => config.zkSslTrustStoreType, stringPropValueToSet)
  }

  @Test
  def testZkSslProtocol(): Unit = {
    testZkConfig(KafkaConfig.ZkSslProtocolProp, "zookeeper.ssl.protocol",
      "zookeeper.ssl.protocol", stringPropValueToSet, config => Some(config.ZkSslProtocol), stringPropValueToSet, Some("TLSv1.2"))
  }

  @Test
  def testZkSslEnabledProtocols(): Unit = {
    testZkConfig(KafkaConfig.ZkSslEnabledProtocolsProp, "zookeeper.ssl.enabled.protocols",
      "zookeeper.ssl.enabledProtocols", listPropValueToSet.mkString(","), config => config.ZkSslEnabledProtocols, listPropValueToSet.asJava)
  }

  @Test
  def testZkSslCipherSuites(): Unit = {
    testZkConfig(KafkaConfig.ZkSslCipherSuitesProp, "zookeeper.ssl.cipher.suites",
      "zookeeper.ssl.ciphersuites", listPropValueToSet.mkString(","), config => config.ZkSslCipherSuites, listPropValueToSet.asJava)
  }

  @Test
  def testZkSslEndpointIdentificationAlgorithm(): Unit = {
    // this property is different than the others
    // because the system property values and the Kafka property values don't match
    val kafkaPropName = KafkaConfig.ZkSslEndpointIdentificationAlgorithmProp
    assertEquals("zookeeper.ssl.endpoint.identification.algorithm", kafkaPropName)
    val sysProp = "zookeeper.ssl.hostnameVerification"
    val expectedDefaultValue = "HTTPS"
    val propertiesFile = prepareDefaultConfig()
    // first make sure there is the correct default value
    val emptyConfig = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile)))
    assertNull(emptyConfig.originals.get(kafkaPropName)) // doesn't appear in the originals
    assertEquals(expectedDefaultValue, emptyConfig.values.get(kafkaPropName)) // but default value appears in the values
    assertEquals(expectedDefaultValue, emptyConfig.ZkSslEndpointIdentificationAlgorithm) // and has the correct default value
    // next set system property alone
    Map("true" -> "HTTPS", "false" -> "").foreach { case (sysPropValue, expected) => {
      try {
        System.setProperty(sysProp, sysPropValue)
        val config = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile)))
        assertNull(config.originals.get(kafkaPropName)) // doesn't appear in the originals
        assertEquals(expectedDefaultValue, config.values.get(kafkaPropName)) // default value appears in the values
        assertEquals(expected, config.ZkSslEndpointIdentificationAlgorithm) // system property impacts the ultimate value of the property
      } finally {
        System.clearProperty(sysProp)
      }
    }}
    // finally set Kafka config alone
    List("https", "").foreach(expected => {
      val config = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", s"$kafkaPropName=${expected}")))
      assertEquals(expected, config.originals.get(kafkaPropName)) // appears in the originals
      assertEquals(expected, config.values.get(kafkaPropName)) // appears in the values
      assertEquals(expected, config.ZkSslEndpointIdentificationAlgorithm) // is the ultimate value
    })
  }

  @Test
  def testZkSslCrlEnable(): Unit = {
    testZkConfig(KafkaConfig.ZkSslCrlEnableProp, "zookeeper.ssl.crl.enable",
      "zookeeper.ssl.crl", booleanPropValueToSet, config => Some(config.ZkSslCrlEnable), booleanPropValueToSet, Some(false))
  }

  @Test
  def testZkSslOcspEnable(): Unit = {
    testZkConfig(KafkaConfig.ZkSslOcspEnableProp, "zookeeper.ssl.ocsp.enable",
      "zookeeper.ssl.ocsp", booleanPropValueToSet, config => Some(config.ZkSslOcspEnable), booleanPropValueToSet, Some(false))
  }

  @Test
  def testConnectionsMaxReauthMsDefault(): Unit = {
    val propertiesFile = prepareDefaultConfig()
    val config = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile)))
    assertEquals(0L, config.valuesWithPrefixOverride("sasl_ssl.oauthbearer.").get(BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS).asInstanceOf[Long])
  }

  @Test
  def testConnectionsMaxReauthMsExplicit(): Unit = {
    val propertiesFile = prepareDefaultConfig()
    val expected = 3600000
    val config = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", s"sasl_ssl.oauthbearer.connections.max.reauth.ms=${expected}")))
    assertEquals(expected, config.valuesWithPrefixOverride("sasl_ssl.oauthbearer.").get(BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS).asInstanceOf[Long])
  }

  /**
   * Exercises a ZooKeeper-related Kafka config in three stages:
   * 1. no config at all — verify the default (if any);
   * 2. only the ZooKeeper client system property set — it must show through
   *    the derived property but not through originals/values;
   * 3. only the Kafka config set — it must win and appear everywhere.
   *
   * @param kafkaPropName         the Kafka config key under test
   * @param expectedKafkaPropName sanity check that the key literal matches
   * @param sysPropName           the corresponding ZooKeeper system property
   * @param propValueToSet        value to set (as system property and config)
   * @param getPropValueFrom      accessor for the derived KafkaConfig value
   * @param expectedPropertyValue value expected from the accessor
   * @param expectedDefaultValue  default when nothing is configured, if any
   */
  private def testZkConfig[T, U](kafkaPropName: String,
                                 expectedKafkaPropName: String,
                                 sysPropName: String,
                                 propValueToSet: T,
                                 getPropValueFrom: (KafkaConfig) => Option[T],
                                 expectedPropertyValue: U,
                                 expectedDefaultValue: Option[T] = None): Unit = {
    assertEquals(expectedKafkaPropName, kafkaPropName)
    val propertiesFile = prepareDefaultConfig()
    // first make sure there is the correct default value (if any)
    val emptyConfig = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile)))
    assertNull(emptyConfig.originals.get(kafkaPropName)) // doesn't appear in the originals
    if (expectedDefaultValue.isDefined) {
      // confirm default value behavior
      assertEquals(expectedDefaultValue.get, emptyConfig.values.get(kafkaPropName)) // default value appears in the values
      assertEquals(expectedDefaultValue.get, getPropValueFrom(emptyConfig).get) // default value appears in the property
    } else {
      // confirm no default value behavior
      assertNull(emptyConfig.values.get(kafkaPropName)) // doesn't appear in the values
      assertEquals(None, getPropValueFrom(emptyConfig)) // has no default value
    }
    // next set system property alone
    try {
      System.setProperty(sysPropName, s"$propValueToSet")
      // need to create a new Kafka config for the system property to be recognized
      val config = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile)))
      assertNull(config.originals.get(kafkaPropName)) // doesn't appear in the originals
      // confirm default value (if any) overridden by system property
      if (expectedDefaultValue.isDefined)
        assertEquals(expectedDefaultValue.get, config.values.get(kafkaPropName)) // default value (different from system property) appears in the values
      else
        assertNull(config.values.get(kafkaPropName)) // doesn't appear in the values
      // confirm system property appears in the property
      assertEquals(Some(expectedPropertyValue), getPropValueFrom(config))
    } finally {
      System.clearProperty(sysPropName)
    }
    // finally set Kafka config alone
    val config = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", s"$kafkaPropName=${propValueToSet}")))
    assertEquals(expectedPropertyValue, config.values.get(kafkaPropName)) // appears in the values
    assertEquals(Some(expectedPropertyValue), getPropValueFrom(config)) // appears in the property
  }

  // Minimal valid broker config (ZooKeeper mode) written to a temp file.
  def prepareDefaultConfig(): String = {
    prepareConfig(Array("broker.id=1", "zookeeper.connect=somewhere"))
  }

  /**
   * Writes the given property lines to a self-deleting temp file and returns
   * its absolute path, closing the stream even on failure.
   */
  def prepareConfig(lines : Array[String]): String = {
    val file = File.createTempFile("kafkatest", ".properties")
    file.deleteOnExit()

    val writer = Files.newOutputStream(file.toPath)
    try {
      lines.foreach { l =>
        writer.write(l.getBytes)
        writer.write("\\n".getBytes)
      }
      file.getAbsolutePath
    } finally writer.close()
  }
}
| guozhangwang/kafka | core/src/test/scala/unit/kafka/KafkaConfigTest.scala | Scala | apache-2.0 | 20,124 |
// Copyright (C) 2014 Fehmi Can Saglam (@fehmicans) and contributors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package reactivemongo.extensions.dao
import scala.concurrent.ExecutionContext.Implicits.global
import reactivemongo.extensions.model.CustomIdModel
import reactivemongo.api.indexes.{ Index, IndexType }
import reactivemongo.extensions.util.Misc.UUID
// DAO for CustomIdModel documents keyed by a String id, stored in a
// collection named "customId-<random uuid>" (fresh per instantiation, as
// used by tests). The early-initializer block (`extends { ... } with`)
// assigns `autoIndexes` before BsonDao's constructor runs, so the indexes —
// a unique background index on "name" and a background index on "age" — are
// in place when the DAO initializes. NOTE(review): early initializers are
// deprecated in Scala 2.13 and removed in Scala 3.
class CustomIdBsonDao extends {
  override val autoIndexes = Seq(
    Index(Seq("name" -> IndexType.Ascending), unique = true, background = true),
    Index(Seq("age" -> IndexType.Ascending), background = true)
  )
} with BsonDao[CustomIdModel, String](MongoContext.db, "customId-" + UUID())
| fehmicansaglam/reactivemongo-extensions | bson/src/test/scala/dao/CustomIdBsonDao.scala | Scala | apache-2.0 | 1,283 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.columnar.compression
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.columnar.{ColumnAccessor, NativeColumnAccessor}
import org.apache.spark.sql.execution.vectorized.WritableColumnVector
import org.apache.spark.sql.types.AtomicType
// Stackable mixin that layers decompression on top of a native column
// accessor: after the parent's header is read, the next int in the buffer
// selects the compression scheme and the matching decoder serves the values.
private[columnar] trait CompressibleColumnAccessor[T <: AtomicType] extends ColumnAccessor {
  this: NativeColumnAccessor[T] =>

  // Decoder chosen in initialize(); reads values from the shared buffer.
  private var decoder: Decoder[T] = _

  abstract override protected def initialize(): Unit = {
    super.initialize()
    // The scheme id stored after the parent-initialized portion of the
    // buffer picks the concrete decoder for this column's type.
    decoder = CompressionScheme(underlyingBuffer.getInt()).decoder(buffer, columnType)
  }

  // More values remain if either the parent or the decoder can produce them.
  abstract override def hasNext: Boolean = super.hasNext || decoder.hasNext

  // Decodes a single value into `row` at `ordinal`.
  override def extractSingle(row: InternalRow, ordinal: Int): Unit = {
    decoder.next(row, ordinal)
  }

  // Bulk-decompresses up to `capacity` values into the column vector.
  def decompress(columnVector: WritableColumnVector, capacity: Int): Unit =
    decoder.decompress(columnVector, capacity)
}
| esi-mineset/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/compression/CompressibleColumnAccessor.scala | Scala | apache-2.0 | 1,763 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.configuration
import java.io.FileInputStream
import java.security.KeyStore
import com.typesafe.config.ConfigFactory
import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory}
/**
 * Mixin that builds a TLS [[javax.net.ssl.SSLContext]] from a JKS keystore
 * whose location and password come from `server.conf`.
 */
trait SSLConfiguration {

  private val serverConfig = ConfigFactory.load("server.conf")
  private val keyStoreResource =
    serverConfig.getString("org.apache.predictionio.server.ssl-keystore-resource")
  private val password = serverConfig.getString("org.apache.predictionio.server.ssl-keystore-pass")
  // NOTE(review): read from configuration but never used below; kept so that
  // configs missing the key still fail fast, as before.
  private val keyAlias = serverConfig.getString("org.apache.predictionio.server.ssl-key-alias")

  // JKS keystore loaded from the classpath resource named by configuration.
  private val keyStore = {
    val clientStore = KeyStore.getInstance("JKS")
    val inputStream = new FileInputStream(
      getClass().getClassLoader().getResource(keyStoreResource).getFile())
    try {
      clientStore.load(inputStream, password.toCharArray)
    } finally {
      // Fix: close the stream even when load() throws; it previously leaked
      // on any load failure because close() only ran on the success path.
      inputStream.close()
    }
    clientStore
  }

  /**
   * Creates a TLS context whose key material and trust anchors both come
   * from the keystore above, using the JVM's default manager algorithms.
   */
  def sslContext: SSLContext = {
    val context = SSLContext.getInstance("TLS")
    val tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm)
    val kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm)
    kmf.init(keyStore, password.toCharArray)
    tmf.init(keyStore)
    context.init(kmf.getKeyManagers, tmf.getTrustManagers, null)
    context
  }
}
| PredictionIO/PredictionIO | common/src/main/scala/org/apache/predictionio/configuration/SSLConfiguration.scala | Scala | apache-2.0 | 2,199 |
/*
* Copyright 2013 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
import cats._
/** A [[Response]] paired with a request context of type `A`. */
final case class ContextResponse[F[_], A](context: A, response: Response[F]) {

  /** Returns a copy whose context has been transformed by `f`. */
  def mapContext[B](f: A => B): ContextResponse[F, B] = {
    val transformed = f(context)
    ContextResponse(transformed, response)
  }

  /** Translates the response's effect type via the natural transformation `fk`. */
  def mapK[G[_]](fk: F ~> G): ContextResponse[G, A] = {
    val translated = response.mapK(fk)
    ContextResponse(context, translated)
  }
}

// Included to avoid binary compatibility issues with the apply method if/when
// we ever need a companion object in the future.
object ContextResponse {}
| http4s/http4s | core/shared/src/main/scala/org/http4s/ContextResponse.scala | Scala | apache-2.0 | 1,068 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import org.mockito.Mockito.when
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.BoxValidationFixture
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
// Validation spec for box CP672, driven by the shared BoxValidationFixture.
class CP672Spec extends WordSpec with MockitoSugar with Matchers with BoxValidationFixture[ComputationsBoxRetriever] {

  // Mocked retriever consumed by the fixture's generated tests.
  val boxRetriever = mock[ComputationsBoxRetriever]

  // Stub CPQ8 to Some(false) before each validation check runs.
  override def setUpMocks = {
    when(boxRetriever.cpQ8()).thenReturn(CPQ8(Some(false)))
  }

  // CP672 must hold a value of zero or more.
  testBoxIsZeroOrPositive("CP672", CP672.apply)
}
| hmrc/ct-calculations | src/test/scala/uk/gov/hmrc/ct/computations/CP672Spec.scala | Scala | apache-2.0 | 1,199 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.server.mocks
import java.util.concurrent.atomic.AtomicInteger
import akka.NotUsed
import akka.stream.scaladsl.Source
import com.lightbend.lagom.scaladsl.api.Service.pathCall
import com.lightbend.lagom.scaladsl.api.Service.named
import com.lightbend.lagom.scaladsl.api.Service.restCall
import com.lightbend.lagom.scaladsl.api.Descriptor
import com.lightbend.lagom.scaladsl.api.Service
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.api.deser.DefaultExceptionSerializer
import com.lightbend.lagom.scaladsl.api.transport.Method
import play.api.Environment
import play.api.Mode
object PathProvider {
  // Single path shared by the strict and streamed test-service descriptors below.
  val PATH = "/some-path"
}
/**
* A simple service tests may implement to provide their needed behavior.
*/
trait SimpleStrictService extends Service {
  // Descriptor exposing one strict GET call at PathProvider.PATH, with a
  // Dev-mode exception serializer so error details are visible in tests.
  override def descriptor: Descriptor =
    named("simple-strict")
      .withCalls(restCall(Method.GET, PathProvider.PATH, simpleGet _))
      .withExceptionSerializer(new DefaultExceptionSerializer(Environment.simple(mode = Mode.Dev)))

  // Strict (non-streamed) call; concrete test classes supply the implementation.
  def simpleGet(): ServiceCall[NotUsed, String]
}
/**
* A simple service that uses Lagom's HeaderFilters. Tests may implement this to provide their needed behavior.
*/
abstract class FilteredStrictService(atomicInteger: AtomicInteger) extends SimpleStrictService {
  // Same descriptor as SimpleStrictService, but with VerboseHeaderLagomFilter
  // installed; the counter is handed to the filter.
  override def descriptor: Descriptor =
    super.descriptor.withHeaderFilter(new VerboseHeaderLagomFilter(atomicInteger))
}
/**
* A simple service tests may implement to provide their needed behavior.
*/
trait SimpleStreamedService extends Service {
  // Descriptor exposing one streamed call (Source in, Source out) at
  // PathProvider.PATH, with a Dev-mode exception serializer.
  override def descriptor: Descriptor =
    named("simple-streamed")
      .withCalls(pathCall(PathProvider.PATH, streamed _))
      .withExceptionSerializer(new DefaultExceptionSerializer(Environment.simple(mode = Mode.Dev)))

  // Streamed call; concrete test classes supply the implementation.
  def streamed(): ServiceCall[Source[String, NotUsed], Source[String, NotUsed]]
}
| lagom/lagom | service/scaladsl/server/src/test/scala/com/lightbend/lagom/scaladsl/server/mocks/MockServices.scala | Scala | apache-2.0 | 1,971 |
import gruenewa.prelude._
import gruenewa.grid.{Grid, Discovery}
/** Sample entry point: listens for grid discovery announcements for ten
  * seconds and smoke-tests every node that announces itself. */
object Simple {

  def main(args: Array[String]) {
    // Invoked once per discovered node; dispatches a trivial job and reports
    // whether the round trip succeeded.
    val onDiscovered = (host: String, port: Int) => {
      printf("Recieved message from (%s,%d). Testing it...\\n", host, port)
      using(Grid.startSession()) { session =>
        val outcome = session.dispatch[Int,Int]((host,port)){1+}(99)
        outcome match {
          case Right(result) => printf("Hurra, it works! 99 + 1 = %d\\n", result)
          case Left(error) => println("Oops, a problem... " + error)
        }
      }
    }

    // Keep the discovery listener alive for ten seconds, then shut down.
    using(Discovery.startListener(onDiscovered)) { _ =>
      Thread.sleep(10 * 1000)
    }
  }
}
| gruenewa/gruenewa-grid | samples/Simple.scala | Scala | gpl-3.0 | 597 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import java.util.UUID
import scala.collection.JavaConverters._
import org.apache.spark.sql.{AnalysisException, SaveMode}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{MultiInstanceRelation, NamedRelation}
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Expression}
import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, Statistics}
import org.apache.spark.sql.sources.DataSourceRegister
import org.apache.spark.sql.sources.v2.{DataSourceOptions, DataSourceV2, ReadSupport, WriteSupport}
import org.apache.spark.sql.sources.v2.reader.{DataSourceReader, SupportsReportStatistics}
import org.apache.spark.sql.sources.v2.writer.DataSourceWriter
import org.apache.spark.sql.types.StructType
/**
* A logical plan representing a data source v2 scan.
*
* @param source An instance of a [[DataSourceV2]] implementation.
* @param options The options for this scan. Used to create fresh [[DataSourceReader]].
* @param userSpecifiedSchema The user-specified schema for this scan. Used to create fresh
* [[DataSourceReader]].
*/
case class DataSourceV2Relation(
    source: DataSourceV2,
    output: Seq[AttributeReference],
    options: Map[String, String],
    tableIdent: Option[TableIdentifier] = None,
    userSpecifiedSchema: Option[StructType] = None)
  extends LeafNode with MultiInstanceRelation with NamedRelation with DataSourceV2StringFormat {

  import DataSourceV2Relation._

  // Prefer the explicit table identifier; otherwise fall back to "<source>:unknown".
  override def name: String = {
    tableIdent.map(_.unquotedString).getOrElse(s"${source.name}:unknown")
  }

  // Nothing is pushed down on this relation itself.
  override def pushedFilters: Seq[Expression] = Seq.empty

  override def simpleString: String = "RelationV2 " + metadataString

  // A fresh reader/writer is created on every call (see class scaladoc).
  def newReader(): DataSourceReader = source.createReader(options, userSpecifiedSchema)

  def newWriter(): DataSourceWriter = source.createWriter(options, schema)

  // Use source-reported statistics when available; otherwise the session's
  // default size estimate.
  override def computeStats(): Statistics = newReader match {
    case r: SupportsReportStatistics =>
      Statistics(sizeInBytes = r.getStatistics.sizeInBytes().orElse(conf.defaultSizeInBytes))
    case _ =>
      Statistics(sizeInBytes = conf.defaultSizeInBytes)
  }

  // Re-create output attributes with fresh expression ids (self-join support).
  override def newInstance(): DataSourceV2Relation = {
    copy(output = output.map(_.newInstance()))
  }
}
/**
* A specialization of [[DataSourceV2Relation]] with the streaming bit set to true.
*
* Note that, this plan has a mutable reader, so Spark won't apply operator push-down for this plan,
* to avoid making the plan mutable. We should consolidate this plan and [[DataSourceV2Relation]]
* after we figure out how to apply operator push-down for streaming data sources.
*/
case class StreamingDataSourceV2Relation(
    output: Seq[AttributeReference],
    source: DataSourceV2,
    options: Map[String, String],
    reader: DataSourceReader)
  extends LeafNode with MultiInstanceRelation with DataSourceV2StringFormat {

  override def isStreaming: Boolean = true

  override def simpleString: String = "Streaming RelationV2 " + metadataString

  override def pushedFilters: Seq[Expression] = Nil

  override def newInstance(): LogicalPlan = copy(output = output.map(_.newInstance()))

  // TODO: unify the equal/hashCode implementation for all data source v2 query plans.
  override def equals(other: Any): Boolean = other match {
    case other: StreamingDataSourceV2Relation =>
      output == other.output && reader.getClass == other.reader.getClass && options == other.options
    case _ => false
  }

  // NOTE(review): equals compares `reader.getClass` but never `source`, while
  // hashCode folds in `source` — confirm equal instances always share a source,
  // otherwise the equals/hashCode contract can break.
  override def hashCode(): Int = {
    Seq(output, source, options).hashCode()
  }

  // Same statistics strategy as DataSourceV2Relation, but using the held reader.
  override def computeStats(): Statistics = reader match {
    case r: SupportsReportStatistics =>
      Statistics(sizeInBytes = r.getStatistics.sizeInBytes().orElse(conf.defaultSizeInBytes))
    case _ =>
      Statistics(sizeInBytes = conf.defaultSizeInBytes)
  }
}
object DataSourceV2Relation {

  // Convenience views over a raw DataSourceV2 instance.
  private implicit class SourceHelpers(source: DataSourceV2) {

    // Downcast to ReadSupport; analysis error when the source cannot be read.
    def asReadSupport: ReadSupport = {
      source match {
        case support: ReadSupport =>
          support
        case _ =>
          throw new AnalysisException(s"Data source is not readable: $name")
      }
    }

    // Downcast to WriteSupport; analysis error when the source cannot be written.
    def asWriteSupport: WriteSupport = {
      source match {
        case support: WriteSupport =>
          support
        case _ =>
          throw new AnalysisException(s"Data source is not writable: $name")
      }
    }

    // Human-readable name: the registered short name when available, else the
    // implementation class's simple name.
    def name: String = {
      source match {
        case registered: DataSourceRegister =>
          registered.shortName()
        case _ =>
          source.getClass.getSimpleName
      }
    }

    // Builds a reader, honoring a user-specified schema when one is given.
    def createReader(
        options: Map[String, String],
        userSpecifiedSchema: Option[StructType]): DataSourceReader = {
      val v2Options = new DataSourceOptions(options.asJava)
      userSpecifiedSchema match {
        case Some(s) =>
          asReadSupport.createReader(s, v2Options)
        case _ =>
          asReadSupport.createReader(v2Options)
      }
    }

    // Builds an Append-mode writer under a random job id.
    def createWriter(
        options: Map[String, String],
        schema: StructType): DataSourceWriter = {
      val v2Options = new DataSourceOptions(options.asJava)
      asWriteSupport.createWriter(UUID.randomUUID.toString, schema, SaveMode.Append, v2Options).get
    }
  }

  // Builds a relation whose output attributes come from a freshly created
  // reader's schema.
  def create(
      source: DataSourceV2,
      options: Map[String, String],
      tableIdent: Option[TableIdentifier] = None,
      userSpecifiedSchema: Option[StructType] = None): DataSourceV2Relation = {
    val reader = source.createReader(options, userSpecifiedSchema)
    // Prefer an explicit identifier; otherwise derive one from the options map.
    val ident = tableIdent.orElse(tableFromOptions(options))
    DataSourceV2Relation(
      source, reader.readSchema().toAttributes, options, ident, userSpecifiedSchema)
  }

  // Reads the optional table/database keys out of the options map.
  private def tableFromOptions(options: Map[String, String]): Option[TableIdentifier] = {
    options
      .get(DataSourceOptions.TABLE_KEY)
      .map(TableIdentifier(_, options.get(DataSourceOptions.DATABASE_KEY)))
  }
}
| rikima/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Relation.scala | Scala | apache-2.0 | 6,804 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package runtime
/** Value-class enrichment of `Char`: integral-proxy arithmetic support plus
  * delegations to `java.lang.Character` classification and case-mapping. */
final class RichChar(val self: Char) extends AnyVal with IntegralProxy[Char] {
  protected def num = scala.math.Numeric.CharIsIntegral
  protected def ord = scala.math.Ordering.Char

  // Numeric widenings of the underlying character code.
  override def doubleValue = self.toDouble
  override def floatValue = self.toFloat
  override def longValue = self.toLong
  override def intValue = self.toInt
  override def byteValue = self.toByte
  override def shortValue = self.toShort

  override def isValidChar = true

  // These method are all overridden and redefined to call out to scala.math to avoid 3 allocations:
  // the primitive boxing, the value class boxing and instantiation of the Numeric num.
  // We'd like to redefine signum and sign too but forwards binary compatibility doesn't allow us to.
  override def abs: Char = self
  override def max(that: Char): Char = math.max(self.toInt, that.toInt).toChar
  override def min(that: Char): Char = math.min(self.toInt, that.toInt).toChar

  // Digit value of this character in the maximum radix (36), or -1 if none.
  def asDigit: Int = Character.digit(self, Character.MAX_RADIX)

  // Character-classification delegations to java.lang.Character.
  def isControl: Boolean = Character.isISOControl(self)
  def isDigit: Boolean = Character.isDigit(self)
  def isLetter: Boolean = Character.isLetter(self)
  def isLetterOrDigit: Boolean = Character.isLetterOrDigit(self)
  def isWhitespace: Boolean = Character.isWhitespace(self)
  def isSpaceChar: Boolean = Character.isSpaceChar(self)
  def isHighSurrogate: Boolean = Character.isHighSurrogate(self)
  def isLowSurrogate: Boolean = Character.isLowSurrogate(self)
  def isSurrogate: Boolean = isHighSurrogate || isLowSurrogate
  def isUnicodeIdentifierStart: Boolean = Character.isUnicodeIdentifierStart(self)
  def isUnicodeIdentifierPart: Boolean = Character.isUnicodeIdentifierPart(self)
  def isIdentifierIgnorable: Boolean = Character.isIdentifierIgnorable(self)
  def isMirrored: Boolean = Character.isMirrored(self)

  def isLower: Boolean = Character.isLowerCase(self)
  def isUpper: Boolean = Character.isUpperCase(self)
  def isTitleCase: Boolean = Character.isTitleCase(self)

  // Single-character case mappings (locale-insensitive Character variants).
  def toLower: Char = Character.toLowerCase(self)
  def toUpper: Char = Character.toUpperCase(self)
  def toTitleCase: Char = Character.toTitleCase(self)

  def getType: Int = Character.getType(self)
  def getNumericValue: Int = Character.getNumericValue(self)
  def getDirectionality: Byte = Character.getDirectionality(self)
  def reverseBytes: Char = Character.reverseBytes(self)

  // Java 5 Character methods not added:
  //
  // public static boolean isDefined(char ch)
  // public static boolean isJavaIdentifierStart(char ch)
  // public static boolean isJavaIdentifierPart(char ch)
}
| lrytz/scala | src/library/scala/runtime/RichChar.scala | Scala | apache-2.0 | 3,247 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// Licence: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.sexp.formats
import org.ensime.sexp._
/** S-expression formats for primitive and basic Scala types. */
trait BasicFormats {

  // Unit carries no information, so it round-trips through nil.
  implicit object UnitFormat extends SexpFormat[Unit] {
    def write(x: Unit) = SexpNil
    def read(value: Sexp) = ()
  }

  implicit object BooleanFormat extends SexpFormat[Boolean] {
    // all non-nil Sexps are technically "true"
    private val SexpTrue = SexpSymbol("t")
    def write(x: Boolean) = if (x) SexpTrue else SexpNil
    def read(value: Sexp) = value match {
      case SexpNil => false
      case _ => true
    }
  }

  implicit object CharFormat extends SexpFormat[Char] {
    def write(x: Char) = SexpChar(x)
    def read(value: Sexp) = value match {
      case SexpChar(x) => x
      case x => deserializationError(x)
    }
  }

  implicit object StringFormat extends SexpFormat[String] {
    def write(x: String) = SexpString(x)
    def read(value: Sexp) = value match {
      case SexpString(x) => x
      case x => deserializationError(x)
    }
  }

  // val allows override
  // Symbols are serialized as strings here; see SymbolAltFormat below for the
  // symbol-literal alternative.
  implicit val SymbolFormat = new SexpFormat[Symbol] {
    def write(x: Symbol): Sexp = SexpString(x.name)
    def read(value: Sexp): Symbol = value match {
      case SexpString(x) => Symbol(x)
      case x => deserializationError(x)
    }
  }

  /**
   * NOTE Emacs will not be able to correctly interpret arbitrary
   * precision numbers because - unlike other lisps - it uses a
   * reduced form of C double/integer precision. A round-trip via
   * Emacs for very large numbers will return `SexpPosInf`.
   *
   * The built-in Emacs library `'calc` has a few data formats
   * http://www.gnu.org/software/emacs/manual/html_mono/calc.html#Data-Type-Formats
   * but they fall short and require specific interpretation within
   * the `'calc` framework.
   *
   * If you need Emacs-specific support for arbitrary precision
   * numbers, override this implementation with one that adheres to
   * the arbitrary precision framework of your choice.
   */
  implicit def ViaBigDecimalFormat[T](implicit c: BigDecimalConvertor[T]) =
    new SexpFormat[T] {
      // NaN and the infinities have dedicated sexp atoms; everything else goes
      // through BigDecimal.
      def write(x: T): Sexp =
        if (c.isNaN(x)) SexpNaN
        else if (c.isPosInf(x)) SexpPosInf
        else if (c.isNegInf(x)) SexpNegInf
        else SexpNumber(c.to(x))

      def read(value: Sexp): T = value match {
        case SexpNumber(x) => c.from(x)
        case SexpNaN => c.NaN
        case SexpPosInf => c.PosInf
        case SexpNegInf => c.NegInf
        case x => deserializationError(x)
      }
    }

  // boilerplate for performance (uses ViaBigDecimal)
  implicit val IntFormat = SexpFormat[Int]
  implicit val LongFormat = SexpFormat[Long]
  implicit val FloatFormat = SexpFormat[Float]
  implicit val DoubleFormat = SexpFormat[Double]
  implicit val ByteFormat = SexpFormat[Byte]
  implicit val ShortFormat = SexpFormat[Short]
  implicit val BigIntFormat = SexpFormat[BigInt]
  implicit val BigDecimalFormat = SexpFormat[BigDecimal]
}
/** Mix in after BasicFormats to serialize Scala Symbols as sexp symbols
  * instead of strings. */
trait SymbolAltFormat {
  this: BasicFormats =>
  override implicit val SymbolFormat = new SexpFormat[Symbol] {
    def write(x: Symbol): Sexp = SexpSymbol(x.name)
    def read(value: Sexp): Symbol = value match {
      case SexpSymbol(x) => Symbol(x)
      case x => deserializationError(x)
    }
  }
}
| j-mckitrick/ensime-sbt | src/sbt-test/ensime-sbt/ensime-server/s-express/src/main/scala/org/ensime/sexp/formats/BasicFormats.scala | Scala | apache-2.0 | 3,335 |
package com.outr.arango
import com.outr.arango.api.{APIDocumentCollection, APIDocumentDocumentHandle}
import com.outr.arango.model.ArangoCode
import io.circe.Json
import io.youi.client.HttpClient
import profig.JsonUtil
import scala.concurrent.{ExecutionContext, Future}
/**
  * Document-level operations (create / read / delete) against one ArangoDB
  * collection. Every method optionally joins an existing stream transaction
  * via its `transactionId` parameter.
  */
class ArangoDocument(client: HttpClient, dbName: String, collectionName: String) {
  // Resolves the HTTP client for a request: attaches the "x-arango-trx-id"
  // header when a stream-transaction id is supplied, so the operation runs
  // inside that transaction. Extracted because the same match was previously
  // copy-pasted into every method below.
  private def clientFor(transactionId: Option[String]): HttpClient = transactionId match {
    case Some(tId) => client.header("x-arango-trx-id", tId)
    case None => client
  }

  /**
    * Creates document(s) from raw JSON. `document` may be a single JSON object
    * or an array of objects; the result always has one DocumentInsert per
    * created document.
    */
  def create(document: Json,
             transactionId: Option[String] = None,
             waitForSync: Boolean = false,
             returnNew: Boolean = true,
             returnOld: Boolean = false,
             silent: Boolean = false,
             overwrite: Boolean = false)
            (implicit ec: ExecutionContext): Future[List[DocumentInsert]] = {
    APIDocumentCollection.post(
      client = clientFor(transactionId),
      collection = collectionName,
      body = document,
      waitForSync = Some(waitForSync),
      returnNew = Some(returnNew),
      returnOld = Some(returnOld),
      silent = Some(silent),
      overwrite = Some(overwrite)
    ).map { json =>
      // The API echoes either a single object or an array, mirroring the body.
      json.asArray match {
        case Some(array) => array.toList.map(json => JsonUtil.fromJson[DocumentInsert](Id.update(json)))
        case None => List(JsonUtil.fromJson[DocumentInsert](Id.update(json)))
      }
    }
  }

  /** Serializes and inserts a single document. */
  def insertOne[D](document: D,
                   transactionId: Option[String] = None,
                   waitForSync: Boolean = false,
                   returnNew: Boolean = false,
                   returnOld: Boolean = false,
                   silent: Boolean = false)
                  (implicit ec: ExecutionContext, serialization: Serialization[D]): Future[DocumentInsert] = {
    val json = serialization.toJson(document)
    create(json, transactionId, waitForSync, returnNew, returnOld, silent)(ec).map(_.head)
  }

  /** Like insertOne, but replaces an existing document with the same key. */
  def upsertOne[D](document: D,
                   transactionId: Option[String] = None,
                   waitForSync: Boolean = false,
                   returnNew: Boolean = false,
                   returnOld: Boolean = false,
                   silent: Boolean = false)
                  (implicit ec: ExecutionContext, serialization: Serialization[D]): Future[DocumentInsert] = {
    val json = serialization.toJson(document)
    create(json, transactionId, waitForSync, returnNew, returnOld, silent, overwrite = true)(ec).map(_.head)
  }

  /** Serializes and inserts a batch of documents in a single request. */
  def insert[D](documents: List[D],
                transactionId: Option[String] = None,
                waitForSync: Boolean = false,
                returnNew: Boolean = false,
                returnOld: Boolean = false,
                silent: Boolean = false)
               (implicit ec: ExecutionContext, serialization: Serialization[D]): Future[List[DocumentInsert]] = {
    val json = Json.arr(documents.map(serialization.toJson): _*)
    create(json, transactionId, waitForSync, returnNew, returnOld, silent)(ec)
  }

  /** Like insert, but replaces existing documents with matching keys. */
  def upsert[D](documents: List[D],
                transactionId: Option[String] = None,
                waitForSync: Boolean = false,
                returnNew: Boolean = false,
                returnOld: Boolean = false,
                silent: Boolean = false)
               (implicit ec: ExecutionContext, serialization: Serialization[D]): Future[List[DocumentInsert]] = {
    val json = Json.arr(documents.map(serialization.toJson): _*)
    create(json, transactionId, waitForSync, returnNew, returnOld, silent, overwrite = true)(ec)
  }

  /**
    * Fetches a document by id. Yields None when ArangoDB reports
    * "document not found"; all other failures propagate.
    */
  def get[D](id: Id[D], transactionId: Option[String] = None)(implicit ec: ExecutionContext, serialization: Serialization[D]): Future[Option[D]] = {
    APIDocumentDocumentHandle
      .get(clientFor(transactionId), collectionName, id._key)
      .map(serialization.fromJson)
      .map(Some.apply)
      .recover {
        case exc: ArangoException if exc.error.errorCode == ArangoCode.ArangoDocumentNotFound => None
      }
  }

  /** Deletes one document by handle, returning its id. */
  def deleteOne[D](id: Id[D],
                   transactionId: Option[String] = None,
                   waitForSync: Boolean = false,
                   returnOld: Boolean = false,
                   silent: Boolean = false)
                  (implicit ec: ExecutionContext): Future[Id[D]] = {
    APIDocumentDocumentHandle.delete(
      client = clientFor(transactionId),
      collectionName = id.collection,
      documentHandle = id.value,
      waitForSync = Some(waitForSync),
      returnOld = Some(returnOld),
      silent = Some(silent),
      IfMatch = None
    ).map { json =>
      Id.extract[D](json)
    }
  }

  /** Deletes a batch of documents by id, returning the deleted ids. */
  def delete[D](ids: List[Id[D]],
                transactionId: Option[String] = None,
                waitForSync: Boolean = false,
                returnOld: Boolean = false,
                ignoreRevs: Boolean = true)
               (implicit ec: ExecutionContext): Future[List[Id[D]]] = {
    APIDocumentCollection.delete(
      client = clientFor(transactionId),
      body = JsonUtil.toJson(ids),
      collection = collectionName,
      waitForSync = Some(waitForSync),
      returnOld = Some(returnOld),
      ignoreRevs = Some(ignoreRevs)
    ).map { json =>
      json.asArray.getOrElse(throw new RuntimeException(s"Not an array: $json")).toList.map(Id.extract[D])
    }
  }
}
package org.oxygen.redio.runtime
import java.util.concurrent.ThreadFactory
// ThreadFactory that wraps every submitted task in a ScriptTask thread.
object ThreadManager extends ThreadFactory
{
	override def newThread(task: Runnable): Thread = new ScriptTask(task)
}
| chenzhuoyu/RedIO | src/main/scala/org/oxygen/redio/runtime/ThreadManager.scala | Scala | lgpl-2.1 | 195 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.{MockFrs102AccountsRetriever, AccountsMoneyValidationFixture}
import uk.gov.hmrc.ct.accounts.frs102.retriever.Frs102AccountsBoxRetriever
// Validation spec for box AC64: monetary value with a lower bound of 0.
class AC64Spec extends AccountsMoneyValidationFixture[Frs102AccountsBoxRetriever] with MockFrs102AccountsRetriever {
  testAccountsMoneyValidationWithMin("AC64", 0, AC64.apply)
}
| liquidarmour/ct-calculations | src/test/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC64Spec.scala | Scala | apache-2.0 | 1,001 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.types
import scala.reflect.runtime.universe.typeTag
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.ScalaReflectionLock
import org.apache.spark.sql.catalyst.expressions.Expression
/**
* :: DeveloperApi ::
* The data type representing `java.math.BigDecimal` values.
* A Decimal that must have fixed precision (the maximum number of digits) and scale (the number
* of digits on right side of dot).
*
* The precision can be up to 38, scale can also be up to 38 (less or equal to precision).
*
* The default precision and scale is (10, 0).
*
* Please use [[DataTypes.createDecimalType()]] to create a specific instance.
*/
@DeveloperApi
case class DecimalType(precision: Int, scale: Int) extends FractionalType {

  if (scale > precision) {
    throw new AnalysisException(
      s"Decimal scale ($scale) cannot be greater than precision ($precision).")
  }

  if (precision > DecimalType.MAX_PRECISION) {
    throw new AnalysisException(s"DecimalType can only support precision up to 38")
  }

  // default constructor for Java
  def this(precision: Int) = this(precision, 0)
  def this() = this(10)

  private[sql] type InternalType = Decimal
  @transient private[sql] lazy val tag = ScalaReflectionLock.synchronized { typeTag[InternalType] }
  private[sql] val numeric = Decimal.DecimalIsFractional
  private[sql] val fractional = Decimal.DecimalIsFractional
  private[sql] val ordering = Decimal.DecimalIsFractional
  private[sql] val asIntegral = Decimal.DecimalAsIfIntegral

  override def typeName: String = s"decimal($precision,$scale)"

  override def toString: String = s"DecimalType($precision,$scale)"

  // Use Locale.ROOT so the SQL string is stable regardless of the JVM default
  // locale (e.g. under a Turkish locale, "decimal" would otherwise upper-case
  // to "DECİMAL" with a dotted capital I).
  override def sql: String = typeName.toUpperCase(java.util.Locale.ROOT)

  /**
   * Returns whether this DecimalType is wider than `other`. If yes, it means `other`
   * can be casted into `this` safely without losing any precision or range.
   */
  private[sql] def isWiderThan(other: DataType): Boolean = other match {
    case dt: DecimalType =>
      (precision - scale) >= (dt.precision - dt.scale) && scale >= dt.scale
    case dt: IntegralType =>
      isWiderThan(DecimalType.forType(dt))
    case _ => false
  }

  /**
   * Returns whether this DecimalType is tighter than `other`. If yes, it means `this`
   * can be casted into `other` safely without losing any precision or range.
   */
  private[sql] def isTighterThan(other: DataType): Boolean = other match {
    case dt: DecimalType =>
      (precision - scale) <= (dt.precision - dt.scale) && scale <= dt.scale
    case dt: IntegralType =>
      isTighterThan(DecimalType.forType(dt))
    case _ => false
  }

  /**
   * The default size of a value of the DecimalType is 8 bytes (precision <= 18) or 16 bytes.
   */
  override def defaultSize: Int = if (precision <= Decimal.MAX_LONG_DIGITS) 8 else 16

  override def simpleString: String = s"decimal($precision,$scale)"

  private[spark] override def asNullable: DecimalType = this
}
/** Extra factory methods and pattern matchers for Decimals */
object DecimalType extends AbstractDataType {
  import scala.math.min

  val MAX_PRECISION = 38
  val MAX_SCALE = 38
  val SYSTEM_DEFAULT: DecimalType = DecimalType(MAX_PRECISION, 18)
  val USER_DEFAULT: DecimalType = DecimalType(10, 0)

  // The decimal types compatible with other numeric types
  private[sql] val ByteDecimal = DecimalType(3, 0)
  private[sql] val ShortDecimal = DecimalType(5, 0)
  private[sql] val IntDecimal = DecimalType(10, 0)
  private[sql] val LongDecimal = DecimalType(20, 0)
  private[sql] val FloatDecimal = DecimalType(14, 7)
  private[sql] val DoubleDecimal = DecimalType(30, 15)
  private[sql] val BigIntDecimal = DecimalType(38, 0)

  // Decimal wide enough to hold a value of the given primitive numeric type.
  // NOTE(review): this match is non-exhaustive — any other DataType raises a
  // MatchError; callers appear to pass only the types listed. Confirm before
  // widening its use.
  private[sql] def forType(dataType: DataType): DecimalType = dataType match {
    case ByteType => ByteDecimal
    case ShortType => ShortDecimal
    case IntegerType => IntDecimal
    case LongType => LongDecimal
    case FloatType => FloatDecimal
    case DoubleType => DoubleDecimal
  }

  // Clamps precision/scale to the supported maxima.
  private[sql] def bounded(precision: Int, scale: Int): DecimalType = {
    DecimalType(min(precision, MAX_PRECISION), min(scale, MAX_SCALE))
  }

  override private[sql] def defaultConcreteType: DataType = SYSTEM_DEFAULT

  override private[sql] def acceptsType(other: DataType): Boolean = {
    other.isInstanceOf[DecimalType]
  }

  override private[sql] def simpleString: String = "decimal"

  // Matches any DecimalType, extracting (precision, scale).
  private[sql] object Fixed {
    def unapply(t: DecimalType): Option[(Int, Int)] = Some((t.precision, t.scale))
  }

  // Matches expressions whose data type is a DecimalType.
  private[sql] object Expression {
    def unapply(e: Expression): Option[(Int, Int)] = e.dataType match {
      case t: DecimalType => Some((t.precision, t.scale))
      case _ => None
    }
  }

  /**
   * Returns if dt is a DecimalType that fits inside an int
   */
  def is32BitDecimalType(dt: DataType): Boolean = {
    dt match {
      case t: DecimalType =>
        t.precision <= Decimal.MAX_INT_DIGITS
      case _ => false
    }
  }

  /**
   * Returns if dt is a DecimalType that fits inside a long
   */
  def is64BitDecimalType(dt: DataType): Boolean = {
    dt match {
      case t: DecimalType =>
        t.precision <= Decimal.MAX_LONG_DIGITS
      case _ => false
    }
  }

  /**
   * Returns if dt is a DecimalType that doesn't fit inside a long
   */
  def isByteArrayDecimalType(dt: DataType): Boolean = {
    dt match {
      case t: DecimalType =>
        t.precision > Decimal.MAX_LONG_DIGITS
      case _ => false
    }
  }

  def unapply(t: DataType): Boolean = t.isInstanceOf[DecimalType]

  def unapply(e: Expression): Boolean = e.dataType.isInstanceOf[DecimalType]
}
| gioenn/xSpark | sql/catalyst/src/main/scala/org/apache/spark/sql/types/DecimalType.scala | Scala | apache-2.0 | 6,453 |
package io.peregrine.view
import io.peregrine._
// Spec for MustacheViewRenderer using templates under /mustache.
class MustacheViewRendererSpec extends ShouldSpec {

  // Renderer pinned to the test template directory.
  val renderer = new MustacheViewRenderer {
    override lazy val location: String = "/mustache"
  }

  "#format" should "be 'mustache'" in {
    renderer.format should equal("mustache")
  }

  "#render" should "form a correct template based on a string" in {
    val result = renderer.render("test_str", View("mustache", "test_str", "STRING_TEST"))
    removeWhiteSpaces(result) should equal("Result expected: STRING_TEST")
  }

  "#render" should "form a correct template based on an object" in {
    case class User(name: String, age: Int, opt: Option[String] = None)
    val u1 = User("John", 12, None)

    val result = renderer.render("test_obj", View("mustache", "test_obj", u1))
    // Fixed: `"""..."""stripMargin` relied on dot-less (postfix-style) method
    // application; use an explicit `.` like the other assertions below.
    removeWhiteSpaces(result) should equal(
      """Name: John
        |Age: 12
        |Opt:""".stripMargin
    )

    val u2 = User("Frank", 21, Some("wow"))
    val result2 = renderer.render("test_obj", View("mustache", "test_obj", u2))
    removeWhiteSpaces(result2) should equal(
      """Name: Frank
        |Age: 21
        |Opt: wow
      """.stripMargin.trim)
  }

  "#render" should "form a correct template based on an collection" in {
    case class User(name: String, age: Int, opt: Option[String] = None)
    val seq = Seq(
      User("John", 12, None),
      User("Frank", 21, Some("wow"))
    )
    val result = renderer.render("test_seq", View("mustache", "test_seq", seq))
    removeWhiteSpaces(result) should equal(
      """Name: John
        | Age: 12
        | Opt:
        | Name: Frank
        | Age: 21
        | Opt: wow
      """.stripMargin.trim)
  }
}
| dvarelap/stilt | src/test/scala/io/peregrine/view/HbsViewRendererSpec.scala | Scala | apache-2.0 | 1,676 |
package text.kanji
/**
* @author K.Sakamoto
* Created on 2016/07/26
*/
object PrimarySchool5thGradeKanjiCharacter extends KanjiCharacter {
  // Kanji list loaded via readKanjiCSV from the "primary_school_5th_grade" source.
  override val kanji: Seq[String] = readKanjiCSV("primary_school_5th_grade")
}
| ktr-skmt/FelisCatusZero | src/main/scala/text/kanji/PrimarySchool5thGradeKanjiCharacter.scala | Scala | apache-2.0 | 233 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.api.python.PythonEvalType
import org.apache.spark.sql.catalyst.expressions.{Alias, Ascending, AttributeReference, PythonUDF, SortOrder}
import org.apache.spark.sql.catalyst.plans.logical.{Expand, Generate, ScriptInputOutputSchema, ScriptTransformation, Window => WindowPlan}
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.{count, explode, sum, year}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.test.SQLTestData.TestData
import org.apache.spark.sql.types.{IntegerType, LongType, StructField, StructType}
class DataFrameSelfJoinSuite extends QueryTest with SharedSparkSession {
import testImplicits._
  // Self-join via USING clause: the join key is merged into a single output column.
  test("join - join using self join") {
    val df = Seq(1, 2, 3).map(i => (i, i.toString)).toDF("int", "str")
    // self join
    checkAnswer(
      df.join(df, "int"),
      Row(1, "1", "1") :: Row(2, "2", "2") :: Row(3, "3", "3") :: Nil)
  }

  // Aliasing both sides lets qualified names ($"df1.key") disambiguate the self join.
  test("join - self join") {
    val df1 = testData.select(testData("key")).as("df1")
    val df2 = testData.select(testData("key")).as("df2")
    checkAnswer(
      df1.join(df2, $"df1.key" === $"df2.key"),
      sql("SELECT a.key, b.key FROM testData a JOIN testData b ON a.key = b.key")
        .collect().toSeq)
  }

  // Column resolution in the join condition is case-insensitive ("key" vs "Key").
  test("join - self join auto resolve ambiguity with case insensitivity") {
    val df = Seq((1, "1"), (2, "2")).toDF("key", "value")
    checkAnswer(
      df.join(df, df("key") === df("Key")),
      Row(1, "1", 1, "1") :: Row(2, "2", 2, "2") :: Nil)

    checkAnswer(
      df.join(df.filter($"value" === "2"), df("key") === df("Key")),
      Row(2, "2", 2, "2") :: Nil)
  }

  // After aliasing, grouping can reference either side of the self join by qualifier.
  test("join - using aliases after self join") {
    val df = Seq(1, 2, 3).map(i => (i, i.toString)).toDF("int", "str")
    checkAnswer(
      df.as("x").join(df.as("y"), $"x.str" === $"y.str").groupBy("x.str").count(),
      Row("1", 1) :: Row("2", 1) :: Row("3", 1) :: Nil)

    checkAnswer(
      df.as("x").join(df.as("y"), $"x.str" === $"y.str").groupBy("y.str").count(),
      Row("1", 1) :: Row("2", 1) :: Row("3", 1) :: Nil)
  }

  // SPARK-6231: an apparently trivial df("key") === df("key") condition is auto-resolved
  // so that the left side compares against the right side instead of itself.
  test("[SPARK-6231] join - self join auto resolve ambiguity") {
    val df = Seq((1, "1"), (2, "2")).toDF("key", "value")
    checkAnswer(
      df.join(df, df("key") === df("key")),
      Row(1, "1", 1, "1") :: Row(2, "2", 2, "2") :: Nil)

    checkAnswer(
      df.join(df.filter($"value" === "2"), df("key") === df("key")),
      Row(2, "2", 2, "2") :: Nil)

    checkAnswer(
      df.join(df, df("key") === df("key") && df("value") === 1),
      Row(1, "1", 1, "1") :: Nil)

    val left = df.groupBy("key").agg(count("*"))
    val right = df.groupBy("key").agg(sum("key"))
    checkAnswer(
      left.join(right, left("key") === right("key")),
      Row(1, 1, 1, 1) :: Row(2, 1, 2, 2) :: Nil)
  }
private def assertAmbiguousSelfJoin(df: => DataFrame): Unit = {
val e = intercept[AnalysisException](df)
assert(e.message.contains("ambiguous"))
}
  // SPARK-28344: with the fail-fast flag off, an ambiguous df1("id") > df2("id")
  // condition silently resolves both sides to df1; with it on, analysis must fail.
  test("SPARK-28344: fail ambiguous self join - column ref in join condition") {
    val df1 = spark.range(3)
    val df2 = df1.filter($"id" > 0)

    withSQLConf(
      SQLConf.FAIL_AMBIGUOUS_SELF_JOIN_ENABLED.key -> "false",
      SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
      // `df1("id") > df2("id")` is always false.
      checkAnswer(df1.join(df2, df1("id") > df2("id")), Nil)

      // Alias the dataframe and use qualified column names can fix ambiguous self-join.
      val aliasedDf1 = df1.alias("left")
      val aliasedDf2 = df2.as("right")
      checkAnswer(
        aliasedDf1.join(aliasedDf2, $"left.id" > $"right.id"),
        Seq(Row(2, 1)))
    }

    withSQLConf(
      SQLConf.FAIL_AMBIGUOUS_SELF_JOIN_ENABLED.key -> "true",
      SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
      assertAmbiguousSelfJoin(df1.join(df2, df1("id") > df2("id")))
    }
  }

  // Column references obtained through Dataset.colRegex get the same treatment.
  test("SPARK-28344: fail ambiguous self join - Dataset.colRegex as column ref") {
    val df1 = spark.range(3)
    val df2 = df1.filter($"id" > 0)

    withSQLConf(
      SQLConf.FAIL_AMBIGUOUS_SELF_JOIN_ENABLED.key -> "true",
      SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
      assertAmbiguousSelfJoin(df1.join(df2, df1.colRegex("id") > df2.colRegex("id")))
    }
  }

  // Nested-field references ("a.b") through Dataset.col are subject to the same check.
  test("SPARK-28344: fail ambiguous self join - Dataset.col with nested field") {
    val df1 = spark.read.json(Seq("""{"a": {"b": 1, "c": 1}}""").toDS())
    val df2 = df1.filter($"a.b" > 0)

    withSQLConf(
      SQLConf.FAIL_AMBIGUOUS_SELF_JOIN_ENABLED.key -> "true",
      SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
      assertAmbiguousSelfJoin(df1.join(df2, df1("a.b") > df2("a.c")))
    }
  }
  // Ambiguity can also hide in a Project after the join, not just in the condition.
  test("SPARK-28344: fail ambiguous self join - column ref in Project") {
    val df1 = spark.range(3)
    val df2 = df1.filter($"id" > 0)

    withSQLConf(
      SQLConf.FAIL_AMBIGUOUS_SELF_JOIN_ENABLED.key -> "false",
      SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
      // `df2("id")` actually points to the column of `df1`.
      checkAnswer(df1.join(df2).select(df2("id")), Seq(0, 0, 1, 1, 2, 2).map(Row(_)))

      // Alias the dataframe and use qualified column names can fix ambiguous self-join.
      val aliasedDf1 = df1.alias("left")
      val aliasedDf2 = df2.as("right")
      checkAnswer(
        aliasedDf1.join(aliasedDf2).select($"right.id"),
        Seq(1, 1, 1, 2, 2, 2).map(Row(_)))
    }

    withSQLConf(
      SQLConf.FAIL_AMBIGUOUS_SELF_JOIN_ENABLED.key -> "true",
      SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
      assertAmbiguousSelfJoin(df1.join(df2).select(df2("id")))
    }
  }

  // Detection also applies when more than two relations participate in the join.
  test("SPARK-28344: fail ambiguous self join - join three tables") {
    val df1 = spark.range(3)
    val df2 = df1.filter($"id" > 0)
    val df3 = df1.filter($"id" <= 2)
    val df4 = spark.range(1)

    withSQLConf(
      SQLConf.FAIL_AMBIGUOUS_SELF_JOIN_ENABLED.key -> "false",
      SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
      // `df2("id") < df3("id")` is always false
      checkAnswer(df1.join(df2).join(df3, df2("id") < df3("id")), Nil)
      // `df2("id")` actually points to the column of `df1`.
      checkAnswer(
        df1.join(df4).join(df2).select(df2("id")),
        Seq(0, 0, 1, 1, 2, 2).map(Row(_)))
      // `df4("id")` is not ambiguous.
      checkAnswer(
        df1.join(df4).join(df2).select(df4("id")),
        Seq(0, 0, 0, 0, 0, 0).map(Row(_)))

      // Alias the dataframe and use qualified column names can fix ambiguous self-join.
      val aliasedDf1 = df1.alias("x")
      val aliasedDf2 = df2.as("y")
      val aliasedDf3 = df3.as("z")
      checkAnswer(
        aliasedDf1.join(aliasedDf2).join(aliasedDf3, $"y.id" < $"z.id"),
        Seq(Row(0, 1, 2), Row(1, 1, 2), Row(2, 1, 2)))
      checkAnswer(
        aliasedDf1.join(df4).join(aliasedDf2).select($"y.id"),
        Seq(1, 1, 1, 2, 2, 2).map(Row(_)))
    }

    withSQLConf(
      SQLConf.FAIL_AMBIGUOUS_SELF_JOIN_ENABLED.key -> "true",
      SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
      assertAmbiguousSelfJoin(df1.join(df2).join(df3, df2("id") < df3("id")))
      assertAmbiguousSelfJoin(df1.join(df4).join(df2).select(df2("id")))
    }
  }
  // The fail-fast check must not flag legitimate single-DataFrame queries.
  test("SPARK-28344: don't fail if there is no ambiguous self join") {
    withSQLConf(
      SQLConf.FAIL_AMBIGUOUS_SELF_JOIN_ENABLED.key -> "true") {
      val df = Seq(1, 1, 2, 2).toDF("a")
      val w = Window.partitionBy(df("a"))
      // A window over a column of the same DataFrame is not a self join.
      checkAnswer(
        df.select(df("a").alias("x"), sum(df("a")).over(w)),
        Seq((1, 2), (1, 2), (2, 4), (2, 4)).map(Row.fromTuple))

      val joined = df.join(spark.range(1)).select($"a")
      checkAnswer(
        joined.select(joined("a").alias("x"), sum(joined("a")).over(w)),
        Seq((1, 2), (1, 2), (2, 4), (2, 4)).map(Row.fromTuple))
    }
  }

  // join() must not mutate the dataset_id tag on the logical plan, otherwise
  // DetectAmbiguousSelfJoin can no longer attribute columns to their origin Dataset.
  test("SPARK-33071/SPARK-33536: Avoid changing dataset_id of LogicalPlan in join() " +
    "to not break DetectAmbiguousSelfJoin") {
    val emp1 = Seq[TestData](
      TestData(1, "sales"),
      TestData(2, "personnel"),
      TestData(3, "develop"),
      TestData(4, "IT")).toDS()
    val emp2 = Seq[TestData](
      TestData(1, "sales"),
      TestData(2, "personnel"),
      TestData(3, "develop")).toDS()
    val emp3 = emp1.join(emp2, emp1("key") === emp2("key")).select(emp1("*"))
    assertAmbiguousSelfJoin(emp1.join(emp3, emp1.col("key") === emp3.col("key"),
      "left_outer").select(emp1.col("*"), emp3.col("key").as("e2")))
  }

  // Displaying a DataFrame is read-only with respect to its plan tags.
  test("df.show() should also not change dataset_id of LogicalPlan") {
    val df = Seq[TestData](
      TestData(1, "sales"),
      TestData(2, "personnel"),
      TestData(3, "develop"),
      TestData(4, "IT")).toDF()
    val ds_id1 = df.logicalPlan.getTagValue(Dataset.DATASET_ID_TAG)
    df.show(0)
    val ds_id2 = df.logicalPlan.getTagValue(Dataset.DATASET_ID_TAG)
    assert(ds_id1 === ds_id2)
  }
  // An apparently ambiguous reference is fine when only one side can actually
  // provide the attribute (df2 projected column "b" away).
  test("SPARK-34200: ambiguous column reference should consider attribute availability") {
    withTable("t") {
      sql("CREATE TABLE t USING json AS SELECT 1 a, 2 b")
      val df1 = spark.table("t")
      val df2 = df1.select("a")
      checkAnswer(df1.join(df2, df1("b") === 2), Row(1, 2, 1))
    }
  }

  // Verifies the metadata keys that DetectAmbiguousSelfJoin relies on: each Column
  // carries the id of the Dataset it was taken from plus its column position.
  test("SPARK-35454: __dataset_id and __col_position should be correctly set") {
    val ds = Seq[TestData](
      TestData(1, "sales"),
      TestData(2, "personnel"),
      TestData(3, "develop"),
      TestData(4, "IT")).toDS()
    var dsIdSetOpt = ds.logicalPlan.getTagValue(Dataset.DATASET_ID_TAG)
    assert(dsIdSetOpt.get.size === 1)
    var col1DsId = -1L
    val col1 = ds.col("key")
    col1.expr.foreach {
      case a: AttributeReference =>
        col1DsId = a.metadata.getLong(Dataset.DATASET_ID_KEY)
        assert(dsIdSetOpt.get.contains(col1DsId))
        assert(a.metadata.getLong(Dataset.COL_POS_KEY) === 0)
    }

    // toDF() creates a new Dataset, so the plan accumulates a second dataset id.
    val df = ds.toDF()
    dsIdSetOpt = df.logicalPlan.getTagValue(Dataset.DATASET_ID_TAG)
    assert(dsIdSetOpt.get.size === 2)
    var col2DsId = -1L
    val col2 = df.col("key")
    col2.expr.foreach {
      case a: AttributeReference =>
        col2DsId = a.metadata.getLong(Dataset.DATASET_ID_KEY)
        assert(dsIdSetOpt.get.contains(a.metadata.getLong(Dataset.DATASET_ID_KEY)))
        assert(a.metadata.getLong(Dataset.COL_POS_KEY) === 0)
    }
    assert(col1DsId !== col2DsId)
  }

  // Wrapping both sides in toDF() must not defeat the ambiguity check.
  test("SPARK-35454: fail ambiguous self join - toDF") {
    val df1 = spark.range(3).toDF()
    val df2 = df1.filter($"id" > 0).toDF()

    withSQLConf(
      SQLConf.FAIL_AMBIGUOUS_SELF_JOIN_ENABLED.key -> "true",
      SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
      assertAmbiguousSelfJoin(df1.join(df2, df1.col("id") > df2.col("id")))
    }
  }
  // Four-way variant of the three-table test above, with projections in between.
  test("SPARK-35454: fail ambiguous self join - join four tables") {
    val df1 = spark.range(3).select($"id".as("a"), $"id".as("b"))
    val df2 = df1.filter($"a" > 0).select("b")
    val df3 = df1.filter($"a" <= 2).select("b")
    val df4 = df1.filter($"b" <= 2)
    val df5 = spark.range(1)

    withSQLConf(
      SQLConf.FAIL_AMBIGUOUS_SELF_JOIN_ENABLED.key -> "false",
      SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
      // `df2("b") < df4("b")` is always false
      checkAnswer(df1.join(df2).join(df3).join(df4, df2("b") < df4("b")), Nil)
      // `df2("b")` actually points to the column of `df1`.
      checkAnswer(
        df1.join(df2).join(df5).join(df4).select(df2("b")),
        Seq(0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2).map(Row(_)))
      // `df5("id")` is not ambiguous.
      checkAnswer(
        df1.join(df5).join(df3).select(df5("id")),
        Seq(0, 0, 0, 0, 0, 0, 0, 0, 0).map(Row(_)))

      // Alias the dataframe and use qualified column names can fix ambiguous self-join.
      val aliasedDf1 = df1.alias("w")
      val aliasedDf2 = df2.as("x")
      val aliasedDf3 = df3.as("y")
      val aliasedDf4 = df3.as("z")
      checkAnswer(
        aliasedDf1.join(aliasedDf2).join(aliasedDf3).join(aliasedDf4, $"x.b" < $"y.b"),
        Seq(Row(0, 0, 1, 2, 0), Row(0, 0, 1, 2, 1), Row(0, 0, 1, 2, 2),
          Row(1, 1, 1, 2, 0), Row(1, 1, 1, 2, 1), Row(1, 1, 1, 2, 2),
          Row(2, 2, 1, 2, 0), Row(2, 2, 1, 2, 1), Row(2, 2, 1, 2, 2)))
      checkAnswer(
        aliasedDf1.join(df5).join(aliasedDf3).select($"y.b"),
        Seq(0, 0, 0, 1, 1, 1, 2, 2, 2).map(Row(_)))
    }

    withSQLConf(
      SQLConf.FAIL_AMBIGUOUS_SELF_JOIN_ENABLED.key -> "true",
      SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
      assertAmbiguousSelfJoin(df1.join(df2).join(df3).join(df4, df2("b") < df4("b")))
      assertAmbiguousSelfJoin(df1.join(df2).join(df5).join(df4).select(df2("b")))
    }
  }
  // Every logical operator that DeduplicateRelations may rewrite must carry the
  // dataset_id tag over to its copy, or self-join ambiguity goes undetected.
  // One sub-case below per operator kind.
  test("SPARK-36874: DeduplicateRelations should copy dataset_id tag " +
    "to avoid ambiguous self join") {
    // Test for Project
    val df1 = Seq((1, 2, "A1"), (2, 1, "A2")).toDF("key1", "key2", "value")
    val df2 = df1.filter($"value" === "A2")
    assertAmbiguousSelfJoin(df1.join(df2, df1("key1") === df2("key2")))
    assertAmbiguousSelfJoin(df2.join(df1, df1("key1") === df2("key2")))

    // Test for SerializeFromObject
    val df3 = spark.sparkContext.parallelize(1 to 10).map(x => (x, x)).toDF
    val df4 = df3.filter($"_1" <=> 0)
    assertAmbiguousSelfJoin(df3.join(df4, df3("_1") === df4("_2")))
    assertAmbiguousSelfJoin(df4.join(df3, df3("_1") === df4("_2")))

    // Test For Aggregate
    val df5 = df1.groupBy($"key1").agg(count($"value") as "count")
    val df6 = df5.filter($"key1" > 0)
    assertAmbiguousSelfJoin(df5.join(df6, df5("key1") === df6("count")))
    assertAmbiguousSelfJoin(df6.join(df5, df5("key1") === df6("count")))

    // Test for MapInPandas
    val mapInPandasUDF = PythonUDF("mapInPandasUDF", null,
      StructType(Seq(StructField("x", LongType), StructField("y", LongType))),
      Seq.empty,
      PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
      true)
    val df7 = df1.mapInPandas(mapInPandasUDF)
    val df8 = df7.filter($"x" > 0)
    assertAmbiguousSelfJoin(df7.join(df8, df7("x") === df8("y")))
    assertAmbiguousSelfJoin(df8.join(df7, df7("x") === df8("y")))

    // Test for FlatMapGroupsInPandas
    val flatMapGroupsInPandasUDF = PythonUDF("flagMapGroupsInPandasUDF", null,
      StructType(Seq(StructField("x", LongType), StructField("y", LongType))),
      Seq.empty,
      PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
      true)
    val df9 = df1.groupBy($"key1").flatMapGroupsInPandas(flatMapGroupsInPandasUDF)
    val df10 = df9.filter($"x" > 0)
    assertAmbiguousSelfJoin(df9.join(df10, df9("x") === df10("y")))
    assertAmbiguousSelfJoin(df10.join(df9, df9("x") === df10("y")))

    // Test for FlatMapCoGroupsInPandas
    val flatMapCoGroupsInPandasUDF = PythonUDF("flagMapCoGroupsInPandasUDF", null,
      StructType(Seq(StructField("x", LongType), StructField("y", LongType))),
      Seq.empty,
      PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF,
      true)
    val df11 = df1.groupBy($"key1").flatMapCoGroupsInPandas(
      df1.groupBy($"key2"), flatMapCoGroupsInPandasUDF)
    val df12 = df11.filter($"x" > 0)
    assertAmbiguousSelfJoin(df11.join(df12, df11("x") === df12("y")))
    assertAmbiguousSelfJoin(df12.join(df11, df11("x") === df12("y")))

    // Test for AttachDistributedSequence
    val df13 = df1.withSequenceColumn("seq")
    val df14 = df13.filter($"value" === "A2")
    assertAmbiguousSelfJoin(df13.join(df14, df13("key1") === df14("key2")))
    assertAmbiguousSelfJoin(df14.join(df13, df13("key1") === df14("key2")))

    // Test for Generate
    // Ensure that the root of the plan is Generate
    val df15 = Seq((1, Seq(1, 2, 3))).toDF("a", "intList").select($"a", explode($"intList"))
      .queryExecution.optimizedPlan.find(_.isInstanceOf[Generate]).get.toDF
    val df16 = df15.filter($"a" > 0)
    assertAmbiguousSelfJoin(df15.join(df16, df15("a") === df16("col")))
    assertAmbiguousSelfJoin(df16.join(df15, df15("a") === df16("col")))

    // Test for Expand
    // Ensure that the root of the plan is Expand
    val df17 =
      Expand(
        Seq(Seq($"key1".expr, $"key2".expr)),
        Seq(
          AttributeReference("x", IntegerType)(),
          AttributeReference("y", IntegerType)()),
        df1.queryExecution.logical).toDF
    val df18 = df17.filter($"x" > 0)
    assertAmbiguousSelfJoin(df17.join(df18, df17("x") === df18("y")))
    assertAmbiguousSelfJoin(df18.join(df17, df17("x") === df18("y")))

    // Test for Window
    val dfWithTS = spark.sql("SELECT timestamp'2021-10-15 01:52:00' time, 1 a, 2 b")
    // Ensure that the root of the plan is Window
    val df19 = WindowPlan(
      Seq(Alias(dfWithTS("time").expr, "ts")()),
      Seq(dfWithTS("a").expr),
      Seq(SortOrder(dfWithTS("a").expr, Ascending)),
      dfWithTS.queryExecution.logical).toDF
    val df20 = df19.filter($"a" > 0)
    assertAmbiguousSelfJoin(df19.join(df20, df19("a") === df20("b")))
    assertAmbiguousSelfJoin(df20.join(df19, df19("a") === df20("b")))

    // Test for ScriptTransformation
    val ioSchema =
      ScriptInputOutputSchema(
        Seq(("TOK_TABLEROWFORMATFIELD", ","),
          ("TOK_TABLEROWFORMATCOLLITEMS", "#"),
          ("TOK_TABLEROWFORMATMAPKEYS", "@"),
          ("TOK_TABLEROWFORMATNULL", "null"),
          ("TOK_TABLEROWFORMATLINES", "\\n")),
        Seq(("TOK_TABLEROWFORMATFIELD", ","),
          ("TOK_TABLEROWFORMATCOLLITEMS", "#"),
          ("TOK_TABLEROWFORMATMAPKEYS", "@"),
          ("TOK_TABLEROWFORMATNULL", "null"),
          ("TOK_TABLEROWFORMATLINES", "\\n")), None, None,
        List.empty, List.empty, None, None, false)
    // Ensure that the root of the plan is ScriptTransformation
    val df21 = ScriptTransformation(
      "cat",
      Seq(
        AttributeReference("x", IntegerType)(),
        AttributeReference("y", IntegerType)()),
      df1.queryExecution.logical,
      ioSchema).toDF
    val df22 = df21.filter($"x" > 0)
    assertAmbiguousSelfJoin(df21.join(df22, df21("x") === df22("y")))
    assertAmbiguousSelfJoin(df22.join(df21, df21("x") === df22("y")))
  }
  // GetDateFieldOperations used to call dataType on unresolved attributes; after the
  // fix it skips unresolved nodes so a clean "column does not exist" error surfaces.
  test("SPARK-35937: GetDateFieldOperations should skip unresolved nodes") {
    withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
      val df = Seq("1644821603").map(i => (i.toInt, i)).toDF("tsInt", "tsStr")
      val df1 = df.select(df("tsStr").cast("timestamp")).as("df1")
      val df2 = df.select(df("tsStr").cast("timestamp")).as("df2")
      df1.join(df2, $"df1.tsStr" === $"df2.tsStr", "left_outer")
      val df3 = df1.join(df2, $"df1.tsStr" === $"df2.tsStr", "left_outer")
        .select($"df1.tsStr".as("timeStr")).as("df3")
      // Before the fix, it throws "UnresolvedException: Invalid call to
      // dataType on unresolved object".
      val ex = intercept[AnalysisException](
        df3.join(df1, year($"df1.timeStr") === year($"df3.tsStr"))
      )
      assert(ex.message.contains("Column 'df1.timeStr' does not exist."))
    }
  }
}
| ueshin/apache-spark | sql/core/src/test/scala/org/apache/spark/sql/DataFrameSelfJoinSuite.scala | Scala | apache-2.0 | 19,529 |
package coursier.publish.sonatype.logger
/** Progress callbacks for Sonatype staging-profile listing; defaults are no-ops. */
trait SonatypeLogger {
  // Called before each attempt to list profiles (attempt is the current try, out of total).
  def listingProfiles(attempt: Int, total: Int): Unit = ()
  // Called when listing finishes; errorOpt is non-empty if the operation failed.
  def listedProfiles(errorOpt: Option[Throwable]): Unit = ()
}

object SonatypeLogger {
  /** A logger that silently ignores all events. */
  val nop: SonatypeLogger =
    new SonatypeLogger {}
}
| alexarchambault/coursier | modules/publish/src/main/scala/coursier/publish/sonatype/logger/SonatypeLogger.scala | Scala | apache-2.0 | 270 |
package com.sksamuel.elastic4s.http.locks
import com.sksamuel.elastic4s.http._
import com.sksamuel.elastic4s.locks.{AcquireGlobalLock, ReleaseGlobalLock}
/**
 * Handlers for acquiring and releasing a global lock, modelled as a document in
 * the "fs" index. Success/failure is signalled purely through the HTTP status code.
 */
trait LocksHandlers {

  implicit object AcquireGlobalLockHandler extends Handler[AcquireGlobalLock, Boolean] {

    // _create fails with a conflict when the document already exists, which makes the
    // PUT act as an atomic "try to take the lock" operation.
    val endpoint = "/fs/lock/global/_create"

    override def responseHandler: ResponseHandler[Boolean] = new ResponseHandler[Boolean] {
      // 201 Created => the lock document was created, i.e. the lock was acquired.
      override def handle(response: HttpResponse) = Right(response.statusCode == 201)
    }

    override def build(request: AcquireGlobalLock): ElasticRequest =
      ElasticRequest("PUT", endpoint)
  }

  implicit object ReleaseGlobalLockHandler extends Handler[ReleaseGlobalLock, Boolean] {

    // Named constant for symmetry with AcquireGlobalLockHandler above.
    val endpoint = "/fs/lock/global"

    override def responseHandler: ResponseHandler[Boolean] = new ResponseHandler[Boolean] {
      // 200 OK => the lock document was deleted, i.e. the lock was released.
      override def handle(response: HttpResponse) = Right(response.statusCode == 200)
    }

    override def build(request: ReleaseGlobalLock): ElasticRequest =
      ElasticRequest("DELETE", endpoint)
  }
}
| Tecsisa/elastic4s | elastic4s-http/src/main/scala/com/sksamuel/elastic4s/http/locks/LocksHandlers.scala | Scala | apache-2.0 | 1,012 |
/**
* Copyright 2014 Getty Images, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.swagger.akka.samples
import javax.ws.rs.Path
import com.github.swagger.akka._
import com.github.swagger.akka.model.{Contact, Info, License}
import akka.actor.{ActorRefFactory, ActorSystem}
import akka.http.scaladsl.model.ContentTypes.`application/json`
import akka.http.scaladsl.model.StatusCodes.OK
import akka.http.scaladsl.model.{HttpEntity, HttpResponse}
import akka.http.scaladsl.server.{Directives, Route}
import io.swagger.annotations._
// Sample model type exposed through the swagger-annotated Dogs endpoint below.
case class Dog(breed: String)
// Demonstrates mounting a SwaggerHttpService next to an annotation-driven inner
// route object; `self` lets the nested object reach the outer constructor param.
class NestedService(system: ActorSystem) { self =>

  val swaggerService = new SwaggerHttpService {
    override val apiClasses: Set[Class[_]] = Set(Dogs.getClass)
    override val host = "some.domain.com"
    override val basePath = "api-doc"
    // Synthetic closure types that would otherwise leak into the generated model list.
    override val unwantedDefinitions = Seq("Function1", "Function1RequestContextFutureRouteResult")

    override val info: Info = Info(
      description = "Dogs love APIs",
      version = "1.0",
      title = "Test API Service",
      termsOfService = "Lenient",
      contact = Some(Contact("Lassie", "http://lassie.com", "lassie@tvland.com")),
      license = Some(License("Apache", "http://license.apache.com"))
    )

    // Route construction needs an ActorRefFactory; delegate to the outer system.
    implicit def actorRefFactory: ActorRefFactory = system
  }

  @Api(value = "/dogs")
  @Path(value = "/dogs")
  object Dogs extends Directives {
    implicit def actorRefFactory: ActorRefFactory = self.system

    @ApiOperation(value = "List all of the dogs",
      notes = "Dogs are identified by unique strings",
      response = classOf[ListReply[Dog]],
      httpMethod = "GET",
      nickname = "getDogs")
    @ApiResponses(
      Array(new ApiResponse(code = 200, message = "OK"),
        new ApiResponse(code = 404, message = "Dog not found"),
        new ApiResponse(code = 500, message = "Internal Server Error")))
    def getDogs = path("dogs") {
      complete("dogs")
    }

    @ApiOperation(value = "Options for dogs", notes = "dog notes", response = classOf[Void], httpMethod = "OPTIONS")
    @ApiResponses(Array(new ApiResponse(code = 200, message = "OK")))
    def optionsRoute: Route = (path("dogs") & options) {
      complete(HttpResponse(OK, entity = HttpEntity.empty(`application/json`)))
    }
  }
}
| helloscala/helloscala | hs-swagger-akka-http/src/test/scala/com/github/swagger/akka/samples/DogsHttpService.scala | Scala | apache-2.0 | 2,853 |
/**
* Created by hanxue on 1/14/14.
*/
import sbt._
import Keys._
/**
 * sbt plugin that exposes Google Cloud SDK operations (gcloud/gcutil) as sbt
 * commands. Every command shells out to the locally installed CLI tools and
 * streams their output through [[logger]].
 */
object GoogleCloudEngine extends Plugin {
  override lazy val settings = Seq(commands ++= Seq(login, listinstances, setproject, setaccount, listcomponents, configlist, configset) )

  /** Runs an external command synchronously, forwarding output to [[logger]]. */
  private def run(cmd: Seq[String]): Unit = sbt.Process(cmd) ! logger

  /** Starts the interactive OAuth login flow of the Cloud SDK. */
  def login = Command.command("login") { state =>
    println("OAuth login to Google Cloud Engine")
    run(Seq("gcloud", "auth", "login"))
    state
  }

  /** Lists the Compute Engine instances visible to the configured project. */
  def listinstances = Command.command("listinstances") { state =>
    println("Listing available GCE machines")
    run(Seq("gcutil", "listinstances"))
    state
  }

  /** Sets the active gcloud project: `set-project <project-name>`. */
  def setproject = Command.args("set-project", "<project-name>") { (state, args) =>
    run(Seq("gcloud", "config", "set", "project", args(0)))
    state
  }

  /** Sets the active gcloud account: `set-account <account-name>`. */
  def setaccount = Command.args("set-account", "<account-name>") { (state, args) =>
    run(Seq("gcloud", "components", "list").patch(0, Nil, 0)) // placeholder removed below
    state
  }

  /** Lists the installed Cloud SDK components. */
  def listcomponents = Command.command("listcomponents") { state =>
    run(Seq("gcloud", "components", "list"))
    state
  }

  // NOTE(review): this command runs `gcloud config list` but is registered under the
  // name "components", which looks like a copy-paste slip. The name is kept as-is to
  // avoid changing the user-facing command -- confirm the intended command name.
  def configlist = Command.command("components") { state =>
    run(Seq("gcloud", "config", "list"))
    state
  }

  /** Sets an arbitrary gcloud config option: `set-config <option> <value>`. */
  def configset = Command.args("set-config", "<option> <value>") { (state, args) =>
    run(Seq("gcloud", "config", "set", args(0), args(1)))
    state
  }

  /** Forwards subprocess stdout/stderr to the sbt console. */
  val logger = new sbt.ProcessLogger {
    def info(info: => String) = println(info)
    def error(err: => String) = { println(err) }
    def buffer[T](f: => T) = f
  }
}
| hanxue/sbt-cloudengine | src/main/scala/GoogleCloudEngine.scala | Scala | bsd-2-clause | 1,701 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.util.exceptions
/** Exception signalling that a file is not acceptable; `msg` describes the problem. */
class IllegalFileException(msg: String) extends Exception(msg)
| hochgi/CM-Well | server/cmwell-util/src/main/scala/cmwell/util/exceptions/IllegalFileException.scala | Scala | apache-2.0 | 714 |
/**
* Copyright 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package noop.interpreter;
import com.google.inject.Inject
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.mutable.ArrayBuffer
import noop.inject.Injector
import noop.interpreter.testing.TestFailedException
import noop.model._
import noop.types._
/**
 * Tree-walking evaluator for the Noop AST. All state lives in the injected
 * [[Context]]: each stack frame carries block scopes plus a `lastEvaluated`
 * stack/buffer of values produced by already-visited sub-expressions.
 *
 * @author alexeagle@google.com (Alex Eagle)
 * @author tocman@gmail.com (Jeremie Lenfant-Engelmann)
 */
class InterpreterVisitor @Inject() (val context: Context, injector: Injector,
    booleanFactory: BooleanFactory, stringFactory: StringFactory, integerFactory: IntegerFactory)
    extends Visitor {
  val logger: Logger = LoggerFactory.getLogger(this.getClass());

  // Binds the already-evaluated rhs value to the lhs identifier in the current scope.
  def visit(assignmentExpression: AssignmentExpression) = {
    val currentFrame = context.stack.top;
    val identifier = assignmentExpression.lhs.asInstanceOf[IdentifierExpression].identifier;
    // NOTE(review): index 1 appears to read the rhs value (index 0 being the lhs) --
    // confirm against the order values are appended to lastEvaluated.
    val obj = currentFrame.lastEvaluated(1);

    currentFrame.lastEvaluated.clear();
    if (obj == null) {
      throw new RuntimeException("cannot assign Void");
    }
    // NOTE(review): `Tuple(null, obj)` differs from the `new Tuple2[...]` used in
    // visit(IdentifierDeclarationExpression) -- confirm both resolve to the same type.
    currentFrame.blockScopes.setValue(identifier, Tuple(null, obj));
  }

  // Entering a block discards any leftover expression results.
  def visit(block: Block) = {
    context.stack.top.lastEvaluated.clear();
  }

  def visit(booleanLiteralExpression: BooleanLiteralExpression) = {
    context.stack.top.lastEvaluated += booleanFactory.create(booleanLiteralExpression.value);
  }

  // Looks up `lhs.property`; the property name was evaluated to a NoopString on top.
  def visit(dereferenceExpression: DereferenceExpression) = {
    val rhs = context.stack.top.lastEvaluated.pop;
    val lhs = context.stack.top.lastEvaluated.pop;
    val property = rhs.asInstanceOf[NoopString].value;
    val result = lhs.asInstanceOf[NoopObject].propertyMap.get(property) match {
      case Some(v) => v;
      case None => throw new RuntimeException("No such property " + property);
    }
    context.stack.top.lastEvaluated += result;
  }

  // Re-pushes a value that was computed earlier (used to splice results back in).
  def visit(evaluatedExpression: EvaluatedExpression) = {
    context.stack.top.lastEvaluated += evaluatedExpression.value;
  }

  // Declares a new identifier in the current frame, initialised with the value
  // produced by the initializer expression.
  def visit(identifierDeclarationExpression: IdentifierDeclarationExpression) = {
    val currentFrame = context.stack.top;
    if (currentFrame.lastEvaluated.isEmpty) {
      throw new RuntimeException("The right handside didn't evaluate to a proper value");
    }
    val obj = currentFrame.lastEvaluated.top;
    logger.trace("identifierExpression of {} found an initial value {}",
      identifierDeclarationExpression.name, obj);
    currentFrame.lastEvaluated.clear();
    currentFrame.addIdentifier(identifierDeclarationExpression.name,
      new Tuple2[NoopType, NoopObject](null, obj));
  }

  // Resolution order: `this`, then block scope, then a property of `this`; anything
  // else is treated as a bare string (e.g. a method/property name for a later lookup).
  def visit(identifierExpression: IdentifierExpression) = {
    val currentFrame = context.stack.top;
    val identifier = identifierExpression.identifier;
    logger.info("Visiting ID expr: {}", identifier);
    if (identifier == "this") {
      currentFrame.lastEvaluated += currentFrame.thisRef;
    } else if (currentFrame.blockScopes.hasIdentifier(identifier)) {
      currentFrame.lastEvaluated += currentFrame.blockScopes.getIdentifier(identifier)._2;
    } else if (currentFrame.thisRef.propertyMap.contains(identifier)) {
      currentFrame.lastEvaluated += currentFrame.thisRef.propertyMap(identifier);
    } else {
      currentFrame.lastEvaluated += stringFactory.create(identifier);
    }
  }

  def visit(intLiteralExpression: IntLiteralExpression) = {
    context.stack.top.lastEvaluated += integerFactory.create(intLiteralExpression.value);
  }

  // Tracks lastEvaluated's size across argument evaluation so a Void argument
  // (no new value pushed) can be detected in afterArgumentVisit.
  var evaluationStackSize = -1;

  def enter(methodInvocationExpression: MethodInvocationExpression) = {
    evaluationStackSize = context.stack.top.lastEvaluated.size;
  }

  def afterArgumentVisit(methodInvocationExpression: MethodInvocationExpression) = {
    if (context.stack.top.lastEvaluated.size > evaluationStackSize) {
      evaluationStackSize = context.stack.top.lastEvaluated.size;
    } else {
      throw new RuntimeException("Argument to method " + methodInvocationExpression.name +
        " evaluated to Void");
    }
  }

  def visit(methodInvocationExpression: MethodInvocationExpression) = {
    val methodInvocationEvaluator = new MethodInvocationEvaluator(methodInvocationExpression, this);
    methodInvocationEvaluator.execute(context);
    evaluationStackSize = -1;
  }

  // Native methods are dispatched to the receiver object; regular methods just
  // have their body visited (arguments were already bound into the block scope).
  def visit(method: Method) = {
    if (method.modifiers.contains(Modifier.native)) {
      val obj = context.stack.top.thisRef;
      val arguments = new ArrayBuffer[NoopObject];

      for (parameter <- method.parameters) {
        arguments += context.stack.top.blockScopes.getIdentifier(parameter.name)._2;
      }
      val returnValue = obj.executeNativeMethod(arguments, method.name);
      context.stack.top.lastEvaluated += returnValue;
    } else {
      method.block.accept(this);
    }
  }

  def visit(operatorExpression: OperatorExpression) = {
  }

  def visit(returnExpression: ReturnExpression) = {
  }

  // Assertion node used by the testing DSL: the two operands must be equal.
  def visit(shouldExpression: ShouldExpression) = {
    val lastEvaluated = context.stack.top.lastEvaluated;
    // NOTE(review): index 0 is labelled "actual" and 1 "expected"; confirm the
    // labelling matches the evaluation order of the should-expression operands.
    val actual = lastEvaluated(0);
    val expected = lastEvaluated(1);

    if (actual != expected) {
      throw new TestFailedException("expected " + actual + " to equal " + expected);
    }
    context.stack.top.lastEvaluated.clear();
  }

  def visit(stringLiteralExpression: StringLiteralExpression) = {
    context.stack.top.lastEvaluated += stringFactory.create(stringLiteralExpression.value);
  }

  // Recursive while: re-visits itself after the body as long as the condition
  // (already evaluated onto lastEvaluated) is true.
  def visit(whileLoop: WhileLoop) = {
    if (context.stack.top.lastEvaluated(0).asInstanceOf[NoopBoolean].value) {
      context.stack.top.blockScopes.inScope("while loop") {
        whileLoop.body.accept(this);
      }
      whileLoop.accept(this);
    }
    context.stack.top.lastEvaluated.clear();
  }

  def visit(bindingDeclaration: BindingDeclaration) = {
    val boundValue = context.stack.top.lastEvaluated.top;
    // TODO(alexeagle): collect up all the declarations and make one new child fixture
    // fixture.addBinding(bindingDeclaration.noopType, boundValue);
    context.stack.top.lastEvaluated.clear();
  }

  def visit(conditionalAndExpression: ConditionalAndExpression) = {
  }

  def visit(conditionalOrExpression: ConditionalOrExpression) = {
  }
}
| masterx2/noop | interpreter/src/main/scala/noop/interpreter/InterpreterVisitor.scala | Scala | apache-2.0 | 6,689 |
package example
import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl._
import scala.concurrent.Future
import scala.concurrent.duration._
import akka.{ NotUsed, Done }
// Akka Streams throttling demo: batches a large range and rate-limits the writes.
object Throttling extends App {
  implicit val sys = ActorSystem("akka-stream-patterns")
  implicit val mat = ActorMaterializer()

  import sys.dispatcher

  // Simulated persistence call; runs on the dispatcher imported above and only
  // prints which thread performed the "write".
  def writeBatchToDatabase(batch: Seq[Int]): Future[Unit] =
    Future {
      println(s"Writing batch of $batch to database by ${Thread.currentThread().getName}")
    }

  // Emit 1..1,000,000 in batches of 10, capped at 10 batches/second (burst of 10),
  // with at most 10 writes in flight at a time (mapAsync parallelism).
  Source(1 to 1000000)
    .grouped(10)
    .throttle(elements = 10, per = 1.second, maximumBurst = 10, ThrottleMode.shaping)
    .mapAsync(10)(writeBatchToDatabase)
    .runWith(Sink.ignore)
}
| diegopacheco/scala-playground | akka-streams-patterns/src/main/scala/example/Throttling.scala | Scala | unlicense | 720 |
package com.github.myyk.cracking
import com.github.myyk.cracking.Chapter16Solutions.AntGrid.{
AntGridResult,
Direction
}
import com.github.myyk.cracking.Chapter16Solutions.{
EnglishIntMaker,
Line,
MasterMindResult,
MutableInteger,
Person,
Point,
TicTacToe,
WordFrequencies,
bestLine,
countDivingBoardsOfKPieces,
countDivingBoardsOfSize,
divide,
factorialZeroes,
isWonTicTacToe,
livingPeople,
livingPeopleBruteForce,
masterMindScore,
masterMindScore2,
multiply,
numberMax,
smallestDifference,
subtract,
_
}
import java.lang.{Integer => JInt}
import scala.jdk.CollectionConverters._
import scala.util.Random
import org.scalatest._
import flatspec._
import matchers._
class Chapter16SolutionsTest extends AnyFlatSpec with should.Matchers {
  "swapInPlace" should "swap the two integers without using additional space" in {
    val originalA = 123456
    val originalB = 67890
    val a = new MutableInteger(originalA)
    val b = new MutableInteger(originalB)
    Chapter16Solutions.swapInPlace(a, b)
    a.value shouldBe originalB
    b.value shouldBe originalA
  }

  // Frequencies are case-insensitive; punctuation and hyphens must not merge words.
  "WordFrequencies" should "be able to get a word's frequency from a given text" in {
    val wordFreqs =
      new WordFrequencies("a: b c d, å åå e f g aaa' a a a -aaa- a")
    wordFreqs.getFrequency("apple") shouldBe 0
    wordFreqs.getFrequency("a") shouldBe 5
    wordFreqs.getFrequency("A") shouldBe 5
    wordFreqs.getFrequency("aaa") shouldBe 2
    wordFreqs.getFrequency("c") shouldBe 1
    wordFreqs.getFrequency("å") shouldBe 1
    wordFreqs.getFrequency("åå") shouldBe 1
  }

  "isWonTicTacToe" should "figure out if a board is won or not already" in {
    // not super proud of these tests, but they will do for now.
    // Top-row win, built one move at a time:
    var board = TicTacToe.newBoard
    isWonTicTacToe(board) shouldBe false
    board(0)(0) = TicTacToe.X
    isWonTicTacToe(board) shouldBe false
    board(0)(1) = TicTacToe.O
    isWonTicTacToe(board) shouldBe false
    board(0)(2) = TicTacToe.X
    isWonTicTacToe(board) shouldBe false
    board(0)(1) = TicTacToe.X
    isWonTicTacToe(board) shouldBe true

    // Main-diagonal win:
    board = TicTacToe.newBoard
    isWonTicTacToe(board) shouldBe false
    board(0)(0) = TicTacToe.X
    board(1)(1) = TicTacToe.X
    board(2)(2) = TicTacToe.X
    isWonTicTacToe(board) shouldBe true

    // Right-column win:
    board = TicTacToe.newBoard
    isWonTicTacToe(board) shouldBe false
    board(0)(2) = TicTacToe.X
    board(1)(2) = TicTacToe.X
    board(2)(2) = TicTacToe.X
    isWonTicTacToe(board) shouldBe true

    // NOTE(review): duplicate of the main-diagonal block above.
    board = TicTacToe.newBoard
    isWonTicTacToe(board) shouldBe false
    board(0)(0) = TicTacToe.X
    board(1)(1) = TicTacToe.X
    board(2)(2) = TicTacToe.X
    isWonTicTacToe(board) shouldBe true

    // Anti-diagonal win:
    board = TicTacToe.newBoard
    isWonTicTacToe(board) shouldBe false
    board(0)(2) = TicTacToe.X
    board(1)(1) = TicTacToe.X
    board(2)(0) = TicTacToe.X
    isWonTicTacToe(board) shouldBe true
  }

  // Trailing zeroes of n! increase at every multiple of 5 (extra at 25, 50, ...).
  "factorialZeroes" should "get the number of trailing 0s of n!" in {
    for (i <- 0 to 4) {
      factorialZeroes(i) shouldBe 0
    }
    for (i <- 5 to 9) {
      factorialZeroes(i) shouldBe 1
    }
    for (i <- 10 to 14) {
      factorialZeroes(i) shouldBe 2
    }
    factorialZeroes(25) shouldBe 6
    factorialZeroes(50) shouldBe 12
  }

  "smallestDifference" should "find the smallest difference between any two numbers in the arrays" in {
    smallestDifference(
      Array(1, 3, 15, 11, 2),
      Array(23, 127, 235, 19, 8)
    ) shouldBe 3
    smallestDifference(
      Array(1, 3, 15, 2),
      Array(23, 127, 235, 19, 8)
    ) shouldBe 4
    smallestDifference(Array(1, 3, 15, 2), Array(23, 127, 235, 3, 8)) shouldBe 0
    smallestDifference(Array(1), Array(23, 127, 235, 312, 8)) shouldBe 7
  }

  // Helper: checks numberMax against the built-in max in both argument orders.
  def testNumberMax(a: Int, b: Int): Unit = {
    numberMax(a, b) shouldBe (a max b)
    numberMax(b, a) shouldBe (a max b)
  }
"numberMax" should "find the max between two numbers" in {
testNumberMax(0, 0)
testNumberMax(1, 0)
testNumberMax(-1, 0)
testNumberMax(-1, -2)
testNumberMax(123, 321)
testNumberMax(Int.MaxValue - 1, Int.MaxValue)
testNumberMax(Int.MinValue + 1, Int.MaxValue)
}
"englishInt" should "get a word representation of an integer" in {
val maker = new EnglishIntMaker()
maker.englishInt(0) shouldBe "Zero"
maker.englishInt(1000) shouldBe "One Thousand"
maker.englishInt(100) shouldBe "One Hundred"
maker.englishInt(101) shouldBe "One Hundred One"
maker.englishInt(1234) shouldBe "One Thousand, Two Hundred Thirty Four"
maker.englishInt(
-1234
) shouldBe "Negative One Thousand, Two Hundred Thirty Four"
maker.englishInt(
9341234
) shouldBe "Nine Million, Three Hundred Forty One Thousand, Two Hundred Thirty Four"
maker.englishInt(
Int.MaxValue
) shouldBe "Two Billion, One Hundred Forty Seven Million, Four Hundred Eighty Three Thousand, Six Hundred Forty Seven"
maker.englishInt(
Int.MinValue
) shouldBe "Negative Two Billion, One Hundred Forty Seven Million, Four Hundred Eighty Three Thousand, Six Hundred Forty Eight"
maker.englishInt(
Int.MinValue + 1
) shouldBe "Negative Two Billion, One Hundred Forty Seven Million, Four Hundred Eighty Three Thousand, Six Hundred Forty Seven"
}
def testOperations(a: Int, b: Int): Unit = {
testOperationsHelper(a, b)
testOperationsHelper(b, a)
}
def testOperationsHelper(a: Int, b: Int): Unit = {
subtract(a, b) shouldBe (a - b)
if (a.abs < 10000 && b.abs < 10000) {
multiply(a, b) shouldBe (a * b)
}
if (b != 0 && a.abs < 10000) {
divide(a, b) shouldBe (a / b)
}
}
"operations" should "do subtraction, multiplication and division properly" in {
testOperations(0, 0)
testOperations(1, 0)
testOperations(-1, 1)
testOperations(-1, 0)
testOperations(Integer.MAX_VALUE, 2)
testOperations(Integer.MIN_VALUE + 1, 2)
testOperations(123, 32)
testOperations(123, -32)
testOperations(Integer.MAX_VALUE, Integer.MAX_VALUE)
testOperations(Integer.MIN_VALUE, Integer.MIN_VALUE)
for (_ <- 1 to 100) {
testOperations(Random.nextInt(), Random.nextInt())
}
}
def testLivingPeople(people: Set[Person]): Unit = {
livingPeople(people.asJava) shouldBe livingPeopleBruteForce(people.asJava)
}
"livingPeople" should "return the year where the most people were living" in {
livingPeople(
Set(new Person(1900, 2000), new Person(1910, 1910)).asJava
) shouldBe 1910
val people = for (_: Int <- (1 to 10000).toSet) yield {
val birth = 1900 + Random.nextInt(100)
new Person(birth, birth + Random.nextInt(100))
}
testLivingPeople(people)
}
"countDivingBoardsOfKPieces" should "return the number of ways to build a diving board of with k boards" in {
countDivingBoardsOfKPieces(5, 10, 4).asScala.toSet shouldBe Set(
20,
25,
30,
35,
40
)
countDivingBoardsOfKPieces(3, 7, 4).asScala.toSet shouldBe Set(
12,
16,
20,
24,
28
)
countDivingBoardsOfKPieces(10, 10, 4).asScala.toSet shouldBe Set(40)
}
"countDivingBoardsOfSize" should "return the number of ways to build a diving board of size k" in {
countDivingBoardsOfSize(5, 10, 200) shouldBe Chapter7Solutions.coinsCount(
Set(5, 10).map(JInt.valueOf).asJava,
200
)
}
"bestLine" should "find a line that goes through the most points" in {
bestLine(Set(new Point(1, 1), new Point(2, 2)).asJava) shouldBe new Line(
new Point(1, 1),
new Point(2, 2)
)
bestLine(Set(new Point(1, 1), new Point(3, 3)).asJava) shouldBe new Line(
new Point(1, 1),
new Point(2, 2)
)
bestLine(Set(new Point(0, 1), new Point(0, 3)).asJava) shouldBe new Line(
new Point(0, 3),
new Point(0, 3)
)
bestLine(
Set(
new Point(1, 1),
new Point(2, 2),
new Point(3, 3),
new Point(0, 1),
new Point(0, 3)
).asJava
) shouldBe new Line(new Point(0, 0), new Point(1, 1))
}
"masterMindScore" should "compute the game score" in {
masterMindScore("RGBY", "GGRR") shouldBe new MasterMindResult(1, 1)
masterMindScore2("RGBY", "GGRR") shouldBe new MasterMindResult(1, 1)
}
"subSortIndexes" should "give the indexes of the minimum subarray to get a sorted array" in {
subSortIndexes(
Array(1, 2, 4, 7, 10, 11, 7, 12, 6, 7, 16, 18, 19)
) shouldBe (3, 9)
subSortIndexes(
Array(1, 2, 4, 7, 10, 11, 7, 12, 7, 7, 16, 18, 19)
) shouldBe (4, 9)
subSortIndexes(
Array(1, 2, 4, 7, 10, 11, 7, 12, 7, 7, 16, 18, 5)
) shouldBe (3, 12)
subSortIndexes(
Array(3, 2, 4, 7, 10, 11, 7, 12, 6, 7, 16, 18, 19)
) shouldBe (0, 9)
}
def testMaxContiguousSequenceSum(array: Array[Int], expected: Int): Unit = {
maxContiguousSequenceSum(array) shouldBe expected
maxContiguousSequenceSum2(array) shouldBe expected
}
"maxContiguousSequenceSum" should "get the maximum sum of a contiguous subarray" in {
testMaxContiguousSequenceSum(Array(2, -8, 3, -2, 4, -10), 5)
testMaxContiguousSequenceSum(Array(-10), -10)
testMaxContiguousSequenceSum(Array(-10, -2), -2)
testMaxContiguousSequenceSum(Array(-2, -10), -2)
}
"doesPatternMatch" should "determine if the pattern matches the value" in {
doesPatternMatch("", "catcatgocatgo") shouldBe false
doesPatternMatch("a", "catcatgocatgo") shouldBe true
doesPatternMatch("b", "catcatgocatgo") shouldBe true
doesPatternMatch("ab", "catcatgocatgo") shouldBe true
doesPatternMatch("aabab", "catcatgocatgo") shouldBe true
doesPatternMatch("aabac", "catcatgocatgo") shouldBe false
}
"findPondSizes" should "find the sizes of the various ponds in the topography" in {
findPondSizes(Array(Array(1))).asScala.toSet shouldBe Set()
findPondSizes(Array(Array(0))).asScala.toSet shouldBe Set(1)
findPondSizes(Array(Array(0, 0), Array(0, 0))).asScala.toSet shouldBe Set(4)
findPondSizes(
Array(
Array(0, 2, 1, 0),
Array(0, 1, 0, 1),
Array(1, 1, 0, 1),
Array(0, 1, 0, 1)
)
).asScala.toSet shouldBe Set(1, 2, 4)
}
def testSumSwap(a: Array[Int], b: Array[Int]): Unit = {
val result = sumSwap(a, b)
val swappedA = -result(0) :: result(1) :: a.toList
val swappedB = result(0) :: -result(1) :: b.toList
swappedA.sum shouldBe swappedB.sum
}
"sumSwap" should "try to find integers to swap to get arrays of the same sum" in {
testSumSwap(Array(4, 1, 2, 1, 1, 2), Array(3, 6, 3, 3))
}
def printAntWalk(grid: AntGridResult): Unit = {
println(
s"ant = (${grid.ant._1}, ${grid.ant._2}), direction = ${grid.direction}"
)
for {
col <- grid.isBlack.indices
row <- grid.isBlack(0).indices
} {
if (grid.ant._1 == col && grid.ant._2 == row) {
if (grid.isBlack(col)(row)) {
print('X')
} else {
print('O')
}
} else if (grid.isBlack(col)(row)) {
print('x')
} else {
print('o')
}
if (row == grid.isBlack(0).length - 1) {
println()
}
}
}
"antWalk" should "walk the ant according to it's rules and return the result" in {
// Useful to manually validate, but too verbose otherwise.
// val grid = new AntGrid()
// for (i <- 0 to 20) {
// println(s"------ k = ${i} ${"-"*25}")
// printAntWalk(grid.getResult)
// grid.moveAnt()
// }
val expectedArray = Array(
Array(false, false, true),
Array(true, true, true),
Array(true, true, false)
)
antWalk(10) shouldBe new AntGridResult(
(0, 0),
expectedArray,
Direction.Left
)
}
"rand7" should "return random numbers 0 until 7" in {
val values = for {
_ <- 0 until 1000
} yield {
rand7()
}
// println(values.groupBy { a => a }.map{case(a, b) => (a, b.size)}.toList.sortBy{case (a, b) => a})
values.toSet shouldBe (0 until 7).toSet
}
"findPairsWithSum" should "find all the pairs with the sum in the array" in {
findPairsWithSum(
Array(2, -3, 5, -7, 8, -1, 0, 1),
1
).asScala.toMap shouldBe Map(-1 -> 2, -7 -> 8, 0 -> 1)
}
"LRUCache" should "work like a LRU cache with a max size" in {
val cache = new LRUCache[Integer, Integer](5)
// test basic fill
for (i <- 1 to 5) {
cache.put(i, i) shouldBe null
cache.containsKey(i) shouldBe true
cache.get(i) shouldBe i
}
// test evict all
for (i <- 6 to 10) {
cache.put(i, i) shouldBe null
cache.containsKey(i) shouldBe true
cache.get(i) shouldBe i
}
for (i <- 1 to 5) {
cache.containsKey(i) shouldBe false
}
// test update beginning
cache.put(6, 8)
cache.get(6) shouldBe 8
for (i <- 7 to 10) {
cache.containsKey(i) shouldBe true
cache.get(i) shouldBe i
}
cache.get(6) shouldBe 8
cache.put(6, 6)
// test update end
cache.put(10, 11)
cache.get(10) shouldBe 11
for (i <- 6 to 9) {
cache.containsKey(i) shouldBe true
cache.get(i) shouldBe i
}
cache.get(10) shouldBe 11
}
def testCalculate(expression: String, result: Double): Unit = {
calculate(expression) shouldBe result
calculate2(expression) shouldBe result
}
"calculate" should "compute the value of an expression" in {
testCalculate("2", 2)
testCalculate("2*3", 6)
testCalculate("3/2", 1.5)
testCalculate("3+2", 5)
testCalculate("2-3", -1)
testCalculate("2*3+5/6*3+15", 23.5)
testCalculate("1/0", Double.PositiveInfinity)
testCalculate("2*2-5/0", Double.NegativeInfinity)
testCalculate("2*2-5/0+20", Double.NegativeInfinity)
}
}
| myyk/cracking-the-coding-interview-6th | src/test/scala/com/github/myyk/cracking/Chapter16SolutionsTest.scala | Scala | apache-2.0 | 13,782 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.planner.logical
import org.neo4j.cypher.internal.compiler.v2_3.planner.logical.plans.{Apply, AllNodesScan, IdName}
import org.neo4j.cypher.internal.compiler.v2_3.planner.{CardinalityEstimation, PlannerQuery}
import org.neo4j.cypher.internal.frontend.v2_3.test_helpers.CypherFunSuite
/**
 * Tests for LogicalPlanIdentificationBuilder: every plan in a tree must be
 * mapped, and each mapped id must be unique and non-null.
 */
class LogicalPlanIdentificationBuilderTest extends CypherFunSuite {
  // Trivial "solved" marker shared by all plans: empty query, cardinality 1.
  val solved = CardinalityEstimation.lift(PlannerQuery.empty, Cardinality(1))
  // Builds a leaf all-nodes scan over the given variable name.
  private def scan(variable: String) = AllNodesScan(IdName(variable), Set.empty)(solved)
  test("plan is put into ID map") {
    val leaf = scan("a")
    val ids = LogicalPlanIdentificationBuilder(leaf)
    ids.keys.toList should equal(List(leaf))
    ids.values.toList should equal(ids.values.toList.distinct) // ids must be unique
    ids.values shouldNot contain(null)
  }
  test("plan and it's children are identified") {
    val left = scan("a")
    val right = scan("b")
    val parent = Apply(left, right)(solved)
    val ids = LogicalPlanIdentificationBuilder(parent)
    ids.keys.toSet should equal(Set(left, right, parent))
    ids.values.toList should equal(ids.values.toList.distinct)
    ids.values shouldNot contain(null)
  }
  test("plan and decedents") {
    val a = scan("a")
    val b = scan("b")
    val ab = Apply(a, b)(solved)
    val c = scan("c")
    val d = scan("d")
    val cd = Apply(c, d)(solved)
    val root = Apply(ab, cd)(solved)
    val ids = LogicalPlanIdentificationBuilder(root)
    ids.keys.toSet should equal(Set(a, b, c, d, ab, cd, root))
    ids.values.toList should equal(ids.values.toList.distinct)
    ids.values shouldNot contain(null)
  }
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/test/scala/org/neo4j/cypher/internal/compiler/v2_3/planner/logical/LogicalPlanIdentificationBuilderTest.scala | Scala | apache-2.0 | 2,583 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.storage.BlockManagerId
/**
* Low-level task scheduler interface, currently implemented exclusively by TaskSchedulerImpl.
* This interface allows plugging in different task schedulers. Each TaskScheduler schedules tasks
* for a single SparkContext. These schedulers get sets of tasks submitted to them from the
* DAGScheduler for each stage, and are responsible for sending the tasks to the cluster, running
* them, retrying if there are failures, and mitigating stragglers. They return events to the
* DAGScheduler.
*/
private[spark] trait TaskScheduler {
  // Generated once per scheduler instance; returned by applicationId()
  // unless an implementation overrides that method.
  private val appId = "spark-application-" + System.currentTimeMillis
  // Root of the pool hierarchy used to order submitted task sets.
  def rootPool: Pool
  // The scheduling policy in use (see SchedulingMode).
  def schedulingMode: SchedulingMode
  // Start the scheduler (implementation-specific initialization).
  def start(): Unit
  // Invoked after system has successfully initialized (typically in spark context).
  // Yarn uses this to bootstrap allocation of resources based on preferred locations,
  // wait for slave registrations, etc.
  def postStartHook() { }
  // Disconnect from the cluster.
  def stop(): Unit
  // Submit a sequence of tasks to run.
  def submitTasks(taskSet: TaskSet): Unit
  // Cancel a stage.
  def cancelTasks(stageId: Int, interruptThread: Boolean)
  // Set the DAG scheduler for upcalls. This is guaranteed to be set before submitTasks is called.
  def setDAGScheduler(dagScheduler: DAGScheduler): Unit
  // Get the default level of parallelism to use in the cluster, as a hint for sizing jobs.
  def defaultParallelism(): Int
  /**
   * Update metrics for in-progress tasks and let the master know that the BlockManager is still
   * alive. Return true if the driver knows about the given block manager. Otherwise, return false,
   * indicating that the block manager should re-register.
   */
  def executorHeartbeatReceived(execId: String, taskMetrics: Array[(Long, TaskMetrics)],
    blockManagerId: BlockManagerId): Boolean
  /**
   * Get an application ID associated with the job.
   *
   * @return An application ID
   */
  def applicationId(): String = appId
  /**
   * Process a lost executor
   */
  def executorLost(executorId: String, reason: ExecutorLossReason): Unit
  /**
   * Get an application's attempt ID associated with the job.
   *
   * @return An application's Attempt ID
   */
  def applicationAttemptId(): Option[String]
}
| ArvinDevel/onlineAggregationOnSparkV2 | core/src/main/scala/org/apache/spark/scheduler/TaskScheduler.scala | Scala | apache-2.0 | 3,250 |
package com.greencatsoft.d3.event
import scala.scalajs.js
import org.scalajs.dom.Node
import com.greencatsoft.d3.selection.Selection
/**
 * Scala.js facade exposing factory methods for D3's drag and zoom behaviors.
 *
 * @tparam A the DOM node type the selection operates on
 * @tparam B the selection type the behaviors are applied to
 */
@js.native
trait BehaviorFactory[A <: Node, B <: Selection[A, B]] extends js.Object {
  /** Creates a new drag behavior. */
  def drag(): Drag[A, B] = js.native
  /** Creates a new zoom behavior. */
  def zoom(): Zoom[A, B] = js.native
} | greencatsoft/scalajs-d3 | src/main/scala/com/greencatsoft/d3/event/BehaviorFactory.scala | Scala | apache-2.0 | 300
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.util
import java.io.InputStream
import java.nio.ByteBuffer
import spark.storage.BlockManager
/**
* Reads data from a ByteBuffer, and optionally cleans it up using BlockManager.dispose()
* at the end of the stream (e.g. to close a memory-mapped file).
*/
private[spark]
class ByteBufferInputStream(private var buffer: ByteBuffer, dispose: Boolean = false)
  extends InputStream {

  /**
   * Reads one byte (0-255), or returns -1 at end of stream.
   * Once the buffer is exhausted it is released via cleanUp().
   */
  override def read(): Int = {
    if (buffer == null || buffer.remaining() == 0) {
      cleanUp()
      -1
    } else {
      buffer.get() & 0xFF
    }
  }

  override def read(dest: Array[Byte]): Int = {
    read(dest, 0, dest.length)
  }

  /**
   * Reads up to `length` bytes into `dest` starting at `offset`, returning the
   * number of bytes read, or -1 at end of stream.
   */
  override def read(dest: Array[Byte], offset: Int, length: Int): Int = {
    if (length == 0) {
      // Per the InputStream contract, a zero-length read returns 0 (the
      // original code returned -1 here when the buffer was exhausted).
      0
    } else if (buffer == null || buffer.remaining() == 0) {
      cleanUp()
      -1
    } else {
      val amountToGet = math.min(buffer.remaining(), length)
      buffer.get(dest, offset, amountToGet)
      amountToGet
    }
  }

  /**
   * Skips up to `bytes` bytes and returns the number actually skipped.
   * Negative (or zero) requests skip nothing and return 0, per the
   * InputStream contract; the original code would rewind the buffer and
   * return a negative count for negative input.
   */
  override def skip(bytes: Long): Long = {
    if (buffer == null || bytes <= 0) {
      0L
    } else {
      val amountToSkip = math.min(bytes, buffer.remaining).toInt
      buffer.position(buffer.position + amountToSkip)
      if (buffer.remaining() == 0) {
        cleanUp()
      }
      amountToSkip
    }
  }

  /**
   * Clean up the buffer, and potentially dispose of it using BlockManager.dispose().
   */
  private def cleanUp() {
    if (buffer != null) {
      if (dispose) {
        BlockManager.dispose(buffer)
      }
      // Drop the reference so subsequent reads report end-of-stream.
      buffer = null
    }
  }
}
| wgpshashank/spark | core/src/main/scala/spark/util/ByteBufferInputStream.scala | Scala | apache-2.0 | 2,294 |
package spoiwo.model
import spoiwo.model.enums.{CellFill, CellHorizontalAlignment, CellReadingOrder, CellVerticalAlignment}
object CellStyle {
  /** A style with no attribute set. */
  val Default: CellStyle = CellStyle()
  /**
   * Java-interop friendly factory: every parameter defaults to `null`
   * ("not set") and is converted to an `Option` for the private constructor.
   * Boxed `Boolean`/`Integer` arguments are unboxed, with `indention` and
   * `rotation` narrowed to `Short`.
   */
  def apply(
      borders: CellBorders = null,
      dataFormat: CellDataFormat = null,
      font: Font = null,
      fillPattern: CellFill = null,
      fillForegroundColor: Color = null,
      fillBackgroundColor: Color = null,
      readingOrder: CellReadingOrder = null,
      horizontalAlignment: CellHorizontalAlignment = null,
      verticalAlignment: CellVerticalAlignment = null,
      hidden: java.lang.Boolean = null,
      indention: java.lang.Integer = null,
      locked: java.lang.Boolean = null,
      rotation: java.lang.Integer = null,
      wrapText: java.lang.Boolean = null
  ): CellStyle =
    // Option(x) maps null to None, so unset parameters become empty options.
    CellStyle(
      borders = Option(borders),
      dataFormat = Option(dataFormat),
      font = Option(font),
      fillPattern = Option(fillPattern),
      fillForegroundColor = Option(fillForegroundColor),
      fillBackgroundColor = Option(fillBackgroundColor),
      readingOrder = Option(readingOrder),
      horizontalAlignment = Option(horizontalAlignment),
      verticalAlignment = Option(verticalAlignment),
      hidden = Option(hidden).map(_.booleanValue),
      indention = Option(indention).map(_.shortValue),
      locked = Option(locked).map(_.booleanValue),
      rotation = Option(rotation).map(_.shortValue),
      wrapText = Option(wrapText).map(_.booleanValue)
    )
}
/**
 * Immutable description of a spreadsheet cell style. Every attribute is
 * optional; construct instances via the companion `apply` and refine them
 * with the fluent `withX`/`withoutX` methods. Unset attributes can be filled
 * in from another style via [[defaultWith]].
 */
case class CellStyle private (
    borders: Option[CellBorders],
    dataFormat: Option[CellDataFormat],
    font: Option[Font],
    fillPattern: Option[CellFill],
    fillForegroundColor: Option[Color],
    fillBackgroundColor: Option[Color],
    readingOrder: Option[CellReadingOrder],
    horizontalAlignment: Option[CellHorizontalAlignment],
    verticalAlignment: Option[CellVerticalAlignment],
    hidden: Option[Boolean],
    indention: Option[Short],
    locked: Option[Boolean],
    rotation: Option[Short],
    wrapText: Option[Boolean]
) {
  // Only the attributes that are actually set are listed.
  override def toString: String =
    "CellStyle(" + List(
      borders.map("borders=" + _),
      dataFormat.map("data format=" + _),
      font.map("font=" + _),
      fillPattern.map("fill pattern=" + _),
      fillForegroundColor.map("fill foreground color=" + _),
      fillBackgroundColor.map("fill background color=" + _),
      readingOrder.map("reading order=" + _),
      horizontalAlignment.map("horizontal alignment=" + _),
      verticalAlignment.map("vertical alignment=" + _),
      hidden.map("hidden=" + _),
      indention.map("indention=" + _),
      locked.map("locked=" + _),
      rotation.map("rotation=" + _),
      wrapText.map("wrap text=" + _)
    ).flatten.mkString(", ") + ")"
  // Fluent builders: each withX returns a copy with the attribute set,
  // each withoutX returns a copy with it cleared (boolean attributes are
  // cleared by setting them to false explicitly).
  def withBorders(borders: CellBorders): CellStyle =
    copy(borders = Option(borders))
  def withoutBorders: CellStyle =
    copy(borders = None)
  def withDataFormat(dataFormat: CellDataFormat): CellStyle =
    copy(dataFormat = Option(dataFormat))
  def withoutDataFormat: CellStyle =
    copy(dataFormat = None)
  def withFont(font: Font): CellStyle =
    copy(font = Option(font))
  def withoutFont: CellStyle =
    copy(font = None)
  def withFillPattern(fillPattern: CellFill): CellStyle =
    copy(fillPattern = Option(fillPattern))
  def withoutFillPattern: CellStyle =
    copy(fillPattern = None)
  def withFillForegroundColor(fillForegroundColor: Color): CellStyle =
    copy(fillForegroundColor = Option(fillForegroundColor))
  def withoutFillForegroundColor: CellStyle =
    copy(fillForegroundColor = None)
  def withFillBackgroundColor(fillBackgroundColor: Color): CellStyle =
    copy(fillBackgroundColor = Option(fillBackgroundColor))
  def withoutFillBackgroundColor: CellStyle =
    copy(fillBackgroundColor = None)
  def withReadingOrder(readingOrder: CellReadingOrder): CellStyle =
    copy(readingOrder = Option(readingOrder))
  def withoutReadingOrder: CellStyle =
    copy(readingOrder = None)
  def withHorizontalAlignment(horizontalAlignment: CellHorizontalAlignment): CellStyle =
    copy(horizontalAlignment = Option(horizontalAlignment))
  def withoutHorizontalAlignment: CellStyle =
    copy(horizontalAlignment = None)
  def withVerticalAlignment(verticalAlignment: CellVerticalAlignment): CellStyle =
    copy(verticalAlignment = Option(verticalAlignment))
  def withoutVerticalAlignment: CellStyle =
    copy(verticalAlignment = None)
  def withHidden: CellStyle =
    copy(hidden = Some(true))
  def withoutHidden: CellStyle =
    copy(hidden = Some(false))
  def withIndention(indention: Short): CellStyle =
    copy(indention = Option(indention))
  def withoutIndention: CellStyle =
    copy(indention = None)
  def withLocked: CellStyle =
    copy(locked = Some(true))
  def withoutLocked: CellStyle =
    copy(locked = Some(false))
  def withRotation(rotation: Short): CellStyle =
    copy(rotation = Option(rotation))
  def withoutRotation: CellStyle =
    copy(rotation = None)
  def withWrapText: CellStyle =
    copy(wrapText = Some(true))
  def withoutWrapText: CellStyle =
    copy(wrapText = Some(false))
  // "default with": keep the current value when set, otherwise fall back
  // to the supplied default.
  private def dw[T](current: Option[T], default: Option[T]): Option[T] =
    if (current.isDefined) current else default
  // Fonts are merged field-by-field (via Font.defaultWith) rather than
  // replaced wholesale, so a partially-specified font inherits the rest.
  private def defaultFont(defaultCellStyle: CellStyle): Option[Font] =
    if (defaultCellStyle.font.isEmpty) {
      font
    } else if (font.isEmpty) {
      defaultCellStyle.font
    } else {
      Option(font.get.defaultWith(defaultCellStyle.font.get))
    }
  /** Returns this style with every unset attribute filled from `defaultCellStyle`. */
  def defaultWith(defaultCellStyle: CellStyle): CellStyle = CellStyle(
    borders = dw(borders, defaultCellStyle.borders),
    dataFormat = dw(dataFormat, defaultCellStyle.dataFormat),
    font = defaultFont(defaultCellStyle),
    fillPattern = dw(fillPattern, defaultCellStyle.fillPattern),
    fillForegroundColor = dw(fillForegroundColor, defaultCellStyle.fillForegroundColor),
    fillBackgroundColor = dw(fillBackgroundColor, defaultCellStyle.fillBackgroundColor),
    readingOrder = dw(readingOrder, defaultCellStyle.readingOrder),
    horizontalAlignment = dw(horizontalAlignment, defaultCellStyle.horizontalAlignment),
    verticalAlignment = dw(verticalAlignment, defaultCellStyle.verticalAlignment),
    hidden = dw(hidden, defaultCellStyle.hidden),
    indention = dw(indention, defaultCellStyle.indention),
    locked = dw(locked, defaultCellStyle.locked),
    rotation = dw(rotation, defaultCellStyle.rotation),
    wrapText = dw(wrapText, defaultCellStyle.wrapText)
  )
}
| norbert-radyk/spoiwo | core/src/main/scala/spoiwo/model/CellStyle.scala | Scala | mit | 6,509 |
package com.zkay
/*
* The substitution model of evaluation (pg 30, listing 1.12)
*/
// Demonstrates the substitution model of evaluation: each call is expanded
// by substituting arguments into definitions, then reduced step by step
// (worked trace for f(5) below).
object Listing1_12 {
  // f(a) = (a + 1)^2 + (2a)^2
  def f(a: Int) = sum_of_squares(a + 1, a * 2)
  def sum_of_squares(a: Int, b: Int) = square(a) + square(b)
  def square(a: Int) = a * a
  /*
  f(5) is the same as..
  sum_of_squares(5 + 1, 5 * 2) is the same as..
  square(6) + square(10) is the same as..
  6*6+10*10 is the same as..
  36+100 is the same as..
  136
   */
} | zkay/bookclub_notes | FunctionalAndReactiveDomainModeling/chapter1/src/main/scala/Listing1_12.scala | Scala | apache-2.0 | 434
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.network
import java.net.InetAddress
import java.nio.ByteBuffer
import java.util.Collections
import java.util.concurrent._
import com.yammer.metrics.core.Gauge
import kafka.api.{ControlledShutdownRequest, RequestOrResponse}
import kafka.metrics.KafkaMetricsGroup
import kafka.server.QuotaId
import kafka.utils.{Logging, NotNothing}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.InvalidRequestException
import org.apache.kafka.common.network.{ListenerName, Send}
import org.apache.kafka.common.protocol.{ApiKeys, Protocol, SecurityProtocol}
import org.apache.kafka.common.record.{RecordBatch, MemoryRecords}
import org.apache.kafka.common.requests._
import org.apache.kafka.common.security.auth.KafkaPrincipal
import org.apache.kafka.common.utils.Time
import org.apache.log4j.Logger
import scala.reflect.ClassTag
object RequestChannel extends Logging {
val AllDone = Request(processor = 1, connectionId = "2", Session(KafkaPrincipal.ANONYMOUS, InetAddress.getLocalHost),
buffer = shutdownReceive, startTimeNanos = 0, listenerName = new ListenerName(""),
securityProtocol = SecurityProtocol.PLAINTEXT)
private val requestLogger = Logger.getLogger("kafka.request.logger")
  /**
   * Serializes an empty ProduceRequest (v-current magic, no partition data);
   * used as the buffer payload of the [[AllDone]] sentinel request.
   */
  private def shutdownReceive: ByteBuffer = {
    val emptyProduceRequest = new ProduceRequest.Builder(RecordBatch.CURRENT_MAGIC_VALUE, 0, 0,
      Collections.emptyMap[TopicPartition, MemoryRecords]).build()
    val emptyRequestHeader = new RequestHeader(ApiKeys.PRODUCE.id, emptyProduceRequest.version, "", 0)
    emptyProduceRequest.serialize(emptyRequestHeader)
  }
  /** Identity of a connected client: authenticated principal plus remote address. */
  case class Session(principal: KafkaPrincipal, clientAddress: InetAddress) {
    // Principal name sanitized via QuotaId.sanitize (presumably for use in
    // quota/metric names — confirm against QuotaId).
    val sanitizedUser = QuotaId.sanitize(principal.getName)
  }
case class Request(processor: Int, connectionId: String, session: Session, private var buffer: ByteBuffer,
startTimeNanos: Long, listenerName: ListenerName, securityProtocol: SecurityProtocol) {
// These need to be volatile because the readers are in the network thread and the writers are in the request
// handler threads or the purgatory threads
@volatile var requestDequeueTimeNanos = -1L
@volatile var apiLocalCompleteTimeNanos = -1L
@volatile var responseCompleteTimeNanos = -1L
@volatile var responseDequeueTimeNanos = -1L
@volatile var apiRemoteCompleteTimeNanos = -1L
@volatile var recordNetworkThreadTimeCallback: Option[Long => Unit] = None
val requestId = buffer.getShort()
// TODO: this will be removed once we remove support for v0 of ControlledShutdownRequest (which
// depends on a non-standard request header)
val requestObj: RequestOrResponse = if (requestId == ApiKeys.CONTROLLED_SHUTDOWN_KEY.id)
ControlledShutdownRequest.readFrom(buffer)
else
null
// if we failed to find a server-side mapping, then try using the
// client-side request / response format
val header: RequestHeader =
if (requestObj == null) {
buffer.rewind
try RequestHeader.parse(buffer)
catch {
case ex: Throwable =>
throw new InvalidRequestException(s"Error parsing request header. Our best guess of the apiKey is: $requestId", ex)
}
} else
null
val bodyAndSize: RequestAndSize =
if (requestObj == null)
try {
// For unsupported version of ApiVersionsRequest, create a dummy request to enable an error response to be returned later
if (header.apiKey == ApiKeys.API_VERSIONS.id && !Protocol.apiVersionSupported(header.apiKey, header.apiVersion)) {
new RequestAndSize(new ApiVersionsRequest.Builder().build(), 0)
}
else
AbstractRequest.getRequest(header.apiKey, header.apiVersion, buffer)
} catch {
case ex: Throwable =>
throw new InvalidRequestException(s"Error getting request for apiKey: ${header.apiKey} and apiVersion: ${header.apiVersion}", ex)
}
else
null
buffer = null
def requestDesc(details: Boolean): String = {
if (requestObj != null)
requestObj.describe(details)
else
s"$header -- ${body[AbstractRequest].toString(details)}"
}
def body[T <: AbstractRequest](implicit classTag: ClassTag[T], nn: NotNothing[T]): T = {
bodyAndSize.request match {
case r: T => r
case r =>
throw new ClassCastException(s"Expected request with type ${classTag.runtimeClass}, but found ${r.getClass}")
}
}
trace("Processor %d received request : %s".format(processor, requestDesc(true)))
def requestThreadTimeNanos = {
if (apiLocalCompleteTimeNanos == -1L) apiLocalCompleteTimeNanos = Time.SYSTEM.nanoseconds
math.max(apiLocalCompleteTimeNanos - requestDequeueTimeNanos, 0L)
}
/** Records all per-request time metrics once the response has been fully sent.
  * Splits the request lifetime into queue/local/remote/throttle/response-queue/
  * send segments, updates the per-API histograms, reports network thread usage
  * for quota accounting, and optionally emits the request log line.
  *
  * @param networkThreadTimeNanos time spent on the network thread receiving this
  *        request and sending its response
  */
def updateRequestMetrics(networkThreadTimeNanos: Long) {
val endTimeNanos = Time.SYSTEM.nanoseconds
// In some corner cases, apiLocalCompleteTimeNanos may not be set when the request completes if the remote
// processing time is really small. This value is set in KafkaApis from a request handling thread.
// This may be read in a network thread before the actual update happens in KafkaApis which will cause us to
// see a negative value here. In that case, use responseCompleteTimeNanos as apiLocalCompleteTimeNanos.
if (apiLocalCompleteTimeNanos < 0)
apiLocalCompleteTimeNanos = responseCompleteTimeNanos
// If the apiRemoteCompleteTimeNanos is not set (i.e., for requests that do not go through a purgatory), then it is
// the same as responseCompleteTimeNanos.
if (apiRemoteCompleteTimeNanos < 0)
apiRemoteCompleteTimeNanos = responseCompleteTimeNanos
// millisecond conversion clamped at zero so clock skew never yields negative metrics
def nanosToMs(nanos: Long) = math.max(TimeUnit.NANOSECONDS.toMillis(nanos), 0)
val requestQueueTime = nanosToMs(requestDequeueTimeNanos - startTimeNanos)
val apiLocalTime = nanosToMs(apiLocalCompleteTimeNanos - requestDequeueTimeNanos)
val apiRemoteTime = nanosToMs(apiRemoteCompleteTimeNanos - apiLocalCompleteTimeNanos)
val apiThrottleTime = nanosToMs(responseCompleteTimeNanos - apiRemoteCompleteTimeNanos)
val responseQueueTime = nanosToMs(responseDequeueTimeNanos - responseCompleteTimeNanos)
val responseSendTime = nanosToMs(endTimeNanos - responseDequeueTimeNanos)
val totalTime = nanosToMs(endTimeNanos - startTimeNanos)
// Fetch requests are additionally recorded under a consumer- or follower-specific name
val fetchMetricNames =
if (requestId == ApiKeys.FETCH.id) {
val isFromFollower = body[FetchRequest].isFromFollower
Seq(
if (isFromFollower) RequestMetrics.followFetchMetricName
else RequestMetrics.consumerFetchMetricName
)
}
else Seq.empty
val metricNames = fetchMetricNames :+ ApiKeys.forId(requestId).name
metricNames.foreach { metricName =>
val m = RequestMetrics.metricsMap(metricName)
m.requestRate.mark()
m.requestQueueTimeHist.update(requestQueueTime)
m.localTimeHist.update(apiLocalTime)
m.remoteTimeHist.update(apiRemoteTime)
m.throttleTimeHist.update(apiThrottleTime)
m.responseQueueTimeHist.update(responseQueueTime)
m.responseSendTimeHist.update(responseSendTime)
m.totalTimeHist.update(totalTime)
}
// Records network handler thread usage. This is included towards the request quota for the
// user/client. Throttling is only performed when request handler thread usage
// is recorded, just before responses are queued for delivery.
// The time recorded here is the time spent on the network thread for receiving this request
// and sending the response. Note that for the first request on a connection, the time includes
// the total time spent on authentication, which may be significant for SASL/SSL.
recordNetworkThreadTimeCallback.foreach(record => record(networkThreadTimeNanos))
if (requestLogger.isDebugEnabled) {
val detailsEnabled = requestLogger.isTraceEnabled
// fractional-millisecond conversion used only for the human-readable log line
def nanosToMs(nanos: Long) = TimeUnit.NANOSECONDS.toMicros(math.max(nanos, 0)).toDouble / TimeUnit.MILLISECONDS.toMicros(1)
val totalTimeMs = nanosToMs(endTimeNanos - startTimeNanos)
val requestQueueTimeMs = nanosToMs(requestDequeueTimeNanos - startTimeNanos)
val apiLocalTimeMs = nanosToMs(apiLocalCompleteTimeNanos - requestDequeueTimeNanos)
val apiRemoteTimeMs = nanosToMs(apiRemoteCompleteTimeNanos - apiLocalCompleteTimeNanos)
val apiThrottleTimeMs = nanosToMs(responseCompleteTimeNanos - apiRemoteCompleteTimeNanos)
val responseQueueTimeMs = nanosToMs(responseDequeueTimeNanos - responseCompleteTimeNanos)
val responseSendTimeMs = nanosToMs(endTimeNanos - responseDequeueTimeNanos)
requestLogger.debug("Completed request:%s from connection %s;totalTime:%f,requestQueueTime:%f,localTime:%f,remoteTime:%f,throttleTime:%f,responseQueueTime:%f,sendTime:%f,securityProtocol:%s,principal:%s,listener:%s"
.format(requestDesc(detailsEnabled), connectionId, totalTimeMs, requestQueueTimeMs, apiLocalTimeMs, apiRemoteTimeMs, apiThrottleTimeMs, responseQueueTimeMs, responseSendTimeMs, securityProtocol, session.principal, listenerName.value))
}
}
}
/** Factory helpers for building a [[Response]] carrying the [[SendAction]]. */
object Response {
/** Wraps an already-serialized response `Send`. */
def apply(request: Request, responseSend: Send): Response = {
require(request != null, "request should be non null")
require(responseSend != null, "responseSend should be non null")
new Response(request, Some(responseSend), SendAction)
}
/** Serializes `response` for the request's connection, then wraps it. */
def apply(request: Request, response: AbstractResponse): Response = {
require(request != null, "request should be non null")
require(response != null, "response should be non null")
apply(request, response.toSend(request.connectionId, request.header))
}
}
/** A response queued for delivery back to the client.
  * Note the constructor has side effects: it stamps the request's
  * `responseCompleteTimeNanos` (and `apiLocalCompleteTimeNanos`, if still
  * unset) with the current time so the metrics code sees when handling ended.
  */
case class Response(request: Request, responseSend: Option[Send], responseAction: ResponseAction) {
request.responseCompleteTimeNanos = Time.SYSTEM.nanoseconds
if (request.apiLocalCompleteTimeNanos == -1L) request.apiLocalCompleteTimeNanos = Time.SYSTEM.nanoseconds
// processor (network thread) that owns the connection and must deliver this response
def processor: Int = request.processor
}
/** What the processor should do with a queued [[Response]]. */
trait ResponseAction
/** Send the serialized response to the client. */
case object SendAction extends ResponseAction
/** No response data to send for this request. */
case object NoOpAction extends ResponseAction
/** Close the connection instead of sending a response. */
case object CloseConnectionAction extends ResponseAction
}
/** Queue-based hand-off between network threads (processors) and request
  * handler threads: one shared request queue, one response queue per
  * processor, plus gauges exposing the queue depths.
  *
  * @param numProcessors number of network processor threads (one response queue each)
  * @param queueSize     capacity of the shared request queue
  */
class RequestChannel(val numProcessors: Int, val queueSize: Int) extends KafkaMetricsGroup {
  private var responseListeners: List[(Int) => Unit] = Nil
  private val requestQueue = new ArrayBlockingQueue[RequestChannel.Request](queueSize)
  private val responseQueues: Array[BlockingQueue[RequestChannel.Response]] =
    Array.fill[BlockingQueue[RequestChannel.Response]](numProcessors)(new LinkedBlockingQueue[RequestChannel.Response]())

  // depth of the shared request queue
  newGauge(
    "RequestQueueSize",
    new Gauge[Int] {
      def value = requestQueue.size
    }
  )

  // aggregate depth across all response queues
  newGauge("ResponseQueueSize", new Gauge[Int] {
    def value = responseQueues.map(_.size()).sum
  })

  // per-processor response queue depth, distinguished by the "processor" tag
  (0 until numProcessors).foreach { processorId =>
    newGauge("ResponseQueueSize",
      new Gauge[Int] {
        def value = responseQueues(processorId).size()
      },
      Map("processor" -> processorId.toString)
    )
  }

  /** Send a request to be handled, potentially blocking until there is room in the queue for the request */
  def sendRequest(request: RequestChannel.Request): Unit = {
    requestQueue.put(request)
  }

  /** Send a response back to the socket server to be sent over the network */
  def sendResponse(response: RequestChannel.Response): Unit = {
    responseQueues(response.processor).put(response)
    // wake up the owning processor via every registered listener
    responseListeners.foreach(listener => listener(response.processor))
  }

  /** Get the next request or block until specified time has elapsed */
  def receiveRequest(timeout: Long): RequestChannel.Request =
    requestQueue.poll(timeout, TimeUnit.MILLISECONDS)

  /** Get the next request or block until there is one */
  def receiveRequest(): RequestChannel.Request =
    requestQueue.take()

  /** Get a response for the given processor if there is one; stamps the
    * response-dequeue time for metrics when a response is present. */
  def receiveResponse(processor: Int): RequestChannel.Response = {
    val response = responseQueues(processor).poll()
    if (response != null)
      response.request.responseDequeueTimeNanos = Time.SYSTEM.nanoseconds
    response
  }

  /** Register a callback invoked (with the processor id) whenever a response is enqueued. */
  def addResponseListener(onResponse: Int => Unit): Unit = {
    responseListeners = onResponse :: responseListeners
  }

  /** Discard all pending requests. */
  def shutdown(): Unit = {
    requestQueue.clear()
  }
}
/** Registry of per-request-type metrics, keyed by metric name. Fetch requests
  * get two extra entries so consumer and follower fetches are tracked separately. */
object RequestMetrics {
  val metricsMap = new scala.collection.mutable.HashMap[String, RequestMetrics]
  val consumerFetchMetricName = ApiKeys.FETCH.name + "Consumer"
  val followFetchMetricName = ApiKeys.FETCH.name + "Follower"

  {
    // one entry per API key, plus the two fetch-specific names
    val allMetricNames =
      ApiKeys.values().toList.map(_.name) ::: List(consumerFetchMetricName, followFetchMetricName)
    allMetricNames.foreach { metricName =>
      metricsMap.put(metricName, new RequestMetrics(metricName))
    }
  }
}
/** Rate and latency metrics for a single request type, tagged with its name. */
class RequestMetrics(name: String) extends KafkaMetricsGroup {
  val tags = Map("request" -> name)

  // all latency histograms share the same biased-sampling configuration and tags
  private def latencyHistogram(metricName: String) = newHistogram(metricName, biased = true, tags)

  val requestRate = newMeter("RequestsPerSec", "requests", TimeUnit.SECONDS, tags)
  // time a request spent in a request queue
  val requestQueueTimeHist = latencyHistogram("RequestQueueTimeMs")
  // time a request takes to be processed at the local broker
  val localTimeHist = latencyHistogram("LocalTimeMs")
  // time a request takes to wait on remote brokers (currently only relevant to fetch and produce requests)
  val remoteTimeHist = latencyHistogram("RemoteTimeMs")
  // time a request is throttled (only relevant to fetch and produce requests)
  val throttleTimeHist = latencyHistogram("ThrottleTimeMs")
  // time a response spent in a response queue
  val responseQueueTimeHist = latencyHistogram("ResponseQueueTimeMs")
  // time to send the response to the requester
  val responseSendTimeHist = latencyHistogram("ResponseSendTimeMs")
  // end-to-end time from request arrival to response sent
  val totalTimeHist = latencyHistogram("TotalTimeMs")
}
| wangcy6/storm_app | frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/main/scala/kafka/network/RequestChannel.scala | Scala | apache-2.0 | 14,813 |
package com.vwo.oldmonk
// Package object mixing in ProvidesLongestCommonSubsequence so its members are
// available to any code importing com.vwo.oldmonk.diffs._
package object diffs extends ProvidesLongestCommonSubsequence
| wingify/Oldmonk | src/main/scala/com/vwo/oldmonk/diffs/package.scala | Scala | gpl-3.0 | 87 |
/**
* Copyright 2016 Rad Gruchalski (radek@gruchalski.com)
* Licensed under the Apache License, Version 2.0.
*/
package uk.co.appministry.scathon.models.v2
import uk.co.appministry.scathon.models.util.EnumUtils
import uk.co.appministry.scathon.models.v2.util.VersionUtils
import org.joda.time.{DateTime, DateTimeZone}
trait MarathonApiObject
/** Enumeration of Marathon event type names (values match the wire strings). */
object EventTypes extends Enumeration {
type EventType = Value
val api_post_event = Value("api_post_event")
val status_update_event = Value("status_update_event")
val framework_message_event = Value("framework_message_event")
val subscribe_event = Value("subscribe_event")
val unsubscribe_event = Value("unsubscribe_event")
val add_health_check_event = Value("add_health_check_event")
val remove_health_check_event = Value("remove_health_check_event")
val failed_health_check_event = Value("failed_health_check_event")
val health_status_changed_event = Value("health_status_changed_event")
val unhealthy_task_kill_event = Value("unhealthy_task_kill_event")
val group_change_success = Value("group_change_success")
val group_change_failed = Value("group_change_failed")
val deployment_success = Value("deployment_success")
val deployment_failed = Value("deployment_failed")
val deployment_info = Value("deployment_info")
val deployment_step_success = Value("deployment_step_success")
val deployment_step_failure = Value("deployment_step_failure")
}
/** Task status values carried by status_update_event messages. */
object StatusUpdateEventTypes extends Enumeration {
type StatusUpdateEventType = Value
val TASK_STAGING = Value("TASK_STAGING")
val TASK_STARTING = Value("TASK_STARTING")
val TASK_RUNNING = Value("TASK_RUNNING")
val TASK_FINISHED = Value("TASK_FINISHED")
val TASK_FAILED = Value("TASK_FAILED")
val TASK_KILLED = Value("TASK_KILLED")
val TASK_LOST = Value("TASK_LOST")
}
/** Transport protocols usable in container port mappings. */
object PortMappingTypes extends Enumeration {
type PortMappingType = Value
val TCP = Value("tcp")
val UDP = Value("udp")
}
/** Application-level protocols (e.g. for health checks). */
object ProtocolTypes extends Enumeration {
type ProtocolType = Value
val HTTP = Value("HTTP")
val HTTPS = Value("HTTPS")
}
/** Docker networking modes supported by the API. */
object DockerNetworkTypes extends Enumeration {
type DockerNetworkType = Value
val BRIDGE = Value("BRIDGE")
val HOST = Value("HOST")
}
/** Containerizer types supported by the API. */
object ContainerTypes extends Enumeration {
type ContainerType = Value
val DOCKER = Value("DOCKER")
val MESOS = Value("MESOS")
}
/** Deployment step action names (note: wire values are CamelCase, unlike the other enums). */
object DeploymentActionTypes extends Enumeration {
type DeploymentActionType = Value
val START_APPLICATION = Value("StartApplication")
val STOP_APPLICATION = Value("StopApplication")
val SCALE_APPLICATION = Value("ScaleApplication")
val RESTART_APPLICATION = Value("RestartApplication")
val RESOLVE_ARTIFACTS = Value("ResolveArtifacts")
val KILL_ALL_OLD_TASKS_OF = Value("KillAllOldTasksOf")
}
/** Mixin bundling JSON formats for every enumeration defined in this file. */
trait EnumParser {
implicit val enumEventTypesFormat = EnumUtils.enumFormat(EventTypes)
implicit val enumStatusUpdateEventTypesFormat = EnumUtils.enumFormat(StatusUpdateEventTypes)
implicit val enumPortMappingTypesFormat = EnumUtils.enumFormat(PortMappingTypes)
implicit val enumProtocolTypesFormat = EnumUtils.enumFormat(ProtocolTypes)
implicit val enumDockerNetworkTypesFormat = EnumUtils.enumFormat(DockerNetworkTypes)
implicit val enumContainerTypesFormat = EnumUtils.enumFormat(ContainerTypes)
implicit val enumDeploymentActionTypesFormat = EnumUtils.enumFormat(DeploymentActionTypes)
}
/** Helpers for Marathon "version" timestamps.
  * Note the three `apply` overloads do different things: no-arg returns the
  * current UTC time, `apply(DateTime)` formats to the wire string, and
  * `apply(String)` parses a wire string back into a DateTime.
  */
object Version {
def apply():DateTime = {
new DateTime(DateTimeZone.UTC)
}
def apply(dt: DateTime):String = {
VersionUtils.format.print(dt)
}
def apply(value: String): DateTime = {
VersionUtils.format.parseDateTime(value)
}
}
/** Mixin exposing the JSON format for version timestamps. */
trait VersionParser {
implicit val versionFormat = VersionUtils.dateTimeformat
} | AppMinistry/scathon | scathon-models/src/main/scala/uk/co/appministry/scathon/models/v2/RestApi.Shared.scala | Scala | apache-2.0 | 3,710 |
/*
Copyright 2016 ScalABM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.economicsl.agora.markets.auctions.pricing
import org.economicsl.agora.markets.tradables.{LimitPrice, Price}
import org.economicsl.agora.markets.tradables.orders.{Order, Persistent}
/** Discriminatory pricing rule that prices each match as a convex combination
  * of the incoming and existing limit prices.
  *
  * @param weight the weight placed on the limit `Price` of the incoming `Order`
  *               when computing the `Price`; must lie in [0, 1]
  * @tparam I the incoming `Order` type.
  * @tparam E the existing `Order with Persistent` type.
  */
class WeightedAveragePricingRule[-I <: Order with LimitPrice, -E <: Order with LimitPrice with Persistent](weight: Double)
  extends DiscriminatoryPricingRule[I, E] {

  require(0 <= weight && weight <= 1, "Price must be individually rational!")

  def apply(incoming: I, existing: E): Price = {
    // weighted average of the two limit prices, weight applied to the incoming order
    val blendedLimit = weight * incoming.limit.value + (1 - weight) * existing.limit.value
    Price(blendedLimit)
  }
}
/** Companion object for the `WeightedAveragePricingRule` class.
  *
  * Provides auxiliary constructor.
  */
object WeightedAveragePricingRule {
/** Builds a rule with the given incoming-order weight (must be in [0, 1]). */
def apply[I <: Order with LimitPrice, E <: Order with LimitPrice with Persistent]
(weight: Double): WeightedAveragePricingRule[I, E] = {
new WeightedAveragePricingRule[I, E](weight)
}
}
} | EconomicSL/agora | src/main/scala/org/economicsl/agora/markets/auctions/pricing/WeightedAveragePricingRule.scala | Scala | apache-2.0 | 1,797 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.tailhq.dynaml.prototype
/**
 * Trait which outlines basic behavior
 * of a subset utility measure.
 */
trait Measure[T] {
// Computes a scalar utility score for the given data subset.
// NOTE(review): whether higher or lower scores are better is not defined
// here — confirm against implementations before relying on ordering.
def evaluate(data: List[T]): Double
}
| mandar2812/DynaML | dynaml-core/src/main/scala/io/github/tailhq/dynaml/prototype/Measure.scala | Scala | apache-2.0 | 982 |
/*
* Copyright 2017 Ahmad Mozafarnia
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ir.bama.models
import java.time.LocalDateTime
import ir.bama.models.CarCategory.CarCategory
import ir.bama.models.CarChassis.CarChassis
import ir.bama.models.CarDifferential.CarDifferential
import ir.bama.models.CarFuelType.CarFuelType
import ir.bama.models.CarGearBox.CarGearBox
import ir.bama.models.CarStatus.CarStatus
import ir.bama.models.PaymentPeriod.PaymentPeriod
import ir.bama.models.PaymentType.PaymentType
import ir.bama.models.SellAdStatus.SellAdStatus
import ir.bama.utils.{Dates, Enums}
import play.api.libs.json._
/**
* @author ahmad
*/
/** A car sell advertisement, with its seller/location, submission history,
  * payment terms, the car being sold, and optional view statistics. */
case class SellAd(id: Option[Long], seller: Option[Seller[_]], city: Option[City],
venue: String, phoneNumber: Option[String],
submissionDates: Option[Seq[LocalDateTime]], lastSubmissionDate: LocalDateTime,
count: Int, soldCount: Int, status: SellAdStatus,
payment: Payment, car: Car, stats: Option[SellAdStats]) extends Identifiable
/** JSON (de)serialization; dates use the custom format from [[Dates]]. */
object SellAd {
implicit val dateFormat: Format[LocalDateTime] = Dates.dateFormat
implicit val format: OFormat[SellAd] = Json.format[SellAd]
}
/** Base type for payment terms; subtypes define how initial/final prices derive. */
abstract class Payment(val `type`: PaymentType) {
val initialPrice: Long
val finalPrice: Long
}
/** Polymorphic JSON format for [[Payment]]: a "type" discriminator field
  * selects the concrete subtype on read and is re-attached on write, together
  * with the derived initialPrice/finalPrice fields. */
object Payment {
private val reads: Reads[Payment] = Reads[Payment] {
case o: JsObject =>
// dispatch on the "type" discriminator to the matching subtype reader
(o \\ "type").toOption.map {
PaymentType.format.reads(_) flatMap {
case PaymentType.CREDIT => CreditPayment.format.reads(o)
case PaymentType.INSTALLMENT => InstallmentPayment.format.reads(o)
}
} getOrElse JsError("Could not find property: type")
case _ => JsError("Json object expected")
}
private val writes: Writes[Payment] = Writes[Payment] { o =>
// serialize the concrete subtype, then append the derived price fields
(o match {
case x: CreditPayment => Json.toJson(x)(CreditPayment.format).as[JsObject] + ("initialPrice" -> JsNumber(x.initialPrice))
case x: InstallmentPayment => Json.toJson(x)(InstallmentPayment.format).as[JsObject] ++ Json.obj(
"initialPrice" -> JsNumber(x.initialPrice),
"finalPrice" -> JsNumber(x.finalPrice)
)
}).as[JsObject] + ("type" -> Json.toJson(o.`type`))
}
implicit val format: Format[Payment] = Format(reads, writes)
}
/** Discriminator for the two payment variants. */
object PaymentType extends Enumeration {
val CREDIT = Value("CREDIT")
val INSTALLMENT = Value("INSTALLMENT")
type PaymentType = Value
implicit val format: Format[PaymentType] = Enums.enumFormat(PaymentType)
}
/** Installment cadence; each value carries a day-count factor so tick counts
  * can be converted to days via `toDays`. */
object PaymentPeriod extends Enumeration {
val DAILY = new Period("DAILY", 1)
val WEEKLY = new Period("WEEKLY", 7)
val MONTHLY = new Period("MONTHLY", 30)
type PaymentPeriod = Value
protected class Period(name: String, factor: Int) extends Val(name) {
// converts a number of periods into days (MONTHLY approximated as 30 days)
def toDays(count: Int): Int = count * factor
}
implicit def valueToPeriod(value: Value): Period = value.asInstanceOf[Period]
implicit val format: Format[PaymentPeriod] = Enums.enumFormat(PaymentPeriod)
}
/** One-shot payment: the initial price equals the final price. */
case class CreditPayment(finalPrice: Long) extends Payment(PaymentType.CREDIT) {
override val initialPrice: Long = finalPrice
}
object CreditPayment {
implicit val format: OFormat[CreditPayment] = Json.format[CreditPayment]
}
/** Payment in installments: initial price is the sum of prepayments (0 when
  * absent) and final price adds all installments on top. */
case class InstallmentPayment(prepayments: Option[Seq[Long]], period: PaymentPeriod, ticks: Int,
numberOfInstallments: Int, amountPerInstallment: Long)
extends Payment(PaymentType.INSTALLMENT) {
override lazy val initialPrice: Long = prepayments.map(_.sum).getOrElse(0L)
override lazy val finalPrice: Long = initialPrice + numberOfInstallments * amountPerInstallment
}
object InstallmentPayment {
implicit val format: OFormat[InstallmentPayment] = Json.format[InstallmentPayment]
// Alternative constructor that pins initial/final prices to the given values
// (overriding the derived lazy vals) — used when prices are already known.
def apply(ip: Long, fp: Long, period: PaymentPeriod, ticks: Int,
numberOfInstallments: Int, amountPerInstallment: Long): InstallmentPayment =
new InstallmentPayment(None, period, ticks, numberOfInstallments, amountPerInstallment) {
override lazy val initialPrice: Long = ip
override lazy val finalPrice: Long = fp
}
}
/** Lifecycle states of a sell advertisement. */
object SellAdStatus extends Enumeration {
val SUBMITTED = Value("SUBMITTED")
val RESUBMITTED = Value("RESUBMITTED")
val CANCELLED = Value("CANCELLED")
val SOLD_OUT = Value("SOLD_OUT")
type SellAdStatus = Value
implicit val format: Format[SellAdStatus] = Enums.enumFormat(SellAdStatus)
}
/** The car described by an advertisement: model/year plus physical attributes. */
case class Car(model: Option[CarModel], year: Int,
chassis: CarChassis, differential: CarDifferential,
category: CarCategory, status: CarStatus, mileage: Int,
gearBox: CarGearBox, fuelType: CarFuelType,
bodyDescription: String, bodyColor: String, cabinColor: String,
photos: Option[Seq[String]])
object Car {
implicit val format: OFormat[Car] = Json.format[Car]
}
case class CarBrand(id: Option[Long], name: String)
object CarBrand {
implicit val format: OFormat[CarBrand] = Json.format[CarBrand]
// Builds an id-only reference; note name is deliberately left null here.
// NOTE(review): serializing such a reference would fail on the null name —
// presumably only used internally for lookups; confirm against callers.
def id(id: Long) = CarBrand(Some(id), null)
}
case class CarModel(id: Option[Long], brand: Option[CarBrand], name: String)
object CarModel {
implicit val format: OFormat[CarModel] = Json.format[CarModel]
// Id-only reference with null name — same caveat as CarBrand.id above.
def id(id: Long) = CarModel(Some(id), None, null)
}
/** Body/chassis styles. */
object CarChassis extends Enumeration {
val SEDAN = Value("SEDAN")
val SUV = Value("SUV")
val PICKUP = Value("PICKUP")
val COUPE = Value("COUPE")
val CONVERTIBLE = Value("CONVERTIBLE")
val VAN = Value("VAN")
type CarChassis = Value
implicit val format: Format[CarChassis] = Enums.enumFormat(CarChassis)
}
/** Registration categories of the car. */
object CarCategory extends Enumeration {
val NORMAL = Value("NORMAL")
val FREE_ZONE = Value("FREE_ZONE")
val TEMPORARY = Value("TEMPORARY")
val COLLECTIBLE = Value("COLLECTIBLE")
type CarCategory = Value
implicit val format: Format[CarCategory] = Enums.enumFormat(CarCategory)
}
/** Condition/paperwork status of the car. */
object CarStatus extends Enumeration {
val NEW = Value("NEW")
val USED = Value("USED")
val CARD_INDEX = Value("CARD_INDEX")
val DRAFT = Value("DRAFT")
type CarStatus = Value
implicit val format: Format[CarStatus] = Enums.enumFormat(CarStatus)
}
/** Gearbox types. */
object CarGearBox extends Enumeration {
val MANUAL = Value("MANUAL")
val AUTO = Value("AUTO")
type CarGearBox = Value
implicit val format: Format[CarGearBox] = Enums.enumFormat(CarGearBox)
}
/** Fuel types. */
object CarFuelType extends Enumeration {
val GASOLINE = Value("GASOLINE")
val HYBRID = Value("HYBRID")
val BI_FUEL = Value("BI_FUEL")
val DIESEL = Value("DIESEL")
type CarFuelType = Value
implicit val format: Format[CarFuelType] = Enums.enumFormat(CarFuelType)
}
/** Drivetrain layouts (front-, rear-, four-wheel drive). */
object CarDifferential extends Enumeration {
val FWD = Value("FWD")
val RWD = Value("RWD")
val `4WD` = Value("4WD")
type CarDifferential = Value
implicit val format: Format[CarDifferential] = Enums.enumFormat(CarDifferential)
}
/** View counters for an ad: how often the ad and its phone number were seen. */
case class SellAdStats(adId: Long, adViews: Int, phoneNumberViews: Int)
object SellAdStats {
implicit val format: OFormat[SellAdStats] = Json.format[SellAdStats]
}
| ahmadmo/bama-api-demo | app/ir/bama/models/SellAd.scala | Scala | apache-2.0 | 7,547 |
package org.openmole.buildsystem
import sbt._
import scala.util.matching.Regex
/**
* Created with IntelliJ IDEA.
* User: luft
* Date: 6/5/13
* Time: 3:43 PM
*/
/** Custom sbt setting/task keys used across the OpenMOLE build. */
object OMKeys {
val bundleType = SettingKey[Set[String]]("bundle-type") //Default setting for osgiprojects is default.
val openMoleStandardVer = SettingKey[String]("openmole-version")
val assemblyPath = SettingKey[File]("assemblyPath", "A setting to control assembly outputs directory.")
val assemblyDependenciesPath = SettingKey[File]("assemblyDependenciesPath", "A setting to control assembly outputs directory for dependencies.")
val install = TaskKey[Unit]("install", "Builds bundles and adds them to the local repo")
val installRemote = TaskKey[Unit]("install-remote", "Builds bundles and adds them to the openmole nexus server")
val assemble = TaskKey[File]("assemble", "The path with assembled project")
val setExecutable = SettingKey[Seq[String]]("setExecutable", "Sets the path relative to the assemble folder executable")
val downloads = SettingKey[Seq[(URL, String)]]("downloads", "A project setting that describes a urls to download")
val resourcesAssemble = TaskKey[Seq[(File, File)]]("resourcesAssemble", "A set of (in,out) tuples that specify where to find the resource (in) and what sub-path of assembly to put it in (out)")
val ignoreTransitive = SettingKey[Boolean]("ignoreTransitive")
val dependencyFilter = SettingKey[(ModuleID, Artifact) ⇒ Boolean]("dependency-filter", "Tells copyDependencies to ignore certain dependencies.")
val dependencyName = SettingKey[ModuleID ⇒ String]("dependency-map", "A map that is run against dependencies to be copied.")
val scalatestVersion = SettingKey[String]("scalatest-version", "Version of scalatest.")
val junitVersion = SettingKey[String]("junit-version", "Version of junit.")
// Keys specific to OSGi bundle construction.
object Osgi {
val singleton = SettingKey[Boolean]("osgi-singleton")
val openMOLEScope = SettingKey[Seq[String]]("openmole-scope")
val bundleDependencies = TaskKey[Seq[File]]("bundle-dependencies")
}
}
| openmole/openmole | build-system/src/main/scala/org/openmole/buildsystem/OMKeys.scala | Scala | agpl-3.0 | 2,073 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.dataload
import org.apache.spark.sql.Row
import org.apache.spark.sql.common.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
/**
* Test Class for table block size
*
*/
/**
 * Test Class for table block size
 *
 * Exercises the TBLPROPERTIES('table_blocksize') option: rejects values out of
 * range or non-integer, and checks that loading/querying works with a valid one.
 */
class TestTableLevelBlockSize extends QueryTest with BeforeAndAfterAll{
val testData1 = s"$resourcesPath/dimSample.csv"
val testData2 = s"$resourcesPath/example-data.csv"
override def beforeAll {
sql("DROP TABLE IF EXISTS table_blocksize1")
sql("DROP TABLE IF EXISTS table_blocksize2")
sql("DROP TABLE IF EXISTS table_blocksize3")
}
// blocksize outside the allowed [1, 2048] MB range must be rejected at CREATE time
test("Value test: set table level blocksize value beyong [1,2048]") {
try {
sql(
"""
CREATE TABLE IF NOT EXISTS table_blocksize1
(ID Int, date Timestamp, country String,
name String, phonetype String, serialname String, salary Int)
STORED BY 'org.apache.carbondata.format'
TBLPROPERTIES('table_blocksize'='4096 MB')
""")
// reaching here means no exception was thrown — fail the test
assert(false)
} catch {
case e : MalformedCarbonCommandException => {
assert(e.getMessage.equals("Invalid table_blocksize value found: 4096, " +
"only int value from 1 MB to 2048 MB is supported."))
}
}
}
// non-integer blocksize values must be rejected at CREATE time
test("Value test: set table level blocksize in not int value") {
try {
sql(
"""
CREATE TABLE IF NOT EXISTS table_blocksize2
(ID Int, date Timestamp, country String,
name String, phonetype String, serialname String, salary Int)
STORED BY 'org.apache.carbondata.format'
TBLPROPERTIES('table_blocksize'='10Y4 MB')
""")
// reaching here means no exception was thrown — fail the test
assert(false)
} catch {
case e : MalformedCarbonCommandException => {
assert(e.getMessage.equals("Invalid table_blocksize value found: 10y4, " +
"only int value from 1 MB to 2048 MB is supported."))
}
}
}
// valid blocksize: table creation, data load and aggregation query all succeed
test("Function test: set table level blocksize load and agg query") {
sql(
"""
CREATE TABLE IF NOT EXISTS table_blocksize3
(ID Int, date Timestamp, country String,
name String, phonetype String, serialname String, salary Int)
STORED BY 'org.apache.carbondata.format'
TBLPROPERTIES('table_blocksize'='512 MB')
""")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
sql(s"""
LOAD DATA LOCAL INPATH '$testData2' into table table_blocksize3
""")
checkAnswer(
sql("""
SELECT country, count(salary) AS amount
FROM table_blocksize3
WHERE country IN ('china','france')
GROUP BY country
"""),
Seq(Row("china", 849), Row("france", 101))
)
}
override def afterAll {
sql("DROP TABLE IF EXISTS table_blocksize1")
sql("DROP TABLE IF EXISTS table_blocksize2")
sql("DROP TABLE IF EXISTS table_blocksize3")
// restore the default timestamp format changed by the load test above
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
}
}
| JihongMA/incubator-carbondata | integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestTableLevelBlockSize.scala | Scala | apache-2.0 | 4,089 |
/*******************************************************************************
* Copyright (c) 2014 Łukasz Szpakowski.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
******************************************************************************/
package pl.luckboy.purfuncor.frontend.typer
import scalaz._
import scalaz.Scalaz._
import pl.luckboy.purfuncor.frontend.resolver.GlobalSymbol
import pl.luckboy.purfuncor.frontend.resolver.LocalSymbol
/** Type-class-style interface over a type-inference environment.
  *
  * @tparam E environment type
  * @tparam L combinator location type
  * @tparam M local symbol type (presumably LocalSymbol, matching lambdaInfosFromEnvironment — confirm)
  * @tparam N global symbol / type-value identifier type
  */
trait TypeInferenceEnvironmental[E, L, M, N]
{
def copyEnvironment(env: E): E
def globalVarTypeFromEnvironment(env: E)(sym: GlobalSymbol): Type[N]
// lambda infos for one combinator (None = top level), keyed by lambda index
def lambdaInfosFromEnvironment(env: E)(sym: Option[GlobalSymbol]): Map[Int, InferenceLambdaInfo[LocalSymbol, GlobalSymbol]]
def getLambdaInfoFromEnvironment(env: E)(lambdaIdx: Int): Option[InferenceLambdaInfo[M, N]]
def globalTypeTableFromEnvironment(env: E): TypeTable[L, N]
// returns a copy/update of the environment pointing at the given combinator location
def withCurrentCombinatorLocation(env: E)(loc: Option[L]): E
| luckboy/Purfuncor | src/main/scala/pl/luckboy/purfuncor/frontend/typer/TypeInferenceEnvironmental.scala | Scala | mpl-2.0 | 1,124 |
package org.apache.spark.carbondata.restructure.vectorreader
import java.math.{BigDecimal, RoundingMode}
import org.apache.spark.sql.Row
import org.apache.spark.sql.common.util.QueryTest
import org.scalatest.BeforeAndAfterAll
/** Tests ALTER TABLE ADD/DROP/CHANGE COLUMN behavior with the vectorized
  * reader enabled: filters on newly added columns, default values, dictionary
  * columns, aggregation over added measures, and compaction after restructure. */
class AddColumnTestCases extends QueryTest with BeforeAndAfterAll {
override def beforeAll {
// all tests in this suite run with the vectorized reader on (restored in afterAll)
sqlContext.setConf("carbon.enable.vector.reader", "true")
sql("DROP TABLE IF EXISTS addcolumntest")
sql("drop table if exists hivetable")
sql(
"CREATE TABLE addcolumntest(intField int,stringField string,timestampField timestamp," +
"decimalField decimal(6,2)) STORED BY 'carbondata'")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data4.csv' INTO TABLE addcolumntest " +
s"options('FILEHEADER'='intField,stringField,timestampField,decimalField')")
// add a no-dictionary column with a default value so pre-existing rows read 'def'
sql(
"Alter table addcolumntest add columns(charField string) TBLPROPERTIES" +
"('DICTIONARY_EXCLUDE'='charField', 'DEFAULT.VALUE.charfield'='def')")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE addcolumntest " +
s"options('FILEHEADER'='intField,stringField,charField,timestampField,decimalField')")
}
test("test like query on new column") {
checkAnswer(sql("select charField from addcolumntest where charField like 'd%'"), Row("def"))
}
test("test is not null filter on new column") {
checkAnswer(sql("select charField from addcolumntest where charField is not null"),
Seq(Row("abc"), Row("def")))
}
test("test is null filter on new column") {
checkAnswer(sql("select charField from addcolumntest where charField is null"), Seq())
}
test("test equals filter on new column") {
checkAnswer(sql("select charField from addcolumntest where charField = 'abc'"), Row("abc"))
}
test("test add dictionary column and test greaterthan/lessthan filter on new column") {
sql(
"Alter table addcolumntest add columns(intnewField int) TBLPROPERTIES" +
"('DICTIONARY_INCLUDE'='intnewField', 'DEFAULT.VALUE.intNewField'='5')")
// default value 5 applies to all existing rows, so > 2 matches both, < 2 matches none
checkAnswer(sql("select charField from addcolumntest where intnewField > 2"),
Seq(Row("abc"), Row("def")))
checkAnswer(sql("select charField from addcolumntest where intnewField < 2"), Seq())
}
test("test add msr column and check aggregate") {
sql(
"alter table addcolumntest add columns(msrField decimal(5,2))TBLPROPERTIES ('DEFAULT.VALUE" +
".msrfield'= '123.45')")
// two rows, each defaulting to 123.45 => sum 246.90
checkAnswer(sql("select sum(msrField) from addcolumntest"),
Row(new BigDecimal("246.90").setScale(2, RoundingMode.HALF_UP)))
}
test("test compaction after adding new column") {
sql("Alter table addcolumntest compact 'major'")
checkExistence(sql("show segments for table addcolumntest"), true, "0Compacted")
checkExistence(sql("show segments for table addcolumntest"), true, "1Compacted")
checkExistence(sql("show segments for table addcolumntest"), true, "0.1Success")
checkAnswer(sql("select charField from addcolumntest"), Seq(Row("abc"), Row("def")))
}
test("test add and drop column with data loading") {
sql("DROP TABLE IF EXISTS carbon_table")
sql(
"CREATE TABLE carbon_table(intField int,stringField string,charField string,timestampField " +
"timestamp,decimalField decimal(6,2))STORED BY 'carbondata' TBLPROPERTIES" +
"('DICTIONARY_EXCLUDE'='charField')")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE carbon_table " +
s"options('FILEHEADER'='intField,stringField,charField,timestampField,decimalField')")
sql("Alter table carbon_table drop columns(timestampField)")
sql("select * from carbon_table").collect
// re-adding the dropped column, then loading with a matching file header
sql("Alter table carbon_table add columns(timestampField timestamp)")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data5.csv' INTO TABLE carbon_table " +
s"options('FILEHEADER'='intField,stringField,charField,decimalField,timestampField')")
sql("DROP TABLE IF EXISTS carbon_table")
}
test("test add/drop and change datatype") {
sql("DROP TABLE IF EXISTS carbon_table")
sql(
"CREATE TABLE carbon_table(intField int,stringField string,charField string,timestampField " +
"timestamp,decimalField decimal(6,2))STORED BY 'carbondata' TBLPROPERTIES" +
"('DICTIONARY_EXCLUDE'='charField')")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE carbon_table " +
s"options('FILEHEADER'='intField,stringField,charField,timestampField,decimalField')")
sql("Alter table carbon_table drop columns(charField)")
sql("select * from carbon_table").collect
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data4.csv' INTO TABLE carbon_table " +
s"options('FILEHEADER'='intField,stringField,timestampField,decimalField')")
sql(
"Alter table carbon_table add columns(charField string) TBLPROPERTIES" +
"('DICTIONARY_EXCLUDE'='charField')")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data2.csv' INTO TABLE carbon_table " +
s"options('FILEHEADER'='intField,stringField,timestampField,decimalField,charField')")
sql("select * from carbon_table").collect
// widen decimal precision/scale, then load data under the new type
sql("ALTER TABLE carbon_table CHANGE decimalField decimalField decimal(22,6)")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data3.csv' INTO TABLE carbon_table " +
s"options('FILEHEADER'='intField,stringField,timestampField,decimalField,charField')")
sql("DROP TABLE IF EXISTS carbon_table")
}
override def afterAll {
sql("DROP TABLE IF EXISTS addcolumntest")
sql("drop table if exists hivetable")
// restore the default reader mode for subsequent suites
sqlContext.setConf("carbon.enable.vector.reader", "false")
}
}
| mayunSaicmotor/incubator-carbondata | integration/spark2/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/AddColumnTestCases.scala | Scala | apache-2.0 | 5,716 |
package org.raisercostin.jedi
import java.io.File
import org.apache.commons.codec.digest.DigestUtils
import java.util.UUID
import org.slf4j.LoggerFactory
import java.nio.file.Files
import java.nio.file.attribute.BasicFileAttributes
import java.nio.file.Path
import java.nio.file.attribute.FileAttributeView
import scala.annotation.tailrec
/**
* A versioned location needs to be resolved since the version refers to the content.
* For a versioned location you can get an etag that can be used later to detect if file was changed.
* This might be needed for implementing caching mechanisms.
*
* Type of etags:
* - weak - they only indicate two representations are semantically equivalent.
* - strong
* -- shallow - see apache implementation http://httpd.apache.org/docs/2.0/mod/core.html#fileetag based on
* --- inode - they vary from system to system - see inode in java http://www.javacodex.com/More-Examples/1/8
* --- mtime
* --- size
* --- all
* -- deep
*
* Resources:
* - https://bitworking.org/news/150/REST-Tip-Deep-etags-give-you-more-benefits
* - https://unix.stackexchange.com/questions/192800/does-the-inode-change-when-renaming-or-moving-a-file
* - http://bitworking.org/news/ETags__This_stuff_matters
* - https://www.infoq.com/articles/java7-nio2
*/
trait VersionedLocation extends ResolvedLocationState {
  /**In worst case every location is considered to have a different version indifferent of content
  Two files with same version should likely be identical.
  Problems:
  - files with same content on different servers
    => Compute a fast md5 on size, start, end?
    => Make sure they are replicated with the same **controlled** name, timestamp etc.
  Solution)
    A file could have a name like: <name>-<changeTimestamp>-<counter>.<extension>
    - When reading the file with specific name and latest changeTimestamp is returned. Version is the <changeTimestamp>-<counter>.
    - On replicated they should see the same "change" so a file with identical name.
    - A separator is needed for versioned files that implement this policy.
  */
  // Default is maximally pessimistic: a fresh random UUID per call, so every
  // read looks like a new version (no caching benefit until overridden).
  def version: String = UUID.randomUUID().toString()
  // Not implemented here (??? throws scala.NotImplementedError); subclasses
  // that can read the content should override.
  def versionOfContent: String = ???
  /**The default etag is based on the strongETag.*/
  def etag: String = strongETag
  /**The efficient strong tag is a shallow one.*/
  def strongETag: String = shallowETag
  /**A not so efficient strong tag that is based on the content.*/
  def strongDeepETag: String = DigestUtils.sha1Hex(versionOfContent)
  /**The shallowETag shouldn't need access to content. The default one is a sha1Hex of the `version`.*/
  def shallowETag: String = DigestUtils.sha1Hex(version)
  /**A weak ETag doesn't change if two representations are semantically equivalent.
   * After removal of a timestamp from content for example.
   * It is hard to compute and is not sure what it means.*/
  @deprecated("Use strongETag since a weak etag is not clear how to be computed.", "0.33")
  def weakETag: String = throw new RuntimeException("Is not clear how to compute them.")
}
// File-backed versioning: identity comes from the inode (when the filesystem
// exposes one) or a sha1 of the symlink-resolved canonical path, and the
// version string combines identity, mtime and size (the "shallow" strategy).
trait FileVersionedLocation extends VersionedLocation { self: FileAbsoluteBaseLocation =>
  override def uniqueId: String = attributes.inode.getOrElse(DigestUtils.sha1Hex(canonicalOverSymLinks))
  def canonical = toFile.getCanonicalPath
  def canonicalOverSymLinks = {
    // NOTE(review): target(...) returns Left when more than 10 symlink hops are
    // needed, and .right.get then throws — confirm that failing hard is intended.
    target(toPath,10).right.get.toFile().getCanonicalPath
  }
  //TODO transform it into a Stream or better an Observable?
  // Follows a symlink chain for at most maxDepth hops: Right(resolvedPath) on
  // success, Left(lastSeenPath) when the depth budget is exhausted.
  @tailrec private def target(path: Path, maxDepth: Int): Either[Path, Path] =
    if (maxDepth <= 0)
      Left(path)
    else if (Files.isSymbolicLink(path))
      target(Files.readSymbolicLink(path), maxDepth - 1)
    else
      Right(path)
  def versionFromUniqueId: String = uniqueId
  def versionFromModificationTime: String = attributes.basic.lastModifiedTime().toMillis().toString
  private def versionFromSize: String = toFile.length().toString
  //inspired by http://httpd.apache.org/docs/2.0/mod/core.html#fileetag
  override def version: String = versionFromUniqueId + "-" + versionFromModificationTime + "-" + versionFromSize
} | raisercostin/jedi-io | src/main/scala/org/raisercostin/jedi/VersionedLocation.scala | Scala | apache-2.0 | 4,071 |
package scalan.compilation.lms.scalac
import scala.lms.common._
import scalan.compilation.lms.arrays.{ScalaGenArrayMutation, ScalaGenFatArrayLoopsFusionOpt}
import scalan.compilation.lms.common._
import scalan.compilation.lms.{BaseCodegen, LmsBackendFacade}
/**
 * Scala code generator for the core Scalan/LMS backend cake: mixes in all the
 * LMS ScalaGen* traits the backend needs and customizes handling of LMS structs
 * named "Tuple2*" so they are emitted as native Scala tuple types/literals.
 */
class ScalaCoreCodegen[BackendCake <: LmsBackendFacade](backend: BackendCake) extends
  BaseCodegen[BackendCake]
  with ScalaGenObjectOrientedOps //from scalan.compilation.lms.common
  with ScalaGenArrayOps
  with ScalaGenListOps // todo may be our ScalaGenLstOps should extend lms's ScalaGenListOps?
  with ScalaGenLstOps //from scalan.compilation.lms.common
  with ScalaGenNumericOps with ScalaGenPrimitiveOps with ScalaGenEqual with ScalaGenOrderingOps
  with ScalaGenBooleanOps with ScalaGenStructExt with ScalaGenStringOps
  with ScalaGenEitherOps //from scalan.compilation.lms.common
  with ScalaGenTupleOps
  with ScalaGenFatArrayLoopsFusionOpt
  with ScalaGenArrayMutation
  with ScalaGenIfThenElseFat with LoopFusionOpt with ScalaGenCastingOps with ScalaGenMathOps
  with ScalaGenHashMapOps with ScalaGenIterableOps with ScalaGenWhile with ScalaGenIfThenElse
  with ScalaGenVariables with ScalaGenArrayBuilderOps with ScalaGenExceptionOps with ScalaGenFunctions
  with ScalaGenRangeOps
  with ScalaGenMiscOpsExt
  with ScalaGenExtNumOps with ScalaGenSystemOps //from scalan.compilation.lms.common
  with ScalaGenArrayOpsExt {
  val IR: BackendCake = backend
  import IR._
  //def codeExtension: String = "scala"
  // Loop fusion is unconditionally enabled for every scope.
  override def shouldApplyFusion(currentScope: List[Stm])(result: List[Exp[Any]]): Boolean = true
  // Detects LMS struct class names standing for pairs ("Tuple2", "Tuple2xyz", ...).
  private def isTuple2(name: String) = name.startsWith("Tuple2")
  // Type mapping: the LMS wildcard manifest becomes "_", and a two-argument
  // Tuple2 manifest becomes the Scala tuple type "(A, B)"; everything else is
  // delegated to the mixed-in generators.
  override def remap[A](m: Manifest[A]) =
    if (m.equals(LmsType.wildCard)) "_"
    else if (isTuple2(m.runtimeClass.getSimpleName)) {
      if (m.typeArguments.length == 2) src"(${m.typeArguments(0)}, ${m.typeArguments(1)})"
      else m.toString
    }
    else super.remap(m)
  // A Tuple2 struct node is emitted as a tuple literal of its field values.
  override def emitNode(sym: Sym[Any], rhs: Def[Any]) = rhs match {
    case Struct(ClassTag(name), elems) if isTuple2(name) =>
      emitValDef(sym, "(" + elems.map(e => quote(e._2)).mkString(",") + ")")
    case _ => super.emitNode(sym, rhs)
  }
}
| PCMNN/scalan-ce | lms-backend/core/src/main/scala/scalan/compilation/lms/scalac/ScalaCoreCodegen.scala | Scala | apache-2.0 | 2,216 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import kafka.utils.nonthreadsafe
import kafka.api.ApiUtils._
import kafka.common.{ErrorMapping, TopicAndPartition}
import kafka.consumer.ConsumerConfig
import kafka.network.RequestChannel
import kafka.message.MessageSet
import java.util.concurrent.atomic.AtomicInteger
import java.nio.ByteBuffer
import scala.collection.immutable.Map
/** Per-partition detail of a fetch request: the start offset and the fetch size. */
case class PartitionFetchInfo(offset: Long, fetchSize: Int)
object FetchRequest {
  // Wire-protocol defaults used when the caller does not specify them.
  val CurrentVersion = 0.shortValue
  val DefaultMaxWait = 0
  val DefaultMinBytes = 0
  val DefaultCorrelationId = 0
  /**
   * Deserializes a FetchRequest from its binary wire format. Field order must
   * mirror FetchRequest.writeTo: version, correlationId, clientId, replicaId,
   * maxWait, minBytes, then per-topic groups of (partition, offset, fetchSize).
   */
  def readFrom(buffer: ByteBuffer): FetchRequest = {
    val versionId = buffer.getShort
    val correlationId = buffer.getInt
    val clientId = readShortString(buffer)
    val replicaId = buffer.getInt
    val maxWait = buffer.getInt
    val minBytes = buffer.getInt
    val topicCount = buffer.getInt
    val pairs = (1 to topicCount).flatMap(_ => {
      val topic = readShortString(buffer)
      val partitionCount = buffer.getInt
      (1 to partitionCount).map(_ => {
        val partitionId = buffer.getInt
        val offset = buffer.getLong
        val fetchSize = buffer.getInt
        (TopicAndPartition(topic, partitionId), PartitionFetchInfo(offset, fetchSize))
      })
    })
    FetchRequest(versionId, correlationId, clientId, replicaId, maxWait, minBytes, Map(pairs:_*))
  }
}
/**
 * A consumer/replica fetch request. Carries, per (topic, partition), the start
 * offset and fetch size, plus broker-side wait/size thresholds. Serialization
 * (writeTo) and sizeInBytes must stay in sync with FetchRequest.readFrom.
 */
case class FetchRequest(versionId: Short = FetchRequest.CurrentVersion,
                        correlationId: Int = FetchRequest.DefaultCorrelationId,
                        clientId: String = ConsumerConfig.DefaultClientId,
                        replicaId: Int = Request.OrdinaryConsumerId,
                        maxWait: Int = FetchRequest.DefaultMaxWait,
                        minBytes: Int = FetchRequest.DefaultMinBytes,
                        requestInfo: Map[TopicAndPartition, PartitionFetchInfo])
        extends RequestOrResponse(Some(RequestKeys.FetchKey)) {

  /**
   * Partitions the request info into a map of maps (one for each topic).
   */
  lazy val requestInfoGroupedByTopic = requestInfo.groupBy(_._1.topic)

  /**
   * Public constructor for the clients
   */
  def this(correlationId: Int,
           clientId: String,
           maxWait: Int,
           minBytes: Int,
           requestInfo: Map[TopicAndPartition, PartitionFetchInfo]) {
    this(versionId = FetchRequest.CurrentVersion,
         correlationId = correlationId,
         clientId = clientId,
         replicaId = Request.OrdinaryConsumerId,
         maxWait = maxWait,
         minBytes= minBytes,
         requestInfo = requestInfo)
  }

  // Serializes this request; field order mirrors FetchRequest.readFrom.
  def writeTo(buffer: ByteBuffer) {
    buffer.putShort(versionId)
    buffer.putInt(correlationId)
    writeShortString(buffer, clientId)
    buffer.putInt(replicaId)
    buffer.putInt(maxWait)
    buffer.putInt(minBytes)
    buffer.putInt(requestInfoGroupedByTopic.size) // topic count
    requestInfoGroupedByTopic.foreach {
      case (topic, partitionFetchInfos) =>
        writeShortString(buffer, topic)
        buffer.putInt(partitionFetchInfos.size) // partition count
        partitionFetchInfos.foreach {
          case (TopicAndPartition(_, partition), PartitionFetchInfo(offset, fetchSize)) =>
            buffer.putInt(partition)
            buffer.putLong(offset)
            buffer.putInt(fetchSize)
        }
    }
  }

  // Exact number of bytes writeTo will produce; the per-field byte counts below
  // must stay in sync with it.
  def sizeInBytes: Int = {
    2 + /* versionId */
    4 + /* correlationId */
    shortStringLength(clientId) +
    4 + /* replicaId */
    4 + /* maxWait */
    4 + /* minBytes */
    4 + /* topic count */
    requestInfoGroupedByTopic.foldLeft(0)((foldedTopics, currTopic) => {
      val (topic, partitionFetchInfos) = currTopic
      foldedTopics +
      shortStringLength(topic) +
      4 + /* partition count */
      partitionFetchInfos.size * (
        4 + /* partition id */
        8 + /* offset */
        4 /* fetch size */
      )
    })
  }

  // The replicaId distinguishes follower brokers from the two consumer
  // sentinel ids (ordinary vs. debugging/low-level consumer).
  def isFromFollower = Request.isValidBrokerId(replicaId)

  def isFromOrdinaryConsumer = replicaId == Request.OrdinaryConsumerId

  def isFromLowLevelConsumer = replicaId == Request.DebuggingConsumerId

  def numPartitions = requestInfo.size

  override def toString(): String = {
    describe(true)
  }

  // On failure, answers every requested partition with the same error code
  // mapped from the throwable, -1 as the high watermark, and an empty message set.
  override def handleError(e: Throwable, requestChannel: RequestChannel, request: RequestChannel.Request): Unit = {
    val fetchResponsePartitionData = requestInfo.map {
      case (topicAndPartition, data) =>
        (topicAndPartition, FetchResponsePartitionData(ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]]), -1, MessageSet.Empty))
    }
    val errorResponse = FetchResponse(correlationId, fetchResponsePartitionData)
    requestChannel.sendResponse(new RequestChannel.Response(request, new FetchResponseSend(request.connectionId, errorResponse)))
  }

  // Human-readable summary; `details` additionally includes the per-partition map.
  override def describe(details: Boolean): String = {
    val fetchRequest = new StringBuilder
    fetchRequest.append("Name: " + this.getClass.getSimpleName)
    fetchRequest.append("; Version: " + versionId)
    fetchRequest.append("; CorrelationId: " + correlationId)
    fetchRequest.append("; ClientId: " + clientId)
    fetchRequest.append("; ReplicaId: " + replicaId)
    fetchRequest.append("; MaxWait: " + maxWait + " ms")
    fetchRequest.append("; MinBytes: " + minBytes + " bytes")
    if(details)
      fetchRequest.append("; RequestInfo: " + requestInfo.mkString(","))
    fetchRequest.toString()
  }
}
// Mutable fluent builder for FetchRequest. Not thread-safe (single-caller use);
// the correlation id auto-increments on each build() so successive requests from
// the same builder are distinguishable.
@nonthreadsafe
class FetchRequestBuilder() {
  private val correlationId = new AtomicInteger(0)
  private val versionId = FetchRequest.CurrentVersion
  private var clientId = ConsumerConfig.DefaultClientId
  private var replicaId = Request.OrdinaryConsumerId
  private var maxWait = FetchRequest.DefaultMaxWait
  private var minBytes = FetchRequest.DefaultMinBytes
  private val requestMap = new collection.mutable.HashMap[TopicAndPartition, PartitionFetchInfo]

  // Registers (or overwrites) the fetch info for one topic-partition.
  def addFetch(topic: String, partition: Int, offset: Long, fetchSize: Int) = {
    requestMap.put(TopicAndPartition(topic, partition), PartitionFetchInfo(offset, fetchSize))
    this
  }

  def clientId(clientId: String): FetchRequestBuilder = {
    this.clientId = clientId
    this
  }

  /**
   * Only for internal use. Clients shouldn't set replicaId.
   */
  private[kafka] def replicaId(replicaId: Int): FetchRequestBuilder = {
    this.replicaId = replicaId
    this
  }

  def maxWait(maxWait: Int): FetchRequestBuilder = {
    this.maxWait = maxWait
    this
  }

  def minBytes(minBytes: Int): FetchRequestBuilder = {
    this.minBytes = minBytes
    this
  }

  // Snapshots the accumulated state into an immutable FetchRequest and clears
  // the partition map, so the builder can be reused for the next request.
  def build() = {
    val fetchRequest = FetchRequest(versionId, correlationId.getAndIncrement, clientId, replicaId, maxWait, minBytes, requestMap.toMap)
    requestMap.clear()
    fetchRequest
  }
}
| tempbottle/kafka | core/src/main/scala/kafka/api/FetchRequest.scala | Scala | apache-2.0 | 7,526 |
package scalan.compilation.lms.cxx.sharedptr
import java.io.File
import scalan.compilation.lms.linalgebra.{LinAlgBridge, LinAlgCxxShptrLmsBackend}
import scalan.{JNIExtractorOps, JNIExtractorOpsExp}
import scalan.compilation.GraphVizConfig
import scalan.compilation.lms.JNIBridge
import scalan.compilation.lms.cxx.LmsCompilerCxx
import scalan.it.BaseItTests
import scalan.linalgebra._
// Wraps each matrix-vector multiplication example (dense/sparse matrix x
// dense/sparse vector, plus the flat variants) in a JNI adapter via JNI_Wrap,
// so they can be compiled to native code and called from Java.
trait JNILinAlgProg extends LinearAlgebraExamples with JNIExtractorOps {
  lazy val ddmvm_jni = JNI_Wrap(ddmvm)
  lazy val dsmvm_jni = JNI_Wrap(dsmvm)
  lazy val sdmvm_jni = JNI_Wrap(sdmvm)
  lazy val ssmvm_jni = JNI_Wrap(ssmvm)
  lazy val fdmvm_jni = JNI_Wrap(fdmvm)
  lazy val fsmvm_jni = JNI_Wrap(fsmvm)
}
// Integration tests that build native (C++ shared-ptr backend) executables for
// each JNI-wrapped matrix-vector multiplication example. NOTE(review): the
// super-constructor argument is ??? (scala.NotImplementedError if evaluated) —
// confirm BaseItTests treats it lazily/unused.
class JNILinAlgItTests extends BaseItTests[JNILinAlgProg](???) {
  class ProgExp extends LADslExp with JNILinAlgProg with JNIExtractorOpsExp

  val prog = new LmsCompilerCxx(new ProgExp) with JNIBridge with LinAlgBridge {
    override val lms = new LinAlgCxxShptrLmsBackend
  }
  implicit val cfg = prog.defaultCompilerConfig
  val defaultCompilers = compilers(prog)

  // All generated executables land under <prefix>/mvm-cxx.
  val dir = new File(prefix, "mvm-cxx")

  test("ddmvm_jni") {
    // doesn't compile yet (similar below)
    // compileSource(_.ddmvm_jni)
    prog.buildExecutable(dir, dir, "ddmvm", prog.scalan.ddmvm_jni, GraphVizConfig.default)
  }
  test("dsmvm_jni") {
    prog.buildExecutable(dir, dir, "dsmvm", prog.scalan.dsmvm_jni, GraphVizConfig.default)
  }
  test("sdmvm_jni") {
    prog.buildExecutable(dir, dir, "sdmvm", prog.scalan.sdmvm_jni, GraphVizConfig.default)
  }
  test("ssmvm_jni") {
    prog.buildExecutable(dir, dir, "ssmvm", prog.scalan.ssmvm_jni, GraphVizConfig.default)
  }
  test("fdmvm_jni") {
    prog.buildExecutable(dir, dir, "fdmvm", prog.scalan.fdmvm_jni, GraphVizConfig.default)
  }
  test("fsmvm_jni") {
    prog.buildExecutable(dir, dir, "fsmvm", prog.scalan.fsmvm_jni, GraphVizConfig.default)
  }
}
| PCMNN/scalan-ce | lms-backend/linear-algebra/src/it/scala/scalan/compilation/lms/cxx/sharedptr/JNILinAlgItTests.scala | Scala | apache-2.0 | 1,895 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.util
import scala.collection.JavaConverters._
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.CarbonTableIdentifierImplicit._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
import org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.expressions.{AttributeSet, _}
import org.apache.spark.sql.catalyst.planning.{PhysicalOperation, QueryPlanner}
import org.apache.spark.sql.catalyst.plans.logical.{Filter => LogicalFilter, LogicalPlan}
import org.apache.spark.sql.execution.{ExecutedCommand, Filter, Project, SparkPlan}
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.execution.TungstenMergeSort
import org.apache.spark.sql.execution.datasources.{DescribeCommand => LogicalDescribeCommand, LogicalRelation}
import org.apache.spark.sql.hive.execution.{DropTable, HiveNativeCommand}
import org.apache.spark.sql.hive.execution.command._
import org.apache.spark.sql.optimizer.CarbonDecoderRelation
import org.apache.spark.sql.optimizer.CarbonMergeSort
import org.apache.spark.sql.types.IntegerType
import org.apache.spark.sql.types.StringType
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.scan.model.QueryDimension
import org.apache.carbondata.spark.CarbonAliasDecoderRelation
import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
import org.apache.spark.sql.execution.Sort
class CarbonStrategies(sqlContext: SQLContext) extends QueryPlanner[SparkPlan] {
override def strategies: Seq[Strategy] = getStrategies
val LOGGER = LogServiceFactory.getLogService("CarbonStrategies")
def getStrategies: Seq[Strategy] = {
val total = sqlContext.planner.strategies :+ CarbonTableScan
total
}
  /**
   * Carbon strategies for performing late materialization (decoding dictionary
   * keys as late as possible)
   */
private[sql] object CarbonTableScan extends Strategy {
    // Entry point of the strategy: routes Carbon relations to a raw scan (star
    // queries get the cheaper path), inserts into Carbon tables to a load
    // command, decoder nodes to CarbonDictionaryDecoder, and Carbon merge sorts
    // to a physical Sort. Returns Nil for plans this strategy does not handle.
    def apply(plan: LogicalPlan): Seq[SparkPlan] = {
      plan match {
        // TODO
        case PhysicalOperationForPushdown(projectList, predicates, l: LogicalRelation,
        sorts, limitValue, groupingExpressions, aggregateExpressions)
          if l.relation.isInstanceOf[CarbonDatasourceRelation] =>
          // For a CarbonPushDownToScan wrapper the star check runs on its child.
          if ((plan.isInstanceOf[CarbonPushDownToScan]
               && isStarQuery(plan.asInstanceOf[CarbonPushDownToScan].child))
              || (!plan.isInstanceOf[CarbonPushDownToScan]
                  && isStarQuery(plan))) {
            carbonRawScanForStarQuery(projectList, predicates, l, sorts,
              limitValue, groupingExpressions, aggregateExpressions)(sqlContext) :: Nil
          } else {
            carbonRawScan(projectList, predicates, l, sorts, limitValue, groupingExpressions,
              aggregateExpressions)(sqlContext) :: Nil
          }
        case InsertIntoCarbonTable(relation: CarbonDatasourceRelation,
        _, child: LogicalPlan, _, _) =>
          ExecutedCommand(LoadTableByInsert(relation,
            child)) :: Nil
        case CarbonDictionaryCatalystDecoder(relations, profile, aliasMap, _, child) =>
          CarbonDictionaryDecoder(relations,
            profile,
            aliasMap,
            planLater(child))(sqlContext) :: Nil
        case CarbonMergeSort(sortExprs, global, child) =>
          // TakeOrderedAndProject(
          // 10000,
          // sortExprs,
          // Nonthing,
          // planLater(child)) :: Nil
          //TungstenMergeSort(sortExprs, global, planLater(child)):: Nil
          Sort(sortExprs, global, planLater(child)):: Nil
        case _ =>
          Nil
      }
    }
    /**
     * Creates a Carbon scan for a non-star query: rewrites implicit-column UDFs
     * to plain string attributes, builds the scan, wraps it with a dictionary
     * decoder (and any leftover filter) when dictionary columns must be decoded,
     * and adds a Project node only when the scan output alone is insufficient.
     */
    private def carbonRawScan(projectList: Seq[NamedExpression],
        predicates: Seq[Expression],
        logicalRelation: LogicalRelation, sorts: Seq[QueryDimension],
        limitValue: Int = 0, groupingExpressions: Seq[Expression],
        aggregateExpressions: Seq[NamedExpression])(sc: SQLContext): SparkPlan = {
      val relation = logicalRelation.relation.asInstanceOf[CarbonDatasourceRelation]
      val tableName: String =
        relation.carbonRelation.metaData.carbonTable.getFactTableName.toLowerCase
      // Check out any expressions are there in project list. if they are present then we need to
      // decode them as well.
      // The tupleId/positionId implicit-column UDFs are replaced by plain
      // string AttributeReferences that keep the original expression id.
      val newProjectList = projectList.map { element =>
        element match {
          case a@Alias(s: ScalaUDF, name)
            if (name.equalsIgnoreCase(CarbonCommonConstants.POSITION_ID) ||
                name.equalsIgnoreCase(
                  CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID)) =>
            AttributeReference(name, StringType, true)().withExprId(a.exprId)
          case other => other
        }
      }
      val projectSet = AttributeSet(newProjectList.flatMap(_.references))
      val filterSet = AttributeSet(predicates.flatMap(_.references))
      val scan = CarbonScan(projectSet.toSeq,
        relation.carbonRelation,
        predicates, true, sorts, limitValue, groupingExpressions, aggregateExpressions)(sqlContext)
      // NOTE(review): this map is executed purely for its side effect on
      // scan.attributesNeedToDecode (its result is discarded) — foreach would
      // state that intent. Bare AttributeReferences / simple aliases need no
      // decoding; any dictionary column referenced inside a more complex
      // expression must be decoded before evaluation.
      newProjectList.map {
        case attr: AttributeReference =>
        case Alias(attr: AttributeReference, _) =>
        case others =>
          others.references.map { f =>
            val dictionary = relation.carbonRelation.metaData.dictionaryMap.get(f.name)
            if (dictionary.isDefined && dictionary.get) {
              scan.attributesNeedToDecode.add(f.asInstanceOf[AttributeReference])
            }
          }
      }
      // When decoding is needed, stack decoder (plus any filter the scan could
      // not push down) on top of the scan; otherwise use the scan directly.
      val scanWithDecoder =
        if (scan.attributesNeedToDecode.size() > 0) {
          val decoder = getCarbonDecoder(logicalRelation,
            sc,
            tableName,
            scan.attributesNeedToDecode.asScala.toSeq,
            scan)
          if (scan.unprocessedExprs.nonEmpty) {
            val filterCondToAdd = scan.unprocessedExprs.reduceLeftOption(expressions.And)
            filterCondToAdd.map(Filter(_, decoder)).getOrElse(decoder)
          } else {
            decoder
          }
        } else {
          scan
        }
      if (projectList.map(_.toAttribute) == scan.columnProjection &&
          projectSet.size == projectList.size &&
          filterSet.subsetOf(projectSet)) {
        // copied from spark pruneFilterProjectRaw
        // When it is possible to just use column pruning to get the right projection and
        // when the columns of this projection are enough to evaluate all filter conditions,
        // just do a scan with no extra project.
        scanWithDecoder
      } else {
        Project(newProjectList, scanWithDecoder)
      }
    }
    /**
     * Creates a Carbon scan for a star ("select *") query: the scan outputs the
     * project list as-is (no unsafe conversion), dictionary columns surface as
     * integer surrogate keys, and a decoder is stacked on top only when some
     * projected attribute is actually dictionary-encoded. Filters the scan
     * could not push down are re-applied as a Filter node.
     */
    private def carbonRawScanForStarQuery(projectList: Seq[NamedExpression],
        predicates: Seq[Expression],
        logicalRelation: LogicalRelation, sorts: Seq[QueryDimension],
        limitValue: Int = 0, groupingExpressions: Seq[Expression],
        aggregateExpressions: Seq[NamedExpression])(sc: SQLContext): SparkPlan = {
      val relation = logicalRelation.relation.asInstanceOf[CarbonDatasourceRelation]
      val tableName: String =
        relation.carbonRelation.metaData.carbonTable.getFactTableName.toLowerCase
      // Check out any expressions are there in project list. if they are present then we need to
      // decode them as well.
      val projectExprsNeedToDecode = new java.util.HashSet[Attribute]()
      val scan = CarbonScan(projectList.map(_.toAttribute),
        relation.carbonRelation,
        predicates,
        useUnsafeCoversion = false, sorts, limitValue, groupingExpressions,
        aggregateExpressions)(sqlContext)
      projectExprsNeedToDecode.addAll(scan.attributesNeedToDecode)
      // Dictionary columns that stay encoded are re-typed to IntegerType
      // (surrogate key) in the scan's output schema.
      val updatedAttrs = scan.columnProjection.map(attr =>
        updateDataType(attr.asInstanceOf[AttributeReference], relation, projectExprsNeedToDecode))
      scan.columnProjection = updatedAttrs
      if (projectExprsNeedToDecode.size() > 0
          && isDictionaryEncoded(projectExprsNeedToDecode.asScala.toSeq, relation)) {
        val decoder = getCarbonDecoder(logicalRelation,
          sc,
          tableName,
          projectExprsNeedToDecode.asScala.toSeq,
          scan)
        if (scan.unprocessedExprs.nonEmpty) {
          val filterCondToAdd = scan.unprocessedExprs.reduceLeftOption(expressions.And)
          filterCondToAdd.map(Filter(_, decoder)).getOrElse(decoder)
        } else {
          decoder
        }
      } else {
        if (scan.unprocessedExprs.nonEmpty) {
          val filterCondToAdd = scan.unprocessedExprs.reduceLeftOption(expressions.And)
          filterCondToAdd.map(Filter(_, scan)).getOrElse(scan)
        } else {
          scan
        }
      }
    }
    // Builds a CarbonDictionaryDecoder over `scan` that decodes exactly the
    // given attributes (IncludeProfile). Each attribute is re-created with the
    // table name as qualifier (same exprId) and registered on the decoder relation.
    def getCarbonDecoder(logicalRelation: LogicalRelation,
        sc: SQLContext,
        tableName: String,
        projectExprsNeedToDecode: Seq[Attribute],
        scan: CarbonScan): CarbonDictionaryDecoder = {
      val relation = CarbonDecoderRelation(logicalRelation.attributeMap,
        logicalRelation.relation.asInstanceOf[CarbonDatasourceRelation])
      val attrs = projectExprsNeedToDecode.map { attr =>
        val newAttr = AttributeReference(attr.name,
          attr.dataType,
          attr.nullable,
          attr.metadata)(attr.exprId, Seq(tableName))
        relation.addAttribute(newAttr)
        newAttr
      }
      CarbonDictionaryDecoder(Seq(relation), IncludeProfile(attrs),
        CarbonAliasDecoderRelation(), scan)(sc)
    }
def isDictionaryEncoded(projectExprsNeedToDecode: Seq[Attribute],
relation: CarbonDatasourceRelation): Boolean = {
var isEncoded = false
projectExprsNeedToDecode.foreach { attr =>
if (relation.carbonRelation.metaData.dictionaryMap.get(attr.name).getOrElse(false)) {
isEncoded = true
}
}
isEncoded
}
    // Re-types a dictionary-encoded attribute to IntegerType (its surrogate-key
    // representation at scan time), preserving exprId/qualifiers, unless the
    // attribute appears in allAttrsNotDecode (i.e. it will be decoded later) or
    // is not a dictionary column — in those cases the attribute is returned as-is.
    def updateDataType(attr: AttributeReference,
        relation: CarbonDatasourceRelation,
        allAttrsNotDecode: util.Set[Attribute]): AttributeReference = {
      if (relation.carbonRelation.metaData.dictionaryMap.get(attr.name).getOrElse(false) &&
          !allAttrsNotDecode.asScala.exists(p => p.name.equals(attr.name))) {
        AttributeReference(attr.name,
          IntegerType,
          attr.nullable,
          attr.metadata)(attr.exprId, attr.qualifiers)
      } else {
        attr
      }
    }
    // A "star" plan is a bare Carbon relation, optionally under a single
    // Filter — i.e. no projection/aggregation nodes between root and relation.
    private def isStarQuery(plan: LogicalPlan) = {
      plan match {
        case LogicalFilter(condition, l: LogicalRelation)
          if l.relation.isInstanceOf[CarbonDatasourceRelation] =>
          true
        case l: LogicalRelation if l.relation.isInstanceOf[CarbonDatasourceRelation] => true
        case _ => false
      }
    }
}
object DDLStrategies extends Strategy {
    // DDL routing: sends Carbon-table commands (drop, load, compaction,
    // describe, database ops) to Carbon executors and falls back to
    // HiveNativeCommand for everything Hive should handle natively.
    def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
      // Drop only when the table path exists in the Carbon metastore.
      case DropTable(tableName, ifNotExists)
        if CarbonEnv.get.carbonMetastore
          .isTablePathExists(toTableIdentifier(tableName.toLowerCase))(sqlContext) =>
        val identifier = toTableIdentifier(tableName.toLowerCase)
        ExecutedCommand(DropTableCommand(ifNotExists, identifier.database, identifier.table)) :: Nil
      case ShowLoadsCommand(databaseName, table, limit) =>
        ExecutedCommand(ShowLoads(databaseName, table, limit, plan.output)) :: Nil
      // Carbon tables (or loads carrying options) use the Carbon load path;
      // plain Hive tables replay the original SQL natively.
      case LoadTable(databaseNameOp, tableName, factPathFromUser, dimFilesPath,
      options, isOverwriteExist, inputSqlString, dataFrame, _) =>
        val isCarbonTable = CarbonEnv.get.carbonMetastore
          .tableExists(TableIdentifier(tableName, databaseNameOp))(sqlContext)
        if (isCarbonTable || options.nonEmpty) {
          ExecutedCommand(LoadTable(databaseNameOp, tableName, factPathFromUser, dimFilesPath,
            options, isOverwriteExist, inputSqlString, dataFrame)) :: Nil
        } else {
          ExecutedCommand(HiveNativeCommand(inputSqlString)) :: Nil
        }
      // Compaction: only "minor"/"major" are valid on Carbon tables.
      case alterTable@AlterTableCompaction(altertablemodel) =>
        val isCarbonTable = CarbonEnv.get.carbonMetastore
          .tableExists(TableIdentifier(altertablemodel.tableName,
            altertablemodel.dbName))(sqlContext)
        if (isCarbonTable) {
          if (altertablemodel.compactionType.equalsIgnoreCase("minor") ||
              altertablemodel.compactionType.equalsIgnoreCase("major")) {
            ExecutedCommand(alterTable) :: Nil
          } else {
            throw new MalformedCarbonCommandException(
              "Unsupported alter operation on carbon table")
          }
        } else {
          ExecutedCommand(HiveNativeCommand(altertablemodel.alterSql)) :: Nil
        }
      case CreateDatabase(dbName, sql) =>
        ExecutedCommand(CreateDatabaseCommand(dbName, HiveNativeCommand(sql))) :: Nil
      case DropDatabase(dbName, isCascade, sql) =>
        if (isCascade) {
          ExecutedCommand(DropDatabaseCascadeCommand(dbName, HiveNativeCommand(sql))) :: Nil
        } else {
          ExecutedCommand(DropDatabaseCommand(dbName, HiveNativeCommand(sql))) :: Nil
        }
      case UseDatabase(sql) =>
        ExecutedCommand(HiveNativeCommand(sql)) :: Nil
      // Re-parse native commands with the Carbon parser first; Carbon/analysis
      // errors propagate, any other failure falls back to native execution.
      // NOTE(review): the generic `case e: Exception` silently discards the
      // parse failure — confirm this best-effort fallback is intended.
      case d: HiveNativeCommand =>
        try {
          val resolvedTable = sqlContext.executePlan(CarbonHiveSyntax.parse(d.sql)).optimizedPlan
          planLater(resolvedTable) :: Nil
        } catch {
          case ce: MalformedCarbonCommandException =>
            throw ce
          case ae: AnalysisException =>
            throw ae
          case e: Exception => ExecutedCommand(d) :: Nil
        }
      // DESCRIBE FORMATTED on a known table gets Carbon's formatted output.
      case DescribeFormattedCommand(sql, tblIdentifier) =>
        val isTable = CarbonEnv.get.carbonMetastore
          .tableExists(tblIdentifier)(sqlContext)
        if (isTable) {
          val describe =
            LogicalDescribeCommand(UnresolvedRelation(tblIdentifier, None), isExtended = false)
          val resolvedTable = sqlContext.executePlan(describe.table).analyzed
          val resultPlan = sqlContext.executePlan(resolvedTable).executedPlan
          ExecutedCommand(DescribeCommandFormatted(resultPlan, plan.output, tblIdentifier)) :: Nil
        } else {
          ExecutedCommand(HiveNativeCommand(sql)) :: Nil
        }
      case _ =>
        Nil
    }
def toTableIdentifier(name: String): TableIdentifier = {
val identifier = name.split("\\\\.")
identifier match {
case Array(tableName) => TableIdentifier(tableName, None)
case Array(dbName, tableName) => TableIdentifier(tableName, Some(dbName))
}
}
}
}
// Thin facade over a shared CarbonSqlParser instance, used to re-parse native
// Hive commands with Carbon's SQL grammar.
object CarbonHiveSyntax {
  @transient
  protected val sqlParser = new CarbonSqlParser

  // Parses the given SQL text into a logical plan using Carbon's parser.
  def parse(sqlText: String): LogicalPlan = {
    sqlParser.parse(sqlText)
  }
}
| mayunSaicmotor/incubator-carbondata | integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonStrategies.scala | Scala | apache-2.0 | 15,711 |
package sbt
import play.api._
import play.core._
trait PlayExceptions {
def filterAnnoyingErrorMessages(message: String): String = {
val overloaded = """(?s)overloaded method value (.*) with alternatives:(.*)cannot be applied to(.*)""".r
message match {
case overloaded(method, _, signature) => "Overloaded method value [" + method + "] cannot be applied to " + signature
case msg => msg
}
}
  // Wraps an arbitrary failure; the message falls back to "<ExceptionClass>: <msg>"
  // of the wrapped throwable when no explicit message is given.
  case class UnexpectedException(message: Option[String] = None, unexpected: Option[Throwable] = None) extends PlayException(
    "Unexpected exception",
    message.getOrElse {
      unexpected.map(t => "%s: %s".format(t.getClass.getSimpleName, t.getMessage)).getOrElse("")
    },
    unexpected.orNull
  )

  // Adapts an sbt/zinc compiler problem to Play's source-aware exception, so the
  // error page can show the offending file, line and column.
  case class CompilationException(problem: xsbti.Problem) extends PlayException.ExceptionSource(
    "Compilation error", filterAnnoyingErrorMessages(problem.message)) {
    def line = problem.position.line.map(m => m.asInstanceOf[java.lang.Integer]).orNull
    def position = problem.position.pointer.map(m => m.asInstanceOf[java.lang.Integer]).orNull
    def input = problem.position.sourceFile.map(scalax.file.Path(_).string).orNull
    def sourceName = problem.position.sourceFile.map(_.getAbsolutePath).orNull
  }

  // Template (view) compilation failure with an exact source location.
  case class TemplateCompilationException(source: File, message: String, atLine: Int, column: Int) extends PlayException.ExceptionSource(
    "Compilation error", message) with FeedbackProvidedException {
    def line = atLine
    def position = column
    def input = scalax.file.Path(source).string
    def sourceName = source.getAbsolutePath
  }

  // Routes-file compilation failure; line/column are optional because the routes
  // compiler cannot always pinpoint them.
  case class RoutesCompilationException(source: File, message: String, atLine: Option[Int], column: Option[Int]) extends PlayException.ExceptionSource(
    "Compilation error", message) with FeedbackProvidedException {
    def line = atLine.map(_.asInstanceOf[java.lang.Integer]).orNull
    def position = column.map(_.asInstanceOf[java.lang.Integer]).orNull
    def input = scalax.file.Path(source).string
    def sourceName = source.getAbsolutePath
  }

  // Asset (e.g. LESS/CoffeeScript) compilation failure; the source file itself
  // is optional and is only read when it still exists on disk.
  case class AssetCompilationException(source: Option[File], message: String, atLine: Option[Int], column: Option[Int]) extends PlayException.ExceptionSource(
    "Compilation error", message) with FeedbackProvidedException {
    def line = atLine.map(_.asInstanceOf[java.lang.Integer]).orNull
    def position = column.map(_.asInstanceOf[java.lang.Integer]).orNull
    def input = source.filter(_.exists()).map(scalax.file.Path(_).string).orNull
    def sourceName = source.map(_.getAbsolutePath).orNull
  }
}
/** Singleton instance so the exception types/helpers can be imported directly. */
object PlayExceptions extends PlayExceptions
| noel-yap/setter-for-catan | play-2.1.1/framework/src/sbt-plugin/src/main/scala/PlayExceptions.scala | Scala | apache-2.0 | 2,617 |
package org.aja.tej.examples.spark.rdd.summarizationPatterns
import org.aja.tej.utils.TejUtils
import org.apache.spark.SparkContext
/**
* Created by mageswaran on 12/8/15.
*/
/**
The aggregate-method provides an interface for performing highly customized reductions
and aggregations with a RDD. However, due to the way Scala and Spark execute and
process data, care must be taken to achieve deterministic behavior.
The following list contains a few observations:
• The reduce and combine functions have to be commutative and associative.
• As can be seen from the function definition below, the output of the combiner must
be equal to its input. This is necessary because Spark will chain-execute it.
• The zero value is the initial value of the U component when either seqOp or combOp
are executed for the first element of their domain of influence. Depending on what
you want to achieve, you may have to change it. However, to make your code
deterministic, make sure that your code will yield the same result regardless of the
number or size of partitions.
• Do not assume any execution order for either partition computations or combining
partitions.
• The neutral zeroValue is applied at the beginning of each sequence of reduces
within the individual partitions and again when the output of separate partitions
is combined.
• Why have two separate combine functions? The first functions maps the input
values into the result space. Note that the aggregation data type (1st input and
output) can be different (U != T ). The second function reduces these mapped
values in the result space.
• Why would one want to use two input data types? Let us assume we do an ar-
chaeological site survey using a metal detector. While walking through the site
we take GPS coordinates of important findings based on the output of the metal
detector. Later, we intend to draw an image of a map that highlights these lo-
cations using the aggregate function. In this case the zeroValue could be an area
map with no highlights. The possibly huge set of input data is stored as GPS
coordinates across many partitions. seqOp could convert the GPS coordinates to
map coordinates and put a marker on the map at the respective position. combOp
will receive these highlights as partial maps and combine them into a single final
output map. */
/** Demonstrates RDD.aggregate with different zero values, partition counts
  * and (non-)commutative operators; results are printed, not returned. */
object AggregateExample extends App{
  //Goes into constructor
  /**
   * Place the cursor on top of the API and Ctrl + B ;)
   */
  def useCases(sc: SparkContext) = {
    println(this.getClass.getSimpleName)
    try {
      val aggr1 = sc.parallelize(List (1 ,2 ,3 ,4 ,5 ,6) , 3).cache()
      //Scala currying is used here
      //zeroValue: of the type U you wanted, here Int
      //seqOp : Maps input type T to U, here same Int since it is max
      //comOp : Combines/reduces the value to the result type U, here again Int
      //These operations are sent to each partition, by serializing the function literals,
      //hence these function literals should be serializable
      //In Scala a function literal is an object; Spark makes it serializable by default
      val res1 = aggr1.aggregate(0)(math.max (_ , _ ) , _ + _ )
      println(res1)
      //0 + 6 = 6 with 1 partition
      //0 + 3 + 6 = 9 with 2 partitions
      //0 + 2 + 4 + 6 = 12 with 3 partitions
      println("-----------------------------------------------------------")
      val aggr2 = sc.parallelize(List("a","b","c","d","e","f") ,2)
      val res2 = aggr2.aggregate ("")( _ + _ , _ + _ )
      println(res2) //abcdef
      // : Does partition level reduction first, then the result of partitions
      val res3 = aggr2.aggregate ("x")( _ + _ , _ + _ )
      println(res3) //xxabcxdef
      //x <- final level addition
      //x in xabc <- at partition level
      //x in xdef <- at partition level
      println("-----------------------------------------------------------")
      val aggr3 = sc.parallelize ( List ("12" ,"23" ,"345" ,"4567") ,2)
      val res4 = aggr3.aggregate("")((x,y) => math.max(x.length , y.length).toString, (x,y)=> x + y)
      println(res4) //42
      println("-----------------------------------------------------------")
      val res5 = aggr3.aggregate("")((x,y)=> math.min (x.length, y.length).toString, (x,y) => x + y)
      println(res5) //11
      println("-----------------------------------------------------------")
      val aggr4 = sc . parallelize ( List ("12" ,"23" ,"345" ,"") ,2)
      val res6 = aggr4.aggregate("")((x, y) => math.min(x.length,y.length).toString, (x,y)=>x + y)
      println(res6) //10
      // The main issue with the code above is that the result of the inner min is a string of
      // length 1. The zero in the output is due to the empty string being the last string in the
      // list. We see this result because we are not recursively reducing any further within the
      // partition for the final string.
      println("-----------------------------------------------------------")
      val aggr5 = sc . parallelize ( List ("12" ,"23" ,"" ,"345") ,2)
      val res7 = aggr5.aggregate("")((x, y) => math.min(x.length,y.length).toString, (x,y)=>x + y)
      println(res7) //11
      // In contrast to the previous example, this example has the empty string at the beginning
      // of the second partition. This results in length of zero being input to the second reduce
      // which then upgrades it a length of 1. (Warning: The above example shows bad design
      // since the output is dependent on the order of the data inside the partitions.)
    } finally {
      TejUtils.waitForSparkUI(sc)
    }
  }
  useCases(TejUtils.getSparkContext(this.getClass.getSimpleName/*, "spark://localhost:7077"*/))
}
| Mageswaran1989/aja | src/examples/scala/org/aja/tej/examples/spark/rdd/summarizationPatterns/AggregateExample.scala | Scala | apache-2.0 | 5,769 |
package edu.arizona.sista.processors
import edu.arizona.sista.discourse.rstparser.RelationDirection
import edu.arizona.sista.struct.DirectedGraphEdgeIterator
import org.scalatest._
import edu.arizona.sista.processors.fastnlp.FastNLPProcessor
/**
*
* User: mihais
* Date: 1/7/14
*/
/** Smoke tests for FastNLPProcessor: dependency parsing (malt-based) and
  * discourse parsing on small fixed sentences. */
class TestFastNLPProcessor extends FlatSpec with Matchers {
  // shared processor instance; model loading is expensive, so it is reused across tests
  var proc:Processor = new FastNLPProcessor(internStrings = true, withDiscourse = true)
  "FastNLPProcessor" should "generate correct dependencies in test sentence 1" in {
    val doc = proc.annotate("John Smith went to China.")
    // malt can generate only Stanford basic dependencies rather than collapsed ones
    // so, for example, we will see "prep" labels rather than "prep_to"
    //println(doc.sentences.head.dependencies)
    doc.sentences.head.dependencies.get.hasEdge(1, 0, "nn") should be (true)
    doc.sentences.head.dependencies.get.hasEdge(2, 1, "nsubj") should be (true)
    doc.sentences.head.dependencies.get.hasEdge(2, 3, "prep") should be (true)
    doc.sentences.head.dependencies.get.hasEdge(3, 4, "pobj") should be (true)
    /*
    val it = new DirectedGraphEdgeIterator[String](doc.sentences.head.dependencies.get)
    while(it.hasNext) {
      val d = it.next()
      println(d._1 + " " + d._2 + " " + d._3)
    }
    */
  }
  "FastNLPProcessor" should "generate correct dependencies in test sentence 2" in {
    val doc = proc.annotate("He bought some shoes.")
    //println(doc.sentences.head.dependencies)
    doc.sentences.head.dependencies.get.hasEdge(1, 0, "nsubj") should be (true)
    doc.sentences.head.dependencies.get.hasEdge(1, 3, "dobj") should be (true)
    doc.sentences.head.dependencies.get.hasEdge(1, 4, "punct") should be (true)
    doc.sentences.head.dependencies.get.hasEdge(3, 2, "det") should be (true)
  }
  "FastNLPProcessor" should "generate correct discourse relations in test 3" in {
    val doc = proc.annotate("John Smith went to China. He visited Beijing, on January 10th, 2013.")
    // doc.clear() drops intermediate annotator state; the discourse tree remains
    doc.clear()
    val d = doc.discourseTree.get
    d.relationLabel should be ("elaboration")
    d.relationDirection should be (RelationDirection.LeftToRight)
    d.isTerminal should be (false)
    d.children.length should be (2)
  }
}
| VivianLuwenHuangfu/processors | src/test/scala/edu/arizona/sista/processors/TestFastNLPProcessor.scala | Scala | apache-2.0 | 2,234 |
package marg.parser
import marg.lang.data.SType
import scala.collection.mutable
/** A lexically scoped symbol table: lookups that miss the current scope
  * fall back to the enclosing environment (`outer` is null at the root). */
class Env(outer: Env) {
  /** Bindings declared directly in this scope. */
  val map = mutable.Map[String, SType]()

  /** True when `key` is bound in this scope or any enclosing one. */
  def find(key: String): Boolean = {
    if (map.contains(key)) true
    else outer != null && outer.find(key)
  }

  /** Value bound to `key`, searching outward; null when unbound anywhere. */
  def get(key: String): SType = {
    map.get(key) match {
      case Some(value) => value
      case None => if (outer == null) null else outer.get(key)
    }
  }

  /** Store a binding: overwrite it in whichever scope already declares it,
    * otherwise declare it in the current scope. */
  def +=(t: (String, SType)): Unit = {
    if (!find(t._1)) map += t
    else if (map.contains(t._1)) map += t
    else outer += t
  }
}
| 193s/marg | src/main/scala/marg/parser/Env.scala | Scala | gpl-2.0 | 513 |
/*
* Copyright (c) 2015-2017 Toby Weston
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s4j.scala.chapter14
object UpperBounds {

  /** Demonstrates an upper type bound: `A` must itself be `Ordered[A]`,
    * which supplies the implicit `Ordering` that `sorted` requires. */
  trait Sortable[A <: Ordered[A]] extends Iterable[A] {
    /** Returns this collection's elements in ascending order. */
    def sort: Seq[A] = this.toList.sorted
  }

  /** A sortable customer collection; iteration is intentionally unimplemented. */
  class Customers extends Sortable[Customer] {
    override def iterator = ???
  }

  /** A customer with a trivial ordering: every comparison yields 0. */
  class Customer extends Ordered[Customer] {
    override def compare(that: Customer) = 0
  }
}
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.utils.serializer.converters
import java.io.{DataInputStream, DataOutputStream}
import com.google.protobuf.ByteString
import com.intel.analytics.bigdl.tensor.Storage
import com.intel.analytics.bigdl.utils.serializer.BigDLDataType
import com.intel.analytics.bigdl.utils.serializer.BigDLDataType.BigDLDataType
/**
* DataReaderWriter defines how to read/write weight data from bin file
*/
trait DataReaderWriter {
  // serialize all elements of `data` to the stream, in order
  def write(outputStream: DataOutputStream, data: Array[_]): Unit
  // read `size` elements from the stream; implementations return a Storage
  def read(inputStream: DataInputStream, size: Int): Any
  // tag identifying the element type of this reader/writer
  def dataType(): BigDLDataType
}
object FloatReaderWriter extends DataReaderWriter {
  /** Writes every element as a 4-byte big-endian IEEE-754 float. */
  override def write(outputStream: DataOutputStream, data: Array[_]): Unit = {
    for (d <- data) outputStream.writeFloat(d.asInstanceOf[Float])
  }

  /** Reads `size` floats, in stream order, into a float Storage. */
  override def read(inputStream: DataInputStream, size: Int): Any = {
    Storage[Float](Array.fill(size)(inputStream.readFloat))
  }

  def dataType(): BigDLDataType = BigDLDataType.FLOAT
}
object DoubleReaderWriter extends DataReaderWriter {
  /** Writes every element as an 8-byte big-endian IEEE-754 double. */
  override def write(outputStream: DataOutputStream, data: Array[_]): Unit = {
    for (d <- data) outputStream.writeDouble(d.asInstanceOf[Double])
  }

  /** Reads `size` doubles, in stream order, into a double Storage. */
  override def read(inputStream: DataInputStream, size: Int): Any = {
    Storage[Double](Array.fill(size)(inputStream.readDouble))
  }

  def dataType(): BigDLDataType = BigDLDataType.DOUBLE
}
object CharReaderWriter extends DataReaderWriter {
  /** Writes every element as a 2-byte UTF-16 code unit. */
  override def write(outputStream: DataOutputStream, data: Array[_]): Unit = {
    for (d <- data) outputStream.writeChar(d.asInstanceOf[Char])
  }

  /** Reads `size` chars, in stream order, into a char Storage. */
  override def read(inputStream: DataInputStream, size: Int): Any = {
    Storage[Char](Array.fill(size)(inputStream.readChar))
  }

  def dataType(): BigDLDataType = BigDLDataType.CHAR
}
object BoolReaderWriter extends DataReaderWriter {
  /** Writes every element as a single byte (0 or 1). */
  override def write(outputStream: DataOutputStream, data: Array[_]): Unit = {
    for (d <- data) outputStream.writeBoolean(d.asInstanceOf[Boolean])
  }

  /** Reads `size` booleans, in stream order, into a boolean Storage. */
  override def read(inputStream: DataInputStream, size: Int): Any = {
    Storage[Boolean](Array.fill(size)(inputStream.readBoolean))
  }

  def dataType(): BigDLDataType = BigDLDataType.BOOL
}
object StringReaderWriter extends DataReaderWriter {
  /** Writes each string as a 4-byte length prefix followed by its UTF-8 bytes. */
  override def write(outputStream: DataOutputStream, data: Array[_]): Unit = {
    data.foreach(str => {
      val value = str.asInstanceOf[String].getBytes("utf-8")
      outputStream.writeInt(value.size)
      outputStream.write(value)
    })
  }

  /**
   * Reads `size` length-prefixed UTF-8 strings into a Storage.
   * Uses readFully instead of read: InputStream.read may return fewer bytes
   * than requested, which would silently produce a corrupted string.
   */
  override def read(inputStream: DataInputStream, size: Int): Any = {
    val data = new Array[String](size)
    for (i <- 0 until size) {
      val ssize = inputStream.readInt
      val buffer = new Array[Byte](ssize)
      inputStream.readFully(buffer)
      data(i) = new String(buffer, "utf-8")
    }
    Storage[String](data)
  }

  def dataType(): BigDLDataType = BigDLDataType.STRING
}
object IntReaderWriter extends DataReaderWriter {
  /** Writes every element as a 4-byte big-endian int. */
  override def write(outputStream: DataOutputStream, data: Array[_]): Unit = {
    for (d <- data) outputStream.writeInt(d.asInstanceOf[Int])
  }

  /** Reads `size` ints, in stream order, into an int Storage. */
  override def read(inputStream: DataInputStream, size: Int): Any = {
    Storage[Int](Array.fill(size)(inputStream.readInt))
  }

  def dataType(): BigDLDataType = BigDLDataType.INT
}
object ShortReaderWriter extends DataReaderWriter {
  /** Writes every element as a 2-byte big-endian short. */
  override def write(outputStream: DataOutputStream, data: Array[_]): Unit = {
    for (d <- data) outputStream.writeShort(d.asInstanceOf[Short])
  }

  /** Reads `size` shorts, in stream order, into a short Storage. */
  override def read(inputStream: DataInputStream, size: Int): Any = {
    Storage[Short](Array.fill(size)(inputStream.readShort))
  }

  def dataType(): BigDLDataType = BigDLDataType.SHORT
}
object LongReaderWriter extends DataReaderWriter {
  /** Writes every element as an 8-byte big-endian long. */
  override def write(outputStream: DataOutputStream, data: Array[_]): Unit = {
    for (d <- data) outputStream.writeLong(d.asInstanceOf[Long])
  }

  /** Reads `size` longs, in stream order, into a long Storage. */
  override def read(inputStream: DataInputStream, size: Int): Any = {
    Storage[Long](Array.fill(size)(inputStream.readLong))
  }

  def dataType(): BigDLDataType = BigDLDataType.LONG
}
object ByteStringReaderWriter extends DataReaderWriter {
  /** Writes each ByteString as a 4-byte length prefix followed by its raw bytes. */
  override def write(outputStream: DataOutputStream, data: Array[_]): Unit = {
    data.foreach(str => {
      val value = str.asInstanceOf[ByteString].toByteArray
      outputStream.writeInt(value.size)
      outputStream.write(value)
    })
  }

  /**
   * Reads `size` length-prefixed byte sequences into a ByteString Storage.
   * Uses readFully instead of read: InputStream.read may return fewer bytes
   * than requested, which would silently yield a truncated ByteString.
   */
  override def read(inputStream: DataInputStream, size: Int): Any = {
    val data = new Array[ByteString](size)
    for (i <- 0 until size) {
      val ssize = inputStream.readInt
      val buffer = new Array[Byte](ssize)
      inputStream.readFully(buffer)
      data(i) = ByteString.copyFrom(buffer)
    }
    Storage[ByteString](data)
  }

  def dataType(): BigDLDataType = BigDLDataType.BYTESTRING
}
object ByteReaderWriter extends DataReaderWriter {
  /** Writes the raw byte array directly to the stream. */
  override def write(outputStream: DataOutputStream, data: Array[_]): Unit = {
    outputStream.write(data.asInstanceOf[Array[Byte]])
  }

  /**
   * Reads exactly `size` bytes into a byte Storage.
   * Uses readFully instead of read: a single read call may return fewer
   * bytes than requested, leaving the tail of the array zero-filled.
   */
  override def read(inputStream: DataInputStream, size: Int): Any = {
    val data = new Array[Byte](size)
    inputStream.readFully(data)
    Storage[Byte](data)
  }

  override def dataType(): BigDLDataType = BigDLDataType.BYTE
}
object DataReaderWriter {
  // Select a reader/writer based on the runtime element type of the array.
  def apply(datas : Array[_]): DataReaderWriter = {
    datas match {
      case flats: Array[Float] => FloatReaderWriter
      case dbls: Array[Double] => DoubleReaderWriter
      case chs: Array[Char] => CharReaderWriter
      case bools: Array[Boolean] => BoolReaderWriter
      case strs : Array[String] => StringReaderWriter
      case ints : Array[Int] => IntReaderWriter
      case shorts : Array[Short] => ShortReaderWriter
      case longs : Array[Long] => LongReaderWriter
      case bytestrs : Array[ByteString] => ByteStringReaderWriter
      case bytes : Array[Byte] => ByteReaderWriter
      case _ => throw new RuntimeException("Unsupported Type")
    }
  }

  // Select a reader/writer based on a serialized data-type tag.
  def apply(dataType : BigDLDataType): DataReaderWriter = {
    dataType match {
      case BigDLDataType.FLOAT => FloatReaderWriter
      case BigDLDataType.DOUBLE => DoubleReaderWriter
      case BigDLDataType.CHAR => CharReaderWriter
      case BigDLDataType.BOOL => BoolReaderWriter
      case BigDLDataType.STRING => StringReaderWriter
      case BigDLDataType.INT => IntReaderWriter
      case BigDLDataType.SHORT => ShortReaderWriter
      case BigDLDataType.LONG => LongReaderWriter
      case BigDLDataType.BYTESTRING => ByteStringReaderWriter
      case BigDLDataType.BYTE => ByteReaderWriter
      case _ => throw new RuntimeException("Unsupported Type")
    }
  }
}
| yiheng/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/utils/serializer/converters/DataReaderWriter.scala | Scala | apache-2.0 | 7,715 |
package com.linkedin.norbert.network.partitioned.loadbalancer
import com.linkedin.norbert.network.common.Endpoint
import java.util.TreeMap
import com.linkedin.norbert.cluster.{Node, InvalidClusterException}
import java.util.concurrent.atomic.AtomicBoolean
import com.linkedin.norbert.network.util.ConcurrentCyclicCounter
/*
* Copyright 2009-2015 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
/**
* Similar to both the DefaultPartitionedLoadBalancer and the SimpleConsistentHashedLoadBalancer, but uses a wheel per partition.
* This means that if you shard your data according to some key but maintain replicas, rather than load balancing between the replicas,
* the replicas will also use consistent hashing. This can be useful for a distributed database like system where you'd like
* to shard the data by key, but you'd also like to maximize cache utilization for the replicas.
*/
/** Builds a load balancer that consistent-hashes within each partition's
  * replica set. numPartitions == -1 means "derive from the endpoints". */
class PartitionedConsistentHashedLoadBalancerFactory[PartitionedId](numPartitions: Int,
                                                                    numReplicas: Int,
                                                                    hashFn: PartitionedId => Int,
                                                                    endpointHashFn: String => Int,
                                                                    serveRequestsIfPartitionMissing: Boolean)
  extends DefaultPartitionedLoadBalancerFactory[PartitionedId](numPartitions, serveRequestsIfPartitionMissing) {

  // Convenience constructor that defers the partition count to the endpoint set.
  def this(slicesPerEndpoint: Int, hashFn: PartitionedId => Int, endpointHashFn: String => Int, serveRequestsIfPartitionMissing: Boolean) = {
    this(-1, slicesPerEndpoint, hashFn, endpointHashFn, serveRequestsIfPartitionMissing)
  }

  // Effective partition count: configured value, or the number of distinct
  // partition ids across all endpoints when configured as -1.
  def getNumPartitions(endpoints: Set[Endpoint]) = {
    if (numPartitions == -1) {
      endpoints.flatMap(_.node.partitionIds).size
    } else {
      numPartitions
    }
  }

  protected def calculateHash(id: PartitionedId) = hashFn(id)

  // Builds one consistent-hash wheel (TreeMap keyed by hash) per partition,
  // inserting numReplicas points per endpoint for smoother distribution.
  @throws(classOf[InvalidClusterException])
  override def newLoadBalancer(endpoints: Set[Endpoint]): PartitionedConsistentHashedLoadBalancer[PartitionedId] = {
    val partitions = endpoints.foldLeft(Map.empty[Int, Set[Endpoint]]) { (map, endpoint) =>
      endpoint.node.partitionIds.foldLeft(map) { (map, partition) =>
        map + (partition -> (map.getOrElse(partition, Set.empty[Endpoint]) + endpoint))
      }
    }
    val wheels = partitions.map { case (partition, endpointsForPartition) =>
      val wheel = new TreeMap[Int, Endpoint]
      endpointsForPartition.foreach { endpoint =>
        var r = 0
        while (r < numReplicas) {
          val node = endpoint.node
          // key is unique per (node, partition, replica) so points spread over the wheel
          val distKey = node.id + ":" + partition + ":" + r + ":" + node.url
          wheel.put(endpointHashFn(distKey), endpoint)
          r += 1
        }
      }
      (partition, wheel)
    }
    val nPartitions = if(this.numPartitions == -1) endpoints.flatMap(_.node.partitionIds).size else numPartitions
    new PartitionedConsistentHashedLoadBalancer(nPartitions, wheels, hashFn, serveRequestsIfPartitionMissing)
  }
}
/** Load balancer that routes a PartitionedId to its partition's wheel and
  * consistent-hashes within that wheel; replicas for the same id therefore
  * stay stable, maximizing cache locality on the serving nodes. */
class PartitionedConsistentHashedLoadBalancer[PartitionedId](numPartitions: Int, wheels: Map[Int, TreeMap[Int, Endpoint]], hashFn: PartitionedId => Int, serveRequestsIfPartitionMissing: Boolean = true)
  extends PartitionedLoadBalancer[PartitionedId] with DefaultPartitionedLoadBalancerHelper {
  // needed for the containsKey call on a Scala Map below (implicit java.util.Map view)
  import scala.collection.JavaConversions._

  val endpoints = wheels.values.flatMap(_.values).toSet
  val partitionToNodeMap = generatePartitionToNodeMap(endpoints, numPartitions, serveRequestsIfPartitionMissing)
  val partitionIds = wheels.keySet.toSet

  // One node per partition such that together they cover every partition once.
  def nodesForOneReplica(id: PartitionedId, capability: Option[Long] = None, persistentCapability: Option[Long] = None) = {
    nodesForPartitions(id, wheels, capability, persistentCapability)
  }

  // All capable nodes that hold the partition this id hashes to.
  def nodesForPartitionedId(id: PartitionedId, capability: Option[Long] = None, persistentCapability: Option[Long] = None) = {
    val hash = hashFn(id)
    val partitionId = hash.abs % numPartitions
    wheels.get(partitionId).flatMap { wheel => Option(wheel.foldLeft(Set.empty[Node]) { case (set, (p, e)) => if (e.node.isCapableOf(capability, persistentCapability)) set + e.node else set }) }.get
  }

  def nodesForPartitions(id: PartitionedId, partitions: Set[Int], capability: Option[Long] = None, persistentCapability: Option[Long] = None) = {
    nodesForPartitions(id, wheels.filterKeys(partitions contains _), capability, persistentCapability)
  }

  // Core routing: map each partition wheel to a node via consistent hashing;
  // falls back to round-robin helper state when id is null.
  private def nodesForPartitions(id: PartitionedId, wheels: Map[Int, TreeMap[Int, Endpoint]], capability: Option[Long], persistentCapability: Option[Long]) = {
    if (id == null) {
      nodesForPartitions0(partitionToNodeMap filterKeys wheels.containsKey, capability, persistentCapability)
    } else {
      val hash = hashFn(id)
      wheels.foldLeft(Map.empty[Node, Set[Int]]) { case (accumulator, (partitionId, wheel)) =>
        val endpoint = PartitionUtil.searchWheel(wheel, hash, (e: Endpoint) => e.canServeRequests && e.node.isCapableOf(capability, persistentCapability) )
        if(endpoint.isDefined) {
          val node = endpoint.get.node
          val partitions = accumulator.getOrElse(node, Set.empty[Int]) + partitionId
          accumulator + (node -> partitions)
        } else if(serveRequestsIfPartitionMissing) {
          // no responsive endpoint: retry without the canServeRequests filter
          log.warn("All nodes appear to be unresponsive for partition %s, selecting the original node."
            .format(partitionId))
          val originalEndpoint = PartitionUtil.searchWheel(wheel, hash, (e: Endpoint) => e.node.isCapableOf(capability, persistentCapability))
          val node = originalEndpoint.get.node
          val partitions = accumulator.getOrElse(node, Set.empty[Int]) + partitionId
          accumulator + (node -> partitions)
        } else
          throw new InvalidClusterException("Partition %s is unavailable, cannot serve requests.".format(partitionId))
      }
    }
  }

  // Round-robin style selection (via nodeForPartition) for the null-id case.
  private def nodesForPartitions0(partitionToNodeMap: Map[Int, (IndexedSeq[Endpoint], ConcurrentCyclicCounter, Array[AtomicBoolean])], capability: Option[Long], persistentCapability: Option[Long] = None) = {
    partitionToNodeMap.keys.foldLeft(Map.empty[Node, Set[Int]]) { (map, partition) =>
      val nodeOption = nodeForPartition(partition, capability, persistentCapability)
      if(nodeOption isDefined) {
        val n = nodeOption.get
        map + (n -> (map.getOrElse(n, Set.empty[Int]) + partition))
      } else if(serveRequestsIfPartitionMissing) {
        log.warn("Partition %s is unavailable, attempting to continue serving requests to other partitions.".format(partition))
        map
      } else
        throw new InvalidClusterException("Partition %s is unavailable, cannot serve requests.".format(partition))
    }
  }

  // Single node for the partition this id hashes to, or None if none can serve.
  def nextNode(id: PartitionedId, capability: Option[Long] = None, persistentCapability: Option[Long] = None): Option[Node] = {
    val hash = hashFn(id)
    val partitionId = hash.abs % numPartitions
    wheels.get(partitionId).flatMap { wheel =>
      PartitionUtil.searchWheel(wheel, hash, (e: Endpoint) => e.canServeRequests && e.node.isCapableOf(capability, persistentCapability) )
    }.map(_.node)
  }

  def partitionForId(id: PartitionedId): Int = {
    hashFn(id).abs % numPartitions
  }
}
| thesiddharth/norbert | network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala | Scala | apache-2.0 | 7,812 |
package com.wunder.pets.validations
import java.util.regex.{Matcher, Pattern}
import org.postgresql.util.PSQLException
/** A domain validation failure that can render a human-readable message. */
trait ValidationError {
  def message: String
}
/** Signals that a required field was left blank. */
final class IsEmpty(val field: String) extends ValidationError {
  override def message: String = field + " cannot be empty"
}
/** Signals that a field's value did not exceed the required lower bound. */
final class NotGreaterThan[T](val field: String, val lowerBound: T) extends ValidationError {
  def message: String = field + " must be greater than " + lowerBound
}
/** Signals that a field's value did not stay below the required upper bound. */
final class NotLessThan[T](val field: String, val upperBound: T) extends ValidationError {
  def message: String = field + " must be less than " + upperBound
}
/** Translates a PostgreSQL unique-violation into a readable message by
  * parsing the server's "Key (col)=(value) already exists." detail text. */
final class DuplicateValue(val e: PSQLException) extends ValidationError {
  override def message: String = {
    val fallback = "Could not determine field and value."
    val regex = "Key \\((.*)\\)=\\((.*)\\) already exists."
    // getServerErrorMessage and getDetail can both be null (e.g. for
    // client-side errors); guard to avoid an NPE in Pattern.matcher.
    val detail = Option(e.getServerErrorMessage).flatMap(sem => Option(sem.getDetail))
    detail.map { d =>
      val m: Matcher = Pattern.compile(regex).matcher(d)
      if (m.matches) {
        s"${m.group(1)} has a duplicate value of ${m.group(2)}"
      } else {
        fallback
      }
    }.getOrElse(fallback)
  }
}
/** Catch-all validation failure carrying a pre-built message. */
final class GeneralError(val message: String) extends ValidationError
| wunderteam/battle-pets-api | app/validations/ValidationError.scala | Scala | mit | 1,102 |
/* Title: Pure/PIDE/headless.scala
Author: Makarius
Headless PIDE session and resources from file-system.
*/
package isabelle
import java.io.{File => JFile}
import scala.annotation.tailrec
import scala.collection.mutable
object Headless
{
/** session **/
  // Snapshot of a node, asserted to belong to the given (stable) version.
  private def stable_snapshot(
    state: Document.State, version: Document.Version, name: Document.Node.Name): Document.Snapshot =
  {
    val snapshot = state.snapshot(name)
    assert(version.id == snapshot.version.id)
    snapshot
  }
  /** Outcome of Session.use_theories: per-node status at a stable document
    * version, split into committed and still-pending nodes. */
  class Use_Theories_Result private[Headless](
    val state: Document.State,
    val version: Document.Version,
    val nodes: List[(Document.Node.Name, Document_Status.Node_Status)],
    val nodes_committed: List[(Document.Node.Name, Document_Status.Node_Status)])
  {
    // nodes that were requested but not yet handed to the commit callback
    def nodes_pending: List[(Document.Node.Name, Document_Status.Node_Status)] =
    {
      val committed = nodes_committed.iterator.map(_._1).toSet
      nodes.filter(p => !committed(p._1))
    }
    def snapshot(name: Document.Node.Name): Document.Snapshot =
      stable_snapshot(state, version, name)
    // overall success: every node (committed or not) finished without errors
    def ok: Boolean =
      (nodes.iterator ++ nodes_committed.iterator).forall({ case (_, st) => st.ok })
  }
class Session private[Headless](
session_name: String,
_session_options: => Options,
override val resources: Resources) extends isabelle.Session(_session_options, resources)
{
session =>
/* options */
def default_check_delay: Time = session_options.seconds("headless_check_delay")
def default_check_limit: Int = session_options.int("headless_check_limit")
def default_nodes_status_delay: Time = session_options.seconds("headless_nodes_status_delay")
def default_watchdog_timeout: Time = session_options.seconds("headless_watchdog_timeout")
def default_commit_cleanup_delay: Time = session_options.seconds("headless_commit_cleanup_delay")
/* temporary directory */
val tmp_dir: JFile = Isabelle_System.tmp_dir("server_session")
val tmp_dir_name: String = File.path(tmp_dir).implode
def master_directory(master_dir: String): String =
proper_string(master_dir) getOrElse tmp_dir_name
override def toString: String = session_name
override def stop(): Process_Result =
{
try { super.stop() }
finally { Isabelle_System.rm_tree(tmp_dir) }
}
/* theories */
private sealed case class Use_Theories_State(
last_update: Time = Time.now(),
nodes_status: Document_Status.Nodes_Status = Document_Status.Nodes_Status.empty,
already_committed: Map[Document.Node.Name, Document_Status.Node_Status] = Map.empty,
result: Promise[Use_Theories_Result] = Future.promise[Use_Theories_Result])
{
def update(new_nodes_status: Document_Status.Nodes_Status): Use_Theories_State =
copy(last_update = Time.now(), nodes_status = new_nodes_status)
def watchdog(watchdog_timeout: Time): Boolean =
watchdog_timeout > Time.zero && Time.now() - last_update > watchdog_timeout
def cancel_result { result.cancel }
def finished_result: Boolean = result.is_finished
def await_result { result.join_result }
def join_result: Use_Theories_Result = result.join
def check_result(
state: Document.State,
version: Document.Version,
dep_theories: List[Document.Node.Name],
beyond_limit: Boolean,
watchdog_timeout: Time,
commit: Option[(Document.Snapshot, Document_Status.Node_Status) => Unit])
: Use_Theories_State =
{
val already_committed1 =
if (commit.isDefined) {
(already_committed /: dep_theories)({ case (committed, name) =>
def parents_committed: Boolean =
version.nodes(name).header.imports.forall({ case (parent, _) =>
resources.session_base.loaded_theory(parent) || committed.isDefinedAt(parent)
})
if (!committed.isDefinedAt(name) && parents_committed &&
state.node_consolidated(version, name))
{
val snapshot = stable_snapshot(state, version, name)
val status = Document_Status.Node_Status.make(state, version, name)
commit.get.apply(snapshot, status)
committed + (name -> status)
}
else committed
})
}
else already_committed
if (beyond_limit || watchdog(watchdog_timeout) ||
dep_theories.forall(name =>
already_committed1.isDefinedAt(name) ||
state.node_consolidated(version, name) ||
nodes_status.quasi_consolidated(name)))
{
val nodes =
for (name <- dep_theories)
yield { (name -> Document_Status.Node_Status.make(state, version, name)) }
val nodes_committed =
for {
name <- dep_theories
status <- already_committed1.get(name)
} yield (name -> status)
try { result.fulfill(new Use_Theories_Result(state, version, nodes, nodes_committed)) }
catch { case _: IllegalStateException => }
}
copy(already_committed = already_committed1)
}
}
def use_theories(
theories: List[String],
qualifier: String = Sessions.DRAFT,
master_dir: String = "",
unicode_symbols: Boolean = false,
check_delay: Time = default_check_delay,
check_limit: Int = default_check_limit,
watchdog_timeout: Time = default_watchdog_timeout,
nodes_status_delay: Time = default_nodes_status_delay,
id: UUID.T = UUID.random(),
// commit: must not block, must not fail
commit: Option[(Document.Snapshot, Document_Status.Node_Status) => Unit] = None,
commit_cleanup_delay: Time = default_commit_cleanup_delay,
progress: Progress = No_Progress): Use_Theories_Result =
{
val dependencies =
{
val import_names =
theories.map(thy =>
resources.import_name(qualifier, master_directory(master_dir), thy) -> Position.none)
resources.dependencies(import_names, progress = progress).check_errors
}
val dep_theories = dependencies.theories
val dep_files =
dependencies.loaded_files(false).flatMap(_._2).
map(path => Document.Node.Name(resources.append("", path)))
val use_theories_state = Synchronized(Use_Theories_State())
def check_result_state(beyond_limit: Boolean = false)
{
val state = session.current_state()
state.stable_tip_version match {
case Some(version) =>
use_theories_state.change(
_.check_result(state, version, dep_theories, beyond_limit, watchdog_timeout, commit))
case None =>
}
}
val check_progress =
{
var check_count = 0
Event_Timer.request(Time.now(), repeat = Some(check_delay))
{
if (progress.stopped) use_theories_state.value.cancel_result
else {
check_count += 1
check_result_state(check_limit > 0 && check_count > check_limit)
}
}
}
val consumer =
{
val delay_nodes_status =
Standard_Thread.delay_first(nodes_status_delay max Time.zero) {
progress.nodes_status(use_theories_state.value.nodes_status)
}
val delay_commit_clean =
Standard_Thread.delay_first(commit_cleanup_delay max Time.zero) {
val clean = use_theories_state.value.already_committed.keySet
resources.clean_theories(session, id, clean)
}
val dep_theories_set = dep_theories.toSet
Session.Consumer[Session.Commands_Changed](getClass.getName) {
case changed =>
if (changed.nodes.exists(dep_theories_set)) {
val snapshot = session.snapshot()
val state = snapshot.state
val version = snapshot.version
val theory_progress =
use_theories_state.change_result(st =>
{
val domain =
if (st.nodes_status.is_empty) dep_theories_set
else changed.nodes.iterator.filter(dep_theories_set).toSet
val (nodes_status_changed, nodes_status1) =
st.nodes_status.update(resources, state, version,
domain = Some(domain), trim = changed.assignment)
if (nodes_status_delay >= Time.zero && nodes_status_changed) {
delay_nodes_status.invoke
}
val theory_progress =
(for {
(name, node_status) <- nodes_status1.present.iterator
if changed.nodes.contains(name) && !st.already_committed.isDefinedAt(name)
p1 = node_status.percentage
if p1 > 0 && Some(p1) != st.nodes_status.get(name).map(_.percentage)
} yield Progress.Theory(name.theory, percentage = Some(p1))).toList
(theory_progress, st.update(nodes_status1))
})
theory_progress.foreach(progress.theory(_))
check_result_state()
if (commit.isDefined && commit_cleanup_delay > Time.zero) {
if (use_theories_state.value.finished_result)
delay_commit_clean.revoke
else delay_commit_clean.invoke
}
}
}
}
try {
session.commands_changed += consumer
resources.load_theories(session, id, dep_theories, dep_files, unicode_symbols, progress)
use_theories_state.value.await_result
check_progress.cancel
}
finally {
session.commands_changed -= consumer
resources.unload_theories(session, id, dep_theories)
}
use_theories_state.value.join_result
}
/** Remove theories from the session document model.
  *
  * @param theories  theory names to purge (ignored when all = true)
  * @param qualifier session qualifier used to resolve theory names
  * @param master_dir base directory for relative theory names
  * @param all        purge every loaded theory instead of the given list
  * @return (purged nodes, retained nodes)
  */
def purge_theories(
  theories: List[String],
  qualifier: String = Sessions.DRAFT,
  master_dir: String = "",
  all: Boolean = false): (List[Document.Node.Name], List[Document.Node.Name]) =
{
  // None means "purge all"; otherwise resolve each name via the resources.
  val nodes =
    if (all) None
    else {
      Some(
        for (thy <- theories)
        yield resources.import_name(qualifier, master_directory(master_dir), thy))
    }
  resources.purge_theories(session, nodes)
}
}
/** resources **/
object Resources
{
  /** Construct resources from a pre-computed session base. */
  def apply(base_info: Sessions.Base_Info, log: Logger = No_Logger): Resources =
    new Resources(base_info, log = log)

  /** Construct resources by computing the session base for session_name. */
  def make(
    options: Options,
    session_name: String,
    session_dirs: List[Path] = Nil,
    include_sessions: List[String] = Nil,
    progress: Progress = No_Progress,
    log: Logger = No_Logger): Resources =
  {
    val base_info =
      Sessions.base_info(options, session_name, dirs = session_dirs,
        include_sessions = include_sessions, progress = progress)
    apply(base_info, log = log)
  }

  /**
   * Immutable snapshot of one theory document: its node name, parsed header,
   * full source text, and whether it is currently "required" (i.e. kept
   * active by at least one use_theories invocation).
   */
  final class Theory private[Headless](
    val node_name: Document.Node.Name,
    val node_header: Document.Node.Header,
    val text: String,
    val node_required: Boolean)
  {
    override def toString: String = node_name.toString

    // Perspective marks the node as required (or not) without any text focus.
    def node_perspective: Document.Node.Perspective_Text =
      Document.Node.Perspective(node_required, Text.Perspective.empty, Document.Node.Overlays.empty)

    // Full edit bundle for this node: header dependencies, text edits, perspective.
    def make_edits(text_edits: List[Text.Edit]): List[Document.Edit_Text] =
      List(node_name -> Document.Node.Deps(node_header),
        node_name -> Document.Node.Edits(text_edits),
        node_name -> node_perspective)

    /** Edits that turn `old` (None = not yet loaded) into this theory;
      * Nil if neither the text nor the required-flag changed. */
    def node_edits(old: Option[Theory]): List[Document.Edit_Text] =
    {
      val (text_edits, old_required) =
        if (old.isEmpty) (Text.Edit.inserts(0, text), false)
        else (Text.Edit.replace(0, old.get.text, text), old.get.node_required)

      if (text_edits.isEmpty && node_required == old_required) Nil
      else make_edits(text_edits)
    }

    /** Edits that remove the entire text of this theory from the document. */
    def purge_edits: List[Document.Edit_Text] =
      make_edits(Text.Edit.removes(0, text))

    /** Copy with updated required-flag (identity if unchanged). */
    def required(required: Boolean): Theory =
      if (required == node_required) this
      else new Theory(node_name, node_header, text, required)
  }

  /**
   * Purely functional registry of loaded file blobs and theories, plus
   * reference counts: `required` maps each theory to the UUIDs of the
   * use_theories invocations that currently keep it alive.
   */
  sealed case class State(
    blobs: Map[Document.Node.Name, Document.Blob] = Map.empty,
    theories: Map[Document.Node.Name, Theory] = Map.empty,
    required: Multi_Map[Document.Node.Name, UUID.T] = Multi_Map.empty)
  {
    /* blobs */

    def doc_blobs: Document.Blobs = Document.Blobs(blobs)

    /** Re-read the given files from disk; a blob is replaced only if its
      * bytes actually differ. Returns the blobs marked "changed" (for the
      * session update) and a new state whose blobs are marked "unchanged". */
    def update_blobs(names: List[Document.Node.Name]): (Document.Blobs, State) =
    {
      val new_blobs =
        names.flatMap(name =>
          {
            val bytes = Bytes.read(name.path)
            def new_blob: Document.Blob =
            {
              val text = bytes.text
              Document.Blob(bytes, text, Symbol.Text_Chunk(text), changed = true)
            }
            blobs.get(name) match {
              case Some(blob) => if (blob.bytes == bytes) None else Some(name -> new_blob)
              case None => Some(name -> new_blob)
            }
          })
      // blobs1 keeps changed = true (reported to the caller); blobs2 stores
      // the same blobs with the changed-flag reset for the retained state.
      val blobs1 = (blobs /: new_blobs)(_ + _)
      val blobs2 = (blobs /: new_blobs)({ case (map, (a, b)) => map + (a -> b.unchanged) })
      (Document.Blobs(blobs1), copy(blobs = blobs2))
    }

    /** Text edits that turn `old_blob` (None = new file) into the currently
      * stored blob for `name`; Nil if the content is unchanged. */
    def blob_edits(name: Document.Node.Name, old_blob: Option[Document.Blob])
      : List[Document.Edit_Text] =
    {
      val blob = blobs.getOrElse(name, error("Missing blob " + quote(name.toString)))
      val text_edits =
        old_blob match {
          case None => List(Text.Edit.insert(0, blob.source))
          case Some(blob0) => Text.Edit.replace(0, blob0.source, blob.source)
        }
      if (text_edits.isEmpty) Nil
      else List(name -> Document.Node.Blob(blob), name -> Document.Node.Edits(text_edits))
    }


    /* theories */

    // Import graph restricted to currently loaded theories (edges from the
    // parsed headers); recomputed lazily whenever a new State is made.
    lazy val theory_graph: Graph[Document.Node.Name, Unit] =
    {
      val entries =
        for ((name, theory) <- theories.toList)
        yield ((name, ()), theory.node_header.imports.map(_._1).filter(theories.isDefinedAt(_)))
      Graph.make(entries, symmetric = true)(Document.Node.Name.Ordering)
    }

    def is_required(name: Document.Node.Name): Boolean = required.isDefinedAt(name)

    /** Add a reference from invocation `id` to each of the given theories. */
    def insert_required(id: UUID.T, names: List[Document.Node.Name]): State =
      copy(required = (required /: names)(_.insert(_, id)))

    /** Drop the reference from invocation `id` to each of the given theories. */
    def remove_required(id: UUID.T, names: List[Document.Node.Name]): State =
      copy(required = (required /: names)(_.remove(_, id)))

    /** Replace theory entries, sharing unchanged ones to preserve identity. */
    def update_theories(update: List[(Document.Node.Name, Theory)]): State =
      copy(theories =
        (theories /: update)({ case (thys, (name, thy)) =>
          thys.get(name) match {
            case Some(thy1) if thy1 == thy => thys
            case _ => thys + (name -> thy)
          }
        }))

    /** Remove theories that are no longer referenced by any invocation. */
    def remove_theories(remove: List[Document.Node.Name]): State =
    {
      require(remove.forall(name => !is_required(name)))
      copy(theories = theories -- remove)
    }

    /** Release invocation `id`'s hold on dep_theories, recompute each
      * theory's required-flag, and push the resulting edits to the session. */
    def unload_theories(session: Session, id: UUID.T, dep_theories: List[Document.Node.Name])
      : State =
    {
      val st1 = remove_required(id, dep_theories)
      val theory_edits =
        for {
          node_name <- dep_theories
          theory <- st1.theories.get(node_name)
        }
        yield {
          val theory1 = theory.required(st1.is_required(node_name))
          val edits = theory1.node_edits(Some(theory))
          (edits, (node_name, theory1))
        }
      session.update(doc_blobs, theory_edits.flatMap(_._1))
      st1.update_theories(theory_edits.map(_._2))
    }

    /** Purge the given nodes (unless still required) together with anything
      * only they depend on; retained = predecessors of nodes that survive. */
    def purge_theories(session: Session, nodes: List[Document.Node.Name])
      : ((List[Document.Node.Name], List[Document.Node.Name]), State) =
    {
      val all_nodes = theory_graph.topological_order
      val purge = nodes.filterNot(is_required(_)).toSet

      val retain = theory_graph.all_preds(all_nodes.filterNot(purge)).toSet
      val (retained, purged) = all_nodes.partition(retain)

      val purge_edits = purged.flatMap(name => theories(name).purge_edits)
      session.update(doc_blobs, purge_edits)

      ((purged, retained), remove_theories(purged))
    }

    /** Theories in `clean` that no other theory (outside the frontier)
      * still imports, i.e. safe to unload; computed by walking backwards
      * from the maximal nodes of the import graph. */
    def frontier_theories(clean: Set[Document.Node.Name]): Set[Document.Node.Name] =
    {
      @tailrec def frontier(base: List[Document.Node.Name], front: Set[Document.Node.Name])
        : Set[Document.Node.Name] =
      {
        val add = base.filter(b => theory_graph.imm_succs(b).forall(front))
        if (add.isEmpty) front
        else {
          val pre_add = add.map(theory_graph.imm_preds)
          val base1 = (pre_add.head /: pre_add.tail)(_ ++ _).toList.filter(clean)
          frontier(base1, front ++ add)
        }
      }
      frontier(theory_graph.maximals.filter(clean), Set.empty)
    }
  }
}
/**
 * Headless PIDE resources: manages the mutable registry of loaded theories
 * and files (see Resources.State) and starts/feeds the prover session.
 */
class Resources private[Headless](
  val session_base_info: Sessions.Base_Info,
  log: Logger = No_Logger)
  extends isabelle.Resources(session_base_info.check_base, log = log)
{
  resources =>

  def options: Options = session_base_info.options


  /* dependencies */

  /** Theories actually used by the given dependencies, excluding those
    * already part of the (pre-loaded) session image. */
  def used_theories(
    deps: Sessions.Deps, progress: Progress = No_Progress): List[Document.Node.Name] =
  {
    for {
      (_, name) <- deps.used_theories_condition(options, progress = progress)
      if !session_base.loaded_theory(name)
    } yield name
  }


  /* session */

  /** Start the prover process and block until it is Ready; on failed
    * startup the session is stopped and an error is raised. */
  def start_session(print_mode: List[String] = Nil, progress: Progress = No_Progress): Session =
  {
    val session = new Session(session_base_info.session, options, resources)

    // One-shot phase listener: fulfills the promise exactly once, with ""
    // on success or an error message on abnormal termination, then
    // deregisters itself. The var is needed so the closure can refer to
    // the consumer being defined.
    val session_error = Future.promise[String]
    var session_phase: Session.Consumer[Session.Phase] = null
    session_phase =
      Session.Consumer(getClass.getName) {
        case Session.Ready =>
          session.phase_changed -= session_phase
          session_error.fulfill("")
        case Session.Terminated(result) if !result.ok =>
          session.phase_changed -= session_phase
          session_error.fulfill("Session start failed: return code " + result.rc)
        case _ =>
      }
    session.phase_changed += session_phase

    progress.echo("Starting session " + session_base_info.session + " ...")
    Isabelle_Process.start(session, options,
      logic = session_base_info.session, dirs = session_base_info.dirs, modes = print_mode)

    session_error.join match {
      case "" => session
      case msg => session.stop(); error(msg)
    }
  }


  /* theories */

  // Shared registry of loaded blobs/theories; all access goes through
  // Synchronized.change so concurrent use_theories invocations are safe.
  private val state = Synchronized(Resources.State())

  /** Read the given theories (and auxiliary files) from disk, register
    * invocation `id` as requiring them, and send the resulting document
    * edits to the prover session. */
  def load_theories(
    session: Session,
    id: UUID.T,
    dep_theories: List[Document.Node.Name],
    dep_files: List[Document.Node.Name],
    unicode_symbols: Boolean,
    progress: Progress)
  {
    // File reading happens outside the state lock and may be interrupted.
    val loaded_theories =
      for (node_name <- dep_theories)
      yield {
        val path = node_name.path
        if (!node_name.is_theory) error("Not a theory file: " + path)

        progress.expose_interrupt()
        val text0 = File.read(path)
        val text = if (unicode_symbols) Symbol.decode(text0) else text0
        val node_header = resources.check_thy_reader(node_name, Scan.char_reader(text))
        new Resources.Theory(node_name, node_header, text, true)
      }

    val loaded = loaded_theories.length
    if (loaded > 1) progress.echo("Loading " + loaded + " theories ...")

    state.change(st =>
      {
        val (doc_blobs1, st1) = st.insert_required(id, dep_theories).update_blobs(dep_files)
        val theory_edits =
          for (theory <- loaded_theories)
          yield {
            val node_name = theory.node_name
            val theory1 = theory.required(st1.is_required(node_name))
            val edits = theory1.node_edits(st1.theories.get(node_name))
            (edits, (node_name, theory1))
          }
        val file_edits =
          for { node_name <- dep_files if doc_blobs1.changed(node_name) }
          yield st1.blob_edits(node_name, st.blobs.get(node_name))

        session.update(doc_blobs1, theory_edits.flatMap(_._1) ::: file_edits.flatten)
        st1.update_theories(theory_edits.map(_._2))
      })
  }

  /** Release invocation `id`'s hold on the given theories. */
  def unload_theories(session: Session, id: UUID.T, dep_theories: List[Document.Node.Name])
  {
    state.change(_.unload_theories(session, id, dep_theories))
  }

  /** Unload and purge the "frontier" of already-committed theories that no
    * remaining theory depends on (used for incremental cleanup). */
  def clean_theories(session: Session, id: UUID.T, clean: Set[Document.Node.Name])
  {
    state.change(st =>
      {
        val frontier = st.frontier_theories(clean).toList
        if (frontier.isEmpty) st
        else {
          val st1 = st.unload_theories(session, id, frontier)
          val (_, st2) = st1.purge_theories(session, frontier)
          st2
        }
      })
  }

  /** Purge the given nodes (or all loaded theories for None); returns
    * (purged, retained) node names. */
  def purge_theories(session: Session, nodes: Option[List[Document.Node.Name]])
    : (List[Document.Node.Name], List[Document.Node.Name]) =
  {
    state.change_result(st => st.purge_theories(session, nodes getOrElse st.theory_graph.keys))
  }
}
}
| larsrh/libisabelle | modules/pide/2019-RC4/src/main/scala/PIDE/headless.scala | Scala | apache-2.0 | 21,448 |
package org.zouzias.akka.remote.examples.actors
import akka.actor.Actor
import com.typesafe.config.ConfigFactory
/**
 * Local side of an Akka remoting example: looks up a remote actor by the
 * address configured in "remote_application.conf" and exchanges greeting
 * messages with it, pausing every fifth round.
 */
class LocalActor extends Actor {

  // Remote endpoint settings are read from remote_application.conf.
  val config = ConfigFactory.load("remote_application")
  val actorName = config.getString("akka.actor.name")
  val systemName = config.getString("akka.system.name")
  val host = config.getString("akka.remote.netty.tcp.hostname")
  val port = config.getString("akka.remote.netty.tcp.port")

  // Look up the (already running) remote actor by its actor path; this is a
  // selection, not actor creation.
  val remote = context.actorSelection(s"akka.tcp://${systemName}@${host}:${port}/user/${actorName}")

  // Replies sent in the current round; actor-internal mutable state is safe
  // because receive is invoked single-threaded per actor.
  var counter = 0

  def receive = {
    case "START" =>
      remote ! s"Hello from the LocalActor"
    case msg: String =>
      println(s"LocalActor received message: '$msg'")
      println(s"Counter: ${counter}")
      if (counter < 5) {
        sender ! s"Hello back to you, ${this.actorName}"
        counter += 1
      }
      else{
        // NOTE(review): Thread.sleep blocks the dispatcher thread for 5s;
        // for anything beyond a demo, prefer
        // context.system.scheduler.scheduleOnce instead.
        Thread.sleep(5000L)
        counter = 0
        sender ! s"Hello back to you again after 5s, ${this.actorName}"
      }
  }
}
| zouzias/akka-remote-examples | src/main/scala/org/zouzias/akka/remote/examples/actors/LocalActor.scala | Scala | apache-2.0 | 1,102 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.optimize
import org.apache.flink.table.api.{BatchTableEnvironment, TableConfig}
import org.apache.flink.table.plan.optimize.program.{BatchOptimizeContext, FlinkBatchProgram}
import org.apache.flink.util.Preconditions
import org.apache.calcite.plan.volcano.VolcanoPlanner
import org.apache.calcite.rel.RelNode
/**
 * Query optimizer for Batch: runs each query plan through the configured
 * (or default) batch optimization program chain.
 */
class BatchOptimizer(tEnv: BatchTableEnvironment) extends Optimizer {

  override def optimize(roots: Seq[RelNode]): Seq[RelNode] =
    // TODO optimize multi-roots as a whole DAG
    for (root <- roots) yield optimizeTree(root)

  /**
   * Generates the optimized [[RelNode]] tree from the original relational
   * node tree, using the user-supplied batch program if present, otherwise
   * the default [[FlinkBatchProgram]].
   *
   * @param relNode the original [[RelNode]] tree
   * @return the optimized [[RelNode]] tree
   */
  private def optimizeTree(relNode: RelNode): RelNode = {
    val tableConfig = tEnv.getConfig
    val programs = tableConfig.getCalciteConfig.getBatchProgram.getOrElse(
      FlinkBatchProgram.buildProgram(tableConfig.getConf))
    Preconditions.checkNotNull(programs)

    // Context handed to each program: the table config plus the underlying
    // Volcano planner of the environment.
    val context = new BatchOptimizeContext {
      override def getTableConfig: TableConfig = tableConfig

      override def getVolcanoPlanner: VolcanoPlanner =
        tEnv.getPlanner.asInstanceOf[VolcanoPlanner]
    }
    programs.optimize(relNode, context)
  }
}
| ueshin/apache-flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/optimize/BatchOptimizer.scala | Scala | apache-2.0 | 2,097 |
// Copyright (c) 2016 PSForever.net to present
package net.psforever.types
import net.psforever.newcodecs._
import scodec.Codec
import scodec.codecs._
/** Immutable three-component vector (x, y, z), used for both world
  * positions and velocities (see the codecs in the companion object). */
final case class Vector3(x : Float,
                         y : Float,
                         z : Float)
object Vector3 {
  // NOTE(review): both codecs are implicit vals of the same type, so
  // summoning Codec[Vector3] implicitly is ambiguous here; call sites
  // presumably select codec_pos / codec_vel explicitly.

  /** Codec for absolute positions: x and y quantized to 20 bits over
    * [0, 8192], z to 16 bits over [0, 1024] (values outside these ranges
    * cannot be represented — presumably world-map coordinate limits). */
  implicit val codec_pos : Codec[Vector3] = (
      ("x" | newcodecs.q_float(0.0, 8192.0, 20)) ::
      ("y" | newcodecs.q_float(0.0, 8192.0, 20)) ::
      ("z" | newcodecs.q_float(0.0, 1024.0, 16))
    ).as[Vector3]

  /** Codec for velocities: each component quantized to 14 bits over the
    * signed range [-256, 256]. */
  implicit val codec_vel : Codec[Vector3] = (
      ("x" | newcodecs.q_float(-256.0, 256.0, 14)) ::
      ("y" | newcodecs.q_float(-256.0, 256.0, 14)) ::
      ("z" | newcodecs.q_float(-256.0, 256.0, 14))
    ).as[Vector3]
}
| Fate-JH/PSF-Server | common/src/main/scala/net/psforever/types/Vector3.scala | Scala | gpl-3.0 | 722 |
package com.ticketfly.spreedly
import java.io.IOException
import java.net.URLEncoder
import akka.actor.ActorSystem
import akka.util.Timeout
import com.ticketfly.spreedly.util.{BasicHttpRequest, RestDispatcher}
import spray.http._
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.reflect.ClassTag
/**
 * REST dispatcher for the Spreedly API: builds authenticated XML requests
 * against the configured endpoint and (de)serializes bodies via
 * [[SpreedlyXmlSerializer]]. All calls return Futures on the actor system's
 * dispatcher.
 */
class SpreedlyRestDispatcher(config: SpreedlyConfiguration)
  (implicit system: ActorSystem) extends RestDispatcher {

  import system.dispatcher

  implicit val timeout: Timeout = config.requestTimeout.seconds

  protected val spreedlySerializer = new SpreedlyXmlSerializer()

  /**
   * Build the full request URL: each "/"-separated segment of urlPart is
   * URL-encoded, ".xml" is appended (except for transcript endpoints, which
   * Spreedly serves without a format suffix), and query parameters are
   * encoded and appended.
   *
   * NOTE(review): URLEncoder produces application/x-www-form-urlencoded
   * output (space -> "+"), which is not strictly correct for URL *path*
   * segments — confirm segment values never contain spaces/reserved chars.
   * Also assumes urlPart has no leading "/" (a leading slash would yield
   * "//" in the result).
   */
  private def encodeUrl(urlPart: String, queryParams: Map[String, String] = Map.empty[String, String]): String = {
    val encodedUrlPart = urlPart.split("/").foldLeft("")((memo, part) => s"$memo/${URLEncoder.encode(part, "UTF-8")}")

    // In the case of a transcript, do not append the format to the URL
    val baseUrl = if (urlPart.contains("transcript")) {
      s"${config.apiUrl}$encodedUrlPart"
    } else {
      s"${config.apiUrl}$encodedUrlPart.xml"
    }

    if (queryParams.isEmpty) {
      baseUrl
    } else {
      // Append "key=value&" per entry, then drop the trailing "&".
      queryParams.foldLeft(s"$baseUrl?") { (memo: String, kv: (String, String)) =>
        memo + s"${URLEncoder.encode(kv._1, "UTF-8")}=${URLEncoder.encode(kv._2, "UTF-8")}&"
      }.dropRight(1)
    }
  }

  /**
   * Assemble a request with basic-auth credentials (environment key/secret),
   * XML content type and Accept header; an optional payload is serialized
   * to XML.
   *
   * NOTE(review): this local implicit timeout shadows the identical
   * class-level one — it looks redundant.
   */
  protected def buildHttpRequest(urlPart: String,
                                 method: HttpMethod = HttpMethods.GET,
                                 content: Option[AnyRef] = None,
                                 queryParams: Map[String, String] = Map.empty[String, String]): BasicHttpRequest = {
    implicit val timeout: Timeout = config.requestTimeout.seconds

    val request = new BasicHttpRequest(encodeUrl(urlPart, queryParams), method)
    request.contentType = ContentType(MediaTypes.`application/xml`)
    request.headers = List(
      HttpHeaders.Authorization(BasicHttpCredentials(config.environmentKey, config.accessSecret)),
      HttpHeaders.Accept(MediaTypes.`application/xml`)
    )

    if (content.nonEmpty) {
      request.body = spreedlySerializer.serialize(content.orNull)
    }

    request
  }

  /** Execute the request and deserialize the XML response body to T;
    * IOExceptions are wrapped in SpreedlyException, SpreedlyExceptions
    * propagate unchanged. */
  protected def execute[T <: AnyRef : ClassTag](httpRequest: BasicHttpRequest): Future[T] = {
    httpRequest.execute(config.ssl).map(response => {
      spreedlySerializer.deserialize(response.entity.asString)
    }) recover {
      case e: SpreedlyException => throw e
      case e: IOException => throw new SpreedlyException(e)
    }
  }

  // Thin verb-specific wrappers over buildHttpRequest + execute.

  def get[T <: AnyRef : ClassTag](url: String, queryParams: Map[String, String] = Map.empty[String, String]): Future[T] = {
    execute(buildHttpRequest(url, HttpMethods.GET, None, queryParams))
  }

  def options[T <: AnyRef : ClassTag](url: String, queryParams: Map[String, String] = Map.empty[String, String]): Future[T] = {
    execute(buildHttpRequest(url, HttpMethods.OPTIONS, None, queryParams))
  }

  def put[T <: AnyRef : ClassTag](url: String,
                                  content: Option[AnyRef] = None,
                                  queryParams: Map[String, String] = Map.empty[String, String]): Future[T] = {
    execute(buildHttpRequest(url, HttpMethods.PUT, content, queryParams))
  }

  def post[T <: AnyRef : ClassTag](url: String,
                                   content: Option[AnyRef] = None,
                                   queryParams: Map[String, String] = Map.empty[String, String]): Future[T] = {
    execute(buildHttpRequest(url, HttpMethods.POST, content, queryParams))
  }

  def delete[T <: AnyRef : ClassTag](url: String, queryParams: Map[String, String] = Map.empty[String, String]): Future[T] = {
    execute(buildHttpRequest(url, HttpMethods.DELETE, None, queryParams))
  }
}
| Ticketfly/spreedly-scala | src/main/scala/com/ticketfly/spreedly/SpreedlyRestDispatcher.scala | Scala | mit | 3,689 |
import javafx.beans.value.{ChangeListener, ObservableValue}
import javafx.concurrent.Worker.State
import scavlink.test.map.{FlightMap, SimFlight, Timer}
import scalafx.Includes._
import scalafx.application.JFXApp.PrimaryStage
import scalafx.application.{JFXApp, Platform}
import scalafx.scene.Scene
import scalafx.scene.layout._
import scalafx.scene.paint.Color
import scalafx.scene.web._
/**
 * ScalaFX application that embeds a WebView showing an HTML map and, once
 * the page has loaded, drives a simulated flight on it.
 */
object MapView extends JFXApp {

  val flight = SimFlight.Nothing
  //  val flight = SimFlight.SunnyvaleTransects

  // WebView with verbose event logging for debugging the embedded page.
  val browser = new WebView {
    hgrow = Priority.Always
    vgrow = Priority.Always
    onAlert = (e: WebEvent[_]) => println("onAlert: " + e)
    onResized = (e: WebEvent[_]) => println("onResized: " + e)
    onVisibilityChanged = (e: WebEvent[_]) => println("onVisibilityChanged: " + e)
    onStatusChanged = (e: WebEvent[_]) => println("onStatusChanged: " + e)
  }

  stage = new PrimaryStage {
    title = "Map"
    width = 800
    height = 600
    scene = new Scene {
      fill = Color.LightGray
      root = new BorderPane {
        hgrow = Priority.Always
        vgrow = Priority.Always
        center = browser
      }
    }
  }

  // Fires once page loading SUCCEEDs and then starts the flight. Referring
  // to `engine` (defined below) is safe: the listener only runs after the
  // object is fully initialized.
  val startMap = new ChangeListener[State] {
    def changed(ov: ObservableValue[_ <: State], oldState: State, newState: State): Unit = {
      if (newState == State.SUCCEEDED) {
        val flightMap = new FlightMap(engine, flight)
        // since callback from javascript to JavaFX didn't work, give the map time to initialize
        Timer(2000, false) { Platform.runLater { flightMap.run() } }
      }
    }
  }

  // Register the load listener before starting the page load, so the
  // SUCCEEDED transition cannot be missed.
  val engine = browser.engine
  engine.javaScriptEnabled = true
  engine.getLoadWorker.stateProperty.addListener(startMap)
  engine.load(getClass.getResource("/map/map.html").toString)
}
| nickolasrossi/scavlink | src/it/scala/MapView.scala | Scala | mit | 1,760 |
import scala.collection.mutable
import scala.annotation.tailrec
// The following command:
//
// sc typeclass-scaling.scala -Xmax-inlines 100 -Xprint:typer -color:never -pagewidth 1000 >& x
//
// produces an output file with `wc` measures (lines/words/chars):
//
// 89327 162884 7220258
//
// The command
//
// time sc typeclass-scaling.scala -Xmax-inlines 100
//
// gives (best of three):
//
// real 0m16.593s
// user 1m6.337s
// sys 0m1.344s
// Sixteen enums E1..E16, where En has n cases and case Ck has k fields, all
// deriving Eq and Pickler. The point of this family is to measure how the
// size/compile time of Mirror-based type-class derivation scales with the
// number of cases and fields (see the measurements in the file header).
object datatypes {
  import typeclasses.*

  enum E1[T] derives Eq, Pickler {
    case C1(x1: T)
  }

  enum E2[T] derives Eq, Pickler {
    case C1(x1: T)
    case C2(x1: T, x2: T)
  }

  enum E3[T] derives Eq, Pickler {
    case C1(x1: T)
    case C2(x1: T, x2: T)
    case C3(x1: T, x2: T, x3: T)
  }

  enum E4[T] derives Eq, Pickler {
    case C1(x1: T)
    case C2(x1: T, x2: T)
    case C3(x1: T, x2: T, x3: T)
    case C4(x1: T, x2: T, x3: T, x4: T)
  }

  enum E5[T] derives Eq, Pickler {
    case C1(x1: T)
    case C2(x1: T, x2: T)
    case C3(x1: T, x2: T, x3: T)
    case C4(x1: T, x2: T, x3: T, x4: T)
    case C5(x1: T, x2: T, x3: T, x4: T, x5: T)
  }

  enum E6[T] derives Eq, Pickler {
    case C1(x1: T)
    case C2(x1: T, x2: T)
    case C3(x1: T, x2: T, x3: T)
    case C4(x1: T, x2: T, x3: T, x4: T)
    case C5(x1: T, x2: T, x3: T, x4: T, x5: T)
    case C6(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T)
  }

  enum E7[T] derives Eq, Pickler {
    case C1(x1: T)
    case C2(x1: T, x2: T)
    case C3(x1: T, x2: T, x3: T)
    case C4(x1: T, x2: T, x3: T, x4: T)
    case C5(x1: T, x2: T, x3: T, x4: T, x5: T)
    case C6(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T)
    case C7(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T)
  }

  enum E8[T] derives Eq, Pickler {
    case C1(x1: T)
    case C2(x1: T, x2: T)
    case C3(x1: T, x2: T, x3: T)
    case C4(x1: T, x2: T, x3: T, x4: T)
    case C5(x1: T, x2: T, x3: T, x4: T, x5: T)
    case C6(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T)
    case C7(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T)
    case C8(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T)
  }

  enum E9[T] derives Eq, Pickler {
    case C1(x1: T)
    case C2(x1: T, x2: T)
    case C3(x1: T, x2: T, x3: T)
    case C4(x1: T, x2: T, x3: T, x4: T)
    case C5(x1: T, x2: T, x3: T, x4: T, x5: T)
    case C6(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T)
    case C7(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T)
    case C8(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T)
    case C9(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T)
  }

  enum E10[T] derives Eq, Pickler {
    case C1(x1: T)
    case C2(x1: T, x2: T)
    case C3(x1: T, x2: T, x3: T)
    case C4(x1: T, x2: T, x3: T, x4: T)
    case C5(x1: T, x2: T, x3: T, x4: T, x5: T)
    case C6(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T)
    case C7(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T)
    case C8(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T)
    case C9(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T)
    case C10(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T)
  }

  enum E11[T] derives Eq, Pickler {
    case C1(x1: T)
    case C2(x1: T, x2: T)
    case C3(x1: T, x2: T, x3: T)
    case C4(x1: T, x2: T, x3: T, x4: T)
    case C5(x1: T, x2: T, x3: T, x4: T, x5: T)
    case C6(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T)
    case C7(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T)
    case C8(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T)
    case C9(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T)
    case C10(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T)
    case C11(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T)
  }

  enum E12[T] derives Eq, Pickler {
    case C1(x1: T)
    case C2(x1: T, x2: T)
    case C3(x1: T, x2: T, x3: T)
    case C4(x1: T, x2: T, x3: T, x4: T)
    case C5(x1: T, x2: T, x3: T, x4: T, x5: T)
    case C6(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T)
    case C7(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T)
    case C8(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T)
    case C9(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T)
    case C10(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T)
    case C11(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T)
    case C12(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T, x12: T)
  }

  enum E13[T] derives Eq, Pickler {
    case C1(x1: T)
    case C2(x1: T, x2: T)
    case C3(x1: T, x2: T, x3: T)
    case C4(x1: T, x2: T, x3: T, x4: T)
    case C5(x1: T, x2: T, x3: T, x4: T, x5: T)
    case C6(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T)
    case C7(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T)
    case C8(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T)
    case C9(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T)
    case C10(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T)
    case C11(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T)
    case C12(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T, x12: T)
    case C13(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T, x12: T, x13: T)
  }

  enum E14[T] derives Eq, Pickler {
    case C1(x1: T)
    case C2(x1: T, x2: T)
    case C3(x1: T, x2: T, x3: T)
    case C4(x1: T, x2: T, x3: T, x4: T)
    case C5(x1: T, x2: T, x3: T, x4: T, x5: T)
    case C6(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T)
    case C7(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T)
    case C8(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T)
    case C9(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T)
    case C10(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T)
    case C11(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T)
    case C12(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T, x12: T)
    case C13(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T, x12: T, x13: T)
    case C14(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T, x12: T, x13: T, x14: T)
  }

  enum E15[T] derives Eq, Pickler {
    case C1(x1: T)
    case C2(x1: T, x2: T)
    case C3(x1: T, x2: T, x3: T)
    case C4(x1: T, x2: T, x3: T, x4: T)
    case C5(x1: T, x2: T, x3: T, x4: T, x5: T)
    case C6(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T)
    case C7(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T)
    case C8(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T)
    case C9(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T)
    case C10(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T)
    case C11(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T)
    case C12(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T, x12: T)
    case C13(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T, x12: T, x13: T)
    case C14(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T, x12: T, x13: T, x14: T)
    case C15(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T, x12: T, x13: T, x14: T, x15: T)
  }

  enum E16[T] derives Eq, Pickler {
    case C1(x1: T)
    case C2(x1: T, x2: T)
    case C3(x1: T, x2: T, x3: T)
    case C4(x1: T, x2: T, x3: T, x4: T)
    case C5(x1: T, x2: T, x3: T, x4: T, x5: T)
    case C6(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T)
    case C7(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T)
    case C8(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T)
    case C9(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T)
    case C10(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T)
    case C11(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T)
    case C12(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T, x12: T)
    case C13(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T, x12: T, x13: T)
    case C14(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T, x12: T, x13: T, x14: T)
    case C15(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T, x12: T, x13: T, x14: T, x15: T)
    case C16(x1: T, x2: T, x3: T, x4: T, x5: T, x6: T, x7: T, x8: T, x9: T, x10: T, x11: T, x12: T, x13: T, x14: T, x15: T, x16: T)
  }
}
// Mirror-based derivation of two type classes (Eq and Pickler). Both follow
// the same scheme: inline methods recurse at compile time over the tuple of
// element/alternative types (via erasedValue), while an Int index n tracks
// the corresponding runtime position (Product element or case ordinal).
object typeclasses {
  import compiletime.summonFrom

  // A type class
  trait Eq[T] {
    def eql(x: T, y: T): Boolean
  }

  object Eq {
    import scala.compiletime.erasedValue
    import compiletime.*
    import scala.deriving.*

    // Summon (or derive) the Eq instance for TT at inline-expansion time.
    inline def tryEql[TT](x: TT, y: TT): Boolean = summonInline[Eq[TT]].eql(x, y)

    // Compare the fields of a product pairwise, field n onwards;
    // Elems is peeled one element type per recursion step.
    inline def eqlElems[Elems <: Tuple](n: Int)(x: Product, y: Product): Boolean =
      inline erasedValue[Elems] match {
        case _: (elem *: elems1) =>
          tryEql[elem](x.productElement(n).asInstanceOf[elem], y.productElement(n).asInstanceOf[elem]) &&
          eqlElems[elems1](n + 1)(x, y)
        case _: EmptyTuple =>
          true
      }

    // Dispatch on the runtime case ordinal `ord`: when it matches the
    // compile-time position n, compare that alternative's fields.
    inline def eqlCases[Alts](n: Int)(x: Any, y: Any, ord: Int): Boolean =
      inline erasedValue[Alts] match {
        case _: (alt *: alts1) =>
          if (ord == n)
            summonFrom {
              case m: Mirror.ProductOf[`alt`] =>
                eqlElems[m.MirroredElemTypes](0)(x.asInstanceOf[Product], y.asInstanceOf[Product])
            }
          else eqlCases[alts1](n + 1)(x, y, ord)
        case _: EmptyTuple =>
          false
      }

    // Entry point used by `derives Eq`: sums compare ordinals first, then
    // the matching case's fields; products compare all fields.
    inline def derived[T](implicit ev: Mirror.Of[T]): Eq[T] = new Eq[T] {
      def eql(x: T, y: T): Boolean =
        inline ev match {
          case m: Mirror.SumOf[T] =>
            val ord = m.ordinal(x)
            ord == m.ordinal(y) && eqlCases[m.MirroredElemTypes](0)(x, y, ord)
          case m: Mirror.ProductOf[T] =>
            eqlElems[m.MirroredElemTypes](0)(x.asInstanceOf[Product], y.asInstanceOf[Product])
        }
    }

    // Base instance for the leaf type used in the tests.
    implicit object IntEq extends Eq[Int] {
      def eql(x: Int, y: Int) = x == y
    }
  }

  // Another type class: serializes values to/from a buffer of Ints.
  trait Pickler[T] {
    def pickle(buf: mutable.ListBuffer[Int], x: T): Unit
    def unpickle(buf: mutable.ListBuffer[Int]): T
  }

  object Pickler {
    import scala.compiletime.{erasedValue, constValue}
    import compiletime.*
    import deriving.*

    // Pop the next Int off the front of the buffer.
    def nextInt(buf: mutable.ListBuffer[Int]): Int = try buf.head finally buf.trimStart(1)

    inline def tryPickle[T](buf: mutable.ListBuffer[Int], x: T): Unit = summonInline[Pickler[T]].pickle(buf, x)

    // Pickle product fields n onwards, one element type per recursion step.
    inline def pickleElems[Elems <: Tuple](n: Int)(buf: mutable.ListBuffer[Int], x: Product): Unit =
      inline erasedValue[Elems] match {
        case _: (elem *: elems1) =>
          tryPickle[elem](buf, x.productElement(n).asInstanceOf[elem])
          pickleElems[elems1](n + 1)(buf, x)
        case _: EmptyTuple =>
      }

    // Dispatch on case ordinal, pickling the matching alternative's fields.
    inline def pickleCases[Alts <: Tuple](n: Int)(buf: mutable.ListBuffer[Int], x: Any, ord: Int): Unit =
      inline erasedValue[Alts] match {
        case _: (alt *: alts1) =>
          if (ord == n)
            summonFrom {
              case m: Mirror.ProductOf[`alt`] => pickleElems[m.MirroredElemTypes](0)(buf, x.asInstanceOf[Product])
            }
          else pickleCases[alts1](n + 1)(buf, x, ord)
        case _: EmptyTuple =>
      }

    inline def tryUnpickle[T](buf: mutable.ListBuffer[Int]): T = summonInline[Pickler[T]].unpickle(buf)

    // Unpickle product fields n onwards into the pre-sized elems array.
    inline def unpickleElems[Elems <: Tuple](n: Int)(buf: mutable.ListBuffer[Int], elems: Array[Any]): Unit =
      inline erasedValue[Elems] match {
        case _: (elem *: elems1) =>
          elems(n) = tryUnpickle[elem](buf)
          unpickleElems[elems1](n + 1)(buf, elems)
        case _: EmptyTuple =>
      }

    // Rebuild one case: unpickle its fields, then construct the value via
    // the Mirror from an ad-hoc Product view of the array.
    inline def unpickleCase[T, Elems <: Tuple](buf: mutable.ListBuffer[Int], m: Mirror.ProductOf[T]): T = {
      inline val size = constValue[Tuple.Size[Elems]]
      inline if (size == 0)
        m.fromProduct(EmptyTuple)
      else {
        val elems = new Array[Any](size)
        unpickleElems[Elems](0)(buf, elems)
        m.fromProduct(new Product {
          def canEqual(that: Any): Boolean = true
          def productArity: Int = size
          def productElement(idx: Int): Any = elems(idx)
        })
      }
    }

    // Dispatch on the previously pickled ordinal to rebuild the right case.
    inline def unpickleCases[T, Alts <: Tuple](n: Int)(buf: mutable.ListBuffer[Int], ord: Int): T =
      inline erasedValue[Alts] match {
        case _: (alt *: alts1) =>
          if (ord == n)
            summonFrom {
              case m: Mirror.ProductOf[`alt` & T] =>
                unpickleCase[`alt` & T, m.MirroredElemTypes](buf, m)
            }
          else unpickleCases[T, alts1](n + 1)(buf, ord)
        case _: EmptyTuple =>
          throw new IndexOutOfBoundsException(s"unexpected ordinal number: $ord")
      }

    // Entry point used by `derives Pickler`: sums prepend the case ordinal;
    // products (un)pickle their fields directly.
    inline def derived[T](implicit ev: Mirror.Of[T]): Pickler[T] = new {
      def pickle(buf: mutable.ListBuffer[Int], x: T): Unit =
        inline ev match {
          case m: Mirror.SumOf[T] =>
            val ord = m.ordinal(x)
            buf += ord
            pickleCases[m.MirroredElemTypes](0)(buf, x, ord)
          case m: Mirror.ProductOf[T] =>
            pickleElems[m.MirroredElemTypes](0)(buf, x.asInstanceOf[Product])
        }
      def unpickle(buf: mutable.ListBuffer[Int]): T =
        inline ev match {
          case m: Mirror.SumOf[T] =>
            val ord = nextInt(buf)
            unpickleCases[T, m.MirroredElemTypes](0)(buf, ord)
          case m: Mirror.ProductOf[T] =>
            unpickleCase[T, m.MirroredElemTypes](buf, m)
        }
    }

    // Base instance for the leaf type used in the tests.
    implicit object IntPickler extends Pickler[Int] {
      def pickle(buf: mutable.ListBuffer[Int], x: Int): Unit = buf += x
      def unpickle(buf: mutable.ListBuffer[Int]): Int = nextInt(buf)
    }
  }
}
import datatypes.*
import typeclasses.*
// Tests
object Test extends App {
  // Force derivation of an Eq and a Pickler instance for every enum arity.
  // The "test" is that all of these resolve at compile time (and how much
  // code the derivation expands to); the summoned values are discarded.
  summon[Eq[E1[Int]]]
  summon[Eq[E2[Int]]]
  summon[Eq[E3[Int]]]
  summon[Eq[E4[Int]]]
  summon[Eq[E5[Int]]]
  summon[Eq[E6[Int]]]
  summon[Eq[E7[Int]]]
  summon[Eq[E8[Int]]]
  summon[Eq[E9[Int]]]
  summon[Eq[E10[Int]]]
  summon[Eq[E11[Int]]]
  summon[Eq[E12[Int]]]
  summon[Eq[E13[Int]]]
  summon[Eq[E14[Int]]]
  summon[Eq[E15[Int]]]
  summon[Eq[E16[Int]]]

  summon[Pickler[E1[Int]]]
  summon[Pickler[E2[Int]]]
  summon[Pickler[E3[Int]]]
  summon[Pickler[E4[Int]]]
  summon[Pickler[E5[Int]]]
  summon[Pickler[E6[Int]]]
  summon[Pickler[E7[Int]]]
  summon[Pickler[E8[Int]]]
  summon[Pickler[E9[Int]]]
  summon[Pickler[E10[Int]]]
  summon[Pickler[E11[Int]]]
  summon[Pickler[E12[Int]]]
  summon[Pickler[E13[Int]]]
  summon[Pickler[E14[Int]]]
  summon[Pickler[E15[Int]]]
  summon[Pickler[E16[Int]]]
}
| dotty-staging/dotty | tests/pos-special/typeclass-scaling.scala | Scala | apache-2.0 | 15,019 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.compiler
import org.junit.Test
import org.junit.Assert._
class FloatTest {

  /** Checks `Float.toInt` semantics: truncation toward zero, saturation at
   *  Int.MinValue / Int.MaxValue for out-of-range values and infinities,
   *  and NaN converting to 0.
   */
  @Test
  def `toInt`(): Unit = {
    // Fix: JUnit's assertEquals signature is (expected, actual). The previous
    // (actual, expected) order compiled and passed identically, but produced
    // misleading "expected X but was Y" failure messages.
    @inline
    def test(x: Float, expected: Int): Unit =
      assertEquals(expected, x.toInt)

    // Specials
    test(+0.0f, 0)
    test(-0.0f, 0)
    test(Float.PositiveInfinity, Int.MaxValue)
    test(Float.NegativeInfinity, Int.MinValue)
    test(Float.NaN, 0)

    // Positive numbers
    test(0.3f, 0)
    test(0.7f, 0)
    test(1.2f, 1)
    test(5e12f, Int.MaxValue)
    test(2147483646f, 2147483647)
    test(2147483500f, 2147483520)
    test(65.67f, 65)

    // Negative numbers
    test(-0.3f, 0)
    test(-0.7f, 0)
    test(-1.2f, -1)
    test(-5e12f, Int.MinValue)
    test(-2147483646f, -2147483648)
    test(-2147483500f, -2147483520)
    test(-65.67f, -65)
  }

  /** Regression test for #3575: `!(x op y)` must not be rewritten into the
   *  reversed comparison, because that rewrite is unsound for NaN operands
   *  (all comparisons with NaN are false) and for 0.0f vs -0.0f.
   */
  @Test
  def noReverseComparisons_issue3575(): Unit = {
    import Float.NaN

    // @noinline prevents inlining, so the negated comparison reaches the
    // optimizer in exactly this shape.
    @noinline def test_not_==(x: Float, y: Float): Boolean = !(x == y)
    @noinline def test_not_!=(x: Float, y: Float): Boolean = !(x != y)
    @noinline def test_not_<(x: Float, y: Float): Boolean = !(x < y)
    @noinline def test_not_<=(x: Float, y: Float): Boolean = !(x <= y)
    @noinline def test_not_>(x: Float, y: Float): Boolean = !(x > y)
    @noinline def test_not_>=(x: Float, y: Float): Boolean = !(x >= y)

    assertFalse(test_not_==(5, 5))
    assertTrue(test_not_==(5, 10))
    assertTrue(test_not_==(10, 5))
    assertTrue(test_not_==(5, NaN))
    assertTrue(test_not_==(NaN, NaN))
    assertFalse(test_not_==(0.0f, -0.0f))

    assertTrue(test_not_!=(5, 5))
    assertFalse(test_not_!=(5, 10))
    assertFalse(test_not_!=(10, 5))
    assertFalse(test_not_!=(5, NaN))
    assertFalse(test_not_!=(NaN, NaN))
    assertTrue(test_not_!=(0.0f, -0.0f))

    assertTrue(test_not_<(5, 5))
    assertFalse(test_not_<(5, 10))
    assertTrue(test_not_<(10, 5))
    assertTrue(test_not_<(5, NaN))
    assertTrue(test_not_<(NaN, NaN))
    assertTrue(test_not_<(0.0f, -0.0f))

    assertFalse(test_not_<=(5, 5))
    assertFalse(test_not_<=(5, 10))
    assertTrue(test_not_<=(10, 5))
    assertTrue(test_not_<=(5, NaN))
    assertTrue(test_not_<=(NaN, NaN))
    assertFalse(test_not_<=(0.0f, -0.0f))

    assertTrue(test_not_>(5, 5))
    assertTrue(test_not_>(5, 10))
    assertFalse(test_not_>(10, 5))
    assertTrue(test_not_>(5, NaN))
    assertTrue(test_not_>(NaN, NaN))
    assertTrue(test_not_>(0.0f, -0.0f))

    assertFalse(test_not_>=(5, 5))
    assertTrue(test_not_>=(5, 10))
    assertFalse(test_not_>=(10, 5))
    assertTrue(test_not_>=(5, NaN))
    assertTrue(test_not_>=(NaN, NaN))
    assertFalse(test_not_>=(0.0f, -0.0f))
  }
}
| nicolasstucki/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/compiler/FloatTest.scala | Scala | apache-2.0 | 2,924 |
package io.scylla.client
import java.nio.ByteBuffer
import io.scylla.client.raw.RawColumn
import org.apache.cassandra.thrift.{Deletion, Mutation => JMut, SlicePredicate, SliceRange}
/**
* Created by edouard_pelosi on 4/23/15.
*/
import scala.collection.JavaConverters._
/** A single Cassandra Thrift mutation (write or deletion).
 *  Sealed so the set of mutation kinds in this file is exhaustive;
 *  implementations convert themselves to the Thrift-generated Java type.
 */
sealed trait Mutation {
  // Conversion to the org.apache.cassandra.thrift.Mutation wire object.
  def asJavaWrapper: JMut
}
/** Mutation that writes a single raw column value. */
case class Write(rawColumn: RawColumn[_]) extends Mutation {
  override def asJavaWrapper: JMut = {
    val mutation = new JMut()
    // Thrift setters return `this`, so the populated wrapper is the result.
    mutation.setColumn_or_supercolumn(rawColumn.asJavaWrapper)
  }
}
/** Mutation that deletes an explicit list of columns by name. */
case class DeleteColumns(names: Seq[ByteBuffer]) extends Mutation {
  override def asJavaWrapper: JMut = {
    val predicate = new SlicePredicate().setColumn_names(names.toList.asJava)
    val deletion = new Deletion().setPredicate(predicate)
    new JMut().setDeletion(deletion)
  }
}
/** Mutation that deletes every column whose name falls in [start, finish].
 *  An empty Option becomes null on the wire, i.e. an open end of the range.
 */
case class DeleteRangeOfColumns(start: Option[ByteBuffer], finish: Option[ByteBuffer]) extends Mutation {
  override def asJavaWrapper: JMut = {
    val range = new SliceRange().setStart(start.orNull).setFinish(finish.orNull)
    val predicate = new SlicePredicate().setSlice_range(range)
    new JMut().setDeletion(new Deletion().setPredicate(predicate))
  }
}
/** Mutation that deletes within a super column: either named sub-columns,
 *  a range of sub-columns, or (with no predicate) the whole super column.
 */
case class DeleteSuperColumn(names: ByteBuffer, subColumns: Seq[ByteBuffer] = Seq.empty, start: Option[ByteBuffer] = None, finish: Option[ByteBuffer] = None) extends Mutation {
  override def asJavaWrapper: JMut = {
    // Explicit sub-column names take precedence over a range; when neither
    // is supplied the predicate stays null and the entire super column goes.
    val byNames: Option[SlicePredicate] =
      if (subColumns.isEmpty) None
      else Some(new SlicePredicate().setColumn_names(subColumns.toList.asJava))
    val byRange: Option[SlicePredicate] =
      if (start.isEmpty && finish.isEmpty) None
      else Some(new SlicePredicate().setSlice_range(new SliceRange().setStart(start.orNull).setFinish(finish.orNull)))
    val predicate = byNames.orElse(byRange)
    new JMut().setDeletion(new Deletion().setSuper_column(names).setPredicate(predicate.orNull))
  }
}
| le-doude/scylla | src/main/scala/io/scylla/client/Mutation.scala | Scala | apache-2.0 | 1,691 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.collector
import com.twitter.util.{Await, Closable, CloseAwaitably, Future, FuturePool, Time}
import com.twitter.finagle.stats.{DefaultStatsReceiver, StatsReceiver}
import java.util.concurrent.{ArrayBlockingQueue, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger
class QueueFullException(size: Int) extends Exception("Queue is full. MaxSize: %d".format(size))
class QueueClosedException extends Exception("Queue is closed")
/**
* A queue with configurable size and concurrency characteristics.
*
* This queue is backed by ArrayBlockingQueue of `maxSize`. Items are processed by a number of workers
* limited by `maxConcurrency`. Each worker will pull an item from the queue and process it via the
* provided `process` function. Workers will Await on `process` thus backpressure is based on the workers'
* ability to fully process an item.
*
* On close the queue will drain itelf before completing the close Future.
*
* The queue can be awaited on and will not complete until it's been closed and drained.
*/
class ItemQueue[A, B](
  maxSize: Int,
  maxConcurrency: Int,
  process: A => Future[B],
  stats: StatsReceiver = DefaultStatsReceiver.scope("ItemQueue")
) extends Closable with CloseAwaitably {
  // Flipped to false by close(); workers then finish draining and exit.
  @volatile private[this] var running: Boolean = true
  // Bounded buffer: add() rejects rather than blocks when this is full.
  private[this] val queue = new ArrayBlockingQueue[A](maxSize)
  private[this] val queueSizeGauge = stats.addGauge("queueSize") { queue.size }
  // Count of workers currently processing an item, exported as a gauge.
  private[this] val activeWorkers = new AtomicInteger(0)
  private[this] val activeWorkerGauge = stats.addGauge("activeWorkers") { activeWorkers.get }
  private[this] val maxConcurrencyGauge = stats.addGauge("maxConcurrency") { maxConcurrency }
  private[this] val failuresCounter = stats.counter("failures")
  private[this] val successesCounter = stats.counter("successes")
  // One long-lived task per worker, started at construction; each runs loop()
  // until the queue is closed and drained.
  private[this] val workers = Seq.fill(maxConcurrency) { FuturePool.unboundedPool { loop() } }
  // Worker body. The 500 ms poll timeout bounds how long a worker waits before
  // re-checking `running`, so close() is noticed promptly. Await.ready blocks
  // this worker until `process` finishes — that is what caps in-flight work at
  // maxConcurrency and provides backpressure.
  private[this] def loop() {
    while (running || !queue.isEmpty) {
      val item = queue.poll(500, TimeUnit.MILLISECONDS)
      if (item != null) {
        activeWorkers.incrementAndGet()
        val rep = stats.timeFuture("processing_time_ms")(process(item)) onSuccess{ _ =>
          successesCounter.incr()
        } onFailure { _ =>
          failuresCounter.incr()
        }
        Await.ready(rep)
        activeWorkers.decrementAndGet()
      }
    }
  }
  // Stops accepting new items, then resolves once all workers have exited.
  // Note: `deadline` is not consulted; close always waits for a full drain.
  def close(deadline: Time): Future[Unit] = closeAwaitably {
    running = false
    Future.join(workers)
  }
  // Pre-built failed futures so the rejection paths allocate nothing per call.
  private[this] val QueueFull = Future.exception(new QueueFullException(maxSize))
  private[this] val QueueClosed = Future.exception(new QueueClosedException)
  // Non-blocking enqueue: fails fast when closed or at capacity.
  // NOTE(review): the `running` check and `offer` are not atomic, so an item
  // offered concurrently with close() may be accepted after draining has
  // started — confirm this window is acceptable to callers.
  def add(item: A): Future[Unit] =
    if (!running)
      QueueClosed
    else if (!queue.offer(item))
      QueueFull
    else
      Future.Done
}
| eirslett/zipkin | zipkin-collector/src/main/scala/com/twitter/zipkin/collector/ItemQueue.scala | Scala | apache-2.0 | 3,407 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and returns a sample of Scala code snippets matching specific criteria, giving a basic overview of the dataset's contents without surfacing deeper patterns.