code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package puck.parser
import puck._
import scala.collection.JavaConverters._
import com.nativelibs4java.opencl.{CLBuffer, CLContext, CLQueue, CLEvent}
import puck.linalg.CLMatrix
import scala.Array
import java.util.zip._
import puck.util._
/**
*
*
* @author dlwh
*/
/**
 * Runs the OpenCL kernels for a set of unary grammar rules, propagating scores
 * from child chart cells to parent chart cells on the GPU.
 *
 * @param kernels the rule kernels to execute, in order
 */
case class CLUnaryRuleUpdater(kernels: IndexedSeq[RuleKernel]) {
  // Java-friendly auxiliary constructor; copies the list into an immutable IndexedSeq.
  def this(kernels: java.util.List[RuleKernel]) = this(kernels.asScala.toIndexedSeq)

  /**
   * Sets arguments on and enqueues every kernel against the parent/child matrices,
   * returning the enqueued CLEvents. Synchronized because arguments are mutated on
   * shared kernel objects before each enqueue.
   *
   * NOTE(review): every kernel receives the identical argument list — assumes each
   * kernel signature is (parent, parentScale, parentPtrs, parentOff, child, childScale,
   * childPtrs, childOff, stride, rows); confirm against the kernel sources.
   */
  def update(profiler: CLProfiler#EventTimer,
             parent: CLMatrix[Float], parentScale: CLBuffer[Float], parentPointers: CLBuffer[Int], parentOff: Int,
             child: CLMatrix[Float], childScale: CLBuffer[Float], childPointers: CLBuffer[Int], childOff: Int,
             events: CLEvent*)(implicit queue: CLQueue) = synchronized {
    // Parent and child must share a layout so kernels can index both identically.
    require(parent.rows == child.rows)
    require(parent.cols == child.cols)
    require(parent.majorStride == child.majorStride)
    kernels.flatMap(_.kernels).map { k =>
      k.setArgs(parent.data.safeBuffer, parentScale, parentPointers, Integer.valueOf(parentOff), child.data.safeBuffer, childScale, childPointers, Integer.valueOf(childOff),
        Integer.valueOf(parent.majorStride), Integer.valueOf(parent.rows) )
      // One work item per matrix row; record the event in the profiler.
      k.enqueueNDRange(queue, Array(parent.rows), events: _*) profileIn profiler
    }
  }

  /** Serializes the kernel count and each kernel under the zip entry prefix `name`. */
  def write(name: String, out: ZipOutputStream) {
    ZipUtil.serializedEntry(out, s"$name/numKernels", Integer.valueOf(kernels.length))
    for(i <- 0 until kernels.length) {
      kernels(i).write(s"$name/$i", out)
    }
  }
}
object CLUnaryRuleUpdater {
  /**
   * Reconstructs a CLUnaryRuleUpdater previously serialized with `write`,
   * reading the kernel count and then each kernel from entries under `name`.
   */
  def read(in: ZipFile, name: String)(implicit ctxt: CLContext) = {
    val numKernels = ZipUtil.deserializeEntry[Integer](in.getInputStream(in.getEntry(s"$name/numKernels")))
    val loaded = (0 until numKernels.intValue()).map(idx => RuleKernel.read(in, s"$name/$idx"))
    new CLUnaryRuleUpdater(loaded)
  }
}
} | nkhuyu/puck | src/main/scala/puck/parser/CLUnaryRuleUpdater.scala | Scala | apache-2.0 | 1,837 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.graphframes.lib
import org.apache.spark.sql.types.DataTypes
import org.graphframes.{GraphFrameTestSparkContext, SparkFunSuite, TestUtils}
import org.graphframes.examples.Graphs
class LabelPropagationSuite extends SparkFunSuite with GraphFrameTestSparkContext {
  // Size of each blob in the two-blobs test graph.
  val n = 5

  test("Toy example") {
    // Two densely connected blobs of n vertices each; label propagation
    // should converge to a single label per blob.
    val g = Graphs.twoBlobs(n)
    val labels = g.labelPropagation.maxIter(4 * n).run()
    TestUtils.testSchemaInvariants(g, labels)
    TestUtils.checkColumnType(labels.schema, "label", DataTypes.LongType)
    // Vertices with id < n form the first blob: all must share one label.
    val clique1 =
      labels.filter(s"id < $n").select("label").collect().toSeq.map(_.getLong(0)).toSet
    assert(clique1.size === 1)
    // Vertices with id >= n form the second blob.
    val clique2 =
      labels.filter(s"id >= $n").select("label").collect().toSeq.map(_.getLong(0)).toSet
    assert(clique2.size === 1)
    // The two blobs must not collapse into a single community.
    assert(clique1 !== clique2)
  }
}
| graphframes/graphframes | src/test/scala/org/graphframes/lib/LabelPropagationSuite.scala | Scala | apache-2.0 | 1,637 |
/**
* Copyright 2015 ICT.
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.ac.ict.acs.netflow.query.broker
import java.util.concurrent.TimeUnit
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.duration._
import cn.ac.ict.acs.netflow.{ Query, NetFlowException }
import cn.ac.ict.acs.netflow.query.master.JobType
/**
 * Raw, user-supplied description of a NetFlow job in which every field is
 * optional. Use [[doValidate]] to check the required fields and obtain a
 * [[ValidJobDescription]].
 */
case class JobDescription(
    description: Option[String],
    jobType: Option[String],
    deferTime: Option[RelativeTime],
    frequency: Option[RelativeTime],
    jobQuery: Option[Query],
    jar: Option[String],
    mainClass: Option[String],
    appArgs: Option[Array[String]],
    sparkProperties: Option[Map[String, String]],
    environmentVariables: Option[Map[String, String]],
    outputPath: Option[String]) {

  /**
   * Validates this description.
   *
   * @return (validJobDesc, success, message): on success the first element is
   *         the validated description and the message is None; on failure the
   *         first element is null and the message explains what was wrong.
   */
  def doValidate(): (ValidJobDescription, Boolean, Option[String]) = {
    // jobType is mandatory and must name a member of JobType (case-insensitive).
    val tpe = try {
      if (jobType.isDefined) {
        JobType.withName(jobType.get.toUpperCase)
      } else {
        return (null, false, Some(s"No jobType provided"))
      }
    } catch {
      case e: NoSuchElementException =>
        return (null, false,
          Some(s"[jobType: ${jobType.get}] is not a valid one, " +
            s"please choose among [ONLINE, ADHOC, REPORT]"))
    }
    // A missing deferTime means "start immediately".
    val defer = try {
      deferTime.map(_.toDuration).getOrElse(0.seconds)
    } catch {
      case e: NetFlowException =>
        return (null, false, Some(e.getMessage))
    }
    val freq = try {
      frequency.map(_.toDuration)
    } catch {
      case e: NetFlowException =>
        return (null, false, Some(e.getMessage))
    }
    // REPORT jobs are periodic by definition, so a frequency is required.
    if (tpe == JobType.REPORT && frequency.isEmpty) {
      return (null, false, Some(s"Report Job must specify running frequency"))
    }
    val query = try {
      require(jobQuery.isDefined, "No valid jobQuery available")
      jobQuery.get
    } catch {
      case e: IllegalArgumentException =>
        return (null, false, Some(e.getMessage))
    }
    val vjd = ValidJobDescription(description, tpe, defer, freq, query,
      jar, mainClass, appArgs, sparkProperties, environmentVariables, outputPath)
    (vjd, true, None)
  }
}
/**
 * A duration expressed as a count plus a textual unit name, accepting both the
 * singular and plural spellings (e.g. RelativeTime(5, "minutes")).
 */
case class RelativeTime(num: Int, unit: String) {
  /** Converts to a FiniteDuration; throws NetFlowException for an unknown unit name. */
  def toDuration: FiniteDuration = {
    val timeUnit = unit match {
      case "day" | "days"                 => TimeUnit.DAYS
      case "hour" | "hours"               => TimeUnit.HOURS
      case "minute" | "minutes"           => TimeUnit.MINUTES
      case "second" | "seconds"           => TimeUnit.SECONDS
      case "millisecond" | "milliseconds" => TimeUnit.MILLISECONDS
      case other =>
        throw new NetFlowException(s"Unsupported time unit: $other")
    }
    new FiniteDuration(num, timeUnit)
  }
}
/**
 * A fully validated job description produced by JobDescription.doValidate:
 * the job type, defer time and query are guaranteed to be present.
 *
 * @param deferTime delay before the first run (0 seconds when none was requested)
 * @param frequency run period; None for one-shot jobs, always present for REPORT jobs
 */
case class ValidJobDescription(
    description: Option[String],
    jobType: JobType.Value,
    deferTime: FiniteDuration,
    frequency: Option[FiniteDuration],
    query: Query,
    jar: Option[String],
    mainClass: Option[String],
    appArgs: Option[Array[String]],
    sparkProperties: Option[Map[String, String]],
    environmentVariables: Option[Map[String, String]],
    outputPath: Option[String])
| DataSysLab/netflow | query/src/main/scala/cn/ac/ict/acs/netflow/query/broker/JobDescription.scala | Scala | apache-2.0 | 3,925 |
package edu.cornell.cdm89.scalaspec.driver
import akka.actor.{Actor, ActorLogging, Props}
import akka.actor.{ActorIdentity, Identify}
import akka.routing.FromConfig
import edu.cornell.cdm89.scalaspec.domain.{Subdomain, GllElement}
import edu.cornell.cdm89.scalaspec.ode.TimestepController
/**
 * Drives an evolution run across `nNodes` cluster nodes as an actor state
 * machine: create elements -> load initial data -> time stepping -> shutdown.
 * Each transition waits for one response per node before proceeding.
 */
class EvolutionController(nNodes: Int) extends Actor with ActorLogging {
  // TODO: Can some of this be moved to preStart? Should some be passed in
  // instead?
  val config = context.system.settings.config
  // Evolution parameters read from the "harvest" configuration section.
  val t0 = config.getDouble("harvest.initial-time")
  val dt = config.getDouble("harvest.step-size")
  val nSteps = config.getInt("harvest.nr-of-steps")

  // Create broadcast routers (routing strategy supplied by deployment config)
  val domRouter = context.system.actorOf(Props.empty.withRouter(FromConfig), "domain")
  val idRouter = context.system.actorOf(Props.empty.withRouter(FromConfig), "initialData")
  val obsRouter = context.system.actorOf(Props.empty.withRouter(FromConfig), "observers")

  override def receive = standby

  // Initial state: waiting for the external 'StartEvolution trigger.
  def standby: Receive = {
    case 'StartEvolution =>
      domRouter ! GllElement.CreateElements(domRouter)
      context.become(creatingElements)
  }

  // Wait for every node to report 'ElementsCreated, then request initial data.
  def creatingElements: Receive = {
    waitForResponses(0, 'ElementsCreated, () => {
      idRouter ! 'ProvideId
      obsRouter ! 'Initialize
    }, loadingId)
  }

  // Wait for 'AllReady from every node, then start the timestep controller.
  def loadingId: Receive = {
    waitForResponses(0, 'AllReady, () => {
      log.info("Starting evolution")
      context.actorOf(Props(classOf[TimestepController], domRouter,
        t0, dt, t0+dt*nSteps), "timeStepper")
    }, stepping(System.nanoTime))
  }

  // Wait for 'AllDone from every node, report wall-clock time, and shut down.
  def stepping(timestamp: Long): Receive = {
    waitForResponses(0, 'AllDone, () => {
      val wallTime = 1.0e-9 * (System.nanoTime - timestamp)
      log.info(f"All done! ($wallTime%.3f s)")
      domRouter ! 'Shutdown
    }, finished)
  }

  // Terminal state: any further message is unexpected.
  def finished: Receive = {
    case msg =>
      log.error(s"Unexpected message: $msg")
  }

  /**
   * Returns a behavior that counts occurrences of `response`; once all nNodes
   * have answered, runs `action` and transitions to `nextState`.
   */
  def waitForResponses(count: Int, response: Any, action: () => Unit,
      nextState: Receive): Receive = {
    case `response` =>
      //log.info(s"Got response $response (${count+1})")
      if (count+1 == nNodes) {
        action()
        context.become(nextState)
      } else context.become(waitForResponses(count+1, response, action, nextState))
    case msg =>
      log.error(s"Unexpected message: $msg")
  }
}
| cdmuhlb/DGenerate | src/main/scala/edu/cornell/cdm89/scalaspec/driver/EvolutionController.scala | Scala | mit | 2,351 |
// Compiler positive test: a `transparent inline` method whose body is an
// `inline match` should refine its result type at each call site.
object `inline-match-specialize` {
  case class Box[+T](value: T)
  transparent inline def specialize[T](box: Box[T]): Box[T] = inline box match {
    case box: Box[t] => box
  }
  // Only compiles if specialize[Any](Box(0)) is refined to Box[Int] at the call site.
  val ibox: Box[Int] = specialize[Any](Box(0))
}
| dotty-staging/dotty | tests/pos/inline-match-specialize.scala | Scala | apache-2.0 | 229 |
package com.microworkflow
/**
* Created by dam on 12/5/15.
*/
import io.gatling.core.Predef._
import io.gatling.core.scenario.Simulation
import io.gatling.core.structure.ScenarioBuilder
import io.gatling.http.Predef._
/**
 * Gatling simulation: a single user performs a GET to /generate, saves the
 * response body in the session, then POSTs a string built from it to /convert.
 */
class RouterSimulation extends Simulation {
  // Session key under which the GET response body is stored.
  private val getReponseKey = "getResponse"

  def makeProtocolBuilder = http
    .baseURL("http://localhost:8080")

  // GET /generate: expect 200 and save the body for the follow-up POST.
  val getRequestBuilder = http("get request")
    .get("/generate")
    .check(status.is(session ⇒ 200), bodyString.saveAs(getReponseKey))

  // POST /convert: the body embeds the previously saved response; expect 200.
  val postRequestBuider = http("post request")
    .post("/convert")
    .body(StringBody(session ⇒ { s"time was: ${session(getReponseKey).validate[String].get}"}))
    .check(status.is(200))

  def makeScenarioBuilder: ScenarioBuilder =
    scenario("sample scenario")
      .exec(getRequestBuilder)
      .exec(postRequestBuider)

  // Run the scenario with a single user injected at once.
  setUp(makeScenarioBuilder.inject(atOnceUsers(1))).protocols(makeProtocolBuilder)
}
| polymorphic/gatling-demo | src/it/scala/com.microworkflow/RouterSimulation.scala | Scala | apache-2.0 | 957 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.util
import java.time.{Instant, LocalDateTime, LocalTime, ZoneOffset}
import java.util.concurrent.TimeUnit
import org.scalatest.Matchers
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.plans.SQLHelper
import org.apache.spark.sql.catalyst.util.{DateTimeTestUtils, DateTimeUtils, TimestampFormatter}
import org.apache.spark.sql.catalyst.util.DateTimeUtils.instantToMicros
import org.apache.spark.unsafe.types.UTF8String
/**
 * Tests for TimestampFormatter: zone-aware parsing and formatting, round
 * trips, fractional-second handling, and special timestamp values.
 */
class TimestampFormatterSuite extends SparkFunSuite with SQLHelper with Matchers {

  test("parsing timestamps using time zones") {
    val localDate = "2018-12-02T10:11:12.001234"
    // Expected microseconds since epoch when the same wall-clock string is
    // interpreted in each time zone.
    val expectedMicros = Map(
      "UTC" -> 1543745472001234L,
      "PST" -> 1543774272001234L,
      "CET" -> 1543741872001234L,
      "Africa/Dakar" -> 1543745472001234L,
      "America/Los_Angeles" -> 1543774272001234L,
      "Antarctica/Vostok" -> 1543723872001234L,
      "Asia/Hong_Kong" -> 1543716672001234L,
      "Europe/Amsterdam" -> 1543741872001234L)
    DateTimeTestUtils.outstandingTimezonesIds.foreach { zoneId =>
      val formatter = TimestampFormatter(
        "yyyy-MM-dd'T'HH:mm:ss.SSSSSS",
        DateTimeUtils.getZoneId(zoneId))
      val microsSinceEpoch = formatter.parse(localDate)
      assert(microsSinceEpoch === expectedMicros(zoneId))
    }
  }

  test("format timestamps using time zones") {
    // Inverse of the test above: one instant rendered in each zone.
    val microsSinceEpoch = 1543745472001234L
    val expectedTimestamp = Map(
      "UTC" -> "2018-12-02T10:11:12.001234",
      "PST" -> "2018-12-02T02:11:12.001234",
      "CET" -> "2018-12-02T11:11:12.001234",
      "Africa/Dakar" -> "2018-12-02T10:11:12.001234",
      "America/Los_Angeles" -> "2018-12-02T02:11:12.001234",
      "Antarctica/Vostok" -> "2018-12-02T16:11:12.001234",
      "Asia/Hong_Kong" -> "2018-12-02T18:11:12.001234",
      "Europe/Amsterdam" -> "2018-12-02T11:11:12.001234")
    DateTimeTestUtils.outstandingTimezonesIds.foreach { zoneId =>
      val formatter = TimestampFormatter(
        "yyyy-MM-dd'T'HH:mm:ss.SSSSSS",
        DateTimeUtils.getZoneId(zoneId))
      val timestamp = formatter.format(microsSinceEpoch)
      assert(timestamp === expectedTimestamp(zoneId))
    }
  }

  test("roundtrip micros -> timestamp -> micros using timezones") {
    // Covers instants far in the past and future, with and without a zone offset
    // in the pattern.
    Seq("yyyy-MM-dd'T'HH:mm:ss.SSSSSS", "yyyy-MM-dd'T'HH:mm:ss.SSSSSSXXXXX").foreach { pattern =>
      Seq(
        -58710115316212000L,
        -18926315945345679L,
        -9463427405253013L,
        -244000001L,
        0L,
        99628200102030L,
        1543749753123456L,
        2177456523456789L,
        11858049903010203L).foreach { micros =>
        DateTimeTestUtils.outstandingZoneIds.foreach { zoneId =>
          val formatter = TimestampFormatter(pattern, zoneId)
          val timestamp = formatter.format(micros)
          val parsed = formatter.parse(timestamp)
          assert(micros === parsed)
        }
      }
    }
  }

  test("roundtrip timestamp -> micros -> timestamp using timezones") {
    // Same round trip, starting from strings instead of microsecond values.
    Seq(
      "0109-07-20T18:38:03.788000",
      "1370-04-01T10:00:54.654321",
      "1670-02-11T14:09:54.746987",
      "1969-12-31T23:55:55.999999",
      "1970-01-01T00:00:00.000000",
      "1973-02-27T02:30:00.102030",
      "2018-12-02T11:22:33.123456",
      "2039-01-01T01:02:03.456789",
      "2345-10-07T22:45:03.010203").foreach { timestamp =>
      DateTimeTestUtils.outstandingZoneIds.foreach { zoneId =>
        val formatter = TimestampFormatter("yyyy-MM-dd'T'HH:mm:ss.SSSSSS", zoneId)
        val micros = formatter.parse(timestamp)
        val formatted = formatter.format(micros)
        assert(timestamp === formatted)
      }
    }
  }

  test(" case insensitive parsing of am and pm") {
    // Lowercase "am" must parse even though the pattern letter 'a' is uppercase AM/PM.
    val formatter = TimestampFormatter("yyyy MMM dd hh:mm:ss a", ZoneOffset.UTC)
    val micros = formatter.parse("2009 Mar 20 11:30:01 am")
    assert(micros === TimeUnit.SECONDS.toMicros(
      LocalDateTime.of(2009, 3, 20, 11, 30, 1).toEpochSecond(ZoneOffset.UTC)))
  }

  test("format fraction of second") {
    // The fraction formatter trims trailing zeros and omits an all-zero fraction.
    val formatter = TimestampFormatter.getFractionFormatter(ZoneOffset.UTC)
    assert(formatter.format(0) === "1970-01-01 00:00:00")
    assert(formatter.format(1) === "1970-01-01 00:00:00.000001")
    assert(formatter.format(1000) === "1970-01-01 00:00:00.001")
    assert(formatter.format(900000) === "1970-01-01 00:00:00.9")
    assert(formatter.format(1000000) === "1970-01-01 00:00:01")
  }

  test("formatting negative years with default pattern") {
    // Years before 1 CE must render with a leading minus sign.
    val instant = LocalDateTime.of(-99, 1, 1, 0, 0, 0)
      .atZone(ZoneOffset.UTC)
      .toInstant
    val micros = DateTimeUtils.instantToMicros(instant)
    assert(TimestampFormatter(ZoneOffset.UTC).format(micros) === "-0099-01-01 00:00:00")
  }

  test("special timestamp values") {
    testSpecialDatetimeValues { zoneId =>
      val formatter = TimestampFormatter(zoneId)
      // "now"/"today"/... are resolved relative to wall-clock time, so compare
      // with a tolerance rather than exact equality.
      val tolerance = TimeUnit.SECONDS.toMicros(30)

      assert(formatter.parse("EPOCH") === 0)
      val now = instantToMicros(Instant.now())
      formatter.parse("now") should be(now +- tolerance)
      val localToday = LocalDateTime.now(zoneId)
        .`with`(LocalTime.MIDNIGHT)
        .atZone(zoneId)
      val yesterday = instantToMicros(localToday.minusDays(1).toInstant)
      formatter.parse("yesterday CET") should be(yesterday +- tolerance)
      val today = instantToMicros(localToday.toInstant)
      // Whitespace and case must be ignored around special values.
      formatter.parse(" TODAY ") should be(today +- tolerance)
      val tomorrow = instantToMicros(localToday.plusDays(1).toInstant)
      formatter.parse("Tomorrow ") should be(tomorrow +- tolerance)
    }
  }

  test("parsing timestamp strings with various seconds fractions") {
    DateTimeTestUtils.outstandingZoneIds.foreach { zoneId =>
      // Parses `input` with `pattern` and checks it equals the timestamp that
      // Spark's generic string-to-timestamp conversion produces for `reference`.
      def check(pattern: String, input: String, reference: String): Unit = {
        val formatter = TimestampFormatter(pattern, zoneId)
        val expected = DateTimeUtils.stringToTimestamp(
          UTF8String.fromString(reference), zoneId).get
        val actual = formatter.parse(input)
        assert(actual === expected)
      }

      check("yyyy-MM-dd'T'HH:mm:ss.SSSSSSSXXX",
        "2019-10-14T09:39:07.3220000Z", "2019-10-14T09:39:07.322Z")
      check("yyyy-MM-dd'T'HH:mm:ss.SSSSSS",
        "2019-10-14T09:39:07.322000", "2019-10-14T09:39:07.322")
      check("yyyy-MM-dd'T'HH:mm:ss.SSSSSSX",
        "2019-10-14T09:39:07.123456Z", "2019-10-14T09:39:07.123456Z")
      check("yyyy-MM-dd'T'HH:mm:ss.SSSSSSX",
        "2019-10-14T09:39:07.000010Z", "2019-10-14T09:39:07.00001Z")
      check("yyyy HH:mm:ss.SSSSS", "1970 01:02:03.00004", "1970-01-01 01:02:03.00004")
      check("yyyy HH:mm:ss.SSSS", "2019 00:00:07.0100", "2019-01-01 00:00:07.0100")
      check("yyyy-MM-dd'T'HH:mm:ss.SSSX",
        "2019-10-14T09:39:07.322Z", "2019-10-14T09:39:07.322Z")
      check("yyyy-MM-dd'T'HH:mm:ss.SS",
        "2019-10-14T09:39:07.10", "2019-10-14T09:39:07.1")
      check("yyyy-MM-dd'T'HH:mm:ss.S",
        "2019-10-14T09:39:07.1", "2019-10-14T09:39:07.1")

      // Input that does not match the pattern's literal separators must fail.
      try {
        TimestampFormatter("yyyy/MM/dd HH_mm_ss.SSSSSS", zoneId)
          .parse("2019/11/14 20#25#30.123456")
        fail("Expected to throw an exception for the invalid input")
      } catch {
        case e: java.time.format.DateTimeParseException =>
          assert(e.getMessage.contains("could not be parsed"))
      }
    }
  }

  test("formatting timestamp strings up to microsecond precision") {
    DateTimeTestUtils.outstandingZoneIds.foreach { zoneId =>
      // Formats `input` (converted to micros) with `pattern` and compares the
      // rendered string, checking padding/truncation of the fraction.
      def check(pattern: String, input: String, expected: String): Unit = {
        val formatter = TimestampFormatter(pattern, zoneId)
        val timestamp = DateTimeUtils.stringToTimestamp(
          UTF8String.fromString(input), zoneId).get
        val actual = formatter.format(timestamp)
        assert(actual === expected)
      }

      check(
        "yyyy-MM-dd HH:mm:ss.SSSSSSS", "2019-10-14T09:39:07.123456",
        "2019-10-14 09:39:07.1234560")
      check(
        "yyyy-MM-dd HH:mm:ss.SSSSSS", "1960-01-01T09:39:07.123456",
        "1960-01-01 09:39:07.123456")
      check(
        "yyyy-MM-dd HH:mm:ss.SSSSS", "0001-10-14T09:39:07.1",
        "0001-10-14 09:39:07.10000")
      check(
        "yyyy-MM-dd HH:mm:ss.SSSS", "9999-12-31T23:59:59.999",
        "9999-12-31 23:59:59.9990")
      check(
        "yyyy-MM-dd HH:mm:ss.SSS", "1970-01-01T00:00:00.0101",
        "1970-01-01 00:00:00.010")
      check(
        "yyyy-MM-dd HH:mm:ss.SS", "2019-10-14T09:39:07.09",
        "2019-10-14 09:39:07.09")
      check(
        "yyyy-MM-dd HH:mm:ss.S", "2019-10-14T09:39:07.2",
        "2019-10-14 09:39:07.2")
      check(
        "yyyy-MM-dd HH:mm:ss.S", "2019-10-14T09:39:07",
        "2019-10-14 09:39:07.0")
      check(
        "yyyy-MM-dd HH:mm:ss", "2019-10-14T09:39:07.123456",
        "2019-10-14 09:39:07")
    }
  }
}
| goldmedal/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/util/TimestampFormatterSuite.scala | Scala | apache-2.0 | 9,590 |
/* NeverTerminate.scala
*
* Copyright (c) 2013-2014 linkedin.com
* Copyright (c) 2013-2015 zman.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package atmos.termination
import scala.concurrent.duration.FiniteDuration
/**
* A termination policy that never signals for termination.
*/
/**
 * A termination policy that never signals for termination.
 */
case object NeverTerminate extends atmos.TerminationPolicy {
  /** Always permits another attempt, regardless of attempt count or timing. */
  override def shouldTerminate(attempts: Int, nextAttemptAt: FiniteDuration): Boolean = false
}
package com.eevolution.context.dictionary.domain.model
import ai.x.play.json.Jsonx
import com.eevolution.context.dictionary.api.{ActiveEnabled, DomainModel, Identifiable, Traceable}
import org.joda.time.DateTime
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/e-Evolution
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com
*/
/**
* User Mail Entity
* @param userMailId User Mail ID
* @param tenantId Tenant ID
* @param organizationId Organization ID
* @param isActive Is Active
* @param created Created
* @param createdBy Created By
* @param updated Updated
* @param updatedBy Updated By
* @param userId user ID
* @param mailTextId Mail Text ID
* @param mailMsgId Mail Msg ID
* @param messageId Message ID
* @param deliveryConfirmation Delivery Confirmation
* @param isDelivered Is Delivered
* @param subject Subject
* @param mailText Mail Text
* @param uuid UUID
*/
// User Mail entity mapped to the AD_UserMail table; per-field semantics are
// described in the scaladoc immediately above this class.
case class UserMail (userMailId: Int,
                     tenantId: Int,
                     organizationId: Int,
                     isActive: Boolean = true,
                     created: DateTime = DateTime.now,
                     createdBy: Int,
                     updated: DateTime = DateTime.now,
                     updatedBy: Int,
                     userId: Int,
                     mailTextId: Option[Int],
                     mailMsgId: Option[Int],
                     messageId: Option[String],
                     deliveryConfirmation: Option[String],
                     isDelivered: Option[Boolean],
                     subject: Option[String],
                     mailText: Option[String],
                     uuid: String
                    ) extends DomainModel
  with ActiveEnabled
  with Identifiable
  with Traceable {
  override type ActiveEnabled = this.type
  override type Identifiable = this.type
  override type Traceable = this.type
  // Surrogate key plus table/column metadata used by the generic persistence layer.
  override def Id: Int = userMailId
  override val entityName: String = "AD_UserMail"
  override val identifier: String = "AD_UserMail_ID"
}
object UserMail {
  implicit lazy val jsonFormat = Jsonx.formatCaseClass[UserMail]

  /**
   * Builds a UserMail from fully specified (non-optional) values.
   *
   * Bug fix: the optional fields (mailTextId, mailMsgId, messageId,
   * deliveryConfirmation, isDelivered, subject, mailText) were previously
   * discarded — the constructor was always called with None for all of them —
   * so the supplied arguments never reached the entity. They are now wrapped
   * in Some(...) as intended. The signature is unchanged.
   */
  def create(userMailId: Int,
             tenantId: Int,
             organizationId: Int,
             isActive: Boolean,
             created: DateTime,
             createdBy: Int,
             updated: DateTime,
             updatedBy: Int,
             userId: Int,
             mailTextId: Int,
             mailMsgId: Int,
             messageId: String,
             deliveryConfirmation: String,
             isDelivered: Boolean,
             subject: String,
             mailText: String,
             uuid: String) = UserMail(userMailId, tenantId, organizationId, isActive, created,
    createdBy, updated, updatedBy, userId, Some(mailTextId), Some(mailMsgId), Some(messageId),
    Some(deliveryConfirmation), Some(isDelivered), Some(subject), Some(mailText), uuid)
}
| adempiere/ADReactiveSystem | dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/model/UserMail.scala | Scala | gpl-3.0 | 3,614 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.scheduler
import java.util.concurrent.TimeUnit
import scala.util.{Failure, Success, Try}
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Checkpoint, CheckpointWriter, Time}
import org.apache.spark.streaming.api.python.PythonDStream
import org.apache.spark.streaming.util.RecurringTimer
import org.apache.spark.util.{Clock, EventLoop, ManualClock, Utils}
/** Event classes for JobGenerator */
private[scheduler] sealed trait JobGeneratorEvent
// Triggers job generation for the batch ending at `time`.
private[scheduler] case class GenerateJobs(time: Time) extends JobGeneratorEvent
// Clears DStream metadata once the batch at `time` has been processed.
private[scheduler] case class ClearMetadata(time: Time) extends JobGeneratorEvent
// Writes a checkpoint for `time`; may request checkpoint-data cleanup afterwards.
private[scheduler] case class DoCheckpoint(
    time: Time, clearCheckpointDataLater: Boolean) extends JobGeneratorEvent
// Clears checkpoint data after the checkpoint for `time` has been written.
private[scheduler] case class ClearCheckpointData(time: Time) extends JobGeneratorEvent
/**
* This class generates jobs from DStreams as well as drives checkpointing and cleaning
* up DStream metadata.
*/
private[streaming]
class JobGenerator(jobScheduler: JobScheduler) extends Logging {
  private val ssc = jobScheduler.ssc
  private val conf = ssc.conf
  private val graph = ssc.graph

  // Clock used to schedule batches; configurable via "spark.streaming.clock" so
  // tests can substitute a ManualClock. Legacy class names under
  // org.apache.spark.streaming are remapped to their org.apache.spark equivalents.
  val clock = {
    val clockClass = ssc.sc.conf.get(
      "spark.streaming.clock", "org.apache.spark.util.SystemClock")
    try {
      Utils.classForName(clockClass).getConstructor().newInstance().asInstanceOf[Clock]
    } catch {
      case e: ClassNotFoundException if clockClass.startsWith("org.apache.spark.streaming") =>
        val newClockClass = clockClass.replace("org.apache.spark.streaming", "org.apache.spark")
        Utils.classForName(newClockClass).getConstructor().newInstance().asInstanceOf[Clock]
    }
  }

  // Fires once per batch interval, posting GenerateJobs to the event loop.
  private val timer = new RecurringTimer(clock, ssc.graph.batchDuration.milliseconds,
    longTime => eventLoop.post(GenerateJobs(new Time(longTime))), "JobGenerator")

  // This is marked lazy so that this is initialized after checkpoint duration has been set
  // in the context and the generator has been started.
  private lazy val shouldCheckpoint = ssc.checkpointDuration != null && ssc.checkpointDir != null

  // Null when checkpointing is disabled; guarded by shouldCheckpoint at call sites.
  private lazy val checkpointWriter = if (shouldCheckpoint) {
    new CheckpointWriter(this, ssc.conf, ssc.checkpointDir, ssc.sparkContext.hadoopConfiguration)
  } else {
    null
  }

  // eventLoop is created when generator starts.
  // This not being null means the scheduler has been started and not stopped
  private var eventLoop: EventLoop[JobGeneratorEvent] = null

  // last batch whose completion,checkpointing and metadata cleanup has been completed
  private var lastProcessedBatch: Time = null
  /** Start generation of jobs. Idempotent: a second call is a no-op. */
  def start(): Unit = synchronized {
    if (eventLoop != null) return // generator has already been started

    // Call checkpointWriter here to initialize it before eventLoop uses it to avoid a deadlock.
    // See SPARK-10125
    checkpointWriter
    eventLoop = new EventLoop[JobGeneratorEvent]("JobGenerator") {
      override protected def onReceive(event: JobGeneratorEvent): Unit = processEvent(event)

      override protected def onError(e: Throwable): Unit = {
        jobScheduler.reportError("Error in job generator", e)
      }
    }
    eventLoop.start()

    // Recovering from a checkpoint reschedules missed batches; otherwise start fresh.
    if (ssc.isCheckpointPresent) {
      restart()
    } else {
      startFirstTime()
    }
  }
  /**
   * Stop generation of jobs. processReceivedData = true makes this wait until jobs
   * of current ongoing time interval has been generated, processed and corresponding
   * checkpoints written. Idempotent: a second call is a no-op.
   */
  def stop(processReceivedData: Boolean): Unit = synchronized {
    if (eventLoop == null) return // generator has already been stopped

    if (processReceivedData) {
      logInfo("Stopping JobGenerator gracefully")
      val timeWhenStopStarted = System.nanoTime()
      val stopTimeoutMs = conf.getTimeAsMs(
        "spark.streaming.gracefulStopTimeout", s"${10 * ssc.graph.batchDuration.milliseconds}ms")
      // Polling interval (ms) for the wait loops below.
      val pollTime = 100

      // To prevent graceful stop to get stuck permanently
      def hasTimedOut: Boolean = {
        val diff = TimeUnit.NANOSECONDS.toMillis((System.nanoTime() - timeWhenStopStarted))
        val timedOut = diff > stopTimeoutMs
        if (timedOut) {
          logWarning("Timed out while stopping the job generator (timeout = " + stopTimeoutMs + ")")
        }
        timedOut
      }

      // Wait until all the received blocks in the network input tracker has
      // been consumed by network input DStreams, and jobs have been generated with them
      logInfo("Waiting for all received blocks to be consumed for job generation")
      while(!hasTimedOut && jobScheduler.receiverTracker.hasUnallocatedBlocks) {
        Thread.sleep(pollTime)
      }
      logInfo("Waited for all received blocks to be consumed for job generation")

      // Stop generating jobs
      val stopTime = timer.stop(interruptTimer = false)
      graph.stop()
      logInfo("Stopped generation timer")

      // Wait for the jobs to complete and checkpoints to be written
      def haveAllBatchesBeenProcessed: Boolean = {
        lastProcessedBatch != null && lastProcessedBatch.milliseconds == stopTime
      }
      logInfo("Waiting for jobs to be processed and checkpoints to be written")
      while (!hasTimedOut && !haveAllBatchesBeenProcessed) {
        Thread.sleep(pollTime)
      }
      logInfo("Waited for jobs to be processed and checkpoints to be written")
    } else {
      logInfo("Stopping JobGenerator immediately")
      // Stop timer and graph immediately, ignore unprocessed data and pending jobs
      timer.stop(true)
      graph.stop()
    }

    // First stop the event loop, then stop the checkpoint writer; see SPARK-14701
    eventLoop.stop()
    if (shouldCheckpoint) checkpointWriter.stop()
    logInfo("Stopped JobGenerator")
  }
  /**
   * Callback called when a batch has been completely processed.
   * Posts ClearMetadata so DStream state for `time` can be cleaned up.
   */
  def onBatchCompletion(time: Time) {
    eventLoop.post(ClearMetadata(time))
  }
  /**
   * Callback called when the checkpoint of a batch has been written.
   * Only posts ClearCheckpointData when the caller requested deferred cleanup.
   */
  def onCheckpointCompletion(time: Time, clearCheckpointDataLater: Boolean) {
    if (clearCheckpointDataLater) {
      eventLoop.post(ClearCheckpointData(time))
    }
  }
  /** Processes all events posted to the event loop by dispatching on their type. */
  private def processEvent(event: JobGeneratorEvent) {
    logDebug("Got event " + event)
    event match {
      case GenerateJobs(time) => generateJobs(time)
      case ClearMetadata(time) => clearMetadata(time)
      case DoCheckpoint(time, clearCheckpointDataLater) =>
        doCheckpoint(time, clearCheckpointDataLater)
      case ClearCheckpointData(time) => clearCheckpointData(time)
    }
  }
  /** Starts the generator for the first time (no checkpoint to recover from). */
  private def startFirstTime() {
    val startTime = new Time(timer.getStartTime())
    // The graph's zero time is one batch before the first scheduled batch.
    graph.start(startTime - graph.batchDuration)
    timer.start(startTime.milliseconds)
    logInfo("Started JobGenerator at " + startTime)
  }
  /**
   * Restarts the generator based on the information in checkpoint:
   * reschedules batches that were pending or missed while the master was down,
   * then resumes the batch timer.
   */
  private def restart() {
    // If manual clock is being used for testing, then
    // either set the manual clock to the last checkpointed time,
    // or if the property is defined set it to that time
    if (clock.isInstanceOf[ManualClock]) {
      val lastTime = ssc.initialCheckpoint.checkpointTime.milliseconds
      val jumpTime = ssc.sc.conf.getLong("spark.streaming.manualClock.jump", 0)
      clock.asInstanceOf[ManualClock].setTime(lastTime + jumpTime)
    }

    val batchDuration = ssc.graph.batchDuration

    // Batches when the master was down, that is,
    // between the checkpoint and current restart time
    val checkpointTime = ssc.initialCheckpoint.checkpointTime
    val restartTime = new Time(timer.getRestartTime(graph.zeroTime.milliseconds))
    val downTimes = checkpointTime.until(restartTime, batchDuration)
    logInfo("Batches during down time (" + downTimes.size + " batches): "
      + downTimes.mkString(", "))

    // Batches that were unprocessed before failure
    val pendingTimes = ssc.initialCheckpoint.pendingTimes.sorted(Time.ordering)
    logInfo("Batches pending processing (" + pendingTimes.length + " batches): " +
      pendingTimes.mkString(", "))

    // Reschedule jobs for these times; deduplicate and keep chronological order.
    val timesToReschedule = (pendingTimes ++ downTimes).filter { _ < restartTime }
      .distinct.sorted(Time.ordering)
    logInfo("Batches to reschedule (" + timesToReschedule.length + " batches): " +
      timesToReschedule.mkString(", "))
    timesToReschedule.foreach { time =>
      // Allocate the related blocks when recovering from failure, because some blocks that were
      // added but not allocated, are dangling in the queue after recovering, we have to allocate
      // those blocks to the next batch, which is the batch they were supposed to go.
      jobScheduler.receiverTracker.allocateBlocksToBatch(time) // allocate received blocks to batch
      jobScheduler.submitJobSet(JobSet(time, graph.generateJobs(time)))
    }

    // Restart the timer
    timer.start(restartTime.milliseconds)
    logInfo("Restarted JobGenerator at " + restartTime)
  }
  /** Generate jobs and perform checkpointing for the given `time`. */
  private def generateJobs(time: Time) {
    // Checkpoint all RDDs marked for checkpointing to ensure their lineages are
    // truncated periodically. Otherwise, we may run into stack overflows (SPARK-6847).
    ssc.sparkContext.setLocalProperty(RDD.CHECKPOINT_ALL_MARKED_ANCESTORS, "true")
    Try {
      jobScheduler.receiverTracker.allocateBlocksToBatch(time) // allocate received blocks to batch
      graph.generateJobs(time) // generate jobs using allocated block
    } match {
      case Success(jobs) =>
        val streamIdToInputInfos = jobScheduler.inputInfoTracker.getInfo(time)
        jobScheduler.submitJobSet(JobSet(time, jobs, streamIdToInputInfos))
      case Failure(e) =>
        jobScheduler.reportError("Error generating jobs for time " + time, e)
        PythonDStream.stopStreamingContextIfPythonProcessIsDead(e)
    }
    // A checkpoint is attempted for every batch, whether or not job generation succeeded.
    eventLoop.post(DoCheckpoint(time, clearCheckpointDataLater = false))
  }
  /** Clear DStream metadata for the given `time`. */
  private def clearMetadata(time: Time) {
    ssc.graph.clearMetadata(time)
    // If checkpointing is enabled, then checkpoint,
    // else mark batch to be fully processed
    if (shouldCheckpoint) {
      eventLoop.post(DoCheckpoint(time, clearCheckpointDataLater = true))
    } else {
      // If checkpointing is not enabled, then delete metadata information about
      // received blocks (block data not saved in any case). Otherwise, wait for
      // checkpointing of this batch to complete.
      val maxRememberDuration = graph.getMaxInputStreamRememberDuration()
      jobScheduler.receiverTracker.cleanupOldBlocksAndBatches(time - maxRememberDuration)
      jobScheduler.inputInfoTracker.cleanup(time - maxRememberDuration)
      markBatchFullyProcessed(time)
    }
  }
  /** Clear DStream checkpoint data for the given `time`. */
  private def clearCheckpointData(time: Time) {
    ssc.graph.clearCheckpointData(time)
    // All the checkpoint information about which batches have been processed, etc have
    // been saved to checkpoints, so its safe to delete block metadata and data WAL files
    // Everything older than the input streams' remember window is reclaimed.
    val maxRememberDuration = graph.getMaxInputStreamRememberDuration()
    jobScheduler.receiverTracker.cleanupOldBlocksAndBatches(time - maxRememberDuration)
    jobScheduler.inputInfoTracker.cleanup(time - maxRememberDuration)
    markBatchFullyProcessed(time)
  }
  /** Perform checkpoint for the given `time`. */
  private def doCheckpoint(time: Time, clearCheckpointDataLater: Boolean) {
    // Only batches falling on a checkpoint-interval boundary are actually written.
    if (shouldCheckpoint && (time - graph.zeroTime).isMultipleOf(ssc.checkpointDuration)) {
      logInfo("Checkpointing graph for time " + time)
      ssc.graph.updateCheckpointData(time)
      checkpointWriter.write(new Checkpoint(ssc, time), clearCheckpointDataLater)
    } else if (clearCheckpointDataLater) {
      // Not a checkpointing batch, but cleanup was requested: mark the batch done now.
      markBatchFullyProcessed(time)
    }
  }
  // Records `time` as the latest batch whose processing (including cleanup) has finished.
  private def markBatchFullyProcessed(time: Time) {
    lastProcessedBatch = time
  }
}
| WindCanDie/spark | streaming/src/main/scala/org/apache/spark/streaming/scheduler/JobGenerator.scala | Scala | apache-2.0 | 12,990 |
package kipsigman.play.mvc
import kipsigman.domain.entity._
import play.api.data.Form
import play.api.i18n.Messages
import play.api.libs.json._
import play.api.mvc.RequestHeader
import play.api.mvc.Result
import play.api.mvc.Results._
import play.api.libs.json.Json.toJsFieldJsValueWrapper
/** Helpers for building uniform JSON responses to AJAX requests. */
object AjaxHelper {
  /** Outcome reported in the JSON payload of every AJAX response. */
  abstract class Status(val name: String)
  object Status {
    val key = "status"
    case object Success extends Status("success")
    case object Error extends Status("error")
  }
  /** JSON field names used in response payloads. */
  object Key {
    val entity = "entity"
    val errors = "errors"
    val id = "id"
  }
  /** 200 OK with the given JSON body. */
  def successResult(jsValue: JsValue)(implicit request: RequestHeader, user: Option[User]): Result = {
    Ok(jsValue)
  }
  /** 400 Bad Request with the given JSON body. */
  def errorResult(jsValue: JsValue)(implicit request: RequestHeader, user: Option[User]): Result = {
    BadRequest(jsValue)
  }
  /** 404 Not Found with the given JSON body. */
  def notFoundResult(jsValue: JsValue)(implicit request: RequestHeader, user: Option[User]): Result = {
    NotFound(jsValue)
  }
  /** Success payload for a saved Content entity: its id and current content status. */
  def contentSaveSuccessResult[T <: Content[T]](content: Content[T])
    (implicit request: RequestHeader, messages: Messages, user: Option[User]): Result = {
    val jsValue = Json.obj(Status.key -> Status.Success.name, Key.id -> content.id, "contentStatus" -> content.status.name)
    successResult(jsValue)
  }
  /** 404 payload for an entity lookup miss: localized message plus any additional errors. */
  def entityNotFoundResult[T](clazz: Class[T], id: Int, additionalErrors: Seq[String] = Seq())
    (implicit request: RequestHeader, messages: Messages, user: Option[User]): Result = {
    val error = Messages("entity.find.error.notFound", clazz.getSimpleName, id)
    val errors = Seq(error) ++ additionalErrors
    val jsValue = Json.obj(Status.key -> Status.Error.name, Key.errors -> errors)
    notFoundResult(jsValue)
  }
  /** 400 payload for a failed save, carrying the entity id and plain error strings. */
  def entitySaveErrorResult(entity: IdEntity, errors: Seq[String])
    (implicit request: RequestHeader, messages: Messages, user: Option[User]): Result = {
    val jsValue = Json.obj(Status.key -> Status.Error.name, Key.id -> entity.id, Key.errors -> errors)
    errorResult(jsValue)
  }
  /** 400 payload for a failed save, carrying form validation errors as JSON. */
  def entitySaveErrorResult(entity: IdEntity, formWithErrors: Form[_])
    (implicit request: RequestHeader, messages: Messages, user: Option[User]): Result = {
    val jsValue = Json.obj(Status.key -> Status.Error.name, Key.id -> entity.id, Key.errors -> formWithErrors.errorsAsJson)
    errorResult(jsValue)
  }
  /** Success payload containing only the saved entity's id. */
  def entitySaveSuccessResult(id: Int)
    (implicit request: RequestHeader, messages: Messages, user: Option[User]): Result = {
    val jsValue = Json.obj(Status.key -> Status.Success.name, Key.id -> id)
    successResult(jsValue)
  }
  /** Success payload containing the full serialized entity plus its id. */
  def entitySaveSuccessResult[T <: IdEntity](entity: T)
    (implicit request: RequestHeader, messages: Messages, user: Option[User], entityWrites: Writes[T]): Result = {
    val entityJsValue = Json.toJson(entity)(entityWrites)
    val jsValue = Json.obj(Status.key -> Status.Success.name, Key.entity -> entityJsValue, Key.id -> entity.id)
    successResult(jsValue)
  }
}
} | kipsigman/play-extensions | src/main/scala/kipsigman/play/mvc/AjaxHelper.scala | Scala | apache-2.0 | 2,986 |
/*
Copyright (c) 2015, Robby, Kansas State University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.sireum.util.reflect
import java.io.File
import java.nio.file.Files
import org.sireum.option._
import org.sireum.util._
object ReflectGen {
  /**
   * Generates a CLI description from the product returned by the root class'
   * companion `make()` method and writes it to the configured output.
   * NOTE(review): every `run` overload returns false on both success and failure —
   * confirm the intended meaning of the Boolean result.
   */
  final def run(option: CliGenOption,
                outPrintln: String => Unit,
                errPrintln: String => Unit): Boolean = {
    val optOpt = check(option, outPrintln, errPrintln)
    if (optOpt.isEmpty) return false
    val opt = optOpt.get
    val o =
      try {
        val companionClass =
          Class.forName(
            Reflection.companion(opt.root, processAnnotations = false).
              get._1.asModule.fullName)
        val o = companionClass.getMethod("make").invoke(null)
        if (o.getClass.getName != option.rootClassName) {
          errPrintln(s"'${option.rootClassName}' companion 'make()' method does not return an object of type '${option.rootClassName}'")
          return false
        } else if (!o.isInstanceOf[AnyRef with Product]) {
          errPrintln(s"'${option.rootClassName}' is not a case class")
          return false
        }
        o.asInstanceOf[AnyRef with Product]
      } catch {
        case t: Throwable =>
          errPrintln(s"Could not find '${option.rootClassName}' companion 'make():${option.rootClassName}' method")
          return false
      }
    write(
      opt,
      new CliGen(opt.licenseFileOpt, opt.packageNameOpt, opt.className).
        generate(o),
      outPrintln)
    false
  }
  /** Generates JSON (de)serialization code for the root class and the listed root types. */
  final def run(option: JsonGenOption,
                outPrintln: String => Unit,
                errPrintln: String => Unit): Boolean = {
    val optOpt = check(option, outPrintln, errPrintln)
    if (optOpt.isEmpty) return false
    val opt = optOpt.get
    // Nothing is written when any of the listed root classes fails to load.
    for (roots <- toClass(option.roots, errPrintln)) yield
      write(
        opt,
        new RewriterJsonGen(opt.licenseFileOpt,
          opt.packageNameOpt, opt.className,
          Reflection.getTypeOfClass(opt.root),
          option.imports.toVector, roots).generateJson(),
        outPrintln)
    false
  }
  /** Generates rewriter code for the root class. */
  final def run(option: RewriterGenOption,
                outPrintln: String => Unit,
                errPrintln: String => Unit): Boolean = {
    val optOpt = check(option, outPrintln, errPrintln)
    if (optOpt.isEmpty) return false
    val opt = optOpt.get
    write(
      opt,
      new RewriterJsonGen(opt.licenseFileOpt,
        opt.packageNameOpt, opt.className,
        Reflection.getTypeOfClass(opt.root),
        option.imports.toVector, ivectorEmpty).generateRewriter(),
      outPrintln)
    false
  }
  /** Resolves each fully-qualified class name; None when any of them fails to load. */
  private def toClass(imports: Array[String],
                      errPrintln: String => Unit): Option[ISeq[Class[_]]] = {
    var result = ivectorEmpty[Class[_]]
    var ok = true
    for (i <- imports) {
      try {
        result = result :+ Class.forName(i)
      } catch {
        case t: Throwable =>
          errPrintln(s"Could not load class '$i'")
          ok = false
      }
    }
    if (ok) Some(result) else None
  }
  /** Writes the generated text to the configured output file, or prints it when none is set. */
  private def write(opt: ReflectGenOpt,
                    s: String,
                    outPrintln: String => Unit): Unit = {
    opt.outputFileOpt match {
      case Some(f) =>
        Files.write(f.toPath, s.getBytes)
        outPrintln(s"Written ${f.getAbsolutePath}")
      case _ => outPrintln(s)
    }
  }
  /**
   * Validates the raw CLI options (root class loadable, license file readable, output
   * directory creatable) and normalizes them into a [[ReflectGenOpt]]; None on any error.
   */
  private def check(option: ReflectGenOption,
                    outPrintln: String => Unit,
                    errPrintln: String => Unit): Option[ReflectGenOpt] = {
    var ok = true
    val clazz =
      try {
        Class.forName(option.rootClassName).asInstanceOf[Class[AnyRef]]
      } catch {
        case t: Throwable =>
          errPrintln(s"Could not find class named: '${option.rootClassName}'")
          ok = false
          null
      }
    val licenseFile =
      option.licenseFile match {
        case some(path) =>
          val f = new File(path)
          if (f.isFile) {
            Some(new String(Files.readAllBytes(f.toPath)).trim)
          } else {
            errPrintln(s"Could not read license file: '$path'")
            ok = false
            None
          }
        case _ => None
      }
    // Split the fully-qualified class name into (package, simple name).
    val (packageName, className) = {
      val i = option.className.lastIndexOf('.')
      if (i < 0) (None, option.className)
      else
        (Some(option.className.substring(0, i)),
          option.className.substring(i + 1))
    }
    val outputFile =
      option.outputDir match {
        case some(path) =>
          val d = new File(path)
          if (d.isFile) {
            errPrintln(s"Output directory is a file: '$path'")
            ok = false
            None
          } else {
            if (!d.exists()) {
              outPrintln(s"Output directory does not exist; it will be created")
            }
            Some(new File(d, option.className.replaceAll("\\\\.", "/") + ".scala"))
          }
        case _ => None
      }
    if (ok) {
      // Create the output file's parent directories up front so write() cannot fail on them.
      outputFile.foreach { f =>
        val d = f.getParentFile
        if (!d.exists() && !d.mkdirs()) {
          ok = false
          errPrintln(s"Could not create directory: '${d.getAbsolutePath}'")
        }
      }
    }
    if (ok)
      Some(ReflectGenOpt(packageName, className, licenseFile, outputFile, clazz))
    else None
  }
  // Normalized, validated generator options.
  private case class ReflectGenOpt(packageNameOpt: Option[String],
                                   className: String,
                                   licenseFileOpt: Option[String],
                                   outputFileOpt: Option[File],
                                   root: Class[AnyRef])
}
| sireum/v3 | cli/jvm/src/main/scala/org/sireum/util/reflect/ReflectGen.scala | Scala | bsd-2-clause | 6,829 |
package com.tothferenc.templateFX.collection
import java.util
import java.util.{List => JList}
import com.tothferenc.templateFX.base.Template
import com.tothferenc.templateFX.change.Change
import com.tothferenc.templateFX.change.InsertWithKey
import com.tothferenc.templateFX.change.MoveNode
import com.tothferenc.templateFX.change.RemoveNodes
import com.tothferenc.templateFX.change.Replace
import com.tothferenc.templateFX.errors.DuplicateKeyException
import scala.collection.convert.wrapAsScala._
import scala.collection.mutable
import scala.collection.immutable
import scala.reflect.ClassTag
final case class OrderedSpecsWithIds[Key: ClassTag, Item](specsWithKeys: immutable.Seq[(Key, Template[Item])]) extends CollectionSpec[Item] {
  /**
   * Computes the changes required to reconcile `collection` with the keyed specs:
   * removes items whose key is absent from the specs, then, per spec, either
   * moves and mutates the uniquely matching item, replaces it when in-place
   * reconciliation is not possible, or inserts a new item at the desired index.
   */
  override def requiredChangesIn(collection: JList[Item]): List[Change] = {
    val existingNodesByKey = collection.groupBy(SpecsWithKeys.getItemKey[Key])
    // Keys present in the desired specs, collected for fast membership tests.
    val specKeySet = {
      val set = new mutable.HashSet[Key]()
      specsWithKeys.foreach(pair => set.add(pair._1))
      set
    }
    // Items carrying no key, or a key no longer present in the specs, are removed.
    val removals = for {
      (key, nodes) <- existingNodesByKey if key.isEmpty || !specKeySet.contains(key.get)
      node <- nodes
    } yield node
    val mutationsMovesInsertions = specsWithKeys.zipWithIndex.flatMap {
      case ((key, spec), desiredPosition) => existingNodesByKey.get(Some(key)) match {
        case Some(mutable.Buffer(node)) =>
          // Exactly one existing item for this key: move it into place and apply the
          // spec's reconciliation steps, falling back to wholesale replacement.
          spec.reconciliationSteps(node).map(List(MoveNode(collection, node, desiredPosition)) ++ _)
            .getOrElse(List(Replace(collection, spec, collection.indexOf(node))))
        case Some(buffer) if buffer.lengthCompare(1) > 0 =>
          // Keys must be unique within the collection.
          throw DuplicateKeyException(key.toString)
        case _ =>
          // No existing item for this key: insert a fresh one at the desired index.
          List(InsertWithKey(collection, spec, desiredPosition, key))
      }
    }
    (if (removals.isEmpty) Nil else List(RemoveNodes(collection, removals))) ++ mutationsMovesInsertions
  }
  /** Builds a brand-new collection from the specs, tagging each built item with its key. */
  override def build(): JList[Item] = {
    val buffer = new util.ArrayList[Item]()
    specsWithKeys.foreach {
      case (key, spec) => buffer.add(SpecsWithKeys.setKeyOnItem(key, spec.build()))
    }
    buffer
  }
}
| tferi/templateFX | base/src/main/scala/com/tothferenc/templateFX/collection/OrderedSpecsWithIds.scala | Scala | gpl-3.0 | 2,114 |
package com.sagacify.sonar.scala
import org.sonar.plugins.scala.Scala
import org.scalatest._
// import scalariform.lexer.ScalaLexer
// import scalariform.lexer.Token
// import scalariform.lexer.Tokens.LINE_COMMENT
// import scalariform.lexer.Tokens.MULTILINE_COMMENT
// import scalariform.lexer.Tokens.XML_COMMENT
// import scala.annotation.tailrec
/** Unit tests for the token-based line counters in Measures. */
class MeasurersSpec extends FlatSpec with Matchers {

  // Representative source: per the assertions below it contains 23 comment
  // lines and 18 non-comment lines of code.
  val exampleSourceFile = """/*
 * Sonar Scala Plugin
 * Copyright (C) 2011-2016 SonarSource SA
 * mailto:contact AT sonarsource DOT com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 3 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
package com.sagacify.example
import collection.mutable.Stack
import org.scalatest._
class ScalaSensorSpec extends FlatSpec with Matchers {
  // Example test
  "A Stack" should "pop values in last-in-first-out order" in {
    val stack = new Stack[Int]
    stack.push(1) // This is
    stack.push(2) // a pointless
    stack.pop() should be (2) // example
    stack.pop() should be (1)
  }
  it should "throw NoSuchElementException if an empty stack is popped" in {
    val emptyStack = new Stack[Int]
    a [NoSuchElementException] should be thrownBy {
      emptyStack.pop()
    }
  }
}
"""

  "A Comment lines counter" should "count line comments" in {
    val tokens = Scala.tokenize("// this is a test", "2.11.8")
    val count = Measures.count_comment_lines(tokens)
    assert(count == 1)
  }

  it should "count multiline comments" in {
    val tokens = Scala.tokenize("/* this\\n *is\\n *a\\n *test*/", "2.11.8")
    val count = Measures.count_comment_lines(tokens)
    assert(count == 4)
  }

  it should "count trailing comments." in {
    val tokens = Scala.tokenize("case class Test() // this is a test", "2.11.8")
    val count = Measures.count_comment_lines(tokens)
    assert(count == 1)
  }

  it should "count the correct number of comments" in {
    val tokens = Scala.tokenize(exampleSourceFile, "2.11.8")
    val count = Measures.count_comment_lines(tokens)
    assert(count == 23)
  }

  "A Non-Comment lines counter" should "count non-comment lines of codes" in {
    val tokens = Scala.tokenize("package com.example", "2.11.8")
    // Leftover debug `println(tokens)` removed: tests should not write to stdout.
    val count = Measures.count_ncloc(tokens)
    assert(count == 1)
  }

  it should "count lines of code with a trailing comment" in {
    val tokens = Scala.tokenize("case class Test() /*\\n * test\\n */", "2.11.8")
    val count = Measures.count_ncloc(tokens)
    assert(count == 1)
  }

  it should "count trailing code." in {
    val tokens = Scala.tokenize("/* this is a test */ case class Test()", "2.11.8")
    val count = Measures.count_ncloc(tokens)
    assert(count == 1)
  }

  it should "count the correct number of comments" in {
    val tokens = Scala.tokenize(exampleSourceFile, "2.11.8")
    val count = Measures.count_ncloc(tokens)
    assert(count == 18)
  }
}
| skirge/sonar-scala | src/test/scala/com/sagacify/sonar/scala/MeasuresSpec.scala | Scala | lgpl-3.0 | 3,541 |
/*
* Part of NDLA learningpath-api.
* Copyright (C) 2016 NDLA
*
* See LICENSE
*
*/
package no.ndla.learningpathapi.validation
import no.ndla.learningpathapi.model.api.ValidationMessage
import no.ndla.learningpathapi.model.domain._
import scala.util.{Failure, Success, Try}
trait LearningStepValidator {
  this: TitleValidator with LanguageValidator =>
  val learningStepValidator: LearningStepValidator

  /** Validates [[LearningStep]] instances before they are accepted for persistence. */
  class LearningStepValidator {
    val noHtmlTextValidator = new TextValidator(allowHtml = false)
    val basicHtmlTextValidator = new TextValidator(allowHtml = true)
    val urlValidator = new UrlValidator()

    val MISSING_DESCRIPTION_OR_EMBED_URL =
      "A learningstep is required to have either a description, embedUrl or both."

    /**
      * Validates the given step.
      *
      * @return Success with the unchanged step, or Failure wrapping a
      *         ValidationException that carries every validation message found.
      */
    def validate(newLearningStep: LearningStep, allowUnknownLanguage: Boolean = false): Try[LearningStep] = {
      val errors = validateLearningStep(newLearningStep, allowUnknownLanguage)
      // Check nonEmpty instead of matching `head :: tail`: the Seq returned by
      // validateLearningStep is not guaranteed to be a List at runtime, and a
      // cons pattern would silently fall through to the success case otherwise.
      if (errors.nonEmpty) Failure(new ValidationException(errors = errors.toList))
      else Success(newLearningStep)
    }

    /** Collects all validation messages for the step; empty when the step is valid. */
    def validateLearningStep(newLearningStep: LearningStep, allowUnknownLanguage: Boolean): Seq[ValidationMessage] = {
      titleValidator.validate(newLearningStep.title, allowUnknownLanguage) ++
        validateDescription(newLearningStep.description, allowUnknownLanguage) ++
        validateEmbedUrl(newLearningStep.embedUrl, allowUnknownLanguage) ++
        validateLicense(newLearningStep.license).toList ++
        validateThatDescriptionOrEmbedUrlOrBothIsDefined(newLearningStep).toList
    }

    /** Validates every description's HTML content and language tag. */
    def validateDescription(descriptions: Seq[Description], allowUnknownLanguage: Boolean): Seq[ValidationMessage] = {
      // flatMap over an empty sequence already yields an empty result, so the
      // previous explicit isEmpty branch was redundant.
      descriptions.flatMap { description =>
        basicHtmlTextValidator.validate("description", description.description).toList :::
          languageValidator.validate("language", description.language, allowUnknownLanguage).toList
      }
    }

    /** Validates every embed URL and its language tag. */
    def validateEmbedUrl(embedUrls: Seq[EmbedUrl], allowUnknownLanguage: Boolean): Seq[ValidationMessage] = {
      embedUrls.flatMap { embedUrl =>
        urlValidator.validate("embedUrl.url", embedUrl.url).toList :::
          languageValidator.validate("language", embedUrl.language, allowUnknownLanguage).toList
      }
    }

    /** Validates the license text (no HTML allowed); None when no license is set. */
    def validateLicense(licenseOpt: Option[String]): Option[ValidationMessage] =
      licenseOpt.flatMap(license => noHtmlTextValidator.validate("license", license))

    /** A step must carry a description, an embed URL, or both. */
    def validateThatDescriptionOrEmbedUrlOrBothIsDefined(newLearningStep: LearningStep): Option[ValidationMessage] = {
      if (newLearningStep.description.isEmpty && newLearningStep.embedUrl.isEmpty)
        Some(ValidationMessage("description|embedUrl", MISSING_DESCRIPTION_OR_EMBED_URL))
      else
        None
    }
  }
}
| NDLANO/learningpath-api | src/main/scala/no/ndla/learningpathapi/validation/LearningStepValidator.scala | Scala | gpl-3.0 | 3,100 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io._
import java.nio.ByteBuffer
import java.util.Properties
import org.apache.spark._
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.rdd.RDD
/**
* A task that sends back the output to the driver application.
*
* See [[Task]] for more information.
*
* @param stageId id of the stage this task belongs to
* @param stageAttemptId attempt id of the stage this task belongs to
* @param taskBinary broadcasted version of the serialized RDD and the function to apply on each
* partition of the given RDD. Once deserialized, the type should be
* (RDD[T], (TaskContext, Iterator[T]) => U).
* @param partition partition of the RDD this task is associated with
* @param locs preferred task execution locations for locality scheduling
* @param outputId index of the task in this job (a job can launch tasks on only a subset of the
* input RDD's partitions).
* @param localProperties copy of thread-local properties set by the user on the driver side.
* @param metrics a [[TaskMetrics]] that is created at driver side and sent to executor side.
*/
private[spark] class ResultTask[T, U](
    stageId: Int,
    stageAttemptId: Int,
    taskBinary: Broadcast[Array[Byte]],
    partition: Partition,
    locs: Seq[TaskLocation],
    val outputId: Int,
    localProperties: Properties,
    metrics: TaskMetrics)
  extends Task[U](stageId, stageAttemptId, partition.index, metrics, localProperties)
  with Serializable {
  // Deduplicated preferred locations; a null `locs` is treated as "no preference".
  @transient private[this] val preferredLocs: Seq[TaskLocation] = {
    if (locs == null) Nil else locs.toSet.toSeq
  }
  override def runTask(context: TaskContext): U = {
    // Deserialize the RDD and the func using the broadcast variables.
    val deserializeStartTime = System.currentTimeMillis()
    val ser = SparkEnv.get.closureSerializer.newInstance()
    val (rdd, func) = ser.deserialize[(RDD[T], (TaskContext, Iterator[T]) => U)](
      ByteBuffer.wrap(taskBinary.value), Thread.currentThread.getContextClassLoader)
    // Record deserialization overhead before applying the function to the partition.
    _executorDeserializeTime = System.currentTimeMillis() - deserializeStartTime
    func(context, rdd.iterator(partition, context))
  }
  // This is only callable on the driver side.
  override def preferredLocations: Seq[TaskLocation] = preferredLocs
  override def toString: String = "ResultTask(" + stageId + ", " + partitionId + ")"
}
| gioenn/xSpark | core/src/main/scala/org/apache/spark/scheduler/ResultTask.scala | Scala | apache-2.0 | 3,260 |
package org.scalatra.spring
import org.scalatra.servlet.RichServletContext
import scala.collection.JavaConverters._
import org.scalatra.ScalatraServlet
import javax.servlet.ServletContext
import javax.annotation.PostConstruct
import org.springframework.stereotype.Component
import org.springframework.context.{ApplicationContext, ApplicationContextAware}
import org.springframework.web.context.ServletContextAware
/** @author Stephen Samuel */
@Component
class SpringScalatraBootstrap extends ApplicationContextAware with ServletContextAware {

  var servletContext: ServletContext = _
  var appContext: ApplicationContext = _

  /**
   * Mounts every Spring bean annotated with [[Path]] that is a [[ScalatraServlet]]
   * into the servlet context at the annotated path (normalized to a leading slash).
   */
  @PostConstruct
  def bootstrap() {
    val context = new RichServletContext(servletContext)
    for (bean <- appContext.getBeansWithAnnotation(classOf[Path]).values().asScala) {
      bean match {
        case servlet: ScalatraServlet =>
          val annotated = servlet.getClass.getAnnotation(classOf[Path]).value()
          val mountPoint = if (annotated.startsWith("/")) annotated else "/" + annotated
          context.mount(servlet, mountPoint)
        case _ => // @Path beans that are not Scalatra servlets are ignored
      }
    }
  }

  def setServletContext(servletContext: ServletContext): Unit = this.servletContext = servletContext

  def setApplicationContext(appContext: ApplicationContext): Unit = this.appContext = appContext
}
| etorreborre/scalatra | spring/src/main/scala/org/scalatra/spring/SpringScalatraBootstrap.scala | Scala | bsd-2-clause | 1,271 |
package org.f100ded.play.fakews
import play.api.libs.ws.{BodyWritable, EmptyBody, WSBody, WSCookie}
import scala.language.implicitConversions
/** Immutable description of the response a fake WS route should produce. */
case class FakeResult
(
  status: Int,
  statusText: String,
  body: WSBody = EmptyBody,
  headers: Map[String, Seq[String]] = Map(),
  cookies: Seq[WSCookie] = Seq()
) {
  type Self = FakeResult

  /** Returns a copy with each given (name, value) pair appended to the headers. */
  def addHeaders(h: (String, String)*): Self = {
    var merged = headers
    for ((name, value) <- h) {
      merged = merged.updated(name, merged.getOrElse(name, Seq.empty) :+ value)
    }
    copy(headers = merged)
  }

  /** Returns a copy with the given cookies appended. */
  def addCookies(c: WSCookie*): Self = copy(cookies = cookies ++ c)

  /** Returns a copy whose body is `body` serialized via the implicit BodyWritable. */
  def apply[T](body: T)(implicit w: BodyWritable[T]): Self = copy(body = w.transform(body))
}
object FakeResult {
  // Lets a bare FakeResult stand in for Routes: every request receives this result.
  implicit def result2routes(result: FakeResult): Routes = {
    case _ => result
  }
}
| f100ded/play-fake-ws-standalone | src/main/scala/org/f100ded/play/fakews/FakeResult.scala | Scala | apache-2.0 | 926 |
package scalariform.utils
object TextEdit {
  /** Edit that deletes the characters covered by `range`. */
  def delete(range: Range): TextEdit = delete(range.offset, range.length)
  /** Edit that deletes `length` characters starting at `position`. */
  def delete(position: Int, length: Int): TextEdit = TextEdit(position = position, length = length, replacement = "")
}
/** A single splice: replace `length` characters starting at `position` with `replacement`. */
case class TextEdit(position: Int, length: Int, replacement: String) {

  require(position >= 0, "position must be positive: " + position)
  require(length >= 0)

  // Render newline/carriage-return escapes visibly so edits log on a single line.
  override lazy val toString: String = {
    val visibleReplacement = replacement.replace("\\n", """\\n""").replace("\\r", """\\r""")
    getClass.getSimpleName + "(position = " + position + ", length = " + length + ", replacement = '" + visibleReplacement + "')"
  }

  /** Returns a copy of this edit moved `n` characters to the right. */
  def shift(n: Int): TextEdit = copy(position = position + n)
}

object TextEditProcessor {

  /**
   * Applies the edits to `s`.
   * @param edits must be ordered and non-overlapping
   */
  def runEdits(s: String, edits: TextEdit*): String = runEdits(s, edits.toList)

  /**
   * Applies the edits to `s`.
   * @param edits must be ordered and non-overlapping
   */
  def runEdits(s: String, edits: List[TextEdit]): String = {
    val result = new StringBuilder
    var remaining = edits
    var pos = 0
    var done = false
    while (!done) {
      remaining match {
        case edit :: rest if edit.position == pos =>
          // An edit is anchored here: emit its replacement and skip the deleted span.
          // Edits anchored at (or past) the end of the input are applied here too.
          result.append(edit.replacement)
          pos += edit.length
          remaining = rest
        case _ if pos < s.length =>
          // No edit at this offset: copy the original character through.
          result.append(s(pos))
          pos += 1
        case _ =>
          // Past the end of the input with no edit anchored here: stop.
          done = true
      }
    }
    require(remaining.isEmpty)
    result.toString
  }
}
| mdr/scalariform | scalariform/src/main/scala/scalariform/utils/TextEdits.scala | Scala | mit | 1,977 |
/*
* Copyright 2012-2014 Kieron Wilkinson.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package viper.source.log.jul
import viper.domain.Record
/**
* Represents consumer of JUL events.
*/
trait JULConsumer {
  // Returns the next available record — presumably None when nothing is
  // available; NOTE(review): confirm whether None means "exhausted" or "not yet".
  def next(): Option[Record]
}
| vyadh/viper | source-log/src/main/scala/viper/source/log/jul/JULConsumer.scala | Scala | apache-2.0 | 764 |
/**
* Copyright (C) 2010-2012 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.kernel.limiting
import net.lshift.diffa.kernel.config.DomainServiceLimitsView
import net.lshift.diffa.schema.servicelimits.{Unlimited, ChangeEventRate}
/** Creates per-space rate limiters driven by the effective ChangeEventRate service limit. */
class ServiceLimitsDomainRateLimiterFactory(domainLimitsView: DomainServiceLimitsView) extends DomainRateLimiterFactory {
  def createRateLimiter(space: Long): RateLimiter = new RateLimiter(
    // The current limit is re-read each time the limiter consults its rate function.
    () => domainLimitsView.getEffectiveLimitByNameForDomain(space, ChangeEventRate)
  )
}
/** Factory for rate limiters keyed by space (domain) id. */
trait DomainRateLimiterFactory extends RateLimiterFactory[Long]
/** Generic factory for rate limiters keyed by some value of type T. */
trait RateLimiterFactory[T] {
  def createRateLimiter(t: T): RateLimiter
}
/**
* Limit the rate at which actions of the specified type are permitted.
* Any action that should be rate limited should call RateLimiter.accept before executing the
* rate limited action.
*
* Important Note
* If the rate definition changes (eventsPerSecondFn), then there is a one second delay
* before the new definition takes effect.
*/
class RateLimiter(eventsPerSecondFn: () => Int, clock: Clock = SystemClock) extends Limiter {
  // Token-bucket definition; each accessor re-invokes eventsPerSecondFn, which is how
  // rate changes propagate (with up to one refillInterval of delay, per the class doc).
  private val params = new TokenBucketParameters {
    def capacity = eventsPerSecondFn()
    override def initialVolume = eventsPerSecondFn()
    override def refillInterval = 1000L
    override def refillAmount = eventsPerSecondFn()
  }
  private val tokenBucket = TokenBucket(params, clock)
  // NOTE(review): the limited-vs-unlimited choice is made once, at construction time.
  // If the configured limit later switches between Unlimited and a finite value, this
  // limiter keeps its original strategy — confirm that is intended.
  private val acceptFn: () => Boolean = if (eventsPerSecondFn() == Unlimited.value) {
    () => true
  } else {
    () => tokenBucket.tryConsume
  }
  // True when the caller may proceed; false when the action is rate-limited.
  def accept() = acceptFn()
}
/** Gatekeeper for rate-limited actions: call accept() before performing the action. */
trait Limiter {
  def accept(): Boolean
}
/** Variant of [[Limiter]] that decides per action value. */
trait TypedLimiter[ActionType] {
  def accept(action: ActionType): Boolean
}
| lshift/diffa | kernel/src/main/scala/net/lshift/diffa/kernel/limiting/RateLimiter.scala | Scala | apache-2.0 | 2,251 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils.timer
import java.util.concurrent.{CountDownLatch, ExecutorService, Executors, TimeUnit}
import junit.framework.Assert._
import java.util.concurrent.atomic._
import org.junit.{Test, After, Before}
import scala.collection.mutable.ArrayBuffer
import scala.util.Random
class TimerTest {
private class TestTask(override val expirationMs: Long, id: Int, latch: CountDownLatch, output: ArrayBuffer[Int]) extends TimerTask {
private[this] val completed = new AtomicBoolean(false)
def run(): Unit = {
if (completed.compareAndSet(false, true)) {
output.synchronized { output += id }
latch.countDown()
}
}
}
private[this] var executor: ExecutorService = null
@Before
def setup() {
executor = Executors.newSingleThreadExecutor()
}
@After
def teardown(): Unit = {
executor.shutdown()
executor = null
}
@Test
def testAlreadyExpiredTask(): Unit = {
val startTime = System.currentTimeMillis()
val timer = new Timer(taskExecutor = executor, tickMs = 1, wheelSize = 3, startMs = startTime)
val output = new ArrayBuffer[Int]()
val latches = (-5 until 0).map { i =>
val latch = new CountDownLatch(1)
timer.add(new TestTask(startTime + i, i, latch, output))
latch
}
latches.take(5).foreach { latch =>
assertEquals("already expired tasks should run immediately", true, latch.await(3, TimeUnit.SECONDS))
}
assertEquals("output of already expired tasks", Set(-5, -4, -3, -2, -1), output.toSet)
}
@Test
def testTaskExpiration(): Unit = {
val startTime = System.currentTimeMillis()
val timer = new Timer(taskExecutor = executor, tickMs = 1, wheelSize = 3, startMs = startTime)
val output = new ArrayBuffer[Int]()
val tasks = new ArrayBuffer[TestTask]()
val ids = new ArrayBuffer[Int]()
val latches =
(0 until 5).map { i =>
val latch = new CountDownLatch(1)
tasks += new TestTask(startTime + i, i, latch, output)
ids += i
latch
} ++ (10 until 100).map { i =>
val latch = new CountDownLatch(2)
tasks += new TestTask(startTime + i, i, latch, output)
tasks += new TestTask(startTime + i, i, latch, output)
ids += i
ids += i
latch
} ++ (100 until 500).map { i =>
val latch = new CountDownLatch(1)
tasks += new TestTask(startTime + i, i, latch, output)
ids += i
latch
}
// randomly submit requests
Random.shuffle(tasks.toSeq).foreach { task => timer.add(task) }
while (timer.advanceClock(1000)) {}
latches.foreach { latch => latch.await() }
assertEquals("output should match", ids.sorted, output.toSeq)
}
}
| tempbottle/kafka | core/src/test/scala/unit/kafka/utils/timer/TimerTest.scala | Scala | apache-2.0 | 3,536 |
/* LogEventsWithAkkaSpec.scala
*
* Copyright (c) 2013-2014 linkedin.com
* Copyright (c) 2013-2015 zman.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package atmos.monitor
import akka.event.{ Logging, LoggingAdapter }
import org.scalatest._
import org.scalamock.scalatest.MockFactory
/**
* Test suite for [[atmos.monitor.LogEventsWithAkka]].
*/
class LogEventsWithAkkaSpec extends FlatSpec with Matchers with MockFactory {
val thrown = new RuntimeException
"LogEventsWithAkka" should "forward log entries to an Akka logging adapter" in {
val fixture = new LoggerFixture
val monitor = LogEventsWithAkka(fixture.mock)
for {
level <- Seq(Logging.ErrorLevel, Logging.WarningLevel, Logging.InfoLevel, Logging.DebugLevel)
enabled <- Seq(true, false)
t <- Seq(Some(thrown), None)
} {
fixture.isEnabled.expects(level).returns(enabled).once
monitor.isLoggable(level) shouldBe enabled
if (level == Logging.ErrorLevel && t.isDefined) fixture.error.expects(thrown, "MSG").once
else fixture.log.expects(level, "MSG").once
monitor.log(level, "MSG", t)
}
}
class LoggerFixture { self =>
val isEnabled = mockFunction[Logging.LogLevel, Boolean]
val error = mockFunction[Throwable, String, Unit]
val log = mockFunction[Logging.LogLevel, String, Unit]
val mock = new LoggingAdapter {
def isErrorEnabled = self.isEnabled(Logging.ErrorLevel)
def isWarningEnabled = self.isEnabled(Logging.WarningLevel)
def isInfoEnabled = self.isEnabled(Logging.InfoLevel)
def isDebugEnabled = self.isEnabled(Logging.DebugLevel)
def notifyError(message: String) = ???
def notifyError(cause: Throwable, message: String) = ???
def notifyWarning(message: String) = ???
def notifyInfo(message: String) = ???
def notifyDebug(message: String) = ???
override def error(thrown: Throwable, message: String) = self.error(thrown, message)
override def log(level: Logging.LogLevel, message: String) = self.log(level, message)
}
}
} | zmanio/atmos | src/test/scala/atmos/monitor/LogEventsWithAkkaSpec.scala | Scala | apache-2.0 | 2,575 |
/*
* Copyright 2016 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dnvriend
import cats.Foldable
import cats.Monoid
import cats.instances.all._
import cats.syntax.foldable._
import scala.language.higherKinds
class FoldableTest extends TestSpec {
def fold[A: Monoid, F[_]](fa: F[A])(implicit foldable: Foldable[F]): A = foldable.fold(fa)
it should "fold a List of Int" in {
fold(List(1, 2)) shouldBe 3
fold(List.empty[Int]) shouldBe 0 // due to the Monoid[Int].empty
}
it should "fold a List of String" in {
fold(List("a", "b")) shouldBe "ab"
fold(List.empty[String]) shouldBe "" // due to the Monoid[String].empty
}
it should "fold an Option of Int" in {
fold(Option(1)) shouldBe 1
fold(Option.empty[Int]) shouldBe 0 // due to the Monoid[Int].empty
}
it should "combineAll for Int" in {
List(1, 2).combineAll shouldBe 3
List.empty[Int].combineAll shouldBe 0
}
}
| dnvriend/study-category-theory | cats-test/src/test/scala/com/github/dnvriend/FoldableTest.scala | Scala | apache-2.0 | 1,469 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.collector
import com.twitter.app.App
import com.twitter.conversions.time._
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.{Filter, Service}
import com.twitter.util._
import com.twitter.zipkin.common.Span
import com.twitter.zipkin.conversions.thrift._
import com.twitter.zipkin.thriftscala.{Span => ThriftSpan}
import com.twitter.zipkin.storage.SpanStore
sealed trait AwaitableCloser extends Closable with CloseAwaitably
object SpanConvertingFilter extends Filter[Seq[ThriftSpan], Unit, Seq[Span], Unit] {
def apply(spans: Seq[ThriftSpan], svc: Service[Seq[Span], Unit]): Future[Unit] =
svc(spans.map(_.toSpan))
}
/**
* A basic collector from which to create a server. Your collector will extend this
* and implement `newReceiver` and `newSpanStore`. The base collector glues those two
* together.
*/
trait ZipkinCollectorFactory {
def newReceiver(receive: Seq[ThriftSpan] => Future[Unit], stats: StatsReceiver): SpanReceiver
def newSpanStore(stats: StatsReceiver): SpanStore
// overwrite in the Main trait to add a SpanStore filter to the SpanStore
def spanStoreFilter: Filter[Seq[Span], Unit, Seq[Span], Unit] = Filter.identity[Seq[Span], Unit]
def newCollector(stats: StatsReceiver): AwaitableCloser = new AwaitableCloser {
val store = newSpanStore(stats)
val receiver = newReceiver(SpanConvertingFilter andThen spanStoreFilter andThen store, stats)
def close(deadline: Time): Future[Unit] = closeAwaitably {
Closable.sequence(receiver, store).close(deadline)
}
}
}
/**
* A base collector that inserts a configurable queue between the receiver and store.
*/
trait ZipkinQueuedCollectorFactory extends ZipkinCollectorFactory {
self: App =>
val itemQueueTimeout = flag("zipkin.itemQueue.timeout", 30.seconds, "max amount of time to spend waiting for the processor to complete")
val itemQueueMax = flag("zipkin.itemQueue.maxSize", 500, "max number of span items to buffer")
val itemQueueConcurrency = flag("zipkin.itemQueue.concurrency", 10, "number of concurrent workers to process the write queue")
val itemQueueSleepOnFull = flag("zipkin.itemQueue.sleepOnFull", 1.seconds, "amount of time to sleep when the queue fills up")
override def newCollector(stats: StatsReceiver): AwaitableCloser = new AwaitableCloser {
val store = newSpanStore(stats)
val queue = new ItemQueue[Seq[ThriftSpan], Unit](
itemQueueMax(),
itemQueueConcurrency(),
SpanConvertingFilter andThen spanStoreFilter andThen store,
itemQueueTimeout(),
stats.scope("ItemQueue"))
val receiver = newReceiver(queue.add, stats)
def close(deadline: Time): Future[Unit] = closeAwaitably {
Closable.sequence(receiver, queue, store).close(deadline)
}
}
}
/**
* Builds the receiver, filters and storers with a blocking queue in the middle. The receiver should
* attempt to add an item to the queue. If the queue is at capacity then the thread that is adding
* the element will sleep for a specified time and retry instead of throwing an exception.
*/
trait ZipkinBlockingQueuedCollectorFactory extends ZipkinQueuedCollectorFactory { self: App =>
override def newCollector(stats: StatsReceiver): AwaitableCloser = new AwaitableCloser {
val store = newSpanStore(stats)
val blockingQueue = new BlockingItemQueue[Seq[ThriftSpan], Unit](
itemQueueMax(),
itemQueueConcurrency(),
SpanConvertingFilter andThen spanStoreFilter andThen store,
itemQueueTimeout(),
itemQueueSleepOnFull(),
stats.scope("BlockingItemQueue"))
val receiver = newReceiver(blockingQueue.add, stats)
def close(deadline: Time): Future[Unit] = closeAwaitably {
Closable.sequence(receiver, blockingQueue, store).close(deadline)
}
}
}
| coursera/zipkin | zipkin-collector/src/main/scala/com/twitter/zipkin/collector/ZipkinCollectorFactory.scala | Scala | apache-2.0 | 4,405 |
package yoda.orm
package object jtype {
type JBoolean = java.lang.Boolean
type JInt = java.lang.Integer
type JLong = java.lang.Long
type JDouble = java.lang.Double
}
| nuboat/yoda-orm | src/main/scala/yoda/orm/jtype/jtype.scala | Scala | mit | 180 |
package scala.meta
package internal
package trees
import scala.language.experimental.macros
import scala.annotation.StaticAnnotation
import scala.reflect.macros.whitebox.Context
import scala.collection.mutable.ListBuffer
import org.scalameta.internal.MacroCompat
// @ast is a specialized version of @org.scalameta.adt.leaf for scala.meta ASTs.
class ast extends StaticAnnotation {
def macroTransform(annottees: Any*): Any = macro AstNamerMacros.impl
}
class AstNamerMacros(val c: Context) extends Reflection with CommonNamerMacros {
lazy val u: c.universe.type = c.universe
lazy val mirror = c.mirror
import c.universe._
import Flag._
def impl(annottees: Tree*): Tree =
annottees.transformAnnottees(new ImplTransformer {
override def transformClass(cdef: ClassDef, mdef: ModuleDef): List[ImplDef] = {
val fullName = c.internal.enclosingOwner.fullName + "." + cdef.name.toString
def isQuasi = isQuasiClass(cdef)
val q"$imods class $iname[..$tparams] $ctorMods(...$rawparamss) extends { ..$earlydefns } with ..$iparents { $aself => ..$stats }" =
cdef
// NOTE: For stack traces, we'd like to have short class names, because stack traces print full names anyway.
// However debugging macro expansion errors is much-much easier with full names for Api and Impl classes
// because the typechecker only uses short names in error messages.
// E.g. compare:
// class Impl needs to be abstract, since method withDenot in trait Name
// of type (denot: scala.meta.internal.semantic.Denotation)Impl.this.ThisType is not defined
// and:
// class NameAnonymousImpl needs to be abstract, since method withDenot in trait Name
// of type (denot: scala.meta.internal.semantic.Denotation)NameAnonymousImpl.this.ThisType is not defined
val descriptivePrefix = fullName.stripPrefix("scala.meta.").replace(".", "")
val name = TypeName(descriptivePrefix + "Impl")
val q"$mmods object $mname extends { ..$mearlydefns } with ..$mparents { $mself => ..$mstats }" =
mdef
val bparams1 = ListBuffer[ValDef]() // boilerplate params
val paramss1 = ListBuffer[List[ValDef]]() // payload params
val iself = noSelfType
val self = aself
val istats1 = ListBuffer[Tree]()
val stats1 = ListBuffer[Tree]()
val ianns1 = ListBuffer[Tree]() ++ imods.annotations
def imods1 = imods.mapAnnotations(_ => ianns1.toList)
def mods1 = Modifiers(FINAL, mname.toTypeName, List(SerialVersionUIDAnnotation(1L)))
val iparents1 = ListBuffer[Tree]() ++ iparents
def parents1 = List(tq"$iname")
val mstats1 = ListBuffer[Tree]() ++ mstats
val manns1 = ListBuffer[Tree]() ++ mmods.annotations
def mmods1 = mmods.mapAnnotations(_ => manns1.toList)
// step 1: validate the shape of the class
if (imods.hasFlag(SEALED)) c.abort(cdef.pos, "sealed is redundant for @ast classes")
if (imods.hasFlag(FINAL)) c.abort(cdef.pos, "final is redundant for @ast classes")
if (imods.hasFlag(CASE)) c.abort(cdef.pos, "case is redundant for @ast classes")
if (imods.hasFlag(ABSTRACT)) c.abort(cdef.pos, "@ast classes cannot be abstract")
if (ctorMods.flags != NoFlags)
c.abort(cdef.pos, "@ast classes must define a public primary constructor")
if (rawparamss.isEmpty)
c.abort(cdef.pos, "@leaf classes must define a non-empty parameter list")
// step 2: validate the body of the class
def setterName(vr: ValDef) =
TermName(s"set${vr.name.toString.stripPrefix("_").capitalize}")
def getterName(vr: ValDef) = TermName(s"${vr.name.toString.stripPrefix("_")}")
def binaryCompatMods(vr: ValDef) = {
val getter = q"def ${getterName(vr)}: ${vr.tpt}"
getter.mods.mapAnnotations(_ => vr.mods.annotations)
}
def isBinaryCompatField(vr: ValDef) = vr.mods.annotations.exists {
case q"new binaryCompatField($since)" =>
if (!vr.name.toString.startsWith("_"))
c.abort(vr.pos, "The binaryCompat AST field needs to start with _")
if (!vr.mods.hasFlag(PRIVATE))
c.abort(vr.pos, "The binaryCompat AST field needs to be private")
if (!vr.mods.hasFlag(MUTABLE))
c.abort(vr.pos, "The binaryCompat AST field needs to declared as var")
true
case _ => false
}
val copiesBuilder = List.newBuilder[DefDef]
val importsBuilder = List.newBuilder[Import]
val binaryCompatVarsBuilder = List.newBuilder[ValDef]
val otherDefsBuilder = List.newBuilder[Tree]
val checkFieldsBuilder = List.newBuilder[Tree]
val checkParentsBuilder = List.newBuilder[Tree]
stats.foreach {
case x: Import => importsBuilder += x
case x: DefDef if !isQuasi && x.name == TermName("copy") => copiesBuilder += x
case x: ValDef if isBinaryCompatField(x) => binaryCompatVarsBuilder += x
case x if x.isDef => otherDefsBuilder += x
case q"checkFields($arg)" => checkFieldsBuilder += arg
case x @ q"checkParent($what)" => checkParentsBuilder += x
case x =>
val error =
"only checkFields(...), checkParent(...) and definitions are allowed in @ast classes"
c.abort(x.pos, error)
}
val copies = copiesBuilder.result()
val imports = importsBuilder.result()
val binaryCompatVars = binaryCompatVarsBuilder.result()
val otherDefns = otherDefsBuilder.result()
val fieldChecks = checkFieldsBuilder.result()
val parentChecks = checkParentsBuilder.result()
stats1 ++= otherDefns
val binaryCompatAbstractFields = binaryCompatVars.flatMap { vr: ValDef =>
List(
q"${binaryCompatMods(vr)} def ${getterName(vr)}: ${vr.tpt}",
q"def ${setterName(vr)}(${vr.name} : ${vr.tpt}) : Unit"
)
}
istats1 ++= binaryCompatAbstractFields
val binaryCompatStats = binaryCompatVars.flatMap { vr: ValDef =>
val name = vr.name
val nameString = name.toString
List(
q"def ${getterName(vr)} = this.$name ",
q"""def ${setterName(vr)}($name : ${vr.tpt}) = {
val node = this
$CommonTyperMacrosModule.storeField(this.$name, $name, $nameString)
}""",
vr
)
}
stats1 ++= binaryCompatStats
stats1 ++= imports
// step 3: calculate the parameters of the class
val paramss = rawparamss
// step 4: turn all parameters into vars, create getters and setters
def internalize(name: TermName) = TermName("_" + name.toString)
val fieldParamss = paramss
val fieldParams = fieldParamss.flatten.map(p => (p, p.name.decodedName.toString))
istats1 ++= fieldParams.map({ case (p, _) =>
var getterAnns = List(q"new $AstMetadataModule.astField")
if (p.mods.annotations.exists(_.toString.contains("auxiliary")))
getterAnns :+= q"new $AstMetadataModule.auxiliary"
val getterMods = Modifiers(DEFERRED, typeNames.EMPTY, getterAnns)
q"$getterMods def ${p.name}: ${p.tpt}"
})
paramss1 ++= fieldParamss.map(_.map { case p @ q"$mods val $name: $tpt = $default" =>
val mods1 = mods.mkMutable.unPrivate.unOverride.unDefault
q"$mods1 val ${internalize(p.name)}: $tpt"
})
stats1 ++= fieldParams.map({ case (p, s) =>
val pinternal = internalize(p.name)
val pmods = if (p.mods.hasFlag(OVERRIDE)) Modifiers(OVERRIDE) else NoMods
q"""
$pmods def ${p.name}: ${p.tpt} = {
$CommonTyperMacrosModule.loadField(this.$pinternal, $s)
this.$pinternal
}
"""
})
// step 5: implement the unimplemented methods in InternalTree (part 1)
bparams1 += q"@$TransientAnnotation private[meta] val privatePrototype: $iname"
bparams1 += q"private[meta] val privateParent: $TreeClass"
bparams1 += q"private[meta] val privateOrigin: $OriginClass"
// step 6: implement the unimplemented methods in InternalTree (part 1)
// The purpose of privateCopy is to provide extremely cheap cloning
// in the case when a tree changes its parent (because that happens often in our framework,
// e.g. when we create a quasiquote and then insert it into a bigger quasiquote,
// or when we parse something and build the trees from the ground up).
// In such a situation, we copy all private state verbatim (tokens, denotations, etc)
// and create lazy initializers that will take care of recursively copying the children.
// Compare this with the `copy` method (described below), which additionally flushes the private state.
// This method is private[meta] because the state that it's managing is not supposed to be touched
// by the users of the framework.
val privateCopyBargs = ListBuffer[Tree]()
privateCopyBargs += q"prototype.asInstanceOf[$iname]"
privateCopyBargs += q"parent"
privateCopyBargs += q"origin"
val binaryCompatCopies = binaryCompatVars.collect { case vr: ValDef =>
List(q"newAst.${setterName(vr)}(this.${vr.name});")
}
val privateCopyArgs = paramss.map(
_.map(p => q"$CommonTyperMacrosModule.initField(this.${internalize(p.name)})")
)
val privateCopyParentChecks = {
if (parentChecks.isEmpty) q""
else {
q"""
if (destination != null) {
def checkParent(fn: ($name, $TreeClass, $StringClass) => $BooleanClass): $UnitClass = {
val parentCheckOk = fn(this, parent, destination)
if (!parentCheckOk) {
val parentPrefix = parent.productPrefix
_root_.org.scalameta.invariants.require(parentCheckOk && _root_.org.scalameta.debug(this, parentPrefix, destination))
}
}
..$parentChecks
}
"""
}
}
val privateCopyBody =
q"val newAst = new $name(..$privateCopyBargs)(...$privateCopyArgs); ..$binaryCompatCopies; newAst"
stats1 += q"""
private[meta] def privateCopy(
prototype: $TreeClass = this,
parent: $TreeClass = privateParent,
destination: $StringClass = null,
origin: $OriginClass = privateOrigin): Tree = {
$privateCopyParentChecks
$privateCopyBody
}
"""
// step 7: create the copy method
// The purpose of this method is to provide a facility to change small parts of the tree
// without modifying the other parts, much like the standard case class copy works.
// In such a situation, the tree is going to be recreated.
// NOTE: Can't generate XXX.Quasi.copy, because XXX.Quasi already inherits XXX.copy,
// and there can't be multiple overloaded methods with default parameters.
// Not a big deal though, since XXX.Quasi is an internal class.
if (!isQuasi) {
if (copies.isEmpty) {
val fieldDefaultss = fieldParamss.map(_.map(p => q"this.${p.name}"))
val copyParamss = fieldParamss.zip(fieldDefaultss).map { case (f, d) =>
f.zip(d).map { case (p, default) => q"val ${p.name}: ${p.tpt} = $default" }
}
val copyArgss = fieldParamss.map(_.map(p => q"${p.name}"))
val copyBody = q"$mname.apply(...$copyArgss)"
istats1 += q"def copy(...$copyParamss): $iname"
stats1 += q"""def copy(...$copyParamss): $iname = {val newAst = $copyBody; ..$binaryCompatCopies; newAst}"""
} else {
istats1 ++= copies
}
}
// step 8: create the children method
stats1 += q"def children: $ListClass[$TreeClass] = $CommonTyperMacrosModule.children[$iname, $TreeClass]"
// step 9: generate boilerplate required by the @ast infrastructure
ianns1 += q"new $AstMetadataModule.astClass"
ianns1 += q"new $AdtMetadataModule.leafClass"
manns1 += q"new $AstMetadataModule.astCompanion"
manns1 += q"new $AdtMetadataModule.leafCompanion"
// step 10: generate boilerplate required by the classifier infrastructure
mstats1 ++= mkClassifier(iname)
// step 11: implement Product
val binaryCompatNum = binaryCompatVars.size
val productParamss = rawparamss.map(_.map(_.duplicate))
iparents1 += tq"$ProductClass"
stats1 += q"override def productPrefix: $StringClass = $CommonTyperMacrosModule.productPrefix[$iname]"
stats1 += q"override def productArity: $IntClass = ${productParamss.head.length + binaryCompatNum}"
def patternMatchClauses(
fromField: Int => Tree,
fromBinaryCompatField: (Int, ValDef) => Tree
) = {
val pelClauses = ListBuffer[Tree]()
val fieldsNum = productParamss.head.length
pelClauses ++= 0
.to(fieldsNum - 1)
.map(fromField)
// generate product elements for @binaryCompat fields
pelClauses ++= fieldsNum
.to(fieldsNum + binaryCompatNum)
.zip(binaryCompatVars)
.map { case (i: Int, vr: ValDef) =>
fromBinaryCompatField(i, vr)
}
pelClauses += cq"_ => throw new $IndexOutOfBoundsException(n.toString)"
pelClauses.toList
}
val pelClauses = patternMatchClauses(
i => cq"$i => this.${productParamss.head(i).name}",
{ (i, vr) =>
cq"$i => this.${getterName(vr)}"
}
)
stats1 += q"override def productElement(n: $IntClass): Any = n match { case ..$pelClauses }"
stats1 += q"override def productIterator: $IteratorClass[$AnyClass] = $ScalaRunTimeModule.typedProductIterator(this)"
val productFields = productParamss.head.map(_.name.toString) ++ binaryCompatVars.map {
vr: ValDef => getterName(vr).toString
}
stats1 += q"override def productFields: $ListClass[$StringClass] = _root_.scala.List(..$productFields)"
// step 13a add productElementName for 2.13
if (MacroCompat.productFieldNamesAvailable) {
val penClauses = patternMatchClauses(
i => {
val lit = Literal(Constant(productParamss.head(i).name.toString(): String))
cq"""$i => $lit """
},
{ (i, vr) =>
val lit = Literal(Constant(getterName(vr).toString: String))
cq"""$i => $lit """
}
)
stats1 += q"override def productElementName(n: $IntClass): java.lang.String = n match { case ..$penClauses }"
}
// step 12: generate serialization logic
val fieldInits = fieldParams.map({ case (p, s) =>
q"$CommonTyperMacrosModule.loadField(this.${internalize(p.name)}, $s)"
})
stats1 += q"protected def writeReplace(): $AnyRefClass = { ..$fieldInits; this }"
// step 13: generate Companion.apply
val applyParamss = paramss.map(_.map(_.duplicate))
val internalParamss =
paramss.map(_.map(p => q"@..${p.mods.annotations} val ${p.name}: ${p.tpt}"))
val internalBody = ListBuffer[Tree]()
val internalLocalss = paramss.map(_.map(p => (p.name, internalize(p.name))))
internalBody += q"$CommonTyperMacrosModule.hierarchyCheck[$iname]"
internalBody ++= internalLocalss.flatten.map { case (local, internal) =>
q"$DataTyperMacrosModule.nullCheck($local)"
}
internalBody ++= internalLocalss.flatten.map { case (local, internal) =>
q"$DataTyperMacrosModule.emptyCheck($local)"
}
internalBody ++= imports
internalBody ++= fieldChecks.map { x =>
val fieldCheck = q"_root_.org.scalameta.invariants.require($x)"
var hasErrors = false
object errorChecker extends Traverser {
private val nmeParent = TermName("parent")
override def traverse(tree: Tree): Unit = tree match {
case _: This =>
hasErrors = true; c.error(tree.pos, "cannot refer to this in @ast field checks")
case Ident(`nmeParent`) =>
hasErrors = true
c.error(
tree.pos,
"cannot refer to parent in @ast field checks; use checkParent instead"
)
case _ => super.traverse(tree)
}
}
errorChecker.traverse(fieldCheck)
if (hasErrors) q"()"
else fieldCheck
}
val internalInitCount = 3 // privatePrototype, privateParent, privateOrigin
val internalInitss = 1.to(internalInitCount).map(_ => q"null")
val paramInitss = internalLocalss.map(_.map { case (local, internal) =>
q"$CommonTyperMacrosModule.initParam($local)"
})
internalBody += q"val node = new $name(..$internalInitss)(...$paramInitss)"
internalBody ++= internalLocalss.flatten.map { case (local, internal) =>
q"$CommonTyperMacrosModule.storeField(node.$internal, $local, ${local.toString})"
}
internalBody += q"node"
val internalArgss = paramss.map(_.map(p => q"${p.name}"))
mstats1 += q"""
def apply(...$applyParamss): $iname = {
def internal(...$internalParamss): $iname = {
..$internalBody
}
internal(...$internalArgss)
}
"""
// step 13a: generate additional binary compat Companion.apply
// generate new applies for each new field added
// with field A, B and additional binary compat ones C, D and E, we generate:
// apply(A, B, C), apply(A, B, C, D), apply(A, B, C, D, E)
val sortedBinaryCompatName = binaryCompatVars
.collect { case vr: ValDef => vr }
.sortBy { vr =>
try {
vr.mods.annotations.collectFirst { case q"new binaryCompatField($version)" =>
val since = version.toString().stripPrefix("\"").stripSuffix("\"")
val versions = since.split("\\.").map(_.toInt)
(versions(0), versions(1), versions(2))
}.get
} catch {
case _: Throwable =>
c.abort(
vr.pos,
"binaryCompatField annotation must contain since=\"major.minor.patch\" field"
)
}
}
1.to(binaryCompatNum).foreach { size =>
val fields = sortedBinaryCompatName.take(size)
val paramFields = fields.map(f => q"val ${getterName(f)} : ${f.tpt}")
val params = List(applyParamss.head ++ paramFields) ++ applyParamss.tail
val setters = fields.map { vr =>
q"newAst.${setterName(vr)}(${getterName(vr)});"
}
val checks = fields.flatMap { field =>
List(
q"$DataTyperMacrosModule.nullCheck(${getterName(field)})",
q"$DataTyperMacrosModule.emptyCheck(${getterName(field)})"
)
}
mstats1 += q"""
def apply(...$params): $iname = {
..$checks
val newAst = apply(...$internalArgss);
..$setters
newAst
}
"""
}
// step 14: generate Companion.unapply
val unapplyParamss = rawparamss.map(_.map(_.duplicate))
val unapplyParams = unapplyParamss.head
val needsUnapply = !mstats.exists(stat =>
stat match { case DefDef(_, TermName("unapply"), _, _, _, _) => true; case _ => false }
)
if (needsUnapply) {
if (unapplyParams.length != 0) {
val successTargs = tq"(..${unapplyParams.map(p => p.tpt)})"
val successArgs = q"(..${unapplyParams.map(p => q"x.${p.name}")})"
mstats1 += q"@$InlineAnnotation final def unapply(x: $iname): $OptionClass[$successTargs] = if (x == null) $NoneModule else $SomeModule($successArgs)"
} else {
mstats1 += q"@$InlineAnnotation final def unapply(x: $iname): $BooleanClass = true"
}
}
// step 15: finish codegen for Quasi
if (isQuasi) {
stats1 += q"""
def become[T <: $QuasiClass](implicit ev: $AstInfoClass[T]): T = {
this match {
case $mname(0, tree) =>
ev.quasi(0, tree).withOrigin(this.origin).asInstanceOf[T]
case $mname(1, nested @ $mname(0, tree)) =>
ev.quasi(1, nested.become[T]).withOrigin(this.origin).asInstanceOf[T]
case $mname(2, nested @ $mname(0, tree)) =>
ev.quasi(2, nested.become[T]).withOrigin(this.origin).asInstanceOf[T]
case _ =>
throw new Exception("complex ellipses are not supported yet")
}
}
"""
} else {
mstats1 += mkQuasi(
iname,
iparents,
fieldParamss,
binaryCompatAbstractFields ++ otherDefns,
"name",
"value",
"tpe"
)
}
mstats1 += q"$mods1 class $name[..$tparams] $ctorMods(...${bparams1 +: paramss1}) extends { ..$earlydefns } with ..$parents1 { $self => ..$stats1 }"
val cdef1 = q"$imods1 trait $iname extends ..$iparents1 { $iself => ..$istats1 }"
val mdef1 =
q"$mmods1 object $mname extends { ..$mearlydefns } with ..$mparents { $mself => ..$mstats1 }"
if (c.compilerSettings.contains("-Xprint:typer")) {
println(cdef1); println(mdef1)
}
List(cdef1, mdef1)
}
})
}
| scalameta/scalameta | scalameta/common/shared/src/main/scala/scala/meta/internal/trees/ast.scala | Scala | bsd-3-clause | 22,105 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import scala.collection.JavaConverters._
import scala.language.implicitConversions
import scala.reflect.runtime.universe.{typeTag, TypeTag}
import scala.util.Try
import scala.util.control.NonFatal
import org.apache.spark.annotation.Stable
import org.apache.spark.sql.api.java._
import org.apache.spark.sql.catalyst.ScalaReflection
import org.apache.spark.sql.catalyst.analysis.{Star, UnresolvedFunction}
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans.logical.{BROADCAST, HintInfo, ResolvedHint}
import org.apache.spark.sql.catalyst.util.TimestampFormatter
import org.apache.spark.sql.execution.SparkSqlParser
import org.apache.spark.sql.expressions.{Aggregator, SparkUserDefinedFunction, UserDefinedAggregator, UserDefinedFunction}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
* Commonly used functions available for DataFrame operations. Using functions defined here provides
* a little bit more compile-time safety to make sure the function exists.
*
* Spark also includes more built-in functions that are less common and are not defined here.
* You can still access them (and all the functions defined here) using the `functions.expr()` API
* and calling them through a SQL expression string. You can find the entire list of functions
* at SQL API documentation.
*
* As an example, `isnan` is a function that is defined here. You can use `isnan(col("myCol"))`
* to invoke the `isnan` function. This way the programming language's compiler ensures `isnan`
* exists and is of the proper form. You can also use `expr("isnan(myCol)")` function to invoke the
* same function. In this case, Spark itself will ensure `isnan` exists when it analyzes the query.
*
* `regr_count` is an example of a function that is built-in but not defined here, because it is
* less commonly used. To invoke it, use `expr("regr_count(yCol, xCol)")`.
*
* This function APIs usually have methods with `Column` signature only because it can support not
* only `Column` but also other types such as a native string. The other variants currently exist
* for historical reasons.
*
* @groupname udf_funcs UDF functions
* @groupname agg_funcs Aggregate functions
* @groupname datetime_funcs Date time functions
* @groupname sort_funcs Sorting functions
* @groupname normal_funcs Non-aggregate functions
* @groupname math_funcs Math functions
* @groupname misc_funcs Misc functions
* @groupname window_funcs Window functions
* @groupname string_funcs String functions
* @groupname collection_funcs Collection functions
* @groupname partition_transforms Partition transform functions
* @groupname Ungrouped Support functions for DataFrames
* @since 1.3.0
*/
@Stable
// scalastyle:off
object functions {
// scalastyle:on
// Wraps a raw Catalyst expression in a user-facing [[Column]].
private def withExpr(expr: Expression): Column = Column(expr)
// Wraps an aggregate function in an AggregateExpression (optionally DISTINCT)
// and exposes it as a user-facing [[Column]].
private def withAggregateFunction(
  func: AggregateFunction,
  isDistinct: Boolean = false): Column = {
  Column(func.toAggregateExpression(isDistinct))
}
/**
* Returns a [[Column]] based on the given column name.
*
* @group normal_funcs
* @since 1.3.0
*/
def col(colName: String): Column = Column(colName)
/**
* Returns a [[Column]] based on the given column name. Alias of [[col]].
*
* @group normal_funcs
* @since 1.3.0
*/
def column(colName: String): Column = Column(colName)
/**
* Creates a [[Column]] of literal value.
*
* The passed in object is returned directly if it is already a [[Column]].
* If the object is a Scala Symbol, it is converted into a [[Column]] also.
* Otherwise, a new [[Column]] is created to represent the literal value.
*
* @group normal_funcs
* @since 1.3.0
*/
def lit(literal: Any): Column = typedLit(literal)
/**
 * Creates a [[Column]] of literal value.
 *
 * An argument that is already a [[Column]] is returned unchanged, and a Scala Symbol
 * is converted into a [[ColumnName]]. Any other value is wrapped as a literal [[Column]].
 * The difference between this function and [[lit]] is that this function
 * can handle parameterized scala types e.g.: List, Seq and Map.
 *
 * @group normal_funcs
 * @since 2.2.0
 */
def typedLit[T : TypeTag](literal: T): Column = {
  literal match {
    case alreadyColumn: Column => alreadyColumn
    case symbol: Symbol => new ColumnName(symbol.name)
    case value => Column(Literal.create(value))
  }
}
//////////////////////////////////////////////////////////////////////////////////////////////
// Sort functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Returns a sort expression based on ascending order of the column.
* {{{
* df.sort(asc("dept"), desc("age"))
* }}}
*
* @group sort_funcs
* @since 1.3.0
*/
def asc(columnName: String): Column = Column(columnName).asc
/**
* Returns a sort expression based on ascending order of the column,
* and null values return before non-null values.
* {{{
* df.sort(asc_nulls_first("dept"), desc("age"))
* }}}
*
* @group sort_funcs
* @since 2.1.0
*/
def asc_nulls_first(columnName: String): Column = Column(columnName).asc_nulls_first
/**
* Returns a sort expression based on ascending order of the column,
* and null values appear after non-null values.
* {{{
* df.sort(asc_nulls_last("dept"), desc("age"))
* }}}
*
* @group sort_funcs
* @since 2.1.0
*/
def asc_nulls_last(columnName: String): Column = Column(columnName).asc_nulls_last
/**
* Returns a sort expression based on the descending order of the column.
* {{{
* df.sort(asc("dept"), desc("age"))
* }}}
*
* @group sort_funcs
* @since 1.3.0
*/
def desc(columnName: String): Column = Column(columnName).desc
/**
* Returns a sort expression based on the descending order of the column,
* and null values appear before non-null values.
* {{{
* df.sort(asc("dept"), desc_nulls_first("age"))
* }}}
*
* @group sort_funcs
* @since 2.1.0
*/
def desc_nulls_first(columnName: String): Column = Column(columnName).desc_nulls_first
/**
* Returns a sort expression based on the descending order of the column,
* and null values appear after non-null values.
* {{{
* df.sort(asc("dept"), desc_nulls_last("age"))
* }}}
*
* @group sort_funcs
* @since 2.1.0
*/
def desc_nulls_last(columnName: String): Column = Column(columnName).desc_nulls_last
//////////////////////////////////////////////////////////////////////////////////////////////
// Aggregate functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* @group agg_funcs
* @since 1.3.0
*/
@deprecated("Use approx_count_distinct", "2.1.0")
def approxCountDistinct(e: Column): Column = approx_count_distinct(e)
/**
* @group agg_funcs
* @since 1.3.0
*/
@deprecated("Use approx_count_distinct", "2.1.0")
def approxCountDistinct(columnName: String): Column = approx_count_distinct(columnName)
/**
* @group agg_funcs
* @since 1.3.0
*/
@deprecated("Use approx_count_distinct", "2.1.0")
def approxCountDistinct(e: Column, rsd: Double): Column = approx_count_distinct(e, rsd)
/**
* @group agg_funcs
* @since 1.3.0
*/
@deprecated("Use approx_count_distinct", "2.1.0")
def approxCountDistinct(columnName: String, rsd: Double): Column = {
approx_count_distinct(Column(columnName), rsd)
}
/**
* Aggregate function: returns the approximate number of distinct items in a group.
*
* @group agg_funcs
* @since 2.1.0
*/
def approx_count_distinct(e: Column): Column = withAggregateFunction {
HyperLogLogPlusPlus(e.expr)
}
/**
* Aggregate function: returns the approximate number of distinct items in a group.
*
* @group agg_funcs
* @since 2.1.0
*/
def approx_count_distinct(columnName: String): Column = approx_count_distinct(column(columnName))
/**
* Aggregate function: returns the approximate number of distinct items in a group.
*
* @param rsd maximum estimation error allowed (default = 0.05)
*
* @group agg_funcs
* @since 2.1.0
*/
def approx_count_distinct(e: Column, rsd: Double): Column = withAggregateFunction {
HyperLogLogPlusPlus(e.expr, rsd, 0, 0)
}
/**
* Aggregate function: returns the approximate number of distinct items in a group.
*
* @param rsd maximum estimation error allowed (default = 0.05)
*
* @group agg_funcs
* @since 2.1.0
*/
def approx_count_distinct(columnName: String, rsd: Double): Column = {
approx_count_distinct(Column(columnName), rsd)
}
/**
* Aggregate function: returns the average of the values in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
def avg(e: Column): Column = withAggregateFunction { Average(e.expr) }
/**
* Aggregate function: returns the average of the values in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
def avg(columnName: String): Column = avg(Column(columnName))
/**
* Aggregate function: returns a list of objects with duplicates.
*
* @note The function is non-deterministic because the order of collected results depends
* on the order of the rows which may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 1.6.0
*/
def collect_list(e: Column): Column = withAggregateFunction { CollectList(e.expr) }
/**
* Aggregate function: returns a list of objects with duplicates.
*
* @note The function is non-deterministic because the order of collected results depends
* on the order of the rows which may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 1.6.0
*/
def collect_list(columnName: String): Column = collect_list(Column(columnName))
/**
* Aggregate function: returns a set of objects with duplicate elements eliminated.
*
* @note The function is non-deterministic because the order of collected results depends
* on the order of the rows which may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 1.6.0
*/
def collect_set(e: Column): Column = withAggregateFunction { CollectSet(e.expr) }
/**
* Aggregate function: returns a set of objects with duplicate elements eliminated.
*
* @note The function is non-deterministic because the order of collected results depends
* on the order of the rows which may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 1.6.0
*/
def collect_set(columnName: String): Column = collect_set(Column(columnName))
/**
* Aggregate function: returns the Pearson Correlation Coefficient for two columns.
*
* @group agg_funcs
* @since 1.6.0
*/
def corr(column1: Column, column2: Column): Column = withAggregateFunction {
Corr(column1.expr, column2.expr)
}
/**
* Aggregate function: returns the Pearson Correlation Coefficient for two columns.
*
* @group agg_funcs
* @since 1.6.0
*/
def corr(columnName1: String, columnName2: String): Column = {
corr(Column(columnName1), Column(columnName2))
}
/**
 * Aggregate function: returns the number of items in a group.
 *
 * @group agg_funcs
 * @since 1.3.0
 */
def count(e: Column): Column = withAggregateFunction {
  e.expr match {
    // count(*) is rewritten to count(1), which is semantically equivalent.
    case _: Star => Count(Literal(1))
    case expr => Count(expr)
  }
}
/**
* Aggregate function: returns the number of items in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
def count(columnName: String): TypedColumn[Any, Long] =
count(Column(columnName)).as(ExpressionEncoder[Long]())
/**
* Aggregate function: returns the number of distinct items in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
@scala.annotation.varargs
def countDistinct(expr: Column, exprs: Column*): Column =
// For usage like countDistinct("*"), we should let analyzer expand star and
// resolve function.
Column(UnresolvedFunction("count", (expr +: exprs).map(_.expr), isDistinct = true))
/**
 * Aggregate function: returns the number of distinct items in a group.
 *
 * @group agg_funcs
 * @since 1.3.0
 */
@scala.annotation.varargs
def countDistinct(columnName: String, columnNames: String*): Column = {
  // Resolve every name to a Column, then delegate to the Column-based overload.
  val columns = (columnName +: columnNames).map(name => Column(name))
  countDistinct(columns.head, columns.tail: _*)
}
/**
* Aggregate function: returns the population covariance for two columns.
*
* @group agg_funcs
* @since 2.0.0
*/
def covar_pop(column1: Column, column2: Column): Column = withAggregateFunction {
CovPopulation(column1.expr, column2.expr)
}
/**
* Aggregate function: returns the population covariance for two columns.
*
* @group agg_funcs
* @since 2.0.0
*/
def covar_pop(columnName1: String, columnName2: String): Column = {
covar_pop(Column(columnName1), Column(columnName2))
}
/**
* Aggregate function: returns the sample covariance for two columns.
*
* @group agg_funcs
* @since 2.0.0
*/
def covar_samp(column1: Column, column2: Column): Column = withAggregateFunction {
CovSample(column1.expr, column2.expr)
}
/**
* Aggregate function: returns the sample covariance for two columns.
*
* @group agg_funcs
* @since 2.0.0
*/
def covar_samp(columnName1: String, columnName2: String): Column = {
covar_samp(Column(columnName1), Column(columnName2))
}
/**
* Aggregate function: returns the first value in a group.
*
* The function by default returns the first values it sees. It will return the first non-null
* value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
*
* @note The function is non-deterministic because its result depends on the order of the rows
* which may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 2.0.0
*/
def first(e: Column, ignoreNulls: Boolean): Column = withAggregateFunction {
new First(e.expr, Literal(ignoreNulls))
}
/**
* Aggregate function: returns the first value of a column in a group.
*
* The function by default returns the first values it sees. It will return the first non-null
* value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
*
* @note The function is non-deterministic because its result depends on the order of the rows
* which may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 2.0.0
*/
def first(columnName: String, ignoreNulls: Boolean): Column = {
first(Column(columnName), ignoreNulls)
}
/**
* Aggregate function: returns the first value in a group.
*
* The function by default returns the first values it sees. It will return the first non-null
* value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
*
* @note The function is non-deterministic because its result depends on the order of the rows
* which may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 1.3.0
*/
def first(e: Column): Column = first(e, ignoreNulls = false)
/**
* Aggregate function: returns the first value of a column in a group.
*
* The function by default returns the first values it sees. It will return the first non-null
* value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
*
* @note The function is non-deterministic because its result depends on the order of the rows
* which may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 1.3.0
*/
def first(columnName: String): Column = first(Column(columnName))
/**
* Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
* or not, returns 1 for aggregated or 0 for not aggregated in the result set.
*
* @group agg_funcs
* @since 2.0.0
*/
def grouping(e: Column): Column = Column(Grouping(e.expr))
/**
* Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
* or not, returns 1 for aggregated or 0 for not aggregated in the result set.
*
* @group agg_funcs
* @since 2.0.0
*/
def grouping(columnName: String): Column = grouping(Column(columnName))
/**
 * Aggregate function: returns the level of grouping, equals to
 *
 * {{{
 *   (grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
 * }}}
 *
 * @note The list of columns should match with grouping columns exactly, or empty (means all the
 * grouping columns).
 *
 * @group agg_funcs
 * @since 2.0.0
 */
def grouping_id(cols: Column*): Column = withExpr { GroupingID(cols.map(_.expr)) }
/**
 * Aggregate function: returns the level of grouping, equals to
 *
 * {{{
 *   (grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
 * }}}
 *
 * @note The list of columns should match with grouping columns exactly.
 *
 * @group agg_funcs
 * @since 2.0.0
 */
def grouping_id(colName: String, colNames: String*): Column =
  grouping_id((colName +: colNames).map(Column.apply): _*)
/**
* Aggregate function: returns the kurtosis of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def kurtosis(e: Column): Column = withAggregateFunction { Kurtosis(e.expr) }
/**
* Aggregate function: returns the kurtosis of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def kurtosis(columnName: String): Column = kurtosis(Column(columnName))
/**
* Aggregate function: returns the last value in a group.
*
* The function by default returns the last values it sees. It will return the last non-null
* value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
*
* @note The function is non-deterministic because its result depends on the order of the rows
* which may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 2.0.0
*/
def last(e: Column, ignoreNulls: Boolean): Column = withAggregateFunction {
new Last(e.expr, Literal(ignoreNulls))
}
/**
* Aggregate function: returns the last value of the column in a group.
*
* The function by default returns the last values it sees. It will return the last non-null
* value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
*
* @note The function is non-deterministic because its result depends on the order of the rows
* which may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 2.0.0
*/
def last(columnName: String, ignoreNulls: Boolean): Column = {
last(Column(columnName), ignoreNulls)
}
/**
* Aggregate function: returns the last value in a group.
*
* The function by default returns the last values it sees. It will return the last non-null
* value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
*
* @note The function is non-deterministic because its result depends on the order of the rows
* which may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 1.3.0
*/
def last(e: Column): Column = last(e, ignoreNulls = false)
/**
 * Aggregate function: returns the last value of the column in a group.
 *
 * The function by default returns the last value it sees. It will return the last non-null
 * value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
 *
 * @note The function is non-deterministic because its result depends on the order of the rows
 * which may be non-deterministic after a shuffle.
 *
 * @group agg_funcs
 * @since 1.3.0
 */
def last(columnName: String): Column = last(Column(columnName))
/**
* Aggregate function: returns the maximum value of the expression in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
def max(e: Column): Column = withAggregateFunction { Max(e.expr) }
/**
* Aggregate function: returns the maximum value of the column in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
def max(columnName: String): Column = max(Column(columnName))
/**
* Aggregate function: returns the average of the values in a group.
* Alias for avg.
*
* @group agg_funcs
* @since 1.4.0
*/
def mean(e: Column): Column = avg(e)
/**
* Aggregate function: returns the average of the values in a group.
* Alias for avg.
*
* @group agg_funcs
* @since 1.4.0
*/
def mean(columnName: String): Column = avg(columnName)
/**
* Aggregate function: returns the minimum value of the expression in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
def min(e: Column): Column = withAggregateFunction { Min(e.expr) }
/**
* Aggregate function: returns the minimum value of the column in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
def min(columnName: String): Column = min(Column(columnName))
/**
 * Aggregate function: returns an array of the approximate percentile values
 * of numeric column col at the given percentages.
 *
 * If percentage is an array, each value must be between 0.0 and 1.0.
 * If it is a single floating point value, it must be between 0.0 and 1.0.
 *
 * The accuracy parameter is a positive numeric literal
 * which controls approximation accuracy at the cost of memory.
 * Higher value of accuracy yields better accuracy, 1.0/accuracy
 * is the relative error of the approximation.
 *
 * @group agg_funcs
 * @since 3.1.0
 */
def percentile_approx(e: Column, percentage: Column, accuracy: Column): Column =
  withAggregateFunction {
    new ApproximatePercentile(e.expr, percentage.expr, accuracy.expr)
  }
/**
* Aggregate function: returns the skewness of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def skewness(e: Column): Column = withAggregateFunction { Skewness(e.expr) }
/**
* Aggregate function: returns the skewness of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def skewness(columnName: String): Column = skewness(Column(columnName))
/**
* Aggregate function: alias for `stddev_samp`.
*
* @group agg_funcs
* @since 1.6.0
*/
def stddev(e: Column): Column = withAggregateFunction { StddevSamp(e.expr) }
/**
* Aggregate function: alias for `stddev_samp`.
*
* @group agg_funcs
* @since 1.6.0
*/
def stddev(columnName: String): Column = stddev(Column(columnName))
/**
* Aggregate function: returns the sample standard deviation of
* the expression in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def stddev_samp(e: Column): Column = withAggregateFunction { StddevSamp(e.expr) }
/**
* Aggregate function: returns the sample standard deviation of
* the expression in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def stddev_samp(columnName: String): Column = stddev_samp(Column(columnName))
/**
* Aggregate function: returns the population standard deviation of
* the expression in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def stddev_pop(e: Column): Column = withAggregateFunction { StddevPop(e.expr) }
/**
* Aggregate function: returns the population standard deviation of
* the expression in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def stddev_pop(columnName: String): Column = stddev_pop(Column(columnName))
/**
* Aggregate function: returns the sum of all values in the expression.
*
* @group agg_funcs
* @since 1.3.0
*/
def sum(e: Column): Column = withAggregateFunction { Sum(e.expr) }
/**
* Aggregate function: returns the sum of all values in the given column.
*
* @group agg_funcs
* @since 1.3.0
*/
def sum(columnName: String): Column = sum(Column(columnName))
/**
* Aggregate function: returns the sum of distinct values in the expression.
*
* @group agg_funcs
* @since 1.3.0
*/
def sumDistinct(e: Column): Column = withAggregateFunction(Sum(e.expr), isDistinct = true)
/**
* Aggregate function: returns the sum of distinct values in the expression.
*
* @group agg_funcs
* @since 1.3.0
*/
def sumDistinct(columnName: String): Column = sumDistinct(Column(columnName))
/**
* Aggregate function: alias for `var_samp`.
*
* @group agg_funcs
* @since 1.6.0
*/
def variance(e: Column): Column = withAggregateFunction { VarianceSamp(e.expr) }
/**
* Aggregate function: alias for `var_samp`.
*
* @group agg_funcs
* @since 1.6.0
*/
def variance(columnName: String): Column = variance(Column(columnName))
/**
* Aggregate function: returns the unbiased variance of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def var_samp(e: Column): Column = withAggregateFunction { VarianceSamp(e.expr) }
/**
* Aggregate function: returns the unbiased variance of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def var_samp(columnName: String): Column = var_samp(Column(columnName))
/**
* Aggregate function: returns the population variance of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def var_pop(e: Column): Column = withAggregateFunction { VariancePop(e.expr) }
/**
* Aggregate function: returns the population variance of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def var_pop(columnName: String): Column = var_pop(Column(columnName))
//////////////////////////////////////////////////////////////////////////////////////////////
// Window functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Window function: returns the cumulative distribution of values within a window partition,
* i.e. the fraction of rows that are below the current row.
*
* {{{
* N = total number of rows in the partition
* cumeDist(x) = number of values before (and including) x / N
* }}}
*
* @group window_funcs
* @since 1.6.0
*/
def cume_dist(): Column = withExpr { new CumeDist }
/**
 * Window function: returns the rank of rows within a window partition, without any gaps.
 *
 * The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
 * sequence when there are ties. That is, if you were ranking a competition using dense_rank
 * and had three people tie for second place, you would say that all three were in second
 * place and that the next person came in third. Rank would give you sequential numbers, so
 * the person that came in third place (after the ties) would register as coming in fifth.
 *
 * This is equivalent to the DENSE_RANK function in SQL.
 *
 * @group window_funcs
 * @since 1.6.0
 */
def dense_rank(): Column = withExpr { new DenseRank }
/**
* Window function: returns the value that is `offset` rows before the current row, and
* `null` if there is less than `offset` rows before the current row. For example,
* an `offset` of one will return the previous row at any given point in the window partition.
*
* This is equivalent to the LAG function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lag(e: Column, offset: Int): Column = lag(e, offset, null)
/**
* Window function: returns the value that is `offset` rows before the current row, and
* `null` if there is less than `offset` rows before the current row. For example,
* an `offset` of one will return the previous row at any given point in the window partition.
*
* This is equivalent to the LAG function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lag(columnName: String, offset: Int): Column = lag(columnName, offset, null)
/**
* Window function: returns the value that is `offset` rows before the current row, and
* `defaultValue` if there is less than `offset` rows before the current row. For example,
* an `offset` of one will return the previous row at any given point in the window partition.
*
* This is equivalent to the LAG function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lag(columnName: String, offset: Int, defaultValue: Any): Column = {
lag(Column(columnName), offset, defaultValue)
}
/**
* Window function: returns the value that is `offset` rows before the current row, and
* `defaultValue` if there is less than `offset` rows before the current row. For example,
* an `offset` of one will return the previous row at any given point in the window partition.
*
* This is equivalent to the LAG function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lag(e: Column, offset: Int, defaultValue: Any): Column = withExpr {
Lag(e.expr, Literal(offset), Literal(defaultValue))
}
/**
* Window function: returns the value that is `offset` rows after the current row, and
* `null` if there is less than `offset` rows after the current row. For example,
* an `offset` of one will return the next row at any given point in the window partition.
*
* This is equivalent to the LEAD function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lead(columnName: String, offset: Int): Column = { lead(columnName, offset, null) }
/**
* Window function: returns the value that is `offset` rows after the current row, and
* `null` if there is less than `offset` rows after the current row. For example,
* an `offset` of one will return the next row at any given point in the window partition.
*
* This is equivalent to the LEAD function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lead(e: Column, offset: Int): Column = { lead(e, offset, null) }
/**
* Window function: returns the value that is `offset` rows after the current row, and
* `defaultValue` if there is less than `offset` rows after the current row. For example,
* an `offset` of one will return the next row at any given point in the window partition.
*
* This is equivalent to the LEAD function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lead(columnName: String, offset: Int, defaultValue: Any): Column = {
lead(Column(columnName), offset, defaultValue)
}
/**
* Window function: returns the value that is `offset` rows after the current row, and
* `defaultValue` if there is less than `offset` rows after the current row. For example,
* an `offset` of one will return the next row at any given point in the window partition.
*
* This is equivalent to the LEAD function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lead(e: Column, offset: Int, defaultValue: Any): Column = withExpr {
Lead(e.expr, Literal(offset), Literal(defaultValue))
}
/**
* Window function: returns the ntile group id (from 1 to `n` inclusive) in an ordered window
* partition. For example, if `n` is 4, the first quarter of the rows will get value 1, the second
* quarter will get 2, the third quarter will get 3, and the last quarter will get 4.
*
* This is equivalent to the NTILE function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def ntile(n: Int): Column = withExpr { new NTile(Literal(n)) }
/**
* Window function: returns the relative rank (i.e. percentile) of rows within a window partition.
*
* This is computed by:
* {{{
* (rank of row in its partition - 1) / (number of rows in the partition - 1)
* }}}
*
* This is equivalent to the PERCENT_RANK function in SQL.
*
* @group window_funcs
* @since 1.6.0
*/
def percent_rank(): Column = withExpr { new PercentRank }
/**
 * Window function: returns the rank of rows within a window partition.
 *
 * The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
 * sequence when there are ties. That is, if you were ranking a competition using dense_rank
 * and had three people tie for second place, you would say that all three were in second
 * place and that the next person came in third. Rank would give you sequential numbers, so
 * the person that came in third place (after the ties) would register as coming in fifth.
 *
 * This is equivalent to the RANK function in SQL.
 *
 * @group window_funcs
 * @since 1.4.0
 */
def rank(): Column = withExpr { new Rank }
/**
* Window function: returns a sequential number starting at 1 within a window partition.
*
* @group window_funcs
* @since 1.6.0
*/
def row_number(): Column = withExpr { RowNumber() }
//////////////////////////////////////////////////////////////////////////////////////////////
// Non-aggregate functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Creates a new array column. The input columns must all have the same data type.
*
* @group normal_funcs
* @since 1.4.0
*/
@scala.annotation.varargs
def array(cols: Column*): Column = withExpr { CreateArray(cols.map(_.expr)) }
/**
 * Creates a new array column. The input columns must all have the same data type.
 *
 * @group normal_funcs
 * @since 1.4.0
 */
@scala.annotation.varargs
def array(colName: String, colNames: String*): Column = {
  val allNames = Seq(colName) ++ colNames
  array(allNames.map(col): _*)
}
/**
* Creates a new map column. The input columns must be grouped as key-value pairs, e.g.
* (key1, value1, key2, value2, ...). The key columns must all have the same data type, and can't
* be null. The value columns must all have the same data type.
*
* @group normal_funcs
* @since 2.0.0
*/
@scala.annotation.varargs
def map(cols: Column*): Column = withExpr { CreateMap(cols.map(_.expr)) }
/**
* Creates a new map column. The array in the first column is used for keys. The array in the
* second column is used for values. All elements in the array for key should not be null.
*
* @group normal_funcs
* @since 2.4.0
*/
def map_from_arrays(keys: Column, values: Column): Column = withExpr {
MapFromArrays(keys.expr, values.expr)
}
/**
 * Marks a DataFrame as small enough for use in broadcast joins.
 *
 * The following example marks the right DataFrame for broadcast hash join using `joinKey`.
 * {{{
 *   // left and right are DataFrames
 *   left.join(broadcast(right), "joinKey")
 * }}}
 *
 * @group normal_funcs
 * @since 1.5.0
 */
def broadcast[T](df: Dataset[T]): Dataset[T] = {
  // Attach a BROADCAST hint to this side's logical plan; the planner reads the
  // ResolvedHint and prefers a broadcast join when this Dataset appears in a join.
  Dataset[T](df.sparkSession,
    ResolvedHint(df.logicalPlan, HintInfo(strategy = Some(BROADCAST))))(df.exprEnc)
}
/**
* Returns the first column that is not null, or null if all inputs are null.
*
* For example, `coalesce(a, b, c)` will return a if a is not null,
* or b if a is null and b is not null, or c if both a and b are null but c is not null.
*
* @group normal_funcs
* @since 1.3.0
*/
@scala.annotation.varargs
def coalesce(e: Column*): Column = withExpr { Coalesce(e.map(_.expr)) }
/**
* Creates a string column for the file name of the current Spark task.
*
* @group normal_funcs
* @since 1.6.0
*/
def input_file_name(): Column = withExpr { InputFileName() }
/**
* Return true iff the column is NaN.
*
* @group normal_funcs
* @since 1.6.0
*/
def isnan(e: Column): Column = withExpr { IsNaN(e.expr) }
/**
* Return true iff the column is null.
*
* @group normal_funcs
* @since 1.6.0
*/
def isnull(e: Column): Column = withExpr { IsNull(e.expr) }
/**
 * Column of monotonically increasing 64-bit integers: guaranteed increasing and
 * unique, but not consecutive. The partition ID occupies the upper 31 bits and
 * the per-partition record number the lower 33 bits, which assumes fewer than
 * 1 billion partitions and fewer than 8 billion records per partition.
 *
 * For example, a `DataFrame` with two partitions of 3 records each produces
 * 0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
 *
 * @group normal_funcs
 * @since 1.4.0
 */
@deprecated("Use monotonically_increasing_id()", "2.0.0")
def monotonicallyIncreasingId(): Column = monotonically_increasing_id()
/**
 * Column of monotonically increasing 64-bit integers: guaranteed increasing and
 * unique, but not consecutive. The partition ID occupies the upper 31 bits and
 * the per-partition record number the lower 33 bits, which assumes fewer than
 * 1 billion partitions and fewer than 8 billion records per partition.
 *
 * For example, a `DataFrame` with two partitions of 3 records each produces
 * 0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
 *
 * @group normal_funcs
 * @since 1.6.0
 */
def monotonically_increasing_id(): Column = withExpr(MonotonicallyIncreasingID())
/**
 * col1 when it is not NaN, otherwise col2. Both inputs should be floating point
 * columns (DoubleType or FloatType).
 *
 * @group normal_funcs
 * @since 1.5.0
 */
def nanvl(col1: Column, col2: Column): Column = withExpr(NaNvl(col1.expr, col2.expr))
/**
 * Unary minus, i.e. negate the expression.
 * {{{
 *   // Scala: df.select( -df("amount") )
 *   // Java:  df.select( negate(df.col("amount")) );
 * }}}
 *
 * @group normal_funcs
 * @since 1.3.0
 */
def negate(e: Column): Column = -e
/**
 * Boolean inversion, i.e. NOT.
 * {{{
 *   // Scala: df.filter( !df("isActive") )
 *   // Java:  df.filter( not(df.col("isActive")) );
 * }}}
 *
 * @group normal_funcs
 * @since 1.3.0
 */
def not(e: Column): Column = !e
/**
 * Column of i.i.d. samples uniformly distributed in [0.0, 1.0), seeded.
 *
 * @note The function is non-deterministic in general case.
 *
 * @group normal_funcs
 * @since 1.4.0
 */
def rand(seed: Long): Column = withExpr(Rand(seed))
/**
 * Column of i.i.d. samples uniformly distributed in [0.0, 1.0), randomly seeded.
 *
 * @note The function is non-deterministic in general case.
 *
 * @group normal_funcs
 * @since 1.4.0
 */
def rand(): Column = rand(Utils.random.nextLong)
/**
 * Column of i.i.d. samples from the standard normal distribution, seeded.
 *
 * @note The function is non-deterministic in general case.
 *
 * @group normal_funcs
 * @since 1.4.0
 */
def randn(seed: Long): Column = withExpr(Randn(seed))
/**
 * Column of i.i.d. samples from the standard normal distribution, randomly seeded.
 *
 * @note The function is non-deterministic in general case.
 *
 * @group normal_funcs
 * @since 1.4.0
 */
def randn(): Column = randn(Utils.random.nextLong)
/**
 * Partition ID of the current row.
 *
 * @note This is non-deterministic because it depends on data partitioning and task scheduling.
 *
 * @group normal_funcs
 * @since 1.6.0
 */
def spark_partition_id(): Column = withExpr(SparkPartitionID())
/**
 * Square root of the given column.
 *
 * @group math_funcs
 * @since 1.3.0
 */
def sqrt(e: Column): Column = withExpr(Sqrt(e.expr))
/**
 * Square root of the named column.
 *
 * @group math_funcs
 * @since 1.5.0
 */
def sqrt(colName: String): Column = sqrt(Column(colName))
/**
 * Creates a new struct column. If an input column comes from a `DataFrame`, or
 * is a named (aliased) derived expression, that name becomes the StructField's
 * name; otherwise the StructField name is auto-generated as `col` with a
 * suffix `index + 1`, i.e. col1, col2, col3, ...
 *
 * @group normal_funcs
 * @since 1.4.0
 */
@scala.annotation.varargs
def struct(cols: Column*): Column = withExpr(CreateStruct.create(cols.map(_.expr)))
/**
 * Creates a new struct column composed of the named input columns.
 *
 * @group normal_funcs
 * @since 1.4.0
 */
@scala.annotation.varargs
def struct(colName: String, colNames: String*): Column =
  struct((colName +: colNames).map(col): _*)
/**
 * Evaluates a list of conditions and returns one of multiple possible result
 * expressions. If otherwise is not defined at the end, null is returned for
 * unmatched conditions.
 *
 * {{{
 *   // Example: encoding gender string column into integer.
 *   // Scala:
 *   people.select(when(people("gender") === "male", 0)
 *     .when(people("gender") === "female", 1)
 *     .otherwise(2))
 *
 *   // Java:
 *   people.select(when(col("gender").equalTo("male"), 0)
 *     .when(col("gender").equalTo("female"), 1)
 *     .otherwise(2))
 * }}}
 *
 * @group normal_funcs
 * @since 1.4.0
 */
def when(condition: Column, value: Any): Column = withExpr {
  CaseWhen((condition.expr, lit(value).expr) :: Nil)
}
/**
 * Bitwise NOT (~) of a number.
 *
 * @group normal_funcs
 * @since 1.4.0
 */
def bitwiseNOT(e: Column): Column = withExpr(BitwiseNot(e.expr))
/**
 * Parses the expression string into the column that it represents, similar to
 * [[Dataset#selectExpr]].
 * {{{
 *   // get the number of words of each length
 *   df.groupBy(expr("length(word)")).count()
 * }}}
 *
 * Uses the active session's SQL parser when one exists; otherwise falls back
 * to a standalone parser with default configuration.
 *
 * @group normal_funcs
 */
def expr(expr: String): Column = {
  val parser = SparkSession.getActiveSession
    .map(_.sessionState.sqlParser)
    .getOrElse(new SparkSqlParser(new SQLConf))
  Column(parser.parseExpression(expr))
}
//////////////////////////////////////////////////////////////////////////////////////////////
// Math Functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Absolute value of a numeric column.
 *
 * @group math_funcs
 * @since 1.3.0
 */
def abs(e: Column): Column = withExpr(Abs(e.expr))
/**
 * @return inverse cosine of `e` in radians, as if computed by `java.lang.Math.acos`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def acos(e: Column): Column = withExpr(Acos(e.expr))
/**
 * @return inverse cosine of `columnName`, as if computed by `java.lang.Math.acos`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def acos(columnName: String): Column = acos(Column(columnName))
/**
 * @return inverse sine of `e` in radians, as if computed by `java.lang.Math.asin`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def asin(e: Column): Column = withExpr(Asin(e.expr))
/**
 * @return inverse sine of `columnName`, as if computed by `java.lang.Math.asin`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def asin(columnName: String): Column = asin(Column(columnName))
/**
 * @return inverse tangent of `e`, as if computed by `java.lang.Math.atan`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def atan(e: Column): Column = withExpr(Atan(e.expr))
/**
 * @return inverse tangent of `columnName`, as if computed by `java.lang.Math.atan`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def atan(columnName: String): Column = atan(Column(columnName))
/**
 * @param y coordinate on y-axis
 * @param x coordinate on x-axis
 * @return the <i>theta</i> component of the point (<i>r</i>, <i>theta</i>) in
 *         polar coordinates that corresponds to the point (<i>x</i>, <i>y</i>)
 *         in Cartesian coordinates, as if computed by `java.lang.Math.atan2`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def atan2(y: Column, x: Column): Column = withExpr(Atan2(y.expr, x.expr))
/**
 * Variant of [[atan2(y:org\.apache\.spark\.sql\.Column,x:org\.apache\.spark\.sql\.Column)* atan2]]
 * taking the x coordinate as a column name.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def atan2(y: Column, xName: String): Column = atan2(y, Column(xName))
/**
 * Variant taking the y coordinate as a column name.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def atan2(yName: String, x: Column): Column = atan2(Column(yName), x)
/**
 * Variant taking both coordinates as column names.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def atan2(yName: String, xName: String): Column = atan2(Column(yName), Column(xName))
/**
 * Variant taking the x coordinate as a double literal.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def atan2(y: Column, xValue: Double): Column = atan2(y, lit(xValue))
/**
 * Variant taking a column name for y and a double literal for x.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def atan2(yName: String, xValue: Double): Column = atan2(Column(yName), xValue)
/**
 * Variant taking the y coordinate as a double literal.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def atan2(yValue: Double, x: Column): Column = atan2(lit(yValue), x)
/**
 * Variant taking a double literal for y and a column name for x.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def atan2(yValue: Double, xName: String): Column = atan2(yValue, Column(xName))
/**
 * String representation of the binary value of the given long column.
 * For example, bin("12") returns "1100".
 *
 * @group math_funcs
 * @since 1.5.0
 */
def bin(e: Column): Column = withExpr(Bin(e.expr))
/**
 * String representation of the binary value of the named long column.
 * For example, bin("12") returns "1100".
 *
 * @group math_funcs
 * @since 1.5.0
 */
def bin(columnName: String): Column = bin(Column(columnName))
/**
 * Cube-root of the given value.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def cbrt(e: Column): Column = withExpr(Cbrt(e.expr))
/**
 * Cube-root of the named column.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def cbrt(columnName: String): Column = cbrt(Column(columnName))
/**
 * Ceiling of the given value.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def ceil(e: Column): Column = withExpr(Ceil(e.expr))
/**
 * Ceiling of the named column.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def ceil(columnName: String): Column = ceil(Column(columnName))
/**
 * Converts a number in a string column from one base to another.
 *
 * @group math_funcs
 * @since 1.5.0
 */
def conv(num: Column, fromBase: Int, toBase: Int): Column = withExpr {
  Conv(num.expr, lit(fromBase).expr, lit(toBase).expr)
}
/**
 * @param e angle in radians
 * @return cosine of the angle, as if computed by `java.lang.Math.cos`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def cos(e: Column): Column = withExpr(Cos(e.expr))
/**
 * @param columnName angle in radians
 * @return cosine of the angle, as if computed by `java.lang.Math.cos`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def cos(columnName: String): Column = cos(Column(columnName))
/**
 * @param e hyperbolic angle
 * @return hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def cosh(e: Column): Column = withExpr(Cosh(e.expr))
/**
 * @param columnName hyperbolic angle
 * @return hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def cosh(columnName: String): Column = cosh(Column(columnName))
/**
 * Exponential of the given value.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def exp(e: Column): Column = withExpr(Exp(e.expr))
/**
 * Exponential of the named column.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def exp(columnName: String): Column = exp(Column(columnName))
/**
 * Exponential of the given value minus one.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def expm1(e: Column): Column = withExpr(Expm1(e.expr))
/**
 * Exponential of the named column minus one.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def expm1(columnName: String): Column = expm1(Column(columnName))
/**
 * Factorial of the given value.
 *
 * @group math_funcs
 * @since 1.5.0
 */
def factorial(e: Column): Column = withExpr(Factorial(e.expr))
/**
 * Floor of the given value.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def floor(e: Column): Column = withExpr(Floor(e.expr))
/**
 * Floor of the named column.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def floor(columnName: String): Column = floor(Column(columnName))
/**
 * Greatest value of the list of values, skipping null values. This function
 * takes at least 2 parameters. It will return null iff all parameters are null.
 *
 * @group normal_funcs
 * @since 1.5.0
 */
@scala.annotation.varargs
def greatest(exprs: Column*): Column = withExpr(Greatest(exprs.map(_.expr)))
/**
 * Greatest value of the list of column names, skipping null values. This function
 * takes at least 2 parameters. It will return null iff all parameters are null.
 *
 * @group normal_funcs
 * @since 1.5.0
 */
@scala.annotation.varargs
def greatest(columnName: String, columnNames: String*): Column =
  greatest((columnName +: columnNames).map(Column.apply): _*)
/**
 * Hex value of the given column.
 *
 * @group math_funcs
 * @since 1.5.0
 */
def hex(column: Column): Column = withExpr(Hex(column.expr))
/**
 * Inverse of hex. Interprets each pair of characters as a hexadecimal number
 * and converts to the byte representation of number.
 *
 * @group math_funcs
 * @since 1.5.0
 */
def unhex(column: Column): Column = withExpr(Unhex(column.expr))
/**
 * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def hypot(l: Column, r: Column): Column = withExpr(Hypot(l.expr, r.expr))
/**
 * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def hypot(l: Column, rightName: String): Column = hypot(l, Column(rightName))
/**
 * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def hypot(leftName: String, r: Column): Column = hypot(Column(leftName), r)
/**
 * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def hypot(leftName: String, rightName: String): Column =
  hypot(Column(leftName), Column(rightName))
/**
 * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def hypot(l: Column, r: Double): Column = hypot(l, lit(r))
/**
 * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def hypot(leftName: String, r: Double): Column = hypot(Column(leftName), r)
/**
 * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def hypot(l: Double, r: Column): Column = hypot(lit(l), r)
/**
 * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def hypot(l: Double, rightName: String): Column = hypot(l, Column(rightName))
/**
 * Least value of the list of values, skipping null values. This function
 * takes at least 2 parameters. It will return null iff all parameters are null.
 *
 * @group normal_funcs
 * @since 1.5.0
 */
@scala.annotation.varargs
def least(exprs: Column*): Column = withExpr(Least(exprs.map(_.expr)))
/**
 * Least value of the list of column names, skipping null values. This function
 * takes at least 2 parameters. It will return null iff all parameters are null.
 *
 * @group normal_funcs
 * @since 1.5.0
 */
@scala.annotation.varargs
def least(columnName: String, columnNames: String*): Column =
  least((columnName +: columnNames).map(Column.apply): _*)
/**
 * Natural logarithm of the given value.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def log(e: Column): Column = withExpr(Log(e.expr))
/**
 * Natural logarithm of the named column.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def log(columnName: String): Column = log(Column(columnName))
/**
 * First argument-base logarithm of the second argument.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def log(base: Double, a: Column): Column = withExpr(Logarithm(lit(base).expr, a.expr))
/**
 * First argument-base logarithm of the named column.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def log(base: Double, columnName: String): Column = log(base, Column(columnName))
/**
 * Base-10 logarithm of the given value.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def log10(e: Column): Column = withExpr(Log10(e.expr))
/**
 * Base-10 logarithm of the named column.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def log10(columnName: String): Column = log10(Column(columnName))
/**
 * Natural logarithm of the given value plus one.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def log1p(e: Column): Column = withExpr(Log1p(e.expr))
/**
 * Natural logarithm of the named column plus one.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def log1p(columnName: String): Column = log1p(Column(columnName))
/**
 * Base-2 logarithm of the given column.
 *
 * @group math_funcs
 * @since 1.5.0
 */
def log2(expr: Column): Column = withExpr(Log2(expr.expr))
/**
 * Base-2 logarithm of the named column.
 *
 * @group math_funcs
 * @since 1.5.0
 */
def log2(columnName: String): Column = log2(Column(columnName))
/**
 * Value of the first argument raised to the power of the second argument.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def pow(l: Column, r: Column): Column = withExpr(Pow(l.expr, r.expr))
/**
 * Value of the first argument raised to the power of the second argument.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def pow(l: Column, rightName: String): Column = pow(l, Column(rightName))
/**
 * Value of the first argument raised to the power of the second argument.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def pow(leftName: String, r: Column): Column = pow(Column(leftName), r)
/**
 * Value of the first argument raised to the power of the second argument.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def pow(leftName: String, rightName: String): Column = pow(Column(leftName), Column(rightName))
/**
 * Value of the first argument raised to the power of the second argument.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def pow(l: Column, r: Double): Column = pow(l, lit(r))
/**
 * Value of the first argument raised to the power of the second argument.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def pow(leftName: String, r: Double): Column = pow(Column(leftName), r)
/**
 * Value of the first argument raised to the power of the second argument.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def pow(l: Double, r: Column): Column = pow(lit(l), r)
/**
 * Value of the first argument raised to the power of the second argument.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def pow(l: Double, rightName: String): Column = pow(l, Column(rightName))
/**
 * Positive value of dividend mod divisor.
 *
 * @group math_funcs
 * @since 1.5.0
 */
def pmod(dividend: Column, divisor: Column): Column = withExpr {
  Pmod(dividend.expr, divisor.expr)
}
/**
 * Double value that is closest in value to the argument and is equal to a
 * mathematical integer.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def rint(e: Column): Column = withExpr(Rint(e.expr))
/**
 * Double value that is closest in value to the named column's value and is
 * equal to a mathematical integer.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def rint(columnName: String): Column = rint(Column(columnName))
/**
 * Value of the column `e` rounded to 0 decimal places with HALF_UP round mode.
 *
 * @group math_funcs
 * @since 1.5.0
 */
def round(e: Column): Column = round(e, 0)
/**
 * Rounds `e` to `scale` decimal places with HALF_UP round mode if `scale` is
 * greater than or equal to 0, or at the integral part when `scale` is less than 0.
 *
 * @group math_funcs
 * @since 1.5.0
 */
def round(e: Column, scale: Int): Column = withExpr(Round(e.expr, Literal(scale)))
/**
 * Value of the column `e` rounded to 0 decimal places with HALF_EVEN round mode.
 *
 * @group math_funcs
 * @since 2.0.0
 */
def bround(e: Column): Column = bround(e, 0)
/**
 * Rounds `e` to `scale` decimal places with HALF_EVEN round mode if `scale` is
 * greater than or equal to 0, or at the integral part when `scale` is less than 0.
 *
 * @group math_funcs
 * @since 2.0.0
 */
def bround(e: Column, scale: Int): Column = withExpr(BRound(e.expr, Literal(scale)))
/**
 * Shifts the given value numBits left. Returns a long value for a long input,
 * otherwise an integer value.
 *
 * @group math_funcs
 * @since 1.5.0
 */
def shiftLeft(e: Column, numBits: Int): Column = withExpr {
  ShiftLeft(e.expr, lit(numBits).expr)
}
/**
 * (Signed) shifts the given value numBits right. Returns a long value for a
 * long input, otherwise an integer value.
 *
 * @group math_funcs
 * @since 1.5.0
 */
def shiftRight(e: Column, numBits: Int): Column = withExpr {
  ShiftRight(e.expr, lit(numBits).expr)
}
/**
 * Unsigned shifts the given value numBits right. Returns a long value for a
 * long input, otherwise an integer value.
 *
 * @group math_funcs
 * @since 1.5.0
 */
def shiftRightUnsigned(e: Column, numBits: Int): Column = withExpr {
  ShiftRightUnsigned(e.expr, lit(numBits).expr)
}
/**
 * Signum of the given value.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def signum(e: Column): Column = withExpr(Signum(e.expr))
/**
 * Signum of the named column.
 *
 * @group math_funcs
 * @since 1.4.0
 */
def signum(columnName: String): Column = signum(Column(columnName))
/**
 * @param e angle in radians
 * @return sine of the angle, as if computed by `java.lang.Math.sin`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def sin(e: Column): Column = withExpr(Sin(e.expr))
/**
 * @param columnName angle in radians
 * @return sine of the angle, as if computed by `java.lang.Math.sin`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def sin(columnName: String): Column = sin(Column(columnName))
/**
 * @param e hyperbolic angle
 * @return hyperbolic sine of the given value, as if computed by `java.lang.Math.sinh`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def sinh(e: Column): Column = withExpr(Sinh(e.expr))
/**
 * @param columnName hyperbolic angle
 * @return hyperbolic sine of the given value, as if computed by `java.lang.Math.sinh`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def sinh(columnName: String): Column = sinh(Column(columnName))
/**
 * @param e angle in radians
 * @return tangent of the given value, as if computed by `java.lang.Math.tan`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def tan(e: Column): Column = withExpr(Tan(e.expr))
/**
 * @param columnName angle in radians
 * @return tangent of the given value, as if computed by `java.lang.Math.tan`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def tan(columnName: String): Column = tan(Column(columnName))
/**
 * @param e hyperbolic angle
 * @return hyperbolic tangent of the given value, as if computed by `java.lang.Math.tanh`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def tanh(e: Column): Column = withExpr(Tanh(e.expr))
/**
 * @param columnName hyperbolic angle
 * @return hyperbolic tangent of the given value, as if computed by `java.lang.Math.tanh`
 *
 * @group math_funcs
 * @since 1.4.0
 */
def tanh(columnName: String): Column = tanh(Column(columnName))
/**
 * @group math_funcs
 * @since 1.4.0
 */
@deprecated("Use degrees", "2.1.0")
def toDegrees(e: Column): Column = degrees(e)
/**
 * @group math_funcs
 * @since 1.4.0
 */
@deprecated("Use degrees", "2.1.0")
def toDegrees(columnName: String): Column = degrees(Column(columnName))
/**
 * Converts an angle measured in radians to an approximately equivalent angle
 * measured in degrees.
 *
 * @param e angle in radians
 * @return angle in degrees, as if computed by `java.lang.Math.toDegrees`
 *
 * @group math_funcs
 * @since 2.1.0
 */
def degrees(e: Column): Column = withExpr(ToDegrees(e.expr))
/**
 * Converts an angle measured in radians to an approximately equivalent angle
 * measured in degrees.
 *
 * @param columnName angle in radians
 * @return angle in degrees, as if computed by `java.lang.Math.toDegrees`
 *
 * @group math_funcs
 * @since 2.1.0
 */
def degrees(columnName: String): Column = degrees(Column(columnName))
/**
 * @group math_funcs
 * @since 1.4.0
 */
@deprecated("Use radians", "2.1.0")
def toRadians(e: Column): Column = radians(e)
/**
 * @group math_funcs
 * @since 1.4.0
 */
@deprecated("Use radians", "2.1.0")
def toRadians(columnName: String): Column = radians(Column(columnName))
/**
 * Converts an angle measured in degrees to an approximately equivalent angle
 * measured in radians.
 *
 * @param e angle in degrees
 * @return angle in radians, as if computed by `java.lang.Math.toRadians`
 *
 * @group math_funcs
 * @since 2.1.0
 */
def radians(e: Column): Column = withExpr(ToRadians(e.expr))
/**
 * Converts an angle measured in degrees to an approximately equivalent angle
 * measured in radians.
 *
 * @param columnName angle in degrees
 * @return angle in radians, as if computed by `java.lang.Math.toRadians`
 *
 * @group math_funcs
 * @since 2.1.0
 */
def radians(columnName: String): Column = radians(Column(columnName))
//////////////////////////////////////////////////////////////////////////////////////////////
// Misc functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
 * MD5 digest of a binary column, returned as a 32 character hex string.
 *
 * @group misc_funcs
 * @since 1.5.0
 */
def md5(e: Column): Column = withExpr(Md5(e.expr))
/**
 * SHA-1 digest of a binary column, returned as a 40 character hex string.
 *
 * @group misc_funcs
 * @since 1.5.0
 */
def sha1(e: Column): Column = withExpr(Sha1(e.expr))
/**
 * SHA-2 family of hash functions of a binary column, returned as a hex string.
 *
 * @param e column to compute SHA-2 on.
 * @param numBits one of 224, 256, 384, or 512.
 *
 * @group misc_funcs
 * @since 1.5.0
 */
def sha2(e: Column, numBits: Int): Column = {
  // 0 is also accepted by the underlying expression.
  require(Seq(0, 224, 256, 384, 512).contains(numBits),
    s"numBits $numBits is not in the permitted values (0, 224, 256, 384, 512)")
  withExpr(Sha2(e.expr, lit(numBits).expr))
}
/**
 * Cyclic redundancy check value (CRC32) of a binary column, returned as a bigint.
 *
 * @group misc_funcs
 * @since 1.5.0
 */
def crc32(e: Column): Column = withExpr(Crc32(e.expr))
/**
 * Hash code of the given columns, returned as an int column.
 *
 * @group misc_funcs
 * @since 2.0.0
 */
@scala.annotation.varargs
def hash(cols: Column*): Column = withExpr(new Murmur3Hash(cols.map(_.expr)))
/**
 * Hash code of the given columns using the 64-bit variant of the xxHash
 * algorithm, returned as a long column.
 *
 * @group misc_funcs
 * @since 3.0.0
 */
@scala.annotation.varargs
def xxhash64(cols: Column*): Column = withExpr(new XxHash64(cols.map(_.expr)))
//////////////////////////////////////////////////////////////////////////////////////////////
// String functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Numeric value of the first character of the string column, returned as an
 * int column.
 *
 * @group string_funcs
 * @since 1.5.0
 */
def ascii(e: Column): Column = withExpr(Ascii(e.expr))
/**
 * BASE64 encoding of a binary column, returned as a string column. This is
 * the reverse of unbase64.
 *
 * @group string_funcs
 * @since 1.5.0
 */
def base64(e: Column): Column = withExpr(Base64(e.expr))
/**
 * Concatenates multiple input string columns together into a single string
 * column, using the given separator.
 *
 * @group string_funcs
 * @since 1.5.0
 */
@scala.annotation.varargs
def concat_ws(sep: String, exprs: Column*): Column = withExpr {
  ConcatWs(Literal.create(sep, StringType) +: exprs.map(_.expr))
}
/**
 * Computes the first argument into a string from a binary using the provided
 * character set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE',
 * 'UTF-16LE', 'UTF-16'). If either argument is null, the result will also be null.
 *
 * @group string_funcs
 * @since 1.5.0
 */
def decode(value: Column, charset: String): Column = withExpr {
  Decode(value.expr, lit(charset).expr)
}
/**
 * Computes the first argument into a binary from a string using the provided
 * character set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE',
 * 'UTF-16LE', 'UTF-16'). If either argument is null, the result will also be null.
 *
 * @group string_funcs
 * @since 1.5.0
 */
def encode(value: Column, charset: String): Column = withExpr {
  Encode(value.expr, lit(charset).expr)
}
/**
 * Formats numeric column x to a format like '#,###,###.##', rounded to d
 * decimal places with HALF_EVEN round mode, and returns the result as a
 * string column. If d is 0, the result has no decimal point or fractional
 * part. If d is less than 0, the result will be null.
 *
 * @group string_funcs
 * @since 1.5.0
 */
def format_number(x: Column, d: Int): Column = withExpr {
  FormatNumber(x.expr, lit(d).expr)
}
/**
 * Formats the arguments in printf-style and returns the result as a string column.
 *
 * @group string_funcs
 * @since 1.5.0
 */
@scala.annotation.varargs
def format_string(format: String, arguments: Column*): Column = withExpr {
  FormatString((lit(format) +: arguments).map(_.expr): _*)
}
/**
 * New string column with the first letter of each word converted to uppercase.
 * Words are delimited by whitespace. For example, "hello world" becomes
 * "Hello World".
 *
 * @group string_funcs
 * @since 1.5.0
 */
def initcap(e: Column): Column = withExpr(InitCap(e.expr))
/**
 * Position of the first occurrence of substr column in the given string.
 * Returns null if either of the arguments are null.
 *
 * @note The position is not zero based, but 1 based index. Returns 0 if substr
 * could not be found in str.
 *
 * @group string_funcs
 * @since 1.5.0
 */
def instr(str: Column, substring: String): Column = withExpr {
  StringInstr(str.expr, lit(substring).expr)
}
/**
 * Character length of a given string or number of bytes of a binary string.
 * The length of character strings include the trailing spaces. The length of
 * binary strings includes binary zeros.
 *
 * @group string_funcs
 * @since 1.5.0
 */
def length(e: Column): Column = withExpr(Length(e.expr))
/**
 * Converts a string column to lower case.
 *
 * @group string_funcs
 * @since 1.3.0
 */
def lower(e: Column): Column = withExpr(Lower(e.expr))
/**
 * Levenshtein distance of the two given string columns.
 *
 * @group string_funcs
 * @since 1.5.0
 */
def levenshtein(l: Column, r: Column): Column = withExpr(Levenshtein(l.expr, r.expr))
/**
 * Position of the first occurrence of substr.
 *
 * @note The position is not zero based, but 1 based index. Returns 0 if substr
 * could not be found in str.
 *
 * @group string_funcs
 * @since 1.5.0
 */
def locate(substr: String, str: Column): Column = withExpr {
  new StringLocate(lit(substr).expr, str.expr)
}
/**
 * Position of the first occurrence of substr in a string column, after
 * position pos.
 *
 * @note The position is not zero based, but 1 based index. returns 0 if substr
 * could not be found in str.
 *
 * @group string_funcs
 * @since 1.5.0
 */
def locate(substr: String, str: Column, pos: Int): Column = withExpr {
  StringLocate(lit(substr).expr, str.expr, lit(pos).expr)
}
/**
 * Left-pads the string column with pad to a length of len. If the string
 * column is longer than len, the return value is shortened to len characters.
 *
 * @group string_funcs
 * @since 1.5.0
 */
def lpad(str: Column, len: Int, pad: String): Column = withExpr {
  StringLPad(str.expr, lit(len).expr, lit(pad).expr)
}
/**
 * Trims the spaces from left end for the specified string value.
 *
 * @group string_funcs
 * @since 1.5.0
 */
def ltrim(e: Column): Column = withExpr(StringTrimLeft(e.expr))
/**
 * Trims the specified character string from left end for the specified string column.
 *
 * @group string_funcs
 * @since 2.3.0
 */
def ltrim(e: Column, trimString: String): Column = withExpr {
  StringTrimLeft(e.expr, Literal(trimString))
}
/**
* Extract a specific group matched by a Java regex, from the specified string column.
* If the regex did not match, or the specified group did not match, an empty string is returned.
*
* @group string_funcs
* @since 1.5.0
*/
def regexp_extract(e: Column, exp: String, groupIdx: Int): Column = withExpr {
RegExpExtract(e.expr, lit(exp).expr, lit(groupIdx).expr)
}
/**
* Replace all substrings of the specified string value that match regexp with rep.
*
* @group string_funcs
* @since 1.5.0
*/
def regexp_replace(e: Column, pattern: String, replacement: String): Column = withExpr {
RegExpReplace(e.expr, lit(pattern).expr, lit(replacement).expr)
}
/**
* Replace all substrings of the specified string value that match regexp with rep.
*
* @group string_funcs
* @since 2.1.0
*/
def regexp_replace(e: Column, pattern: Column, replacement: Column): Column = withExpr {
RegExpReplace(e.expr, pattern.expr, replacement.expr)
}
/**
* Decodes a BASE64 encoded string column and returns it as a binary column.
* This is the reverse of base64.
*
* @group string_funcs
* @since 1.5.0
*/
def unbase64(e: Column): Column = withExpr { UnBase64(e.expr) }
/**
* Right-pad the string column with pad to a length of len. If the string column is longer
* than len, the return value is shortened to len characters.
*
* @group string_funcs
* @since 1.5.0
*/
def rpad(str: Column, len: Int, pad: String): Column = withExpr {
StringRPad(str.expr, lit(len).expr, lit(pad).expr)
}
/**
* Repeats a string column n times, and returns it as a new string column.
*
* @group string_funcs
* @since 1.5.0
*/
def repeat(str: Column, n: Int): Column = withExpr {
StringRepeat(str.expr, lit(n).expr)
}
/**
* Trim the spaces from right end for the specified string value.
*
* @group string_funcs
* @since 1.5.0
*/
def rtrim(e: Column): Column = withExpr { StringTrimRight(e.expr) }
/**
* Trim the specified character string from right end for the specified string column.
* @group string_funcs
* @since 2.3.0
*/
def rtrim(e: Column, trimString: String): Column = withExpr {
StringTrimRight(e.expr, Literal(trimString))
}
/**
* Returns the soundex code for the specified expression.
*
* @group string_funcs
* @since 1.5.0
*/
def soundex(e: Column): Column = withExpr { SoundEx(e.expr) }
/**
* Splits str around matches of the given pattern.
*
* @param str a string expression to split
* @param pattern a string representing a regular expression. The regex string should be
* a Java regular expression.
*
* @group string_funcs
* @since 1.5.0
*/
def split(str: Column, pattern: String): Column = withExpr {
StringSplit(str.expr, Literal(pattern), Literal(-1))
}
/**
* Splits str around matches of the given pattern.
*
* @param str a string expression to split
* @param pattern a string representing a regular expression. The regex string should be
* a Java regular expression.
* @param limit an integer expression which controls the number of times the regex is applied.
* <ul>
* <li>limit greater than 0: The resulting array's length will not be more than limit,
* and the resulting array's last entry will contain all input beyond the last
* matched regex.</li>
* <li>limit less than or equal to 0: `regex` will be applied as many times as
* possible, and the resulting array can be of any size.</li>
* </ul>
*
* @group string_funcs
* @since 3.0.0
*/
def split(str: Column, pattern: String, limit: Int): Column = withExpr {
StringSplit(str.expr, Literal(pattern), Literal(limit))
}
/**
* Substring starts at `pos` and is of length `len` when str is String type or
* returns the slice of byte array that starts at `pos` in byte and is of length `len`
* when str is Binary type
*
* @note The position is not zero based, but 1 based index.
*
* @group string_funcs
* @since 1.5.0
*/
def substring(str: Column, pos: Int, len: Int): Column = withExpr {
Substring(str.expr, lit(pos).expr, lit(len).expr)
}
/**
* Returns the substring from string str before count occurrences of the delimiter delim.
   * If count is positive, everything to the left of the final delimiter (counting from the left)
   * is returned. If count is negative, everything to the right of the final delimiter (counting
   * from the right) is returned. substring_index performs a case-sensitive match when searching
   * for delim.
*
* @group string_funcs
*/
def substring_index(str: Column, delim: String, count: Int): Column = withExpr {
SubstringIndex(str.expr, lit(delim).expr, lit(count).expr)
}
/**
* Overlay the specified portion of `src` with `replace`,
* starting from byte position `pos` of `src` and proceeding for `len` bytes.
*
* @group string_funcs
* @since 3.0.0
*/
def overlay(src: Column, replace: Column, pos: Column, len: Column): Column = withExpr {
Overlay(src.expr, replace.expr, pos.expr, len.expr)
}
/**
* Overlay the specified portion of `src` with `replace`,
* starting from byte position `pos` of `src`.
*
* @group string_funcs
* @since 3.0.0
*/
def overlay(src: Column, replace: Column, pos: Column): Column = withExpr {
new Overlay(src.expr, replace.expr, pos.expr)
}
/**
* Translate any character in the src by a character in replaceString.
* The characters in replaceString correspond to the characters in matchingString.
* The translate will happen when any character in the string matches the character
* in the `matchingString`.
*
* @group string_funcs
* @since 1.5.0
*/
def translate(src: Column, matchingString: String, replaceString: String): Column = withExpr {
StringTranslate(src.expr, lit(matchingString).expr, lit(replaceString).expr)
}
/**
* Trim the spaces from both ends for the specified string column.
*
* @group string_funcs
* @since 1.5.0
*/
def trim(e: Column): Column = withExpr { StringTrim(e.expr) }
/**
* Trim the specified character from both ends for the specified string column.
* @group string_funcs
* @since 2.3.0
*/
def trim(e: Column, trimString: String): Column = withExpr {
StringTrim(e.expr, Literal(trimString))
}
/**
* Converts a string column to upper case.
*
* @group string_funcs
* @since 1.3.0
*/
def upper(e: Column): Column = withExpr { Upper(e.expr) }
//////////////////////////////////////////////////////////////////////////////////////////////
// DateTime functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Returns the date that is `numMonths` after `startDate`.
*
* @param startDate A date, timestamp or string. If a string, the data must be in a format that
* can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @param numMonths The number of months to add to `startDate`, can be negative to subtract months
* @return A date, or null if `startDate` was a string that could not be cast to a date
* @group datetime_funcs
* @since 1.5.0
*/
def add_months(startDate: Column, numMonths: Int): Column = add_months(startDate, lit(numMonths))
/**
* Returns the date that is `numMonths` after `startDate`.
*
* @param startDate A date, timestamp or string. If a string, the data must be in a format that
* can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @param numMonths A column of the number of months to add to `startDate`, can be negative to
* subtract months
* @return A date, or null if `startDate` was a string that could not be cast to a date
* @group datetime_funcs
* @since 3.0.0
*/
def add_months(startDate: Column, numMonths: Column): Column = withExpr {
AddMonths(startDate.expr, numMonths.expr)
}
/**
* Returns the current date as a date column.
*
* @group datetime_funcs
* @since 1.5.0
*/
def current_date(): Column = withExpr { CurrentDate() }
/**
* Returns the current timestamp as a timestamp column.
*
* @group datetime_funcs
* @since 1.5.0
*/
def current_timestamp(): Column = withExpr { CurrentTimestamp() }
/**
* Converts a date/timestamp/string to a value of string in the format specified by the date
* format given by the second argument.
*
* See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">
* Datetime Patterns</a>
* for valid date and time format patterns
*
* @param dateExpr A date, timestamp or string. If a string, the data must be in a format that
* can be cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @param format A pattern `dd.MM.yyyy` would return a string like `18.03.1993`
* @return A string, or null if `dateExpr` was a string that could not be cast to a timestamp
* @note Use specialized functions like [[year]] whenever possible as they benefit from a
* specialized implementation.
* @throws IllegalArgumentException if the `format` pattern is invalid
* @group datetime_funcs
* @since 1.5.0
*/
def date_format(dateExpr: Column, format: String): Column = withExpr {
DateFormatClass(dateExpr.expr, Literal(format))
}
/**
* Returns the date that is `days` days after `start`
*
* @param start A date, timestamp or string. If a string, the data must be in a format that
* can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @param days The number of days to add to `start`, can be negative to subtract days
* @return A date, or null if `start` was a string that could not be cast to a date
* @group datetime_funcs
* @since 1.5.0
*/
def date_add(start: Column, days: Int): Column = date_add(start, lit(days))
/**
* Returns the date that is `days` days after `start`
*
* @param start A date, timestamp or string. If a string, the data must be in a format that
* can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @param days A column of the number of days to add to `start`, can be negative to subtract days
* @return A date, or null if `start` was a string that could not be cast to a date
* @group datetime_funcs
* @since 3.0.0
*/
def date_add(start: Column, days: Column): Column = withExpr { DateAdd(start.expr, days.expr) }
/**
* Returns the date that is `days` days before `start`
*
* @param start A date, timestamp or string. If a string, the data must be in a format that
* can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @param days The number of days to subtract from `start`, can be negative to add days
* @return A date, or null if `start` was a string that could not be cast to a date
* @group datetime_funcs
* @since 1.5.0
*/
def date_sub(start: Column, days: Int): Column = date_sub(start, lit(days))
/**
* Returns the date that is `days` days before `start`
*
* @param start A date, timestamp or string. If a string, the data must be in a format that
* can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @param days A column of the number of days to subtract from `start`, can be negative to add
* days
* @return A date, or null if `start` was a string that could not be cast to a date
* @group datetime_funcs
* @since 3.0.0
*/
def date_sub(start: Column, days: Column): Column = withExpr { DateSub(start.expr, days.expr) }
/**
* Returns the number of days from `start` to `end`.
*
* Only considers the date part of the input. For example:
* {{{
   *   datediff("2018-01-10 00:00:00", "2018-01-09 23:59:59")
* // returns 1
* }}}
*
* @param end A date, timestamp or string. If a string, the data must be in a format that
* can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @param start A date, timestamp or string. If a string, the data must be in a format that
* can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @return An integer, or null if either `end` or `start` were strings that could not be cast to
* a date. Negative if `end` is before `start`
* @group datetime_funcs
* @since 1.5.0
*/
def datediff(end: Column, start: Column): Column = withExpr { DateDiff(end.expr, start.expr) }
/**
* Extracts the year as an integer from a given date/timestamp/string.
* @return An integer, or null if the input was a string that could not be cast to a date
* @group datetime_funcs
* @since 1.5.0
*/
def year(e: Column): Column = withExpr { Year(e.expr) }
/**
* Extracts the quarter as an integer from a given date/timestamp/string.
* @return An integer, or null if the input was a string that could not be cast to a date
* @group datetime_funcs
* @since 1.5.0
*/
def quarter(e: Column): Column = withExpr { Quarter(e.expr) }
/**
* Extracts the month as an integer from a given date/timestamp/string.
* @return An integer, or null if the input was a string that could not be cast to a date
* @group datetime_funcs
* @since 1.5.0
*/
def month(e: Column): Column = withExpr { Month(e.expr) }
/**
* Extracts the day of the week as an integer from a given date/timestamp/string.
* Ranges from 1 for a Sunday through to 7 for a Saturday
* @return An integer, or null if the input was a string that could not be cast to a date
* @group datetime_funcs
* @since 2.3.0
*/
def dayofweek(e: Column): Column = withExpr { DayOfWeek(e.expr) }
/**
* Extracts the day of the month as an integer from a given date/timestamp/string.
* @return An integer, or null if the input was a string that could not be cast to a date
* @group datetime_funcs
* @since 1.5.0
*/
def dayofmonth(e: Column): Column = withExpr { DayOfMonth(e.expr) }
/**
* Extracts the day of the year as an integer from a given date/timestamp/string.
* @return An integer, or null if the input was a string that could not be cast to a date
* @group datetime_funcs
* @since 1.5.0
*/
def dayofyear(e: Column): Column = withExpr { DayOfYear(e.expr) }
/**
* Extracts the hours as an integer from a given date/timestamp/string.
* @return An integer, or null if the input was a string that could not be cast to a date
* @group datetime_funcs
* @since 1.5.0
*/
def hour(e: Column): Column = withExpr { Hour(e.expr) }
/**
* Returns the last day of the month which the given date belongs to.
* For example, input "2015-07-27" returns "2015-07-31" since July 31 is the last day of the
* month in July 2015.
*
* @param e A date, timestamp or string. If a string, the data must be in a format that can be
* cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @return A date, or null if the input was a string that could not be cast to a date
* @group datetime_funcs
* @since 1.5.0
*/
def last_day(e: Column): Column = withExpr { LastDay(e.expr) }
/**
* Extracts the minutes as an integer from a given date/timestamp/string.
* @return An integer, or null if the input was a string that could not be cast to a date
* @group datetime_funcs
* @since 1.5.0
*/
def minute(e: Column): Column = withExpr { Minute(e.expr) }
/**
* Returns number of months between dates `start` and `end`.
*
* A whole number is returned if both inputs have the same day of month or both are the last day
* of their respective months. Otherwise, the difference is calculated assuming 31 days per month.
*
* For example:
* {{{
* months_between("2017-11-14", "2017-07-14") // returns 4.0
* months_between("2017-01-01", "2017-01-10") // returns 0.29032258
* months_between("2017-06-01", "2017-06-16 12:00:00") // returns -0.5
* }}}
*
* @param end A date, timestamp or string. If a string, the data must be in a format that can
* be cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @param start A date, timestamp or string. If a string, the data must be in a format that can
* cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @return A double, or null if either `end` or `start` were strings that could not be cast to a
* timestamp. Negative if `end` is before `start`
* @group datetime_funcs
* @since 1.5.0
*/
def months_between(end: Column, start: Column): Column = withExpr {
new MonthsBetween(end.expr, start.expr)
}
/**
* Returns number of months between dates `end` and `start`. If `roundOff` is set to true, the
* result is rounded off to 8 digits; it is not rounded otherwise.
* @group datetime_funcs
* @since 2.4.0
*/
def months_between(end: Column, start: Column, roundOff: Boolean): Column = withExpr {
MonthsBetween(end.expr, start.expr, lit(roundOff).expr)
}
/**
* Returns the first date which is later than the value of the `date` column that is on the
* specified day of the week.
*
* For example, `next_day('2015-07-27', "Sunday")` returns 2015-08-02 because that is the first
* Sunday after 2015-07-27.
*
* @param date A date, timestamp or string. If a string, the data must be in a format that
* can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @param dayOfWeek Case insensitive, and accepts: "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"
* @return A date, or null if `date` was a string that could not be cast to a date or if
* `dayOfWeek` was an invalid value
* @group datetime_funcs
* @since 1.5.0
*/
def next_day(date: Column, dayOfWeek: String): Column = withExpr {
NextDay(date.expr, lit(dayOfWeek).expr)
}
/**
* Extracts the seconds as an integer from a given date/timestamp/string.
* @return An integer, or null if the input was a string that could not be cast to a timestamp
* @group datetime_funcs
* @since 1.5.0
*/
def second(e: Column): Column = withExpr { Second(e.expr) }
/**
* Extracts the week number as an integer from a given date/timestamp/string.
*
* A week is considered to start on a Monday and week 1 is the first week with more than 3 days,
* as defined by ISO 8601
*
* @return An integer, or null if the input was a string that could not be cast to a date
* @group datetime_funcs
* @since 1.5.0
*/
def weekofyear(e: Column): Column = withExpr { WeekOfYear(e.expr) }
/**
* Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string
* representing the timestamp of that moment in the current system time zone in the
* yyyy-MM-dd HH:mm:ss format.
*
* @param ut A number of a type that is castable to a long, such as string or integer. Can be
* negative for timestamps before the unix epoch
* @return A string, or null if the input was a string that could not be cast to a long
* @group datetime_funcs
* @since 1.5.0
*/
def from_unixtime(ut: Column): Column = withExpr {
FromUnixTime(ut.expr, Literal(TimestampFormatter.defaultPattern))
}
/**
* Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string
* representing the timestamp of that moment in the current system time zone in the given
* format.
*
* See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">
* Datetime Patterns</a>
* for valid date and time format patterns
*
* @param ut A number of a type that is castable to a long, such as string or integer. Can be
* negative for timestamps before the unix epoch
* @param f A date time pattern that the input will be formatted to
* @return A string, or null if `ut` was a string that could not be cast to a long or `f` was
* an invalid date time pattern
* @group datetime_funcs
* @since 1.5.0
*/
def from_unixtime(ut: Column, f: String): Column = withExpr {
FromUnixTime(ut.expr, Literal(f))
}
/**
* Returns the current Unix timestamp (in seconds) as a long.
*
* @note All calls of `unix_timestamp` within the same query return the same value
* (i.e. the current timestamp is calculated at the start of query evaluation).
*
* @group datetime_funcs
* @since 1.5.0
*/
def unix_timestamp(): Column = withExpr {
UnixTimestamp(CurrentTimestamp(), Literal(TimestampFormatter.defaultPattern))
}
/**
* Converts time string in format yyyy-MM-dd HH:mm:ss to Unix timestamp (in seconds),
* using the default timezone and the default locale.
*
* @param s A date, timestamp or string. If a string, the data must be in the
* `yyyy-MM-dd HH:mm:ss` format
* @return A long, or null if the input was a string not of the correct format
* @group datetime_funcs
* @since 1.5.0
*/
def unix_timestamp(s: Column): Column = withExpr {
UnixTimestamp(s.expr, Literal(TimestampFormatter.defaultPattern))
}
/**
* Converts time string with given pattern to Unix timestamp (in seconds).
*
* See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">
* Datetime Patterns</a>
* for valid date and time format patterns
*
* @param s A date, timestamp or string. If a string, the data must be in a format that can be
* cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @param p A date time pattern detailing the format of `s` when `s` is a string
* @return A long, or null if `s` was a string that could not be cast to a date or `p` was
* an invalid format
* @group datetime_funcs
* @since 1.5.0
*/
def unix_timestamp(s: Column, p: String): Column = withExpr { UnixTimestamp(s.expr, Literal(p)) }
/**
* Converts to a timestamp by casting rules to `TimestampType`.
*
* @param s A date, timestamp or string. If a string, the data must be in a format that can be
* cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @return A timestamp, or null if the input was a string that could not be cast to a timestamp
* @group datetime_funcs
* @since 2.2.0
*/
def to_timestamp(s: Column): Column = withExpr {
new ParseToTimestamp(s.expr)
}
/**
* Converts time string with the given pattern to timestamp.
*
* See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">
* Datetime Patterns</a>
* for valid date and time format patterns
*
* @param s A date, timestamp or string. If a string, the data must be in a format that can be
* cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @param fmt A date time pattern detailing the format of `s` when `s` is a string
* @return A timestamp, or null if `s` was a string that could not be cast to a timestamp or
* `fmt` was an invalid format
* @group datetime_funcs
* @since 2.2.0
*/
def to_timestamp(s: Column, fmt: String): Column = withExpr {
new ParseToTimestamp(s.expr, Literal(fmt))
}
/**
* Converts the column into `DateType` by casting rules to `DateType`.
*
* @group datetime_funcs
* @since 1.5.0
*/
def to_date(e: Column): Column = withExpr { new ParseToDate(e.expr) }
/**
* Converts the column into a `DateType` with a specified format
*
* See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">
* Datetime Patterns</a>
* for valid date and time format patterns
*
* @param e A date, timestamp or string. If a string, the data must be in a format that can be
* cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
   * @param fmt A date time pattern detailing the format of `e` when `e` is a string
* @return A date, or null if `e` was a string that could not be cast to a date or `fmt` was an
* invalid format
* @group datetime_funcs
* @since 2.2.0
*/
def to_date(e: Column, fmt: String): Column = withExpr {
new ParseToDate(e.expr, Literal(fmt))
}
/**
* Returns date truncated to the unit specified by the format.
*
* For example, `trunc("2018-11-19 12:01:19", "year")` returns 2018-01-01
*
* @param date A date, timestamp or string. If a string, the data must be in a format that can be
* cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @param format: 'year', 'yyyy', 'yy' to truncate by year,
* or 'month', 'mon', 'mm' to truncate by month
* Other options are: 'week', 'quarter'
*
* @return A date, or null if `date` was a string that could not be cast to a date or `format`
* was an invalid value
* @group datetime_funcs
* @since 1.5.0
*/
def trunc(date: Column, format: String): Column = withExpr {
TruncDate(date.expr, Literal(format))
}
/**
* Returns timestamp truncated to the unit specified by the format.
*
* For example, `date_trunc("year", "2018-11-19 12:01:19")` returns 2018-01-01 00:00:00
*
* @param format: 'year', 'yyyy', 'yy' to truncate by year,
* 'month', 'mon', 'mm' to truncate by month,
* 'day', 'dd' to truncate by day,
* Other options are:
* 'microsecond', 'millisecond', 'second', 'minute', 'hour', 'week', 'quarter'
* @param timestamp A date, timestamp or string. If a string, the data must be in a format that
* can be cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @return A timestamp, or null if `timestamp` was a string that could not be cast to a timestamp
* or `format` was an invalid value
* @group datetime_funcs
* @since 2.3.0
*/
def date_trunc(format: String, timestamp: Column): Column = withExpr {
TruncTimestamp(Literal(format), timestamp.expr)
}
/**
* Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in UTC, and renders
* that time as a timestamp in the given time zone. For example, 'GMT+1' would yield
* '2017-07-14 03:40:00.0'.
*
* @param ts A date, timestamp or string. If a string, the data must be in a format that can be
* cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @param tz A string detailing the time zone ID that the input should be adjusted to. It should
* be in the format of either region-based zone IDs or zone offsets. Region IDs must
* have the form 'area/city', such as 'America/Los_Angeles'. Zone offsets must be in
* the format '(+|-)HH:mm', for example '-08:00' or '+01:00'. Also 'UTC' and 'Z' are
* supported as aliases of '+00:00'. Other short names are not recommended to use
* because they can be ambiguous.
* @return A timestamp, or null if `ts` was a string that could not be cast to a timestamp or
* `tz` was an invalid value
* @group datetime_funcs
* @since 1.5.0
*/
def from_utc_timestamp(ts: Column, tz: String): Column = withExpr {
FromUTCTimestamp(ts.expr, Literal(tz))
}
/**
* Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in UTC, and renders
* that time as a timestamp in the given time zone. For example, 'GMT+1' would yield
* '2017-07-14 03:40:00.0'.
* @group datetime_funcs
* @since 2.4.0
*/
def from_utc_timestamp(ts: Column, tz: Column): Column = withExpr {
FromUTCTimestamp(ts.expr, tz.expr)
}
/**
* Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in the given time
* zone, and renders that time as a timestamp in UTC. For example, 'GMT+1' would yield
* '2017-07-14 01:40:00.0'.
*
* @param ts A date, timestamp or string. If a string, the data must be in a format that can be
* cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS`
* @param tz A string detailing the time zone ID that the input should be adjusted to. It should
* be in the format of either region-based zone IDs or zone offsets. Region IDs must
* have the form 'area/city', such as 'America/Los_Angeles'. Zone offsets must be in
* the format '(+|-)HH:mm', for example '-08:00' or '+01:00'. Also 'UTC' and 'Z' are
* supported as aliases of '+00:00'. Other short names are not recommended to use
* because they can be ambiguous.
* @return A timestamp, or null if `ts` was a string that could not be cast to a timestamp or
* `tz` was an invalid value
* @group datetime_funcs
* @since 1.5.0
*/
def to_utc_timestamp(ts: Column, tz: String): Column = withExpr {
ToUTCTimestamp(ts.expr, Literal(tz))
}
/**
* Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in the given time
* zone, and renders that time as a timestamp in UTC. For example, 'GMT+1' would yield
* '2017-07-14 01:40:00.0'.
* @group datetime_funcs
* @since 2.4.0
*/
def to_utc_timestamp(ts: Column, tz: Column): Column = withExpr {
ToUTCTimestamp(ts.expr, tz.expr)
}
/**
* Bucketize rows into one or more time windows given a timestamp specifying column. Window
* starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
* [12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
* the order of months are not supported. The following example takes the average stock price for
* a one minute window every 10 seconds starting 5 seconds after the hour:
*
* {{{
* val df = ... // schema => timestamp: TimestampType, stockId: StringType, price: DoubleType
* df.groupBy(window($"time", "1 minute", "10 seconds", "5 seconds"), $"stockId")
* .agg(mean("price"))
* }}}
*
* The windows will look like:
*
* {{{
* 09:00:05-09:01:05
* 09:00:15-09:01:15
* 09:00:25-09:01:25 ...
* }}}
*
* For a streaming query, you may use the function `current_timestamp` to generate windows on
* processing time.
*
* @param timeColumn The column or the expression to use as the timestamp for windowing by time.
* The time column must be of TimestampType.
* @param windowDuration A string specifying the width of the window, e.g. `10 minutes`,
* `1 second`. Check `org.apache.spark.unsafe.types.CalendarInterval` for
* valid duration identifiers. Note that the duration is a fixed length of
* time, and does not vary over time according to a calendar. For example,
* `1 day` always means 86,400,000 milliseconds, not a calendar day.
* @param slideDuration A string specifying the sliding interval of the window, e.g. `1 minute`.
* A new window will be generated every `slideDuration`. Must be less than
* or equal to the `windowDuration`. Check
* `org.apache.spark.unsafe.types.CalendarInterval` for valid duration
* identifiers. This duration is likewise absolute, and does not vary
* according to a calendar.
* @param startTime The offset with respect to 1970-01-01 00:00:00 UTC with which to start
* window intervals. For example, in order to have hourly tumbling windows that
* start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide
* `startTime` as `15 minutes`.
*
* @group datetime_funcs
* @since 2.0.0
*/
def window(
timeColumn: Column,
windowDuration: String,
slideDuration: String,
startTime: String): Column = {
withExpr {
TimeWindow(timeColumn.expr, windowDuration, slideDuration, startTime)
}.as("window")
}
/**
* Bucketize rows into one or more time windows given a timestamp specifying column. Window
* starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
* [12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
* the order of months are not supported. The windows start beginning at 1970-01-01 00:00:00 UTC.
* The following example takes the average stock price for a one minute window every 10 seconds:
*
* {{{
* val df = ... // schema => timestamp: TimestampType, stockId: StringType, price: DoubleType
* df.groupBy(window($"time", "1 minute", "10 seconds"), $"stockId")
* .agg(mean("price"))
* }}}
*
* The windows will look like:
*
* {{{
* 09:00:00-09:01:00
* 09:00:10-09:01:10
* 09:00:20-09:01:20 ...
* }}}
*
* For a streaming query, you may use the function `current_timestamp` to generate windows on
* processing time.
*
* @param timeColumn The column or the expression to use as the timestamp for windowing by time.
* The time column must be of TimestampType.
* @param windowDuration A string specifying the width of the window, e.g. `10 minutes`,
* `1 second`. Check `org.apache.spark.unsafe.types.CalendarInterval` for
* valid duration identifiers. Note that the duration is a fixed length of
* time, and does not vary over time according to a calendar. For example,
* `1 day` always means 86,400,000 milliseconds, not a calendar day.
* @param slideDuration A string specifying the sliding interval of the window, e.g. `1 minute`.
* A new window will be generated every `slideDuration`. Must be less than
* or equal to the `windowDuration`. Check
* `org.apache.spark.unsafe.types.CalendarInterval` for valid duration
* identifiers. This duration is likewise absolute, and does not vary
* according to a calendar.
*
* @group datetime_funcs
* @since 2.0.0
*/
def window(timeColumn: Column, windowDuration: String, slideDuration: String): Column = {
window(timeColumn, windowDuration, slideDuration, "0 second")
}
/**
* Generates tumbling time windows given a timestamp specifying column. Window
* starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
* [12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
* the order of months are not supported. The windows start beginning at 1970-01-01 00:00:00 UTC.
* The following example takes the average stock price for a one minute tumbling window:
*
* {{{
* val df = ... // schema => timestamp: TimestampType, stockId: StringType, price: DoubleType
* df.groupBy(window($"time", "1 minute"), $"stockId")
* .agg(mean("price"))
* }}}
*
* The windows will look like:
*
* {{{
* 09:00:00-09:01:00
* 09:01:00-09:02:00
* 09:02:00-09:03:00 ...
* }}}
*
* For a streaming query, you may use the function `current_timestamp` to generate windows on
* processing time.
*
* @param timeColumn The column or the expression to use as the timestamp for windowing by time.
* The time column must be of TimestampType.
* @param windowDuration A string specifying the width of the window, e.g. `10 minutes`,
* `1 second`. Check `org.apache.spark.unsafe.types.CalendarInterval` for
* valid duration identifiers.
*
* @group datetime_funcs
* @since 2.0.0
*/
def window(timeColumn: Column, windowDuration: String): Column = {
window(timeColumn, windowDuration, windowDuration, "0 second")
}
/**
* Creates timestamp from the number of seconds since UTC epoch.
* @group datetime_funcs
* @since 3.1.0
*/
def timestamp_seconds(e: Column): Column = withExpr {
SecondsToTimestamp(e.expr)
}
//////////////////////////////////////////////////////////////////////////////////////////////
// Collection functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Returns null if the array is null, true if the array contains `value`, and false otherwise.
* @group collection_funcs
* @since 1.5.0
*/
def array_contains(column: Column, value: Any): Column = withExpr {
ArrayContains(column.expr, lit(value).expr)
}
/**
* Returns `true` if `a1` and `a2` have at least one non-null element in common. If not and both
* the arrays are non-empty and any of them contains a `null`, it returns `null`. It returns
* `false` otherwise.
* @group collection_funcs
* @since 2.4.0
*/
def arrays_overlap(a1: Column, a2: Column): Column = withExpr {
ArraysOverlap(a1.expr, a2.expr)
}
/**
* Returns an array containing all the elements in `x` from index `start` (or starting from the
* end if `start` is negative) with the specified `length`.
*
* @param x the array column to be sliced
* @param start the starting index
* @param length the length of the slice
*
* @group collection_funcs
* @since 2.4.0
*/
def slice(x: Column, start: Int, length: Int): Column = withExpr {
Slice(x.expr, Literal(start), Literal(length))
}
/**
* Concatenates the elements of `column` using the `delimiter`. Null values are replaced with
* `nullReplacement`.
* @group collection_funcs
* @since 2.4.0
*/
def array_join(column: Column, delimiter: String, nullReplacement: String): Column = withExpr {
ArrayJoin(column.expr, Literal(delimiter), Some(Literal(nullReplacement)))
}
/**
* Concatenates the elements of `column` using the `delimiter`.
* @group collection_funcs
* @since 2.4.0
*/
def array_join(column: Column, delimiter: String): Column = withExpr {
ArrayJoin(column.expr, Literal(delimiter), None)
}
/**
* Concatenates multiple input columns together into a single column.
* The function works with strings, binary and compatible array columns.
*
* @group collection_funcs
* @since 1.5.0
*/
@scala.annotation.varargs
def concat(exprs: Column*): Column = withExpr { Concat(exprs.map(_.expr)) }
/**
* Locates the position of the first occurrence of the value in the given array as long.
* Returns null if either of the arguments are null.
*
* @note The position is not zero based, but 1 based index. Returns 0 if value
* could not be found in array.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_position(column: Column, value: Any): Column = withExpr {
ArrayPosition(column.expr, lit(value).expr)
}
/**
* Returns element of array at given index in value if column is array. Returns value for
* the given key in value if column is map.
*
* @group collection_funcs
* @since 2.4.0
*/
def element_at(column: Column, value: Any): Column = withExpr {
ElementAt(column.expr, lit(value).expr)
}
/**
* Sorts the input array in ascending order. The elements of the input array must be orderable.
* Null elements will be placed at the end of the returned array.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_sort(e: Column): Column = withExpr { new ArraySort(e.expr) }
/**
* Remove all elements that equal to element from the given array.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_remove(column: Column, element: Any): Column = withExpr {
ArrayRemove(column.expr, lit(element).expr)
}
/**
* Removes duplicate values from the array.
* @group collection_funcs
* @since 2.4.0
*/
def array_distinct(e: Column): Column = withExpr { ArrayDistinct(e.expr) }
/**
* Returns an array of the elements in the intersection of the given two arrays,
* without duplicates.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_intersect(col1: Column, col2: Column): Column = withExpr {
ArrayIntersect(col1.expr, col2.expr)
}
/**
* Returns an array of the elements in the union of the given two arrays, without duplicates.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_union(col1: Column, col2: Column): Column = withExpr {
ArrayUnion(col1.expr, col2.expr)
}
/**
* Returns an array of the elements in the first array but not in the second array,
* without duplicates. The order of elements in the result is not determined
*
* @group collection_funcs
* @since 2.4.0
*/
def array_except(col1: Column, col2: Column): Column = withExpr {
ArrayExcept(col1.expr, col2.expr)
}
private def createLambda(f: Column => Column) = {
val x = UnresolvedNamedLambdaVariable(Seq("x"))
val function = f(Column(x)).expr
LambdaFunction(function, Seq(x))
}
private def createLambda(f: (Column, Column) => Column) = {
val x = UnresolvedNamedLambdaVariable(Seq("x"))
val y = UnresolvedNamedLambdaVariable(Seq("y"))
val function = f(Column(x), Column(y)).expr
LambdaFunction(function, Seq(x, y))
}
private def createLambda(f: (Column, Column, Column) => Column) = {
val x = UnresolvedNamedLambdaVariable(Seq("x"))
val y = UnresolvedNamedLambdaVariable(Seq("y"))
val z = UnresolvedNamedLambdaVariable(Seq("z"))
val function = f(Column(x), Column(y), Column(z)).expr
LambdaFunction(function, Seq(x, y, z))
}
/**
* Returns an array of elements after applying a transformation to each element
* in the input array.
* {{{
* df.select(transform(col("i"), x => x + 1))
* }}}
*
* @param column the input array column
* @param f col => transformed_col, the lambda function to transform the input column
*
* @group collection_funcs
* @since 3.0.0
*/
def transform(column: Column, f: Column => Column): Column = withExpr {
ArrayTransform(column.expr, createLambda(f))
}
/**
* Returns an array of elements after applying a transformation to each element
* in the input array.
* {{{
* df.select(transform(col("i"), (x, i) => x + i))
* }}}
*
* @param column the input array column
   * @param f (col, index) => transformed_col, the lambda function to transform the input column
   *          given the index. Indices start at 0.
*
* @group collection_funcs
* @since 3.0.0
*/
def transform(column: Column, f: (Column, Column) => Column): Column = withExpr {
ArrayTransform(column.expr, createLambda(f))
}
/**
* Returns whether a predicate holds for one or more elements in the array.
* {{{
* df.select(exists(col("i"), _ % 2 === 0))
* }}}
*
* @param column the input array column
* @param f col => predicate, the Boolean predicate to check the input column
*
* @group collection_funcs
* @since 3.0.0
*/
def exists(column: Column, f: Column => Column): Column = withExpr {
ArrayExists(column.expr, createLambda(f))
}
/**
* Returns whether a predicate holds for every element in the array.
* {{{
* df.select(forall(col("i"), x => x % 2 === 0))
* }}}
*
* @param column the input array column
* @param f col => predicate, the Boolean predicate to check the input column
*
* @group collection_funcs
* @since 3.0.0
*/
def forall(column: Column, f: Column => Column): Column = withExpr {
ArrayForAll(column.expr, createLambda(f))
}
/**
* Returns an array of elements for which a predicate holds in a given array.
* {{{
* df.select(filter(col("s"), x => x % 2 === 0))
* }}}
*
* @param column the input array column
* @param f col => predicate, the Boolean predicate to filter the input column
*
* @group collection_funcs
* @since 3.0.0
*/
def filter(column: Column, f: Column => Column): Column = withExpr {
ArrayFilter(column.expr, createLambda(f))
}
/**
* Returns an array of elements for which a predicate holds in a given array.
* {{{
* df.select(filter(col("s"), (x, i) => i % 2 === 0))
* }}}
*
* @param column the input array column
* @param f (col, index) => predicate, the Boolean predicate to filter the input column
* given the index. Indices start at 0.
*
* @group collection_funcs
* @since 3.0.0
*/
def filter(column: Column, f: (Column, Column) => Column): Column = withExpr {
ArrayFilter(column.expr, createLambda(f))
}
/**
* Applies a binary operator to an initial state and all elements in the array,
* and reduces this to a single state. The final state is converted into the final result
* by applying a finish function.
* {{{
* df.select(aggregate(col("i"), lit(0), (acc, x) => acc + x, _ * 10))
* }}}
*
* @param expr the input array column
* @param initialValue the initial value
* @param merge (combined_value, input_value) => combined_value, the merge function to merge
* an input value to the combined_value
* @param finish combined_value => final_value, the lambda function to convert the combined value
* of all inputs to final result
*
* @group collection_funcs
* @since 3.0.0
*/
def aggregate(
expr: Column,
initialValue: Column,
merge: (Column, Column) => Column,
finish: Column => Column): Column = withExpr {
ArrayAggregate(
expr.expr,
initialValue.expr,
createLambda(merge),
createLambda(finish)
)
}
/**
* Applies a binary operator to an initial state and all elements in the array,
* and reduces this to a single state.
* {{{
* df.select(aggregate(col("i"), lit(0), (acc, x) => acc + x))
* }}}
*
* @param expr the input array column
* @param initialValue the initial value
* @param merge (combined_value, input_value) => combined_value, the merge function to merge
* an input value to the combined_value
* @group collection_funcs
* @since 3.0.0
*/
  def aggregate(expr: Column, initialValue: Column, merge: (Column, Column) => Column): Column =
    aggregate(expr, initialValue, merge, c => c)  // identity finish: the final state is the result
/**
* Merge two given arrays, element-wise, into a single array using a function.
* If one array is shorter, nulls are appended at the end to match the length of the longer
* array, before applying the function.
* {{{
* df.select(zip_with(df1("val1"), df1("val2"), (x, y) => x + y))
* }}}
*
* @param left the left input array column
* @param right the right input array column
* @param f (lCol, rCol) => col, the lambda function to merge two input columns into one column
*
* @group collection_funcs
* @since 3.0.0
*/
def zip_with(left: Column, right: Column, f: (Column, Column) => Column): Column = withExpr {
ZipWith(left.expr, right.expr, createLambda(f))
}
/**
* Applies a function to every key-value pair in a map and returns
* a map with the results of those applications as the new keys for the pairs.
* {{{
* df.select(transform_keys(col("i"), (k, v) => k + v))
* }}}
*
* @param expr the input map column
* @param f (key, value) => new_key, the lambda function to transform the key of input map column
*
* @group collection_funcs
* @since 3.0.0
*/
def transform_keys(expr: Column, f: (Column, Column) => Column): Column = withExpr {
TransformKeys(expr.expr, createLambda(f))
}
/**
* Applies a function to every key-value pair in a map and returns
* a map with the results of those applications as the new values for the pairs.
* {{{
* df.select(transform_values(col("i"), (k, v) => k + v))
* }}}
*
* @param expr the input map column
* @param f (key, value) => new_value, the lambda function to transform the value of input map
* column
*
* @group collection_funcs
* @since 3.0.0
*/
def transform_values(expr: Column, f: (Column, Column) => Column): Column = withExpr {
TransformValues(expr.expr, createLambda(f))
}
/**
* Returns a map whose key-value pairs satisfy a predicate.
* {{{
* df.select(map_filter(col("m"), (k, v) => k * 10 === v))
* }}}
*
* @param expr the input map column
* @param f (key, value) => predicate, the Boolean predicate to filter the input map column
*
* @group collection_funcs
* @since 3.0.0
*/
def map_filter(expr: Column, f: (Column, Column) => Column): Column = withExpr {
MapFilter(expr.expr, createLambda(f))
}
/**
* Merge two given maps, key-wise into a single map using a function.
* {{{
* df.select(map_zip_with(df("m1"), df("m2"), (k, v1, v2) => k === v1 + v2))
* }}}
*
* @param left the left input map column
* @param right the right input map column
* @param f (key, value1, value2) => new_value, the lambda function to merge the map values
*
* @group collection_funcs
* @since 3.0.0
*/
def map_zip_with(
left: Column,
right: Column,
f: (Column, Column, Column) => Column): Column = withExpr {
MapZipWith(left.expr, right.expr, createLambda(f))
}
/**
* Creates a new row for each element in the given array or map column.
* Uses the default column name `col` for elements in the array and
* `key` and `value` for elements in the map unless specified otherwise.
*
* @group collection_funcs
* @since 1.3.0
*/
def explode(e: Column): Column = withExpr { Explode(e.expr) }
/**
* Creates a new row for each element in the given array or map column.
* Uses the default column name `col` for elements in the array and
* `key` and `value` for elements in the map unless specified otherwise.
* Unlike explode, if the array/map is null or empty then null is produced.
*
* @group collection_funcs
* @since 2.2.0
*/
def explode_outer(e: Column): Column = withExpr { GeneratorOuter(Explode(e.expr)) }
/**
* Creates a new row for each element with position in the given array or map column.
* Uses the default column name `pos` for position, and `col` for elements in the array
* and `key` and `value` for elements in the map unless specified otherwise.
*
* @group collection_funcs
* @since 2.1.0
*/
def posexplode(e: Column): Column = withExpr { PosExplode(e.expr) }
/**
* Creates a new row for each element with position in the given array or map column.
* Uses the default column name `pos` for position, and `col` for elements in the array
* and `key` and `value` for elements in the map unless specified otherwise.
* Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced.
*
* @group collection_funcs
* @since 2.2.0
*/
def posexplode_outer(e: Column): Column = withExpr { GeneratorOuter(PosExplode(e.expr)) }
/**
* Extracts json object from a json string based on json path specified, and returns json string
* of the extracted json object. It will return null if the input json string is invalid.
*
* @group collection_funcs
* @since 1.6.0
*/
def get_json_object(e: Column, path: String): Column = withExpr {
GetJsonObject(e.expr, lit(path).expr)
}
/**
* Creates a new row for a json column according to the given field names.
*
* @group collection_funcs
* @since 1.6.0
*/
@scala.annotation.varargs
def json_tuple(json: Column, fields: String*): Column = withExpr {
require(fields.nonEmpty, "at least 1 field name should be given.")
JsonTuple(json.expr +: fields.map(Literal.apply))
}
/**
* (Scala-specific) Parses a column containing a JSON string into a `StructType` with the
* specified schema. Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
* @param options options to control how the json is parsed. Accepts the same options as the
* json data source.
*
* @group collection_funcs
* @since 2.1.0
*/
def from_json(e: Column, schema: StructType, options: Map[String, String]): Column =
from_json(e, schema.asInstanceOf[DataType], options)
/**
* (Scala-specific) Parses a column containing a JSON string into a `MapType` with `StringType`
* as keys type, `StructType` or `ArrayType` with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
   * @param options options to control how the json is parsed. Accepts the same options as the
   *                json data source.
*
* @group collection_funcs
* @since 2.2.0
*/
def from_json(e: Column, schema: DataType, options: Map[String, String]): Column = withExpr {
JsonToStructs(schema, options, e.expr)
}
/**
* (Java-specific) Parses a column containing a JSON string into a `StructType` with the
* specified schema. Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
* @param options options to control how the json is parsed. accepts the same options and the
* json data source.
*
* @group collection_funcs
* @since 2.1.0
*/
def from_json(e: Column, schema: StructType, options: java.util.Map[String, String]): Column =
from_json(e, schema, options.asScala.toMap)
/**
* (Java-specific) Parses a column containing a JSON string into a `MapType` with `StringType`
* as keys type, `StructType` or `ArrayType` with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
* @param options options to control how the json is parsed. accepts the same options and the
* json data source.
*
* @group collection_funcs
* @since 2.2.0
*/
def from_json(e: Column, schema: DataType, options: java.util.Map[String, String]): Column =
from_json(e, schema, options.asScala.toMap)
/**
* Parses a column containing a JSON string into a `StructType` with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
*
* @group collection_funcs
* @since 2.1.0
*/
def from_json(e: Column, schema: StructType): Column =
from_json(e, schema, Map.empty[String, String])
/**
* Parses a column containing a JSON string into a `MapType` with `StringType` as keys type,
* `StructType` or `ArrayType` with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
*
* @group collection_funcs
* @since 2.2.0
*/
def from_json(e: Column, schema: DataType): Column =
from_json(e, schema, Map.empty[String, String])
/**
* (Java-specific) Parses a column containing a JSON string into a `MapType` with `StringType`
* as keys type, `StructType` or `ArrayType` with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string as a json string. In Spark 2.1,
* the user-provided schema has to be in JSON format. Since Spark 2.2, the DDL
* format is also supported for the schema.
*
* @group collection_funcs
* @since 2.1.0
*/
def from_json(e: Column, schema: String, options: java.util.Map[String, String]): Column = {
from_json(e, schema, options.asScala.toMap)
}
/**
* (Scala-specific) Parses a column containing a JSON string into a `MapType` with `StringType`
* as keys type, `StructType` or `ArrayType` with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string as a json string, it could be a
* JSON format string or a DDL-formatted string.
*
* @group collection_funcs
* @since 2.3.0
*/
def from_json(e: Column, schema: String, options: Map[String, String]): Column = {
val dataType = try {
DataType.fromJson(schema)
} catch {
case NonFatal(_) => DataType.fromDDL(schema)
}
from_json(e, dataType, options)
}
/**
* (Scala-specific) Parses a column containing a JSON string into a `MapType` with `StringType`
* as keys type, `StructType` or `ArrayType` of `StructType`s with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
*
* @group collection_funcs
* @since 2.4.0
*/
def from_json(e: Column, schema: Column): Column = {
from_json(e, schema, Map.empty[String, String].asJava)
}
/**
* (Java-specific) Parses a column containing a JSON string into a `MapType` with `StringType`
* as keys type, `StructType` or `ArrayType` of `StructType`s with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
* @param options options to control how the json is parsed. accepts the same options and the
* json data source.
*
* @group collection_funcs
* @since 2.4.0
*/
def from_json(e: Column, schema: Column, options: java.util.Map[String, String]): Column = {
withExpr(new JsonToStructs(e.expr, schema.expr, options.asScala.toMap))
}
/**
* Parses a JSON string and infers its schema in DDL format.
*
* @param json a JSON string.
*
* @group collection_funcs
* @since 2.4.0
*/
  def schema_of_json(json: String): Column = schema_of_json(lit(json))  // wrap as a literal Column
/**
* Parses a JSON string and infers its schema in DDL format.
*
* @param json a string literal containing a JSON string.
*
* @group collection_funcs
* @since 2.4.0
*/
def schema_of_json(json: Column): Column = withExpr(new SchemaOfJson(json.expr))
/**
* Parses a JSON string and infers its schema in DDL format using options.
*
* @param json a string column containing JSON data.
* @param options options to control how the json is parsed. accepts the same options and the
* json data source. See [[DataFrameReader#json]].
* @return a column with string literal containing schema in DDL format.
*
* @group collection_funcs
* @since 3.0.0
*/
def schema_of_json(json: Column, options: java.util.Map[String, String]): Column = {
withExpr(SchemaOfJson(json.expr, options.asScala.toMap))
}
/**
* (Scala-specific) Converts a column containing a `StructType`, `ArrayType` or
* a `MapType` into a JSON string with the specified schema.
* Throws an exception, in the case of an unsupported type.
*
* @param e a column containing a struct, an array or a map.
* @param options options to control how the struct column is converted into a json string.
   *                accepts the same options as the json data source.
* Additionally the function supports the `pretty` option which enables
* pretty JSON generation.
*
* @group collection_funcs
* @since 2.1.0
*/
def to_json(e: Column, options: Map[String, String]): Column = withExpr {
StructsToJson(options, e.expr)
}
/**
* (Java-specific) Converts a column containing a `StructType`, `ArrayType` or
* a `MapType` into a JSON string with the specified schema.
* Throws an exception, in the case of an unsupported type.
*
* @param e a column containing a struct, an array or a map.
* @param options options to control how the struct column is converted into a json string.
* accepts the same options and the json data source.
* Additionally the function supports the `pretty` option which enables
* pretty JSON generation.
*
* @group collection_funcs
* @since 2.1.0
*/
def to_json(e: Column, options: java.util.Map[String, String]): Column =
to_json(e, options.asScala.toMap)
/**
* Converts a column containing a `StructType`, `ArrayType` or
* a `MapType` into a JSON string with the specified schema.
* Throws an exception, in the case of an unsupported type.
*
* @param e a column containing a struct, an array or a map.
*
* @group collection_funcs
* @since 2.1.0
*/
def to_json(e: Column): Column =
to_json(e, Map.empty[String, String])
/**
* Returns length of array or map.
*
* The function returns null for null input if spark.sql.legacy.sizeOfNull is set to false or
* spark.sql.ansi.enabled is set to true. Otherwise, the function returns -1 for null input.
* With the default settings, the function returns -1 for null input.
*
* @group collection_funcs
* @since 1.5.0
*/
def size(e: Column): Column = withExpr { Size(e.expr) }
/**
* Sorts the input array for the given column in ascending order,
* according to the natural ordering of the array elements.
* Null elements will be placed at the beginning of the returned array.
*
* @group collection_funcs
* @since 1.5.0
*/
  def sort_array(e: Column): Column = sort_array(e, asc = true)  // ascending; nulls placed first
/**
* Sorts the input array for the given column in ascending or descending order,
* according to the natural ordering of the array elements.
* Null elements will be placed at the beginning of the returned array in ascending order or
* at the end of the returned array in descending order.
*
* @group collection_funcs
* @since 1.5.0
*/
def sort_array(e: Column, asc: Boolean): Column = withExpr { SortArray(e.expr, lit(asc).expr) }
/**
* Returns the minimum value in the array.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_min(e: Column): Column = withExpr { ArrayMin(e.expr) }
/**
* Returns the maximum value in the array.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_max(e: Column): Column = withExpr { ArrayMax(e.expr) }
/**
* Returns a random permutation of the given array.
*
* @note The function is non-deterministic.
*
* @group collection_funcs
* @since 2.4.0
*/
def shuffle(e: Column): Column = withExpr { Shuffle(e.expr) }
/**
* Returns a reversed string or an array with reverse order of elements.
* @group collection_funcs
* @since 1.5.0
*/
def reverse(e: Column): Column = withExpr { Reverse(e.expr) }
/**
* Creates a single array from an array of arrays. If a structure of nested arrays is deeper than
* two levels, only one level of nesting is removed.
* @group collection_funcs
* @since 2.4.0
*/
def flatten(e: Column): Column = withExpr { Flatten(e.expr) }
/**
* Generate a sequence of integers from start to stop, incrementing by step.
*
* @group collection_funcs
* @since 2.4.0
*/
def sequence(start: Column, stop: Column, step: Column): Column = withExpr {
new Sequence(start.expr, stop.expr, step.expr)
}
/**
* Generate a sequence of integers from start to stop,
* incrementing by 1 if start is less than or equal to stop, otherwise -1.
*
* @group collection_funcs
* @since 2.4.0
*/
def sequence(start: Column, stop: Column): Column = withExpr {
new Sequence(start.expr, stop.expr)
}
/**
* Creates an array containing the left argument repeated the number of times given by the
* right argument.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_repeat(left: Column, right: Column): Column = withExpr {
ArrayRepeat(left.expr, right.expr)
}
/**
* Creates an array containing the left argument repeated the number of times given by the
* right argument.
*
* @group collection_funcs
* @since 2.4.0
*/
  def array_repeat(e: Column, count: Int): Column = array_repeat(e, lit(count))  // lift count to a literal Column
/**
* Returns an unordered array containing the keys of the map.
* @group collection_funcs
* @since 2.3.0
*/
def map_keys(e: Column): Column = withExpr { MapKeys(e.expr) }
/**
* Returns an unordered array containing the values of the map.
* @group collection_funcs
* @since 2.3.0
*/
def map_values(e: Column): Column = withExpr { MapValues(e.expr) }
/**
* Returns an unordered array of all entries in the given map.
* @group collection_funcs
* @since 3.0.0
*/
def map_entries(e: Column): Column = withExpr { MapEntries(e.expr) }
/**
* Returns a map created from the given array of entries.
* @group collection_funcs
* @since 2.4.0
*/
def map_from_entries(e: Column): Column = withExpr { MapFromEntries(e.expr) }
/**
* Returns a merged array of structs in which the N-th struct contains all N-th values of input
* arrays.
* @group collection_funcs
* @since 2.4.0
*/
@scala.annotation.varargs
def arrays_zip(e: Column*): Column = withExpr { ArraysZip(e.map(_.expr)) }
/**
* Returns the union of all the given maps.
* @group collection_funcs
* @since 2.4.0
*/
@scala.annotation.varargs
def map_concat(cols: Column*): Column = withExpr { MapConcat(cols.map(_.expr)) }
/**
* Parses a column containing a CSV string into a `StructType` with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing CSV data.
* @param schema the schema to use when parsing the CSV string
* @param options options to control how the CSV is parsed. accepts the same options and the
* CSV data source.
*
* @group collection_funcs
* @since 3.0.0
*/
def from_csv(e: Column, schema: StructType, options: Map[String, String]): Column = withExpr {
CsvToStructs(schema, options, e.expr)
}
/**
* (Java-specific) Parses a column containing a CSV string into a `StructType`
* with the specified schema. Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing CSV data.
* @param schema the schema to use when parsing the CSV string
* @param options options to control how the CSV is parsed. accepts the same options and the
* CSV data source.
*
* @group collection_funcs
* @since 3.0.0
*/
def from_csv(e: Column, schema: Column, options: java.util.Map[String, String]): Column = {
withExpr(new CsvToStructs(e.expr, schema.expr, options.asScala.toMap))
}
/**
* Parses a CSV string and infers its schema in DDL format.
*
* @param csv a CSV string.
*
* @group collection_funcs
* @since 3.0.0
*/
  def schema_of_csv(csv: String): Column = schema_of_csv(lit(csv))  // wrap as a literal Column
/**
* Parses a CSV string and infers its schema in DDL format.
*
* @param csv a string literal containing a CSV string.
*
* @group collection_funcs
* @since 3.0.0
*/
def schema_of_csv(csv: Column): Column = withExpr(new SchemaOfCsv(csv.expr))
/**
* Parses a CSV string and infers its schema in DDL format using options.
*
* @param csv a string literal containing a CSV string.
   * @param options options to control how the CSV is parsed. Accepts the same options as the
   *                CSV data source. See [[DataFrameReader#csv]].
* @return a column with string literal containing schema in DDL format.
*
* @group collection_funcs
* @since 3.0.0
*/
def schema_of_csv(csv: Column, options: java.util.Map[String, String]): Column = {
withExpr(SchemaOfCsv(csv.expr, options.asScala.toMap))
}
/**
* (Java-specific) Converts a column containing a `StructType` into a CSV string with
* the specified schema. Throws an exception, in the case of an unsupported type.
*
* @param e a column containing a struct.
* @param options options to control how the struct column is converted into a CSV string.
   *                It accepts the same options as the CSV data source.
*
* @group collection_funcs
* @since 3.0.0
*/
def to_csv(e: Column, options: java.util.Map[String, String]): Column = withExpr {
StructsToCsv(options.asScala.toMap, e.expr)
}
/**
* Converts a column containing a `StructType` into a CSV string with the specified schema.
* Throws an exception, in the case of an unsupported type.
*
* @param e a column containing a struct.
*
* @group collection_funcs
* @since 3.0.0
*/
def to_csv(e: Column): Column = to_csv(e, Map.empty[String, String].asJava)
/**
 * A transform for timestamps and dates to partition data into years.
 *
 * @group partition_transforms
 * @since 3.0.0
 */
def years(e: Column): Column = withExpr { Years(e.expr) }

/**
 * A transform for timestamps and dates to partition data into months.
 *
 * @group partition_transforms
 * @since 3.0.0
 */
def months(e: Column): Column = withExpr { Months(e.expr) }

/**
 * A transform for timestamps and dates to partition data into days.
 *
 * @group partition_transforms
 * @since 3.0.0
 */
def days(e: Column): Column = withExpr { Days(e.expr) }

/**
 * A transform for timestamps to partition data into hours.
 *
 * @group partition_transforms
 * @since 3.0.0
 */
def hours(e: Column): Column = withExpr { Hours(e.expr) }

/**
 * A transform for any type that partitions by a hash of the input column.
 *
 * @note `numBuckets` must wrap an integer literal; any other expression is rejected with an
 *       `AnalysisException`.
 *
 * @group partition_transforms
 * @since 3.0.0
 */
def bucket(numBuckets: Column, e: Column): Column = withExpr {
  numBuckets.expr match {
    // Only a literal integer is an acceptable bucket count.
    case lit @ Literal(_, IntegerType) =>
      Bucket(lit, e.expr)
    case _ =>
      throw new AnalysisException(s"Invalid number of buckets: bucket($numBuckets, $e)")
  }
}

/**
 * A transform for any type that partitions by a hash of the input column.
 *
 * @group partition_transforms
 * @since 3.0.0
 */
def bucket(numBuckets: Int, e: Column): Column = withExpr {
  Bucket(Literal(numBuckets), e.expr)
}
// scalastyle:off line.size.limit
// scalastyle:off parameter.number
/* Use the following code to generate:
(0 to 10).foreach { x =>
val types = (1 to x).foldRight("RT")((i, s) => {s"A$i, $s"})
val typeTags = (1 to x).map(i => s"A$i: TypeTag").foldLeft("RT: TypeTag")(_ + ", " + _)
val inputEncoders = (1 to x).foldRight("Nil")((i, s) => {s"Try(ExpressionEncoder[A$i]()).toOption :: $s"})
println(s"""
|/**
| * Defines a Scala closure of $x arguments as user-defined function (UDF).
| * The data types are automatically inferred based on the Scala closure's
| * signature. By default the returned UDF is deterministic. To change it to
| * nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
| *
| * @group udf_funcs
| * @since 1.3.0
| */
|def udf[$typeTags](f: Function$x[$types]): UserDefinedFunction = {
| val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
| val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(_.dataTypeAndNullable).getOrElse(ScalaReflection.schemaFor[RT])
| val inputEncoders = $inputEncoders
| val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder)
| if (nullable) udf else udf.asNonNullable()
|}""".stripMargin)
}
(0 to 10).foreach { i =>
val extTypeArgs = (0 to i).map(_ => "_").mkString(", ")
val anyTypeArgs = (0 to i).map(_ => "Any").mkString(", ")
val anyCast = s".asInstanceOf[UDF$i[$anyTypeArgs]]"
val anyParams = (1 to i).map(_ => "_: Any").mkString(", ")
val funcCall = if (i == 0) s"() => f$anyCast.call($anyParams)" else s"f$anyCast.call($anyParams)"
println(s"""
|/**
| * Defines a Java UDF$i instance as user-defined function (UDF).
| * The caller must specify the output data type, and there is no automatic input type coercion.
| * By default the returned UDF is deterministic. To change it to nondeterministic, call the
| * API `UserDefinedFunction.asNondeterministic()`.
| *
| * @group udf_funcs
| * @since 2.3.0
| */
|def udf(f: UDF$i[$extTypeArgs], returnType: DataType): UserDefinedFunction = {
| val func = $funcCall
| SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill($i)(None))
|}""".stripMargin)
}
*/
//////////////////////////////////////////////////////////////////////////////////////////////
// Scala UDF functions
// (Generated from the template in the comment block above; edit the template and regenerate
// rather than hand-editing the definitions below.)
//////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Obtains a `UserDefinedFunction` that wraps the given `Aggregator`
 * so that it may be used with untyped Data Frames.
 * {{{
 *   val agg = // Aggregator[IN, BUF, OUT]
 *
 *   // declare a UDF based on agg
 *   val aggUDF = udaf(agg)
 *   val aggData = df.agg(aggUDF($"colname"))
 *
 *   // register agg as a named function
 *   spark.udf.register("myAggName", udaf(agg))
 * }}}
 *
 * @tparam IN the aggregator input type
 * @tparam BUF the aggregating buffer type
 * @tparam OUT the finalized output type
 *
 * @param agg the typed Aggregator
 *
 * @return a UserDefinedFunction that can be used as an aggregating expression.
 *
 * @note The input encoder is inferred from the input type IN.
 */
def udaf[IN: TypeTag, BUF, OUT](agg: Aggregator[IN, BUF, OUT]): UserDefinedFunction = {
  // Derive the input encoder from IN's TypeTag and delegate to the
  // explicit-encoder overload below.
  udaf(agg, ExpressionEncoder[IN]())
}

/**
 * Obtains a `UserDefinedFunction` that wraps the given `Aggregator`
 * so that it may be used with untyped Data Frames.
 * {{{
 *   Aggregator<IN, BUF, OUT> agg = // custom Aggregator
 *   Encoder<IN> enc = // input encoder
 *
 *   // declare a UDF based on agg
 *   UserDefinedFunction aggUDF = udaf(agg, enc)
 *   DataFrame aggData = df.agg(aggUDF($"colname"))
 *
 *   // register agg as a named function
 *   spark.udf.register("myAggName", udaf(agg, enc))
 * }}}
 *
 * @tparam IN the aggregator input type
 * @tparam BUF the aggregating buffer type
 * @tparam OUT the finalized output type
 *
 * @param agg the typed Aggregator
 * @param inputEncoder a specific input encoder to use
 *
 * @return a UserDefinedFunction that can be used as an aggregating expression
 *
 * @note This overloading takes an explicit input encoder, to support UDAF
 * declarations in Java.
 */
def udaf[IN, BUF, OUT](
    agg: Aggregator[IN, BUF, OUT],
    inputEncoder: Encoder[IN]): UserDefinedFunction = {
  UserDefinedAggregator(agg, inputEncoder)
}
/**
* Defines a Scala closure of 0 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag](f: Function0[RT]): UserDefinedFunction = {
val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(_.dataTypeAndNullable).getOrElse(ScalaReflection.schemaFor[RT])
val inputEncoders = Nil
val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 1 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag](f: Function1[A1, RT]): UserDefinedFunction = {
val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(_.dataTypeAndNullable).getOrElse(ScalaReflection.schemaFor[RT])
val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Nil
val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 2 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag](f: Function2[A1, A2, RT]): UserDefinedFunction = {
val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(_.dataTypeAndNullable).getOrElse(ScalaReflection.schemaFor[RT])
val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Nil
val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 3 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag](f: Function3[A1, A2, A3, RT]): UserDefinedFunction = {
val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(_.dataTypeAndNullable).getOrElse(ScalaReflection.schemaFor[RT])
val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Nil
val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 4 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag](f: Function4[A1, A2, A3, A4, RT]): UserDefinedFunction = {
val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(_.dataTypeAndNullable).getOrElse(ScalaReflection.schemaFor[RT])
val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Nil
val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 5 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag](f: Function5[A1, A2, A3, A4, A5, RT]): UserDefinedFunction = {
val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(_.dataTypeAndNullable).getOrElse(ScalaReflection.schemaFor[RT])
val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Try(ExpressionEncoder[A5]()).toOption :: Nil
val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 6 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag](f: Function6[A1, A2, A3, A4, A5, A6, RT]): UserDefinedFunction = {
val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(_.dataTypeAndNullable).getOrElse(ScalaReflection.schemaFor[RT])
val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Try(ExpressionEncoder[A5]()).toOption :: Try(ExpressionEncoder[A6]()).toOption :: Nil
val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 7 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag](f: Function7[A1, A2, A3, A4, A5, A6, A7, RT]): UserDefinedFunction = {
val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(_.dataTypeAndNullable).getOrElse(ScalaReflection.schemaFor[RT])
val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Try(ExpressionEncoder[A5]()).toOption :: Try(ExpressionEncoder[A6]()).toOption :: Try(ExpressionEncoder[A7]()).toOption :: Nil
val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 8 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag](f: Function8[A1, A2, A3, A4, A5, A6, A7, A8, RT]): UserDefinedFunction = {
val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(_.dataTypeAndNullable).getOrElse(ScalaReflection.schemaFor[RT])
val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Try(ExpressionEncoder[A5]()).toOption :: Try(ExpressionEncoder[A6]()).toOption :: Try(ExpressionEncoder[A7]()).toOption :: Try(ExpressionEncoder[A8]()).toOption :: Nil
val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 9 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag](f: Function9[A1, A2, A3, A4, A5, A6, A7, A8, A9, RT]): UserDefinedFunction = {
val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(_.dataTypeAndNullable).getOrElse(ScalaReflection.schemaFor[RT])
val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Try(ExpressionEncoder[A5]()).toOption :: Try(ExpressionEncoder[A6]()).toOption :: Try(ExpressionEncoder[A7]()).toOption :: Try(ExpressionEncoder[A8]()).toOption :: Try(ExpressionEncoder[A9]()).toOption :: Nil
val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 10 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag](f: Function10[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, RT]): UserDefinedFunction = {
val outputEncoder = Try(ExpressionEncoder[RT]()).toOption
val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(_.dataTypeAndNullable).getOrElse(ScalaReflection.schemaFor[RT])
val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Try(ExpressionEncoder[A5]()).toOption :: Try(ExpressionEncoder[A6]()).toOption :: Try(ExpressionEncoder[A7]()).toOption :: Try(ExpressionEncoder[A8]()).toOption :: Try(ExpressionEncoder[A9]()).toOption :: Try(ExpressionEncoder[A10]()).toOption :: Nil
val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder)
if (nullable) udf else udf.asNonNullable()
}
//////////////////////////////////////////////////////////////////////////////////////////////
// Java UDF functions
// (Generated from the template in the comment block above; edit the template and regenerate
// rather than hand-editing the definitions below.)
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Defines a Java UDF0 instance as user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF0[_], returnType: DataType): UserDefinedFunction = {
val func = () => f.asInstanceOf[UDF0[Any]].call()
SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(0)(None))
}
/**
* Defines a Java UDF1 instance as user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF1[_, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF1[Any, Any]].call(_: Any)
SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(1)(None))
}
/**
* Defines a Java UDF2 instance as user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF2[_, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF2[Any, Any, Any]].call(_: Any, _: Any)
SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(2)(None))
}
/**
* Defines a Java UDF3 instance as user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF3[_, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF3[Any, Any, Any, Any]].call(_: Any, _: Any, _: Any)
SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(3)(None))
}
/**
* Defines a Java UDF4 instance as user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF4[_, _, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF4[Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any)
SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(4)(None))
}
/**
* Defines a Java UDF5 instance as user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF5[_, _, _, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF5[Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any)
SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(5)(None))
}
/**
* Defines a Java UDF6 instance as user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF6[_, _, _, _, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF6[Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any)
SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(6)(None))
}
/**
* Defines a Java UDF7 instance as user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF7[_, _, _, _, _, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF7[Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any)
SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(7)(None))
}
/**
* Defines a Java UDF8 instance as user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF8[_, _, _, _, _, _, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF8[Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any)
SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(8)(None))
}
/**
* Defines a Java UDF9 instance as user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF9[_, _, _, _, _, _, _, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF9[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any)
SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(9)(None))
}
/**
* Defines a Java UDF10 instance as user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF10[_, _, _, _, _, _, _, _, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF10[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any)
SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(10)(None))
}
// scalastyle:on parameter.number
// scalastyle:on line.size.limit
/**
 * Defines a user-defined function (UDF) using a Scala closure. For this variant,
 * the caller must specify the output data type, and there is no automatic input type coercion.
 * By default the returned UDF is deterministic. To change it to nondeterministic, call the
 * API `UserDefinedFunction.asNondeterministic()`.
 *
 * Note that, although the Scala closure can have primitive-type function argument, it doesn't
 * work well with null values. Because the Scala closure is passed in as Any type, there is no
 * type information for the function arguments. Without the type information, Spark may blindly
 * pass null to the Scala closure with primitive-type argument, and the closure will see the
 * default value of the Java type for the null argument, e.g. `udf((x: Int) => x, IntegerType)`,
 * the result is 0 for null input.
 *
 * @param f A closure in Scala
 * @param dataType The output data type of the UDF
 *
 * @group udf_funcs
 * @since 2.0.0
 */
@deprecated("Scala `udf` method with return type parameter is deprecated. " +
  "Please use Scala `udf` method without return type parameter.", "3.0.0")
def udf(f: AnyRef, dataType: DataType): UserDefinedFunction = {
  // Untyped Scala UDFs are unsafe with primitive-typed arguments (see the note above),
  // so they are rejected unless the legacy escape-hatch configuration is enabled.
  if (!SQLConf.get.getConf(SQLConf.LEGACY_ALLOW_UNTYPED_SCALA_UDF)) {
    val errorMsg = "You're using untyped Scala UDF, which does not have the input type " +
      "information. Spark may blindly pass null to the Scala closure with primitive-type " +
      "argument, and the closure will see the default value of the Java type for the null " +
      "argument, e.g. `udf((x: Int) => x, IntegerType)`, the result is 0 for null input. " +
      "To get rid of this error, you could:\\n" +
      "1. use typed Scala UDF APIs(without return type parameter), e.g. `udf((x: Int) => x)`\\n" +
      "2. use Java UDF APIs, e.g. `udf(new UDF1[String, Integer] { " +
      "override def call(s: String): Integer = s.length() }, IntegerType)`, " +
      "if input types are all non primitive\\n" +
      s"3. set ${SQLConf.LEGACY_ALLOW_UNTYPED_SCALA_UDF.key} to true and " +
      s"use this API with caution"
    throw new AnalysisException(errorMsg)
  }
  // No input encoders are available for an untyped closure.
  SparkUserDefinedFunction(f, dataType, inputEncoders = Nil)
}
/**
 * Call a user-defined function.
 * Example:
 * {{{
 *  import org.apache.spark.sql._
 *
 *  val df = Seq(("id1", 1), ("id2", 4), ("id3", 5)).toDF("id", "value")
 *  val spark = df.sparkSession
 *  spark.udf.register("simpleUDF", (v: Int) => v * v)
 *  df.select($"id", callUDF("simpleUDF", $"value"))
 * }}}
 *
 * @group udf_funcs
 * @since 1.5.0
 */
@scala.annotation.varargs
def callUDF(udfName: String, cols: Column*): Column = withExpr {
  // The function is referenced by name only (UnresolvedFunction); callUDF has no
  // DISTINCT form, hence isDistinct = false.
  UnresolvedFunction(udfName, cols.map(_.expr), isDistinct = false)
}
}
| spark-test/spark | sql/core/src/main/scala/org/apache/spark/sql/functions.scala | Scala | apache-2.0 | 172,771 |
package com.codacy.client.bitbucket.v1
import java.time.LocalDateTime
import play.api.libs.functional.syntax._
import play.api.libs.json._
/**
 * A Bitbucket repository as returned by the v1 REST API.
 *
 * Field names are snake_case to mirror the JSON payload. `owner` holds the
 * owner's username (nested under "owner" in the payload) and `url` the
 * repository's links, parsed from the payload's "links" object.
 */
case class Repository(name: String, full_name: String, description: String, scm: String,
                      created_on: LocalDateTime, updated_on: LocalDateTime, owner: String, size: Long,
                      has_issues: Boolean, is_private: Boolean, language: String,
                      url: Seq[RepositoryUrl])
object Repository {

  // Bitbucket serializes timestamps with or without fractional seconds; keep both patterns.
  val dateFormat = "yyyy-MM-dd'T'HH:mm:ss.SSSSSSXXX"
  val dateFormatWithoutMillis = "yyyy-MM-dd'T'HH:mm:ssXXX"

  // Try the fractional-second format first, then fall back to the plain one.
  implicit val dateTimeReads: Reads[LocalDateTime] =
    Reads.localDateTimeReads(dateFormat)
      .orElse(Reads.localDateTimeReads(dateFormatWithoutMillis))

  // JSON reader for a repository: the owner's username is read from the nested
  // "owner" object, and the "links" object is flattened into a Seq[RepositoryUrl].
  implicit val reader: Reads[Repository] = {
    ((__ \\ "name").read[String] and
      (__ \\ "full_name").read[String] and
      (__ \\ "description").read[String] and
      (__ \\ "scm").read[String] and
      (__ \\ "created_on").read[LocalDateTime] and
      (__ \\ "updated_on").read[LocalDateTime] and
      (__ \\ "owner" \\ "username").read[String] and
      (__ \\ "size").read[Long] and
      (__ \\ "has_issues").read[Boolean] and
      (__ \\ "is_private").read[Boolean] and
      (__ \\ "language").read[String] and
      (__ \\ "links").read[Map[String, JsValue]].map(parseLinks)
    ) (Repository.apply _)
  }

  // Flattens the API's "links" object into typed URLs. Two shapes occur in the payload:
  //   - a single { "href": ... } object, keyed by a name that is itself the URL type
  //   - an array of { "name": ..., "href": ... } entries (presumably the "clone"
  //     link -- TODO confirm against an actual payload)
  // Entries whose name does not match a known RepositoryUrlType are dropped.
  private def parseLinks(links: Map[String, JsValue]): Seq[RepositoryUrl] = {
    links.flatMap {
      case (linkName, linkMap) =>
        // Shape 1: the value is a flat String -> String map containing "href".
        val simpleLinks = for {
          ref <- linkMap.asOpt[Map[String, String]]
          urlType <- RepositoryUrlType.find(linkName)
          linkUrl <- ref.get("href")
        } yield RepositoryUrl(urlType, linkUrl)
        // Shape 2: the value is an array of maps, each carrying its own "name" and "href".
        val complexLinks = for {
          refs <- linkMap.asOpt[Seq[Map[String, String]]].toSeq
          ref <- refs
          linkName <- ref.get("name")
          urlType <- RepositoryUrlType.find(linkName)
          linkUrl <- ref.get("href")
        } yield RepositoryUrl(urlType, linkUrl)
        simpleLinks ++ complexLinks
    }.toSeq
  }
}
/** The kinds of repository URL that can appear among a repository's links. */
object RepositoryUrlType extends Enumeration {
  val Https = Value("https")
  val Ssh = Value("ssh")

  /** Looks up the enumeration value whose name equals `urlType`, if any. */
  def find(urlType: String): Option[Value] =
    scala.util.Try(withName(urlType)).toOption
}
case class RepositoryUrl(urlType: RepositoryUrlType.Value, link: String)
| rtfpessoa/bitbucket-scala-client | src/main/scala/com/codacy/client/bitbucket/v1/Repository.scala | Scala | apache-2.0 | 2,350 |
import sbt._
import sbt.Keys._
import com.typesafe.sbt.SbtScalariform
import com.typesafe.sbt.SbtScalariform.ScalariformKeys.preferences
import scalariform.formatter.preferences._
import de.heikoseeberger.sbtheader.HeaderPlugin
import de.heikoseeberger.sbtheader.HeaderKey.headers
import de.heikoseeberger.sbtheader.license.Apache2_0
import Dependencies._
/**
 * Common build settings shared by all eventuate-tools modules.
 *
 * As an `AutoPlugin` triggered on `allRequirements`, it is enabled automatically
 * for every project and contributes artifact, compiler, test, dependency,
 * resolver, publishing, formatting and license-header settings.
 */
object EventuateToolsBuildPlugin extends AutoPlugin {

  // Enable on every project without an explicit enablePlugins call.
  override def trigger = allRequirements

  // Only active once the header and scalariform plugins are present.
  override def requires = HeaderPlugin && SbtScalariform

  override def projectSettings =
    artifactSettings ++
      compileSettings ++
      testSettings ++
      dependencySettings ++
      resolverSettings ++
      publishSettings ++
      formatSettings ++
      headerSettings

  val artifactSettings = Seq(
    organization := "com.rbmhtechnology.eventuate-tools"
  )

  val compileSettings = Seq(
    scalaVersion := "2.11.8",
    javacOptions += "-Xlint:unchecked",
    scalacOptions ++= Seq("-deprecation", "-feature", "-language:existentials", "-language:postfixOps"),
    autoAPIMappings := true
  )

  // Run tests in a forked JVM, isolated from the sbt process.
  val testSettings = Seq(
    fork in Test := true
  )

  val dependencySettings = Seq(
    libraryDependencies ++=
      eventuate ++
      (scalaTest ++
        eventuateLevelDb ++
        akkaTestKit).map(_ % Test),
    // Verify only md5 checksums when resolving dependencies.
    checksums in update := Seq("md5")
  )

  val formatSettings = SbtScalariform.scalariformSettings ++ Seq(
    preferences := preferences.value.setPreference(AlignSingleLineCaseStatements, true)
  )

  private val header = Apache2_0("2016", "Red Bull Media House GmbH <http://www.redbullmediahouse.com> - all rights reserved.")

  // Prepend the Apache 2.0 license header to all Scala sources.
  val headerSettings = Seq(
    headers := Map("scala" -> header)
  )

  private val jfrogHost = "oss.jfrog.org"
  private val jfrogUri = s"https://$jfrogHost"
  private val jfrogPublish = s"$jfrogUri/artifactory"
  private val jfrogSnapshots = "oss-snapshot-local"
  private val jfrogReleases = "oss-release-local"

  // Publish snapshots/releases to OJO (oss.jfrog.org). Credentials are taken from
  // the OSS_JFROG_USER / OSS_JFROG_PASS environment variables (empty when unset).
  val publishSettings = Seq(
    credentials += Credentials(
      "Artifactory Realm",
      jfrogHost,
      sys.env.getOrElse("OSS_JFROG_USER", ""),
      sys.env.getOrElse("OSS_JFROG_PASS", "")
    ),
    publishTo := {
      if (isSnapshot.value)
        Some("Publish OJO Snapshots" at s"$jfrogPublish/$jfrogSnapshots")
      else
        Some("Publish OJO Releases" at s"$jfrogPublish/$jfrogReleases")
    }
  )

  val resolverSettings = Seq(
    resolvers ++= Seq(
      "OJO Snapshots" at s"$jfrogUri/$jfrogSnapshots",
      "Eventuate Releases" at "https://dl.bintray.com/rbmhtechnology/maven"
    )
  )
}
| RBMHTechnology/eventuate-tools | project/EventuateToolsBuildPlugin.scala | Scala | apache-2.0 | 2,539 |
package monocle.macros.internal
import scala.reflect.internal.TreeGen
/**
 * Version-compatibility shims over the blackbox macro API.
 *
 * Each method forwards to the Scala 2.11 spelling of an operation whose name or
 * signature differs between Scala versions (e.g. `decls` vs the older
 * `declarations`), so macro implementations can be written once against these
 * helpers.
 */
trait MacrosCompatibility {

  type Context = scala.reflect.macros.blackbox.Context

  /** All member declarations of `tpe` (2.11: `Type.decls`). */
  def getDeclarations(c: Context)(tpe: c.universe.Type): c.universe.MemberScope =
    tpe.decls

  /** Parameter lists of `method` (2.11: `MethodSymbol.paramLists`). */
  def getParameterLists(c: Context)(method: c.universe.MethodSymbol): List[List[c.universe.Symbol]] =
    method.paramLists

  /** The declaration named `name` inside `tpe` (2.11: `Type.decl`). */
  def getDeclaration(c: Context)(tpe: c.universe.Type, name: c.universe.Name): c.universe.Symbol =
    tpe.decl(name)

  /** Builds a term name (2.11: `TermName` factory). */
  def createTermName(c: Context)(name: String): c.universe.TermName =
    c.universe.TermName(name)

  /** Builds a type name (2.11: `TypeName` factory). */
  def createTypeName(c: Context)(name: String): c.universe.TypeName =
    c.universe.TypeName(name)

  /** Strips local symbols/types from `tree` (2.11: `untypecheck`). */
  def resetLocalAttrs(c: Context)(tree: c.Tree): c.Tree =
    c.untypecheck(tree)

  /** Standard term names (2.11: `termNames`). */
  def getTermNames(c: Context): c.universe.TermNamesApi =
    c.universe.termNames

  /** Companion symbol of `tpe`'s type symbol (2.11: `Symbol.companion`). */
  def companionTpe(c: Context)(tpe: c.universe.Type): c.universe.Symbol =
    tpe.typeSymbol.companion

  /** Attributed qualifier tree for `tpe`; the casts bridge the path-dependent `global` types. */
  def makeAttributedQualifier(c: Context)(tree: TreeGen, tpe: c.universe.Type): c.universe.Tree =
    tree.mkAttributedQualifier(tpe.asInstanceOf[tree.global.Type]).asInstanceOf[c.universe.Tree]
}
| malcolmgreaves/Monocle | macro/src/main/scala-2.11/monocle.macros.internal/MacrosCompatibility.scala | Scala | mit | 1,167 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js API **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package scala.scalajs.js
/** Marker trait for top-level objects representing the JS global scope.
*
* When calling method on a top-level object or package object that is a
* subtype of GlobalScope, the receiver is dropped, and the JavaScript global
* scope is used instead.
*
* @see [[http://www.scala-js.org/doc/calling-javascript.html Calling JavaScript from Scala.js]]
*/
// Pure marker trait: it has no members and is never instantiated from Scala code;
// the Scala.js compiler special-cases subtypes (see the scaladoc above).
@native
trait GlobalScope extends Any
| mdedetrich/scala-js | library/src/main/scala/scala/scalajs/js/GlobalScope.scala | Scala | bsd-3-clause | 953 |
/**
 * A solution to Euler problem 6: the difference between the square of the sum
 * and the sum of the squares of the first n natural numbers.
 */
object Problem_6 {

  /**
   * Computes (1 + 2 + ... + n)^2 - (1^2 + 2^2 + ... + n^2) with exact Long
   * arithmetic. The original used math.pow, which returns a Double and made
   * the program print "2.516415E7" instead of the integer answer 25164150.
   *
   * @param n inclusive upper bound of the range (generalizes the hard-coded 100)
   * @return the sum-square difference as an exact integer
   */
  def sumSquareDifference(n: Int): Long = {
    val sum = (1 to n).foldLeft(0L)(_ + _)
    val sumOfSquares = (1 to n).foldLeft(0L)((acc, i) => acc + i.toLong * i)
    sum * sum - sumOfSquares
  }

  def main(args: Array[String]): Unit = {
    println(sumSquareDifference(100))
  }
}
| chetaldrich/euler | scala/6/6.scala | Scala | mit | 213 |
package im.actor.server.migrations
import akka.actor.ActorSystem
import akka.util.Timeout
import im.actor.server.group.GroupErrors.NoBotFound
import im.actor.server.group.{ GroupExtension, GroupOffice, GroupViewRegion }
import im.actor.server.{ KeyValueMappings, persist }
import shardakka.keyvalue.SimpleKeyValue
import shardakka.{ IntCodec, ShardakkaExtension }
import slick.driver.PostgresDriver
import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
/**
 * One-off migration that copies each group's integration token into the
 * Shardakka key-value store (token -> groupId). A missing bot or token for a
 * single group is logged and skipped; any other failure aborts the migration.
 */
object IntegrationTokenMigrator extends Migration {
  override protected def migrationName: String = "2015-08-21-IntegrationTokenMigration"
  override protected def migrationTimeout: Duration = 1.hour
  protected override def startMigration()(
    implicit
    system: ActorSystem,
    db:     PostgresDriver.api.Database,
    ec:     ExecutionContext
  ): Future[Unit] = {
    implicit val kv = ShardakkaExtension(system).simpleKeyValue[Int](KeyValueMappings.IntegrationTokens, IntCodec)
    implicit val viewRegion = GroupExtension(system).viewRegion
    // Load all group ids, migrate each in parallel, and collapse to Future[Unit].
    db.run(persist.GroupRepo.findAllIds) flatMap { ids ⇒
      system.log.debug("Going to migrate integration tokens for groups: {}", ids)
      Future.sequence(ids map (groupId ⇒ migrateSingle(groupId) recover {
        case NoBotFound ⇒
          // Tolerated: the group simply has no bot, so there is nothing to migrate.
          system.log.warning("No bot found for groupId: {}", groupId)
        case e ⇒
          // Any other error is rethrown so the overall migration fails.
          system.log.error(e, "Failed to migrate token for groupId: {}", groupId)
          throw e
      }))
    } map (_ ⇒ ())
  }
  /** Copies one group's token into the KV store; a group without a token is logged and skipped. */
  private def migrateSingle(groupId: Int)(implicit system: ActorSystem, kv: SimpleKeyValue[Int]): Future[Unit] = {
    import system.dispatcher
    implicit val timeout = Timeout(40.seconds)
    for {
      optToken ← GroupExtension(system).getIntegrationToken(groupId)
      _ ← optToken map { token ⇒ kv.upsert(token, groupId) } getOrElse {
        system.log.warning("Could not find integration token in group {}", groupId)
        Future.successful(())
      }
    } yield {
      // NOTE(review): logs success even when the token was absent and nothing was upserted.
      system.log.info("Integration token migrated for group {}", groupId)
      ()
    }
  }
}
| ljshj/actor-platform | actor-server/actor-core/src/main/scala/im/actor/server/migrations/IntegrationTokenMigrator.scala | Scala | mit | 2,096 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.directed
import cc.factorie._
import cc.factorie.model._
import cc.factorie.variable.{MutableVar, Var}
/**
 * A factor in a directed graphical model: connects one child variable to the
 * parent variables that generate it, and can score (pr/logpr) and sample the
 * child's value.
 */
trait DirectedFactor extends Factor {
  type ChildType <: Var
  /** The generated ("child") variable of this factor. */
  def child: ChildType
  /** The variables that generate the child. */
  def parents: Seq[Var]
  //def pr(s:StatisticsType): Double
  /** Probability of the child's current value given the parents' current values. */
  def pr: Double // = pr(statistics)
  //def logpr(s:StatisticsType): Double = math.log(pr(s))
  def logpr: Double = math.log(pr) // logpr(statistics)
  //def score: Double = logpr
  //def sampledValue(s:StatisticsType): Any
  /** Draws a value for the child from this factor's distribution. */
  def sampledValue(implicit random: scala.util.Random): Any // = sampledValue(statistics)
  // TODO Consider removing these methods because we'd have specialized code in the inference recipes.
  /** Update sufficient statistics in collapsed parents, using current value of child, with weight. Return false on failure. */
  // TODO Consider passing a second argument which is the value of the child to use in the update
  def updateCollapsedParents(weight:Double): Boolean = throw new Error(factorName+": Collapsing parent not implemented in " + this.getClass.getName)
  def updateCollapsedChild(): Boolean = throw new Error(factorName+": Collapsing child not implemented.")
  def resetCollapsedChild(): Boolean = throw new Error(factorName+": Resetting child not implemented.")
}
/** Syntax wrapper enabling `variable ~ Distribution(...)` to attach a generating factor. */
class GeneratedVarWrapper[V<:Var](val v:V) {
  /** Create a new DirectedFactor, make it the "parent" generating factor for this variable,
      and add this new factor to the given model. */
  def ~[V2<:Var](partialFactor: V2 => DirectedFactor)(implicit model:MutableDirectedModel): V = {
    // asInstanceOf bridges V to the factor's expected child type V2; unchecked at compile time.
    model += partialFactor(v.asInstanceOf[V2])
    v
  }
}
/** Syntax wrapper enabling `variable :~ Distribution(...)`: attach a factor and sample an initial value. */
class GeneratedMutableVarWrapper[V<:MutableVar](val v:V) {
  /** Create a new DirectedFactor, make it the "parent" generating factor for this variable,
      add this new factor to the given model,
      and also assign the variable a new value randomly drawn from this factor. */
  def :~[V2<:Var](partialFactor: V2 => DirectedFactor)(implicit model:MutableDirectedModel, random: scala.util.Random): V = {
    model += partialFactor(v.asInstanceOf[V2])
    // Draw an initial value from the newly registered parent factor (null DiffList: no undo tracking).
    v.set(model.parentFactor(v).sampledValue.asInstanceOf[v.Value])(null)
    v
  }
}
/** A directed factor whose child value is a Double, with typed sampling and scoring. */
trait RealGeneratingFactor extends DirectedFactor {
  def sampleDouble: Double
  def pr(x:Double): Double
  def logpr(x:Double): Double
}
/** A directed factor whose child value is an Int, with typed sampling and scoring. */
trait IntGeneratingFactor extends DirectedFactor {
  def sampleInt: Int
  def pr(x:Int): Double
  def logpr(x:Int): Double
}
/** Directed factor over a single (parentless) child variable; subclasses supply pr and sampledValue. */
abstract class DirectedFactorWithStatistics1[C<:Var](override val _1:C) extends FactorWithStatistics1[C](_1) with DirectedFactor {
  type ChildType = C
  def child = _1
  def parents: Seq[Var] = Nil
  // Factor score is the log-probability of the child's value.
  def score(v1:C#Value): Double = logpr(v1:C#Value)
  def pr(v1:C#Value): Double
  def logpr(v1:C#Value): Double = math.log(pr(v1))
  def pr: Double = pr(_1.value.asInstanceOf[C#Value])
  override def sampledValue(implicit random: scala.util.Random): C#Value
}
/** Directed factor with one child (_1) generated by one parent (_2); subclasses supply pr and sampledValue. */
abstract class DirectedFactorWithStatistics2[C<:Var,P1<:Var](override val _1:C, override val _2:P1) extends TupleFactorWithStatistics2[C,P1](_1, _2) with DirectedFactor {
  type ChildType = C
  def child = _1
  def parents = Seq(_2)
  def score(v1:C#Value, v2:P1#Value): Double = logpr(v1, v2)
  def pr(v1:C#Value, v2:P1#Value): Double
  def logpr(v1:C#Value, v2:P1#Value): Double = math.log(pr(v1, v2))
  // Convenience overloads that read the variables' current values.
  def pr: Double = pr(_1.value.asInstanceOf[C#Value], _2.value.asInstanceOf[P1#Value])
  def sampledValue(p1:P1#Value)(implicit random: scala.util.Random): C#Value
  def sampledValue(implicit random: scala.util.Random): C#Value = sampledValue(_2.value.asInstanceOf[P1#Value])
  // TODO Consider this:
  //def parents = _2 match { case vars:Vars[Parameter] => vars; case _ => Seq(_2) }
}
/** Directed factor with one child (_1) generated by two parents (_2, _3). */
abstract class DirectedFactorWithStatistics3[C<:Var,P1<:Var,P2<:Var](override val _1:C, override val _2:P1, override val _3:P2) extends TupleFactorWithStatistics3[C,P1,P2](_1, _2, _3) with DirectedFactor {
  type ChildType = C
  def child = _1
  def parents = Seq(_2, _3)
  def score(v1:C#Value, v2:P1#Value, v3:P2#Value): Double = logpr(v1, v2, v3)
  def pr(v1:C#Value, v2:P1#Value, v3:P2#Value): Double
  def logpr(v1:C#Value, v2:P1#Value, v3:P2#Value): Double = math.log(pr(v1, v2, v3))
  // Convenience overloads that read the variables' current values.
  def pr: Double = pr(_1.value.asInstanceOf[C#Value], _2.value.asInstanceOf[P1#Value], _3.value.asInstanceOf[P2#Value])
  def sampledValue(p1:P1#Value, p2:P2#Value)(implicit random: scala.util.Random): C#Value
  def sampledValue(implicit random: scala.util.Random): C#Value = sampledValue(_2.value.asInstanceOf[P1#Value], _3.value.asInstanceOf[P2#Value])
}
/** Directed factor with one child (_1) generated by three parents (_2, _3, _4). */
abstract class DirectedFactorWithStatistics4[C<:Var,P1<:Var,P2<:Var,P3<:Var](override val _1:C, override val _2:P1, override val _3:P2, override val _4:P3) extends TupleFactorWithStatistics4[C,P1,P2,P3](_1, _2, _3, _4) with DirectedFactor {
  type ChildType = C
  def child = _1
  def parents = Seq(_2, _3, _4)
  def score(v1:C#Value, v2:P1#Value, v3:P2#Value, v4:P3#Value): Double = logpr(v1, v2, v3, v4)
  def pr(v1:C#Value, v2:P1#Value, v3:P2#Value, v4:P3#Value): Double
  def logpr(v1:C#Value, v2:P1#Value, v3:P2#Value, v4:P3#Value): Double = math.log(pr(v1, v2, v3, v4))
  // Convenience overloads that read the variables' current values.
  def pr: Double = pr(_1.value.asInstanceOf[C#Value], _2.value.asInstanceOf[P1#Value], _3.value.asInstanceOf[P2#Value], _4.value.asInstanceOf[P3#Value])
  def sampledValue(p1:P1#Value, p2:P2#Value, p3:P3#Value)(implicit random: scala.util.Random): C#Value
  def sampledValue(implicit random: scala.util.Random): C#Value = sampledValue(_2.value.asInstanceOf[P1#Value], _3.value.asInstanceOf[P2#Value], _4.value.asInstanceOf[P3#Value])
}
/** Family of parentless directed factors; `apply()` yields a child => Factor constructor for the `~` syntax. */
trait DirectedFamily1[Child<:Var] {
  type C = Child
  abstract class Factor(override val _1:Child) extends DirectedFactorWithStatistics1[C](_1)
  def newFactor(c:C): Factor
  def apply(): C => Factor = newFactor(_)
}
/** Family of one-parent directed factors; `apply(p1)` partially applies the parent for the `~` syntax. */
trait DirectedFamily2[Child<:Var,Parent1<:Var] {
  type C = Child
  type P1 = Parent1
  abstract class Factor(override val _1:Child, override val _2:Parent1) extends DirectedFactorWithStatistics2[C,P1](_1, _2)
  def newFactor(c:C, p1:P1): Factor
  def apply(p1: P1): C => Factor = newFactor(_, p1)
}
/** Family of two-parent directed factors; `apply(p1, p2)` partially applies the parents for the `~` syntax. */
trait DirectedFamily3[Child<:Var,Parent1<:Var,Parent2<:Var] {
  type C = Child
  type P1 = Parent1
  type P2 = Parent2
  abstract class Factor(override val _1:Child, override val _2:Parent1, override val _3:Parent2) extends DirectedFactorWithStatistics3[C,P1,P2](_1, _2, _3)
  def newFactor(c:C, p1:P1, p2:P2): Factor
  def apply(p1: P1, p2: P2): C => Factor = newFactor(_, p1, p2)
}
/** Family of three-parent directed factors; `apply(p1, p2, p3)` partially applies the parents for the `~` syntax. */
trait DirectedFamily4[Child<:Var,Parent1<:Var,Parent2<:Var,Parent3<:Var] {
  type C = Child
  type P1 = Parent1
  type P2 = Parent2
  type P3 = Parent3
  abstract class Factor(override val _1:Child, override val _2:Parent1, override val _3:Parent2, override val _4:Parent3) extends DirectedFactorWithStatistics4[C,P1,P2,P3](_1, _2, _3, _4)
  def newFactor(c:C, p1:P1, p2:P2, p3:P3): Factor
  def apply(p1: P1, p2: P2, p3: P3): C => Factor = newFactor(_, p1, p2, p3)
}
| hlin117/factorie | src/main/scala/cc/factorie/directed/DirectedFactor.scala | Scala | apache-2.0 | 7,673 |
package geek.lawsof.physics.lib.util.handler
import cpw.mods.fml.common.eventhandler.SubscribeEvent
import cpw.mods.fml.common.gameevent.InputEvent.KeyInputEvent
import geek.lawsof.physics.lib.util.helpers.Log
import geek.lawsof.physics.{Reference, LawsOfPhysicsMod}
import net.minecraft.client.settings.KeyBinding
import org.lwjgl.input.Keyboard
import scala.collection.mutable
/**
* Created by anshuman on 22-07-2014.
*/
/**
 * Registry of the mod's keyboard bindings. Bindings are registered during
 * init() and their handlers are dispatched from Forge key-input events.
 */
object KeyBindings {
  private val bindings = mutable.MutableList.empty[KeyBindingBase]

  /** Registers a binding with an optional handler invoked when its key is pressed. */
  def registerKeyBinding(name: String, category: String, key: Int, onKeyPressed: () => Unit = () => {}) = bindings += new KeyBindingBase(name, category, key, onKeyPressed)

  def onModeSwitchKeyPressed() = {
    //todo switch modes or something
  }

  /** Registers this object on the FML event bus and sets up the default bindings. */
  def init() = {
    Log("Register KeyBindings And KeyInputHandler")
    FMLEventBus += this
    registerKeyBinding("key.modeSwitch", s"key.category.${Reference.MOD_ID}", Keyboard.KEY_M, onModeSwitchKeyPressed)
  }

  @SubscribeEvent
  def onKeyPressed(evt: KeyInputEvent) =
    // BUG FIX: `onKeyPressed` is a `() => Unit` val, so the original
    // `foreach(_.onKeyPressed)` only *read* the function and discarded it —
    // handlers were never invoked. It must be applied with ().
    bindings.filter(_.getIsKeyPressed).foreach(_.onKeyPressed())
}
class KeyBindingBase(name: String, category: String, key: Int, val onKeyPressed: () => Unit = () => {}) extends KeyBinding(name, key, category) | GeckoTheGeek42/TheLawsOfPhysics | src/main/scala/geek/lawsof/physics/lib/util/handler/KeyBindings.scala | Scala | mit | 1,249 |
package toguru.helpers
import java.util.concurrent.TimeoutException
import akka.actor.Scheduler
import akka.pattern.after
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.FiniteDuration
/** Ready-made instance so callers can use `FutureTimeout.timeout(...)` without mixing in the trait. */
object FutureTimeout extends FutureTimeout {
}
trait FutureTimeout {
  /**
   * Wraps `future` with a deadline: the returned future completes with
   * `future`'s result if it finishes within `deadline`, otherwise it fails
   * with a TimeoutException fired via the provided akka scheduler.
   * Note: the underlying future is not cancelled on timeout.
   */
  def timeout[T](deadline: FiniteDuration, future: Future[T])(implicit ec: ExecutionContext, scheduler: Scheduler) : Future[T] = {
    val expiry = after(deadline, scheduler)(Future.failed(new TimeoutException()))
    Future.firstCompletedOf(Seq(future, expiry))
  }
}
| andreas-schroeder/toguru | app/toguru/helpers/FutureTimeout.scala | Scala | mit | 584 |
package org.andrewconner.spot.core
import java.util.concurrent.TimeoutException
import akka.util.ByteString
import play.api.Logger
import play.api.libs.iteratee.Execution.Implicits._
import play.api.libs.streams.Accumulator
import play.api.mvc._
import scala.concurrent.Future
import scala.concurrent.duration.Duration
import scalaz.concurrent.Task
/**
 * A Play Action whose handler is expressed as a scalaz Task instead of a
 * Future. `applyT` is the Task-typed entry point; the Future-based Play
 * methods are implemented by running the Task.
 */
trait TaskAction[A] extends Action[A] {
  /** Task-typed handler to be implemented by concrete actions. */
  def applyT(request: Request[A]): Task[Result]

  // Implementation:
  def apply(request: Request[A]): Future[Result] = {
    applyT(request).runFuture()
  }

  // Parses the body, then either short-circuits with the parser's error Result
  // or runs the Task handler on the parsed request.
  override def apply(rh: RequestHeader): Accumulator[ByteString, Result] = {
    parser(rh).mapFuture { ei =>
      val result = ei match {
        case Left(r) => Task.now(r)
        case Right(a) => applyT(Request(rh, a))
      }
      val itFut = result.runFuture()
      itFut
    }(defaultExecutionContext)
  }
}
/**
 * Task-based analogue of Play's ActionFunction: transforms requests of type R
 * into requests of type P around a Task-returning block. The Future-based
 * ActionFunction API is bridged by converting Task <-> Future.
 */
trait TaskActionFunction[-R[_], +P[_]] extends ActionFunction[R, P] { self =>
  def invokeBlockT[A](request: R[A], block: P[A] => Task[Result]): Task[Result]

  def invokeBlock[A](request: R[A], block: P[A] => Future[Result]): Future[Result] = {
    invokeBlockT(request, block.andThen(_.asTask)).runFuture()
  }

  // Composes with another ActionFunction, staying in Task-land when the other
  // side is also Task-based, otherwise round-tripping through Future.
  override def andThen[Q[_]](other: ActionFunction[P, Q]): ActionFunction[R, Q] = new TaskActionFunction[R, Q] {
    def invokeBlockT[A](request: R[A], block: Q[A] => Task[Result]) = {
      other match {
        case t: TaskActionFunction[P, Q] =>
          self.invokeBlockT[A](request, t.invokeBlockT[A](_, block))
        case o: ActionFunction[P, Q] =>
          self.invokeBlock[A](request, o.invokeBlock[A](_, block.andThen(_.runFuture()))).asTask
      }
    }
  }
}
/**
 * Task-based analogue of Play's ActionBuilder: the `task(...)` overloads
 * build Actions from Task-returning blocks; the `applyT(...)` overloads wrap
 * plain Result-returning blocks. Subclasses can hook parser/action
 * composition via composeParser/composeAction.
 */
trait TaskActionBuilder[+R[_]] extends TaskActionFunction[Request, R] { self =>
  // Task APIs
  def applyT[A](bodyParser: BodyParser[A])(block: R[A] => Result): Action[A] = task(bodyParser) { req: R[A] =>
    Task.fork(Task.now(block(req)))
  }

  def applyT(block: R[AnyContent] => Result): Action[AnyContent] = applyT(BodyParsers.parse.default)(block)
  def applyT(block: => Result): Action[AnyContent] = applyT(_ => block)
  def task(block: => Task[Result]): Action[AnyContent] = task(_ => block)
  def task(block: R[AnyContent] => Task[Result]): Action[AnyContent] = task(BodyParsers.parse.default)(block)

  // Core builder: wraps the block in a TaskAction, catching NotImplementedError /
  // LinkageError (fatal errors Task would otherwise not surface as failures).
  def task[A](bodyParser: BodyParser[A])(block: R[A] => Task[Result]): Action[A] = composeAction(new TaskAction[A] {
    def parser = composeParser(bodyParser)

    override def apply(request: Request[A]) = try {
      // This is opinionated in that the blocks should be run in another thread. This may not be what you want, though.
      Task.fork(invokeBlockT(request, block)).runFuture()
    } catch {
      case e: NotImplementedError => throw new RuntimeException(e)
      case e: LinkageError => throw new RuntimeException(e)
    }

    def applyT(request: Request[A]) = try {
      invokeBlockT(request, block)
    } catch {
      case e: NotImplementedError => throw new RuntimeException(e)
      case e: LinkageError => throw new RuntimeException(e)
    }
  })

  // Extension points for subclasses; identity by default.
  protected def composeParser[A](bodyParser: BodyParser[A]): BodyParser[A] = bodyParser
  protected def composeAction[A](action: Action[A]): Action[A] = action

  // Composition preserves this builder's parser/action hooks.
  override def andThen[Q[_]](other: ActionFunction[R, Q]): TaskActionBuilder[Q] = new TaskActionBuilder[Q] {
    def invokeBlockT[A](request: Request[A], block: Q[A] => Task[Result]) = {
      other match {
        case t: TaskActionFunction[R, Q] =>
          self.invokeBlockT[A](request, t.invokeBlockT[A](_, block))
        case o: ActionFunction[R, Q] =>
          self.invokeBlock[A](request, o.invokeBlock[A](_, block.andThen(_.runFuture()))).asTask
      }
    }

    override protected def composeParser[A](bodyParser: BodyParser[A]): BodyParser[A] = self.composeParser(bodyParser)
    override protected def composeAction[A](action: Action[A]): Action[A] = self.composeAction(action)
  }
}
/** Default builder that simply invokes the block on the incoming request. */
object TaskAction extends TaskActionBuilder[Request] {
  // NOTE(review): `logger` appears unused within this object.
  private val logger = Logger(Action.getClass)

  def invokeBlockT[A](request: Request[A], block: (Request[A]) => Task[Result]) = block(request)
}
/**
 * Task-based refiner: `refine` either rejects the request with a Result
 * (left) or produces a refined request (right) that is passed to the block.
 */
trait TaskActionRefiner[-R[_], +P[_]] extends TaskActionFunction[R, P] {
  protected def refine[A](request: R[A]): Task[scalaz.\/[Result, P[A]]]

  final def invokeBlockT[A](request: R[A], block: P[A] => Task[Result]) = {
    refine(request).flatMap(_.fold(Task.now, block))
  }
}
/** Refiner that always succeeds: `transform` maps the request and never rejects. */
trait TaskActionTransformer[-R[_], +P[_]] extends TaskActionRefiner[R, P] {
  protected def transform[A](request: R[A]): Task[P[A]]

  final def refine[A](request: R[A]) =
    transform(request).map(scalaz.\/-(_))
}
/** Refiner that keeps the request type: `filter` returns Some(rejection) to block the request, None to let it through. */
trait TaskActionFilter[R[_]] extends TaskActionRefiner[R, R] {
  protected def filter[A](request: R[A]): Task[Option[Result]]

  final protected def refine[A](request: R[A]) =
    filter(request).map {
      case Some(rejection) => scalaz.-\/(rejection)
      case None => scalaz.\/-(request)
    }
}
/**
 * Builder that bounds each block's execution with `timeout`; on expiry it
 * responds with Ok("timedout") instead of propagating the failure.
 * NOTE(review): unsafePerformTimed blocks the calling thread up to `timeout`,
 * and the stack trace is printed rather than logged.
 */
case class TimedTaskAction(timeout: Duration) extends TaskActionBuilder[Request] {
  def invokeBlockT[A](request: Request[A], block: (Request[A] => Task[Result])) = {
    block(request).unsafePerformTimed(timeout).handle {
      case t: TimeoutException =>
        t.printStackTrace()
        Results.Ok("timedout")
    }
  }
} | andrewconner/spotsy | app/org/andrewconner/spot/core/TaskActions.scala | Scala | mit | 5,235 |
/*******************************************************************************
* Copyright (c) 2019. Carl Minden
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package com.anathema_roguelike
package entities.items.weapons.types
import com.anathema_roguelike.entities.items.weapons.WeaponMaterial
import com.anathema_roguelike.entities.items.weapons.WoodWeaponMaterial
// A ranged weapon type whose material is restricted to wood (see getMaterialType).
// Note: `range: Double` in the super-constructor call is a type ascription on the
// `range` argument, not a new declaration.
case class Crossbow(name: String, weight: Double, attackSpeed: Double, damage: Double, range: Double)
  extends RangedWeaponType(name, weight, attackSpeed, damage, range: Double) {

  override def getMaterialType: Class[_ <: WeaponMaterial] = classOf[WoodWeaponMaterial]
} | carlminden/anathema-roguelike | src/com/anathema_roguelike/entities/items/weapons/types/Crossbow.scala | Scala | gpl-3.0 | 1,329 |
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.lang._
/**
 * Leon verification benchmark: a hand-rolled Option with flatMap/add, plus the
 * monad laws stated as boolean functions ending in `.holds` (Leon proves each
 * returns true for all inputs). The bodies are the proof obligations; they are
 * not meant to be executed.
 */
object Monads2 {
  abstract class Option[T]
  case class Some[T](t: T) extends Option[T]
  case class None[T]() extends Option[T]

  def flatMap[T,U](opt: Option[T], f: T => Option[U]): Option[U] = opt match {
    case Some(x) => f(x)
    case None() => None()
  }

  // Left-biased choice: returns o1 unless it is None.
  def add[T](o1: Option[T], o2: Option[T]): Option[T] = o1 match {
    case Some(x) => o1
    case None() => o2
  }

  // Monad law: (m >>= f) >>= g == m >>= (\x -> f x >>= g)
  def associative_law[T,U,V](opt: Option[T], f: T => Option[U], g: U => Option[V]): Boolean = {
    flatMap(flatMap(opt, f), g) == flatMap(opt, (x: T) => flatMap(f(x), g))
  }.holds

  // Monad law: return x >>= f == f x
  def left_unit_law[T,U](x: T, f: T => Option[U]): Boolean = {
    flatMap(Some(x), f) == f(x)
  }.holds

  // Monad law: m >>= return == m
  def right_unit_law[T,U](opt: Option[T]): Boolean = {
    flatMap(opt, (x: T) => Some(x)) == opt
  }.holds

  // None is a left zero of flatMap.
  def flatMap_zero_law[T,U](none: None[T], f: T => Option[U]): Boolean = {
    flatMap(none, f) == None[U]()
  }.holds

  // A constant-None continuation is a right zero of flatMap.
  def flatMap_to_zero_law[T,U](opt: Option[T]): Boolean = {
    flatMap(opt, (x: T) => None[U]()) == None[U]()
  }.holds

  // None is a right identity of add.
  def add_zero_law[T](opt: Option[T]): Boolean = {
    add(opt, None[T]()) == opt
  }.holds

  // None is a left identity of add.
  def zero_add_law[T](opt: Option[T]): Boolean = {
    add(None[T](), opt) == opt
  }.holds
}
// vim: set ts=4 sw=4 et:
| regb/leon | src/test/resources/regression/verification/purescala/valid/Monads2.scala | Scala | gpl-3.0 | 1,299 |
/*
* Shadowsocks - A shadowsocks client for Android
* Copyright (C) 2014 <max.c.lv@gmail.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* ___====-_ _-====___
* _--^^^#####// \\\\#####^^^--_
* _-^##########// ( ) \\\\##########^-_
* -############// |\\^^/| \\\\############-
* _/############// (@::@) \\\\############\\_
* /#############(( \\\\// ))#############\\
* -###############\\\\ (oo) //###############-
* -#################\\\\ / VV \\ //#################-
* -###################\\\\/ \\//###################-
* _#/|##########/\\######( /\\ )######/\\##########|\\#_
* |/ |#/\\#/\\#/\\/ \\#/\\##\\ | | /##/\\#/ \\/\\#/\\#/\\#| \\|
* ` |/ V V ` V \\#\\| | | |/#/ V ' V V \\| '
* ` ` ` ` / | | | | \\ ' ' ' '
* ( | | | | )
* __\\ | | | | /__
* (vvv(VVV)(VVV)vvv)
*
* HERE BE DRAGONS
*
*/
package com.github.shadowsocks
import android.os.{Message, Handler, Bundle}
import android.app.{ProgressDialog, AlertDialog, Activity}
import android.content.{Intent, DialogInterface}
import com.github.shadowsocks.database.{ProfileManager, Profile}
import com.github.shadowsocks.utils.{Parser, Action}
import android.preference.PreferenceManager
import android.view.WindowManager
import android.graphics.Color
import android.graphics.drawable.ColorDrawable
/**
 * Popup-styled activity invoked with a shadowsocks URI in its Intent data.
 * It shows a confirmation dialog; on "yes" the URI is parsed into a Profile
 * and stored, then the activity finishes.
 */
class ParserActivity extends Activity {
  override def onCreate(savedInstanceState: Bundle) {
    super.onCreate(savedInstanceState)
    showAsPopup(this)
    // The launching Intent is expected to carry the profile URI as its data.
    val data = getIntent.getData.toString
    new AlertDialog.Builder(this)
      .setTitle(R.string.add_profile_dialog)
      .setCancelable(false)
      .setPositiveButton(R.string.yes, new DialogInterface.OnClickListener() {
        override def onClick(dialog: DialogInterface, id: Int) {
          // Unparseable input is silently ignored; the dialog just closes.
          Parser.parse(data) match {
            case Some(profile) => addProfile(profile)
            case _ => // ignore
          }
          dialog.dismiss()
        }
      })
      .setNegativeButton(R.string.no, new DialogInterface.OnClickListener() {
        override def onClick(dialog: DialogInterface, id: Int) {
          dialog.dismiss()
          finish()
        }
      })
      .setMessage(data)
      .create()
      .show()
  }

  /** Styles the activity window as a dimmed, transparent popup over the caller. */
  def showAsPopup(activity: Activity) {
    activity.getWindow.setFlags(WindowManager.LayoutParams.FLAG_DIM_BEHIND,
      WindowManager.LayoutParams.FLAG_DIM_BEHIND)
    val params = activity.getWindow.getAttributes
    params.alpha = 1.0f
    params.dimAmount = 0.5f
    activity.getWindow.setAttributes(params.asInstanceOf[android.view.WindowManager.LayoutParams])
    activity.getWindow.setBackgroundDrawable(new ColorDrawable(Color.TRANSPARENT))
  }

  /** Persists the profile after a short delay, then signals the progress handler to close. */
  def addProfile(profile: Profile) {
    val h = showProgress(getString(R.string.loading))
    // 600 ms delay keeps the progress dialog visible briefly before the work runs.
    h.postDelayed(new Runnable {
      def run() {
        val profileManager =
          new ProfileManager(PreferenceManager.getDefaultSharedPreferences(getBaseContext),
            getApplication.asInstanceOf[ShadowsocksApplication].dbHelper)
        profileManager.createOrUpdateProfile(profile)
        profileManager.reload(profile.id)
        h.sendEmptyMessage(0)
      }
    }, 600)
  }

  /** Shows an indeterminate progress dialog; any message on the returned Handler dismisses it and finishes the activity. */
  private def showProgress(msg: String): Handler = {
    val progressDialog = ProgressDialog.show(this, "", msg, true, false)
    new Handler {
      override def handleMessage(msg: Message) {
        progressDialog.dismiss()
        finish()
      }
    }
  }
}
| azraelrabbit/shadowsocks-android | src/main/scala/com/github/shadowsocks/ParserActivity.scala | Scala | gpl-3.0 | 4,318 |
package lang.lambda.num
import lang.lambda.Exp
import name.namegraph.NameGraphExtended
import name.Renaming
/**
* Created by seba on 01/08/14.
*/
/** Addition node of the lambda-with-numbers language; operations recurse structurally into both operands. */
case class Add(e1: Exp, e2: Exp) extends Exp {
  def allNames = e1.allNames ++ e2.allNames

  def rename(renaming: Renaming) = Add(e1.rename(renaming), e2.rename(renaming))

  // Name resolution of a sum is the combination of both operands' name graphs.
  def resolveNames(scope: Scope) = {
    val g1 = e1.resolveNames(scope)
    val g2 = e2.resolveNames(scope)
    g1 + g2
  }

  def unsafeSubst(w: String, e: Exp) = Add(e1.unsafeSubst(w, e), e2.unsafeSubst(w, e))

  // Constant-folds when both operands normalize to numerals; otherwise rebuilds the sum.
  def unsafeNormalize = (e1.unsafeNormalize, e2.unsafeNormalize) match {
    case (Num(v1), Num(v2)) => Num(v1 + v2)
    case (v1, v2) => Add(v1, v2)
  }

  // Alpha-equivalence holds only against another Add with pairwise-equivalent operands.
  def alphaEqual(e: Exp, g: NameGraphExtended) = e match {
    case Add(e3, e4) => e1.alphaEqual(e3, g) && e2.alphaEqual(e4, g)
    case _ => false
  }
} | matthisk/hygienic-transformations | scala/src/main/scala/lang/lambda/num/Add.scala | Scala | lgpl-3.0 | 844 |
package models
import org.joda.time.DateTime
import java.sql.Timestamp
import play.api.db.slick.DatabaseConfigProvider
import slick.driver.JdbcProfile
import play.api.Play.current
/**
* Created by hluu on 3/18/16.
*/
/** Shared Slick column mappings; currently maps Joda DateTime to/from SQL Timestamp. */
object SlickMapping {
  protected val dbConfig = DatabaseConfigProvider.get[JdbcProfile](current)

  import dbConfig._
  import dbConfig.driver.api._

  // Bidirectional mapping so DateTime columns can be used directly in Slick tables.
  implicit val jodaDateTimeMapping = {
    MappedColumnType.base[DateTime, Timestamp](
      dt => new Timestamp(dt.getMillis),
      ts => new DateTime(ts))
  }
}
| hluu/todos | app/models/SlickMapping.scala | Scala | mit | 535 |
package hevs.especial.generator
import hevs.especial.dsl.components.bool
import hevs.especial.dsl.components.core.Constant
import hevs.especial.dsl.components.target.stm32stk.Stm32stkIO
/**
* Switch the `led1` to ON using a constant value.
* Use the extension board.
*
* @version 1.0
* @author Christopher Metrailler (mei@hevs.ch)
*/
class Sch0Code extends STM32TestSuite {

  def isQemuLoggerEnabled = false

  /** DSL program under test: a constant `true` wired to led1's input. */
  def runDslCode(): Unit = {
    // Input
    val cst1 = Constant(bool(true))

    // Output
    val led1 = Stm32stkIO.led1

    // Connecting stuff
    cst1.out --> led1.in

    // Same as :
    // Constant(true).out --> Stm32stk.led0.in
  }

  // Test pipeline: dot graph, code checks, optimizer, optimized graph, code generation.
  runDotGeneratorTest()

  runCodeCheckerTest()

  runCodeOptimizer()
  runDotGeneratorTest(optimizedVersion = true)

  runCodeGenTest()
} | hevs-isi/especial-frontend | src/test/scala/hevs/especial/generator/Sch0Code.scala | Scala | mit | 801 |
package com.tirthal.learning.scala.features
// Syntax of Pattern Matching in Scala
object PatternMatching extends App {
// (1) ---> Switch like behavior
// Scala has no Switch statement, instead it uses Match Expressions / Pattern Matching
// Flexible match conditions - it can match anything including even objects
// It has no break and a case match is an expression (can assign value)
val day = "ABC"
val dayType = day match {
case "Monday" | "Tuesday" | "Wednesday" | "Thurday" | "Friday" => "working day"
case "Saturday" | "Sunday" => "holiday"
case _ => "unknown" // optional default catch every value at the end using wildcard(_)
}
println(day + " is " + dayType)
// (2) ---> What is pattern?
//
// In below syntax, pattern can be wildcard (_), variable name, literal equality, constructor match, deconstructor match, type query patterns, a pattern with alternatives
//
// value match {
// case pattern guard => expression
// case ...
// case _ => expression
// }
// --- Example - Pattern can be any variable name i.e. x
val str = "tirthal"
str match {
case x => println("Hello " + x)
}
// --- Example - Literal Matches (value must exactly matched with the Literal case i.e. match using ==)
val language = "Gujarati"
language match {
case "Engish" => println("How are you?")
case "gujarati" => println("Kem cho?")
case "Gujarati" => println("Kem cho? Majama?")
case _ => println("Hi")
}
// --- Example - Constructor Patterns (allow you to match on the arguments used to construct an object) works on "case" class only
val color = new Color(1, "Red", "RC")
color match {
case Color(_, "Red", _) => println("This is red color") // Match if name is "Red" in 2nd argument of Color constructor
case Color(_, _, _) => println("???") // "_" indicates that you don't care what the values are
}
// --- Example - Type Queries (used for convenient type checks and type casts)
def generalSize(x: Any): Int = x match {
case s: String => s.length // 's' can only be a 'String'
case m: Map[_, _] => m.size // 'm' can only be a 'Map'
case _ => -1 // for any other type, return -1
}
println { generalSize("Tirthal") } //> Int = 7
println { generalSize(Map(1 -> "one", 2 -> "two")) } //> Int = 2
println { generalSize(math.Pi) } //> Int = -1
println { generalSize(List("A1", "A2", "A3")) } //> Int = -1
// (3) ---> What is guard?
// A pattern guard comes after a pattern and starts with an if & the pattern matches only if the guard evaluates to true
// Example syntax -
// match only positive integers --- case n: Int if 0 < n => n + " is positive"
// match only strings starting with the letter 'a' --- case s: String if s(0) == 'a' => s + " starts with letter 'a'"
}
// Case classes are regular classes which export their constructor parameters and which provide a recursive decomposition mechanism via pattern matching
case class Color(id: Int, name: String, code: String)
| tirthalpatel/Learning-Scala | ScalaQuickStart/src/main/scala/com/tirthal/learning/scala/features/PatternMatching.scala | Scala | mit | 3,275 |
package demo.helloworld
import _root_.java.io.File
import _root_.org.scalatest.FunSuite
import _root_.scala.xml.XML
import _root_.net.liftweb.common.Full
import _root_.net.liftweb.util.PCDataXmlParser
/**
* Unit test for simple App.
*/
/**
 * Unit tests for the hello-world demo project.
 */
class AppTest extends FunSuite {

  /**
   * Rigorous Tests :-)
   */
  test("OK") {
    assert(true === true)
  }

  // Intentionally failing example, kept disabled via ignore.
  ignore("KO") {
    assert(true === false)
  }

  /**
   * Tests to make sure the project's XML files are well-formed.
   *
   * Finds every *.html and *.xml file in src/main/webapp (and its
   * subdirectories) and tests to make sure they are well-formed.
   */
  test("xml") {
    var failed: List[File] = Nil

    def handledXml(file: String) =
      file.endsWith(".xml")

    def handledXHtml(file: String) =
      file.endsWith(".html") || file.endsWith(".htm") || file.endsWith(".xhtml")

    /** Recursively checks a file/directory, accumulating malformed files in `failed`. */
    def wellFormed(file: File) {
      if (file.isDirectory)
        for (f <- file.listFiles) wellFormed(f)

      if (file.isFile && handledXml(file.getName)) {
        try {
          XML.loadFile(file)
        } catch {
          case e: _root_.org.xml.sax.SAXParseException => failed = file :: failed
        }
      }
      if (file.isFile && handledXHtml(file.getName)) {
        // BUG FIX: the original leaked one FileInputStream per XHTML file
        // scanned (the stream was never closed). Close it in a finally block.
        val in = new java.io.FileInputStream(file.getAbsolutePath)
        try {
          PCDataXmlParser(in) match {
            case Full(_) => // file is ok
            case _ => failed = file :: failed
          }
        } finally {
          in.close()
        }
      }
    }

    wellFormed(new File("src/main/webapp"))

    val numFails = failed.size
    if (numFails > 0) {
      val fileStr = if (numFails == 1) "file" else "files"
      val msg = "Malformed XML in " + numFails + " " + fileStr + ": " + failed.mkString(", ")
      println(msg)
      fail(msg)
    }
  }
}
| scalatest/scalatest-maven-plugin | src/it/lift/src/test/scala/demo/helloworld/AppTest.scala | Scala | apache-2.0 | 1,723 |
package com.twitter.finagle.mux
import org.specs.SpecificationWithJUnit
import org.jboss.netty.buffer.ChannelBuffers
import com.twitter.finagle.tracing
/**
 * Specs for the mux wire protocol codec: encode/decode round-trips, plus
 * rejection of out-of-range tags and malformed buffers.
 */
class ProtoSpec extends SpecificationWithJUnit {
  "Message" should {
    val body = ChannelBuffers.wrappedBuffer(Array[Byte](1,2,3,4))
    import Message._

    // Round-trip property over a sample of message kinds; 8388607 = 2^23 - 1,
    // the largest tag accepted below.
    "d(e(m)) == m" in {
      val msgs = Seq(
        Treq(1, Some(tracing.Trace.nextId), body),
        Treq(1, None, body),
        RreqOk(1, body),
        Treq(123, Some(tracing.Trace.nextId), body),
        RreqOk(123, body),
        Treq(8388607, Some(tracing.Trace.nextId), body),
        RreqOk(8388607, body),
        Tdrain(1),
        Rdrain(123),
        Tdiscarded(391, "because i felt like it")
      )

      for (m <- msgs)
        decode(encode(m)) must be_==(m)
    }

    // Tags must lie in [1, 2^23 - 1]; anything else is rejected at encode time.
    "not encode invalid messages" in {
      encode(Treq(-1, Some(tracing.Trace.nextId), body)) must throwA(
        BadMessageException("invalid tag number -1"))
      encode(Treq(0, Some(tracing.Trace.nextId), body)) must throwA(
        BadMessageException("invalid tag number 0"))
      encode(Treq(1<<24, Some(tracing.Trace.nextId), body)) must throwA(
        BadMessageException("invalid tag number 16777216"))
    }

    // Too-short buffers and unknown message types fail at decode time.
    "not decode invalid messages" in {
      decode(ChannelBuffers.EMPTY_BUFFER) must throwA(
        BadMessageException("short message"))
      decode(ChannelBuffers.wrappedBuffer(Array[Byte](0, 0, 0, 1))) must throwA(
        BadMessageException("bad message type: 0 [tag=1]"))
    }
  }
}
| joshbedo/finagle | finagle-mux/src/test/scala/com/twitter/finagle/mux/ProtoSpec.scala | Scala | apache-2.0 | 1,518 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.models
import org.bdgenomics.formats.avro.{ Genotype, DatabaseVariantAnnotation, Variant }
import org.bdgenomics.adam.rich.RichVariant
/**
* Note: VariantContext inherits its name from the Picard VariantContext, and is not related to the SparkContext object.
* If you're looking for the latter, see [[org.bdgenomics.adam.rdd.variation.VariationContext]]
*/
object VariantContext {

  /**
   * Produces a new variant context by merging with an optional annotation.
   *
   * If the existing context doesn't have an annotation, pick the new
   * annotation, if present. If both exist, then merge.
   *
   * @param v An existing VariantContext to annotate.
   * @param optAnn An optional annotation to add.
   * @return A new VariantContext, where an annotation has been added/merged.
   */
  def apply(v: VariantContext,
            optAnn: Option[DatabaseVariantAnnotation]): VariantContext = {

    // if the join yielded one or fewer annotation, pick what we've got. else, merge.
    val ann = (v.databases, optAnn) match {
      case (None, a)          => a
      case (a, None)          => a
      case (Some(a), Some(b)) => Some(mergeAnnotations(a, b))
    }

    // copy all fields except for the annotation from the input context
    new VariantContext(v.position,
      v.variant,
      v.genotypes,
      ann)
  }

  /**
   * Greedily merges two annotation records by filling empty fields.
   *
   * Merges two records by taking the union of all fields. If a field is
   * populated in both records, its value will be taken from the left
   * record.
   *
   * @param leftRecord First record to merge. If fields are seen in both
   *                   records, the value in this record will win.
   * @param rightRecord Second record to merge. Used to populate missing fields
   *                    from the left record.
   * @return Returns the union of these two annotations.
   */
  def mergeAnnotations(leftRecord: DatabaseVariantAnnotation,
                       rightRecord: DatabaseVariantAnnotation): DatabaseVariantAnnotation = {
    // start from a full copy of the left record; only fields it left unset
    // may be filled in from the right record below
    val mergedAnnotation = DatabaseVariantAnnotation.newBuilder(leftRecord)
      .build()

    val numFields = DatabaseVariantAnnotation.getClassSchema.getFields.size

    // copy a field from the right record only if the left record did not set
    // it; this implements the documented "left record wins" conflict
    // resolution (previously the right record's non-null values clobbered
    // already-populated fields from the left record)
    def insertField(fieldIdx: Int): Unit = {
      val value = rightRecord.get(fieldIdx)
      if (value != null && mergedAnnotation.get(fieldIdx) == null) {
        mergedAnnotation.put(fieldIdx, value)
      }
    }

    (0 until numFields).foreach(insertField(_))

    mergedAnnotation
  }

  /**
   * Constructs an VariantContext from locus data. Used in merger process.
   *
   * @param kv Nested tuple containing (locus on reference, (variants at site, genotypes at site,
   *           optional domain annotation at site))
   * @return VariantContext corresponding to the data above.
   */
  def apply(kv: (ReferencePosition, Variant, Iterable[Genotype], Option[DatabaseVariantAnnotation])): VariantContext = {
    new VariantContext(kv._1, kv._2, kv._3, kv._4)
  }

  /**
   * Constructs an VariantContext from an Variant
   *
   * @param v Variant which is used to construct the ReferencePosition
   * @return VariantContext corresponding to the Variant
   */
  def apply(v: Variant): VariantContext = {
    apply((ReferencePosition(v), v, Seq(), None))
  }

  /**
   * Constructs an VariantContext from an Variant and Seq[Genotype]
   * and DatabaseVariantAnnotation
   *
   * @param v Variant which is used to construct the ReferencePosition
   * @param genotypes Seq[Genotype]
   * @param annotation Option[DatabaseVariantAnnotation]
   * @return VariantContext corresponding to the Variant
   */
  def apply(v: Variant, genotypes: Iterable[Genotype], annotation: Option[DatabaseVariantAnnotation] = None): VariantContext = {
    apply((ReferencePosition(v), v, genotypes, annotation))
  }

  /**
   * Builds a variant context off of a set of genotypes. Builds variants from the genotypes.
   *
   * @note Genotypes must be at the same position.
   *
   * @param genotypes List of genotypes to build variant context from.
   * @return A variant context corresponding to the variants and genotypes at this site.
   * @throws IllegalArgumentException If the genotype list is empty or genotypes disagree on position.
   */
  def buildFromGenotypes(genotypes: Seq[Genotype]): VariantContext = {
    require(genotypes.nonEmpty, "Cannot build variant context from empty genotype list.")
    val position = ReferencePosition(genotypes.head)
    assert(
      genotypes.map(ReferencePosition(_)).forall(_ == position),
      "Genotypes do not all have the same position."
    )

    val variant = genotypes.head.getVariant

    new VariantContext(position, variant, genotypes, None)
  }
}
/**
 * Aggregate of the variation data observed at a single reference locus:
 * the variant itself, the genotypes called against it, and an optional
 * database annotation record.
 *
 * @param position Locus on the reference.
 * @param variant The variant at this site (implicitly enriched as RichVariant).
 * @param genotypes Genotype calls for this variant.
 * @param databases Optional merged database annotation for this site.
 */
class VariantContext(
    val position: ReferencePosition,
    val variant: RichVariant,
    val genotypes: Iterable[Genotype],
    val databases: Option[DatabaseVariantAnnotation] = None) {
}
| tdanford/adam | adam-core/src/main/scala/org/bdgenomics/adam/models/VariantContext.scala | Scala | apache-2.0 | 5,475 |
package io.iohk.ethereum.vm
import io.iohk.ethereum.vm.utils.EvmTestEnv
import org.scalatest.matchers.should.Matchers
import org.scalatest.freespec.AnyFreeSpec
import io.iohk.ethereum.domain.UInt256
// scalastyle:off magic.number
/** EVM integration test: a contract that performs a message call back to itself. */
class ContractCallingItselfSpec extends AnyFreeSpec with Matchers {

  "EVM running ContractCallingItself contract" - {

    "should handle a call to itself" in new EvmTestEnv {
      val (_, contract) = deployContract("ContractCallingItself")

      // state variable starts at 10 (set by the contract's constructor/initializer --
      // contract source not visible here, TODO confirm)
      contract.getSomeVar().call().returnData shouldBe UInt256(10).bytes

      // the self-call must complete without an EVM error
      val result = contract.callSelf().call()
      result.error shouldBe None

      // after the self-call the state variable is observed as 20
      contract.getSomeVar().call().returnData shouldBe UInt256(20).bytes
    }
  }
}
| input-output-hk/etc-client | src/evmTest/scala/io/iohk/ethereum/vm/ContractCallingItselfSpec.scala | Scala | mit | 717 |
/**
* Copyright 2013-2015, AlwaysResolve Project (alwaysresolve.org), MOYD.CO LTD
* This file incorporates work covered by the following copyright and permission notice:
*
* Copyright 2012 silenteh
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models
import records.AAAA
import com.fasterxml.jackson.annotation.JsonProperty
import com.fasterxml.jackson.annotation.JsonIgnoreProperties
import utils.HostnameUtils
import scala.util.Random
import configs.ConfigService
import org.slf4j.LoggerFactory
@JsonIgnoreProperties(Array("typ"))
case class IPv6AddressHost(
  @JsonProperty("class") cls: String = null,
  @JsonProperty("name") name: String = null,
  @JsonProperty("value") ips: Array[WeightedIP] = null,
  @JsonProperty("ttl") timetolive: Long
) extends Host("AAAA") {

  val logger = LoggerFactory.getLogger("app")
  val randomizeRecords = ConfigService.config.getBoolean("randomizeRecords")

  def setName(newname: String) = IPv6AddressHost(cls, newname, ips, timetolive)

  override def toAbsoluteNames(domain: ExtendedDomain) =
    IPv6AddressHost(cls, HostnameUtils.absoluteHostName(name, domain.fullName), ips, timetolive)

  // equality ignores TTL and weights: same class, name, and set of IP strings
  override def equals(other: Any) = other match {
    case h: IPv6AddressHost => h.cls == cls && h.name == name && h.ips.forall(wip => ips.exists(_.ip == wip.ip))
    case _ => false
  }

  protected def getRData =
    if (ips.size == 1) {
      logger.debug("Single AAAA")
      ips(0).weightIP.map(ip => new AAAA(ipToBytes(ip), timetolive))
    } else if (randomizeRecords) {
      // Multiple weighted values under one name (a single record should carry
      // several weighted values rather than duplicate records): expand every
      // weighted IP and answer with one record picked at random.
      logger.debug("Collapsing duplicate weighted AAAAs")
      val list = ips.map(wip => wip.weightIP.map(ip => new AAAA(ipToBytes(ip), timetolive))).flatten.toList
      Array[AAAA](Random.shuffle(list).head)
    } else ips.map(wip => wip.weightIP.map(ip => new AAAA(ipToBytes(ip), timetolive))).flatten

  /**
   * Converts a textual IPv6 address into its 16-byte wire representation.
   * Handles both the full 8-group form and the "::" zero-compressed form.
   */
  private def ipToBytes(ip: String) = {
    val parts =
      if (ip.count(_ == ':') == 7) ip.split(""":""").map(part => if (part != "") part else "0")
      else {
        // Zero-compressed form: split with limit -1 so trailing empty groups
        // are kept, then pad the "::" gap with the missing "0" groups.
        // (The previous implementation used the default split, which drops
        // trailing empty strings, so addresses such as "fe80::" were
        // mis-parsed -- the compressed zeros ended up before the groups
        // instead of after them.)
        val raw = ip.split(":", -1)
        val gap = raw.indexOf("")
        val partsLeft = raw.take(gap).filterNot(_.isEmpty)
        val partsRight = raw.drop(gap).filterNot(_.isEmpty)
        partsLeft ++ Array.fill(8 - partsLeft.length - partsRight.length)("0") ++ partsRight
      }
    hexToBytes(parts.map(completeString(_)).mkString)
  }

  // Decodes a hex string (two characters per byte) into bytes, tail-recursively.
  private def hexToBytes(hexstr: String, bytes: Array[Byte] = Array()): Array[Byte] =
    if (hexstr.length == 0) bytes
    else hexToBytes(hexstr.takeRight(hexstr.length - 2),
      bytes :+ ((Character.digit(hexstr.head, 16) << 4) +
        Character.digit(hexstr.tail.head, 16)).toShort.toByte)

  // Left-pads a hex group with zeros to the full 4-digit width.
  private def completeString(str: String): String =
    if (str.length >= 4) str else completeString("0" + str)
} | Moydco/AlwaysResolveDNS | src/main/scala/models/IPv6AddressHost.scala | Scala | apache-2.0 | 3,443 |
/* Title: Pure/PIDE/document.scala
Author: Makarius
Document as collection of named nodes, each consisting of an editable
list of commands, associated with asynchronous execution process.
*/
package isabelle
import scala.collection.mutable
object Document
{
/** document structure **/
/* overlays -- print functions with arguments */
object Overlays
{
val empty = new Overlays(Map.empty)
}
final class Overlays private(rep: Map[Node.Name, Node.Overlays])
{
def apply(name: Document.Node.Name): Node.Overlays =
rep.getOrElse(name, Node.Overlays.empty)
private def update(name: Node.Name, f: Node.Overlays => Node.Overlays): Overlays =
{
val node_overlays = f(apply(name))
new Overlays(if (node_overlays.is_empty) rep - name else rep + (name -> node_overlays))
}
def insert(command: Command, fn: String, args: List[String]): Overlays =
update(command.node_name, _.insert(command, fn, args))
def remove(command: Command, fn: String, args: List[String]): Overlays =
update(command.node_name, _.remove(command, fn, args))
override def toString: String = rep.mkString("Overlays(", ",", ")")
}
/* document blobs: auxiliary files */
sealed case class Blob(bytes: Bytes, chunk: Symbol.Text_Chunk, changed: Boolean)
{
def unchanged: Blob = copy(changed = false)
}
object Blobs
{
def apply(blobs: Map[Node.Name, Blob]): Blobs = new Blobs(blobs)
val empty: Blobs = apply(Map.empty)
}
final class Blobs private(blobs: Map[Node.Name, Blob])
{
def get(name: Node.Name): Option[Blob] = blobs.get(name)
def changed(name: Node.Name): Boolean =
get(name) match {
case Some(blob) => blob.changed
case None => false
}
override def toString: String = blobs.mkString("Blobs(", ",", ")")
}
/* document nodes: theories and auxiliary files */
type Edit[A, B] = (Node.Name, Node.Edit[A, B])
type Edit_Text = Edit[Text.Edit, Text.Perspective]
type Edit_Command = Edit[Command.Edit, Command.Perspective]
  object Node
  {
    /* header and name */

    /** Theory header: imported nodes, keyword declarations, and accumulated errors. */
    sealed case class Header(
      imports: List[Name],
      keywords: Thy_Header.Keywords,
      errors: List[String])
    {
      // append a further error message, preserving earlier ones
      def error(msg: String): Header = copy(errors = errors ::: List(msg))

      // attach the given context message to every accumulated error
      def cat_errors(msg2: String): Header =
        copy(errors = errors.map(msg1 => Library.cat_message(msg1, msg2)))
    }

    val no_header = Header(Nil, Nil, Nil)
    def bad_header(msg: String): Header = Header(Nil, Nil, List(msg))

    object Name
    {
      val empty = Name("")

      // names are ordered by their node path alone
      object Ordering extends scala.math.Ordering[Name]
      {
        def compare(name1: Name, name2: Name): Int = name1.node compare name2.node
      }
    }

    /** Node name: identity (equals/hashCode) is the node path only;
        master_dir and theory are auxiliary information. */
    sealed case class Name(node: String, master_dir: String = "", theory: String = "")
    {
      override def hashCode: Int = node.hashCode
      override def equals(that: Any): Boolean =
        that match {
          case other: Name => node == other.node
          case _ => false
        }

      def is_theory: Boolean = !theory.isEmpty
      override def toString: String = if (is_theory) theory else node

      def map(f: String => String): Name = copy(f(node), f(master_dir), theory)
    }

    /* node overlays */

    object Overlays
    {
      val empty = new Overlays(Multi_Map.empty)
    }

    /** Per-command print functions with arguments; a command may carry several. */
    final class Overlays private(rep: Multi_Map[Command, (String, List[String])])
    {
      def commands: Set[Command] = rep.keySet
      def is_empty: Boolean = rep.isEmpty
      def dest: List[(Command, (String, List[String]))] = rep.iterator.toList
      def insert(cmd: Command, fn: String, args: List[String]): Overlays =
        new Overlays(rep.insert(cmd, (fn, args)))
      def remove(cmd: Command, fn: String, args: List[String]): Overlays =
        new Overlays(rep.remove(cmd, (fn, args)))

      override def toString: String = rep.mkString("Node.Overlays(", ",", ")")
    }

    /* edits */

    /** One edit operation on a node; A is the edit payload type, B the perspective type. */
    sealed abstract class Edit[A, B]
    {
      // iterate over the payloads of an Edits batch; no-op for other variants
      def foreach(f: A => Unit)
      {
        this match {
          case Edits(es) => es.foreach(f)
          case _ =>
        }
      }

      // an empty batch of edits changes nothing
      def is_void: Boolean =
        this match {
          case Edits(Nil) => true
          case _ => false
        }
    }
    case class Clear[A, B]() extends Edit[A, B]
    case class Blob[A, B](blob: Document.Blob) extends Edit[A, B]

    case class Edits[A, B](edits: List[A]) extends Edit[A, B]
    case class Deps[A, B](header: Header) extends Edit[A, B]
    case class Perspective[A, B](required: Boolean, visible: B, overlays: Overlays) extends Edit[A, B]

    /* perspective */

    type Perspective_Text = Perspective[Text.Edit, Text.Perspective]
    type Perspective_Command = Perspective[Command.Edit, Command.Perspective]

    val no_perspective_text: Perspective_Text =
      Perspective(false, Text.Perspective.empty, Overlays.empty)

    val no_perspective_command: Perspective_Command =
      Perspective(false, Command.Perspective.empty, Overlays.empty)

    def is_no_perspective_command(perspective: Perspective_Command): Boolean =
      !perspective.required &&
      perspective.visible.is_empty &&
      perspective.overlays.is_empty

    /* commands */

    object Commands
    {
      def apply(commands: Linear_Set[Command]): Commands = new Commands(commands)
      val empty: Commands = apply(Linear_Set.empty)

      // pair each command with its start offset, accumulating lengths from `offset`
      def starts(commands: Iterator[Command], offset: Text.Offset = 0)
        : Iterator[(Command, Text.Offset)] =
      {
        var i = offset
        for (command <- commands) yield {
          val start = i
          i += command.length
          (command, start)
        }
      }

      private val block_size = 256
    }

    final class Commands private(val commands: Linear_Set[Command])
    {
      // commands that refer to auxiliary file blobs
      lazy val load_commands: List[Command] =
        commands.iterator.filter(cmd => !cmd.blobs.isEmpty).toList

      // lookup table: for every block_size-wide stripe of text offsets, the first
      // command covering that stripe (with its start offset), plus the overall
      // covered text range -- used to start iterator(i) near offset i in O(1)
      private lazy val full_index: (Array[(Command, Text.Offset)], Text.Range) =
      {
        val blocks = new mutable.ListBuffer[(Command, Text.Offset)]
        var next_block = 0
        var last_stop = 0
        for ((command, start) <- Commands.starts(commands.iterator)) {
          last_stop = start + command.length
          while (last_stop + 1 > next_block) {
            blocks += (command -> start)
            next_block += Commands.block_size
          }
        }
        (blocks.toArray, Text.Range(0, last_stop))
      }

      private def full_range: Text.Range = full_index._2

      // iterate over (command, start) pairs beginning with the command covering offset i
      def iterator(i: Text.Offset = 0): Iterator[(Command, Text.Offset)] =
      {
        if (!commands.isEmpty && full_range.contains(i)) {
          val (cmd0, start0) = full_index._1(i / Commands.block_size)
          Node.Commands.starts(commands.iterator(cmd0), start0) dropWhile {
            case (cmd, start) => start + cmd.length <= i }
        }
        else Iterator.empty
      }
    }

    val empty: Node = new Node()
  }
  /** An immutable document node: either an auxiliary file blob, or a theory
      with header, editor perspective, and its sequence of commands. */
  final class Node private(
    val get_blob: Option[Document.Blob] = None,
    val header: Node.Header = Node.no_header,
    val perspective: Node.Perspective_Command = Node.no_perspective_command,
    _commands: Node.Commands = Node.Commands.empty)
  {
    // a node is empty when every component holds its neutral value
    def is_empty: Boolean =
      get_blob.isEmpty &&
      header == Node.no_header &&
      Node.is_no_perspective_command(perspective) &&
      commands.isEmpty

    def commands: Linear_Set[Command] = _commands.commands
    def load_commands: List[Command] = _commands.load_commands

    // drop everything except the header
    def clear: Node = new Node(header = header)

    // blob nodes are created with the change flag cleared
    def init_blob(blob: Document.Blob): Node = new Node(Some(blob.unchanged))

    def update_header(new_header: Node.Header): Node =
      new Node(get_blob, new_header, perspective, _commands)

    def update_perspective(new_perspective: Node.Perspective_Command): Node =
      new Node(get_blob, header, new_perspective, _commands)

    def same_perspective(other_perspective: Node.Perspective_Command): Boolean =
      perspective.required == other_perspective.required &&
      perspective.visible.same(other_perspective.visible) &&
      perspective.overlays == other_perspective.overlays

    // reference equality check avoids rebuilding the command index needlessly
    def update_commands(new_commands: Linear_Set[Command]): Node =
      if (new_commands eq _commands.commands) this
      else new Node(get_blob, header, perspective, Node.Commands(new_commands))

    def command_iterator(i: Text.Offset = 0): Iterator[(Command, Text.Offset)] =
      _commands.iterator(i)

    // commands that start strictly before range.stop, beginning at range.start
    def command_iterator(range: Text.Range): Iterator[(Command, Text.Offset)] =
      command_iterator(range.start) takeWhile { case (_, start) => start < range.stop }

    // linear scan for the start offset of the given command, if present
    def command_start(cmd: Command): Option[Text.Offset] =
      Node.Commands.starts(commands.iterator).find(_._1 == cmd).map(_._2)
  }
/* development graph */
  object Nodes
  {
    val empty: Nodes = new Nodes(Graph.empty(Node.Name.Ordering))
  }

  /** Development graph of nodes: vertices are node names, edges point from an
      imported node to the node importing it. */
  final class Nodes private(graph: Graph[Node.Name, Node])
  {
    // unknown names resolve to Node.empty
    def apply(name: Node.Name): Node =
      graph.default_node(name, Node.empty).get_node(name)

    // a node is hidden when it is empty and nothing depends on it
    def is_hidden(name: Node.Name): Boolean =
    {
      val graph1 = graph.default_node(name, Node.empty)
      graph1.is_maximal(name) && graph1.get_node(name).is_empty
    }

    // insert/update a node: make sure the node and its imports exist, replace the
    // incoming dependency edges by those of the new header, and delete the vertex
    // altogether if the node became empty with no successors
    def + (entry: (Node.Name, Node)): Nodes =
    {
      val (name, node) = entry
      val imports = node.header.imports
      val graph1 =
        (graph.default_node(name, Node.empty) /: imports)((g, p) => g.default_node(p, Node.empty))
      val graph2 = (graph1 /: graph1.imm_preds(name))((g, dep) => g.del_edge(dep, name))
      val graph3 = (graph2 /: imports)((g, dep) => g.add_edge(dep, name))
      new Nodes(
        if (graph3.is_maximal(name) && node.is_empty) graph3.del_node(name)
        else graph3.map_node(name, _ => node)
      )
    }

    def iterator: Iterator[(Node.Name, Node)] =
      graph.iterator.map({ case (name, (node, _)) => (name, node) })

    // all load commands (across every node) that refer to the given auxiliary file
    def load_commands(file_name: Node.Name): List[Command] =
      (for {
        (_, node) <- iterator
        cmd <- node.load_commands.iterator
        name <- cmd.blobs_names.iterator
        if name == file_name
      } yield cmd).toList

    def descendants(names: List[Node.Name]): List[Node.Name] = graph.all_succs(names)
    def topological_order: List[Node.Name] = graph.topological_order

    override def toString: String = topological_order.mkString("Nodes(", ",", ")")
  }
/** versioning **/
/* particular document versions */
  object Version
  {
    val init: Version = new Version()

    // each made version receives a fresh document id
    def make(syntax: Option[Prover.Syntax], nodes: Nodes): Version =
      new Version(Document_ID.make(), syntax, nodes)
  }

  /** An immutable snapshot of the whole document structure, identified by a
      unique version id. */
  final class Version private(
    val id: Document_ID.Version = Document_ID.none,
    val syntax: Option[Prover.Syntax] = None,
    val nodes: Nodes = Nodes.empty)
  {
    override def toString: String = "Version(" + id + ")"
  }
/* changes of plain text, eventually resulting in document edits */
  object Change
  {
    val init: Change = new Change()

    // note: edits are stored reversed (see rev_edits below)
    def make(previous: Future[Version], edits: List[Edit_Text], version: Future[Version]): Change =
      new Change(Some(previous), edits.reverse, version)
  }

  /** A step of document history: text edits leading from a previous version to
      the resulting one; both versions may still be under (asynchronous)
      construction. rev_edits is kept in reverse order of application. */
  final class Change private(
    val previous: Option[Future[Version]] = Some(Future.value(Version.init)),
    val rev_edits: List[Edit_Text] = Nil,
    val version: Future[Version] = Future.value(Version.init))
  {
    def is_finished: Boolean =
      (previous match { case None => true case Some(future) => future.is_finished }) &&
      version.is_finished

    // forget the predecessor and the edits, keeping only the resulting version
    def truncate: Change = new Change(None, Nil, version)
  }
/* history navigation */
  object History
  {
    val init: History = new History()
  }

  /** Linear edit history: most recent change first. */
  final class History private(
    val undo_list: List[Change] = List(Change.init))  // non-empty list
  {
    def tip: Change = undo_list.head
    def + (change: Change): History = new History(change :: undo_list)

    // Drop old changes beyond the first one satisfying `check` (but keep at
    // least `retain` entries); the last retained change is truncated so it no
    // longer references dropped predecessors. Returns the dropped changes and
    // the pruned history, or None if nothing remains to retain.
    // NOTE(review): `.get` assumes some change satisfies `check` -- callers
    // appear to guarantee this (cf. State.remove_versions); confirm.
    def prune(check: Change => Boolean, retain: Int): Option[(List[Change], History)] =
    {
      val n = undo_list.iterator.zipWithIndex.find(p => check(p._1)).get._2 + 1
      val (retained, dropped) = undo_list.splitAt(n max retain)

      retained.splitAt(retained.length - 1) match {
        case (prefix, List(last)) => Some(dropped, new History(prefix ::: List(last.truncate)))
        case _ => None
      }
    }
  }
/* snapshot */
  object Snapshot
  {
    val init = State.init.snapshot()
  }

  /** Persistent user view of one node at a stable version, together with the
      pending edits that have not yet been assimilated (see State.snapshot for
      the concrete implementation). */
  abstract class Snapshot
  {
    val state: State
    val version: Version
    // true if pending edits exist or the tip of the history is not yet stable
    val is_outdated: Boolean

    // map text offsets/ranges between the stable version (old) and the current
    // editor buffer (new): convert = old -> new, revert = new -> old
    def convert(i: Text.Offset): Text.Offset
    def revert(i: Text.Offset): Text.Offset
    def convert(range: Text.Range): Text.Range
    def revert(range: Text.Range): Text.Range

    val node_name: Node.Name
    val node: Node
    // commands (in other nodes) that load this node as an auxiliary file
    val load_commands: List[Command]
    def is_loaded: Boolean

    // structural equality of content and markup wrt. another snapshot
    def eq_content(other: Snapshot): Boolean

    // fold `result` over the markup of the given range, restricted to `elements`
    def cumulate[A](
      range: Text.Range,
      info: A,
      elements: Markup.Elements,
      result: List[Command.State] => (A, Text.Markup) => Option[A],
      status: Boolean = false): List[Text.Info[A]]

    def select[A](
      range: Text.Range,
      elements: Markup.Elements,
      result: List[Command.State] => Text.Markup => Option[A],
      status: Boolean = false): List[Text.Info[A]]
  }
/** global state -- document structure, execution process, editing history **/
type Assign_Update =
List[(Document_ID.Command, List[Document_ID.Exec])] // update of exec state assignment
  object State
  {
    /** Raised by State lookups on unknown ids/versions (see State.fail). */
    class Fail(state: State) extends Exception

    object Assignment
    {
      val init: Assignment = new Assignment()
    }

    /** Assignment of command ids to their execution ids for one version;
        mutations are only allowed while unfinished. */
    final class Assignment private(
      val command_execs: Map[Document_ID.Command, List[Document_ID.Exec]] = Map.empty,
      val is_finished: Boolean = false)
    {
      def check_finished: Assignment = { require(is_finished); this }
      def unfinished: Assignment = new Assignment(command_execs, false)

      // apply the prover's update and seal the assignment as finished;
      // an empty exec list removes the command's entry
      def assign(update: Assign_Update): Assignment =
      {
        require(!is_finished)
        val command_execs1 =
          (command_execs /: update) {
            case (res, (command_id, exec_ids)) =>
              if (exec_ids.isEmpty) res - command_id
              else res + (command_id -> exec_ids)
          }
        new Assignment(command_execs1, true)
      }
    }

    // initial state: the initial version with an empty, finished assignment
    val init: State =
      State().define_version(Version.init, Assignment.init).assign(Version.init.id, Nil)._2
  }
final case class State private(
/*reachable versions*/
val versions: Map[Document_ID.Version, Version] = Map.empty,
/*inlined auxiliary files*/
val blobs: Set[SHA1.Digest] = Set.empty,
/*static markup from define_command*/
val commands: Map[Document_ID.Command, Command.State] = Map.empty,
/*dynamic markup from execution*/
val execs: Map[Document_ID.Exec, Command.State] = Map.empty,
/*command-exec assignment for each version*/
val assignments: Map[Document_ID.Version, State.Assignment] = Map.empty,
/*commands with markup produced by other commands (imm_succs)*/
val commands_redirection: Graph[Document_ID.Command, Unit] = Graph.long,
/*explicit (linear) history*/
val history: History = History.init,
/*intermediate state between remove_versions/removed_versions*/
val removing_versions: Boolean = false)
{
private def fail[A]: A = throw new State.Fail(this)
def define_version(version: Version, assignment: State.Assignment): State =
{
val id = version.id
copy(versions = versions + (id -> version),
assignments = assignments + (id -> assignment.unfinished))
}
def define_blob(digest: SHA1.Digest): State = copy(blobs = blobs + digest)
def defined_blob(digest: SHA1.Digest): Boolean = blobs.contains(digest)
def define_command(command: Command): State =
{
val id = command.id
copy(commands = commands + (id -> command.init_state))
}
def defined_command(id: Document_ID.Command): Boolean = commands.isDefinedAt(id)
def find_command(version: Version, id: Document_ID.Generic): Option[(Node, Command)] =
commands.get(id) orElse execs.get(id) match {
case None => None
case Some(st) =>
val command = st.command
val node = version.nodes(command.node_name)
if (node.commands.contains(command)) Some((node, command)) else None
}
def the_version(id: Document_ID.Version): Version = versions.getOrElse(id, fail)
def the_static_state(id: Document_ID.Command): Command.State = commands.getOrElse(id, fail)
def the_dynamic_state(id: Document_ID.Exec): Command.State = execs.getOrElse(id, fail)
def the_assignment(version: Version): State.Assignment = assignments.getOrElse(version.id, fail)
private def self_id(st: Command.State)(id: Document_ID.Generic): Boolean =
id == st.command.id ||
(execs.get(id) match { case Some(st1) => st1.command.id == st.command.id case None => false })
private def other_id(id: Document_ID.Generic)
: Option[(Symbol.Text_Chunk.Id, Symbol.Text_Chunk)] = None
/* FIXME
(execs.get(id) orElse commands.get(id)).map(st =>
((Symbol.Text_Chunk.Id(st.command.id), st.command.chunk)))
*/
private def redirection(st: Command.State): Graph[Document_ID.Command, Unit] =
(commands_redirection /: st.markups.redirection_iterator)({ case (graph, id) =>
graph.default_node(id, ()).default_node(st.command.id, ()).add_edge(id, st.command.id) })
def accumulate(id: Document_ID.Generic, message: XML.Elem): (Command.State, State) =
{
execs.get(id) match {
case Some(st) =>
val new_st = st.accumulate(self_id(st), other_id _, message)
val execs1 = execs + (id -> new_st)
(new_st, copy(execs = execs1, commands_redirection = redirection(new_st)))
case None =>
commands.get(id) match {
case Some(st) =>
val new_st = st.accumulate(self_id(st), other_id _, message)
val commands1 = commands + (id -> new_st)
(new_st, copy(commands = commands1, commands_redirection = redirection(new_st)))
case None => fail
}
}
}
def assign(id: Document_ID.Version, update: Assign_Update): (List[Command], State) =
{
val version = the_version(id)
def upd(exec_id: Document_ID.Exec, st: Command.State)
: Option[(Document_ID.Exec, Command.State)] =
if (execs.isDefinedAt(exec_id)) None else Some(exec_id -> st)
val (changed_commands, new_execs) =
((Nil: List[Command], execs) /: update) {
case ((commands1, execs1), (command_id, exec)) =>
val st = the_static_state(command_id)
val command = st.command
val commands2 = command :: commands1
val execs2 =
exec match {
case Nil => execs1
case eval_id :: print_ids =>
execs1 ++ upd(eval_id, st) ++
(for (id <- print_ids; up <- upd(id, command.empty_state)) yield up)
}
(commands2, execs2)
}
val new_assignment = the_assignment(version).assign(update)
val new_state = copy(assignments = assignments + (id -> new_assignment), execs = new_execs)
(changed_commands, new_state)
}
def is_assigned(version: Version): Boolean =
assignments.get(version.id) match {
case Some(assgn) => assgn.is_finished
case None => false
}
def is_stable(change: Change): Boolean =
change.is_finished && is_assigned(change.version.get_finished)
def recent_finished: Change = history.undo_list.find(_.is_finished) getOrElse fail
def recent_stable: Change = history.undo_list.find(is_stable) getOrElse fail
def tip_stable: Boolean = is_stable(history.tip)
def tip_version: Version = history.tip.version.get_finished
def continue_history(
previous: Future[Version],
edits: List[Edit_Text],
version: Future[Version]): State =
{
val change = Change.make(previous, edits, version)
copy(history = history + change)
}
def remove_versions(retain: Int = 0): (List[Version], State) =
{
history.prune(is_stable, retain) match {
case Some((dropped, history1)) =>
val old_versions = dropped.map(change => change.version.get_finished)
val removing = !old_versions.isEmpty
val state1 = copy(history = history1, removing_versions = removing)
(old_versions, state1)
case None => fail
}
}
def removed_versions(removed: List[Document_ID.Version]): State =
{
val versions1 = versions -- removed
val assignments1 = assignments -- removed
var blobs1 = Set.empty[SHA1.Digest]
var commands1 = Map.empty[Document_ID.Command, Command.State]
var execs1 = Map.empty[Document_ID.Exec, Command.State]
for {
(version_id, version) <- versions1.iterator
command_execs = assignments1(version_id).command_execs
(_, node) <- version.nodes.iterator
command <- node.commands.iterator
} {
for ((_, digest) <- command.blobs_defined; if !blobs1.contains(digest))
blobs1 += digest
if (!commands1.isDefinedAt(command.id))
commands.get(command.id).foreach(st => commands1 += (command.id -> st))
for (exec_id <- command_execs.getOrElse(command.id, Nil)) {
if (!execs1.isDefinedAt(exec_id))
execs.get(exec_id).foreach(st => execs1 += (exec_id -> st))
}
}
copy(
versions = versions1,
blobs = blobs1,
commands = commands1,
execs = execs1,
commands_redirection = commands_redirection.restrict(commands1.isDefinedAt(_)),
assignments = assignments1,
removing_versions = false)
}
private def command_states_self(version: Version, command: Command)
: List[(Document_ID.Generic, Command.State)] =
{
require(is_assigned(version))
try {
the_assignment(version).check_finished.command_execs.getOrElse(command.id, Nil)
.map(id => id -> the_dynamic_state(id)) match {
case Nil => fail
case res => res
}
}
catch {
case _: State.Fail =>
try { List(command.id -> the_static_state(command.id)) }
catch { case _: State.Fail => List(command.id -> command.init_state) }
}
}
def command_states(version: Version, command: Command): List[Command.State] =
{
val self = command_states_self(version, command)
val others =
if (commands_redirection.defined(command.id)) {
(for {
command_id <- commands_redirection.imm_succs(command.id).iterator
(id, st) <- command_states_self(version, the_static_state(command_id).command)
if !self.exists(_._1 == id)
} yield (id, st)).toMap.valuesIterator.toList
}
else Nil
self.map(_._2) ::: others.flatMap(_.redirect(command))
}
def command_results(version: Version, command: Command): Command.Results =
Command.State.merge_results(command_states(version, command))
def command_markup(version: Version, command: Command, index: Command.Markup_Index,
range: Text.Range, elements: Markup.Elements): Markup_Tree =
Command.State.merge_markup(command_states(version, command), index, range, elements)
def markup_to_XML(version: Version, node: Node, elements: Markup.Elements): XML.Body =
(for {
command <- node.commands.iterator
markup =
command_markup(version, command, Command.Markup_Index.markup, command.range, elements)
tree <- markup.to_XML(command.range, command.source, elements)
} yield tree).toList
// persistent user-view
  // Build a persistent, self-contained view of one node at the most recent *stable*
  // (fully finished) version, together with the edits that are still pending on top of it.
  def snapshot(name: Node.Name = Node.Name.empty, pending_edits: List[Text.Edit] = Nil)
    : Snapshot =
  {
    // most recent change whose version is finished, and the current tip of the edit history
    val stable = recent_stable
    val latest = history.tip
    /* pending edits and unstable changes */
    // edits of this node accumulated since the stable change, newest first
    val rev_pending_changes =
      for {
        change <- history.undo_list.takeWhile(_ != stable)
        (a, edits) <- change.rev_edits
        if a == name
      } yield edits
    // a Node.Clear among the pending changes discards all edits for this node
    val is_cleared = rev_pending_changes.exists({ case Node.Clear() => true case _ => false })
    val edits =
      if (is_cleared) Nil
      else
        (pending_edits /: rev_pending_changes)({
          case (edits, Node.Edits(es)) => es ::: edits
          case (edits, _) => edits
        })
    lazy val reverse_edits = edits.reverse
    new Snapshot
    {
      /* global information */
      val state = State.this
      val version = stable.version.get_finished
      val is_outdated = !(pending_edits.isEmpty && latest == stable)
      /* local node content */
      // convert: stable-version offset -> current (edited) offset; revert: the inverse
      def convert(offset: Text.Offset) =
        if (is_cleared) 0 else (offset /: edits)((i, edit) => edit.convert(i))
      def revert(offset: Text.Offset) =
        if (is_cleared) 0 else (offset /: reverse_edits)((i, edit) => edit.revert(i))
      def convert(range: Text.Range) = range.map(convert(_))
      def revert(range: Text.Range) = range.map(revert(_))
      val node_name = name
      val node = version.nodes(name)
      val load_commands: List[Command] =
        if (node_name.is_theory) Nil
        else version.nodes.load_commands(node_name)
      val is_loaded: Boolean = node_name.is_theory || !load_commands.isEmpty
      // content equality of two snapshots: only meaningful when neither is outdated
      def eq_content(other: Snapshot): Boolean =
      {
        def eq_commands(commands: (Command, Command)): Boolean =
        {
          val states1 = state.command_states(version, commands._1)
          val states2 = other.state.command_states(other.version, commands._2)
          states1.length == states2.length &&
          (states1 zip states2).forall({ case (st1, st2) => st1 eq_content st2 })
        }
        !is_outdated && !other.is_outdated &&
        node.commands.size == other.node.commands.size &&
        (node.commands.iterator zip other.node.commands.iterator).forall(eq_commands) &&
        load_commands.length == other.load_commands.length &&
        (load_commands zip other.load_commands).forall(eq_commands)
      }
      /* cumulate markup */
      // Fold `result` over the markup of all commands intersecting `range`; offsets are
      // reverted into the stable version for lookup and converted back for the caller.
      def cumulate[A](
        range: Text.Range,
        info: A,
        elements: Markup.Elements,
        result: List[Command.State] => (A, Text.Markup) => Option[A],
        status: Boolean = false): List[Text.Info[A]] =
      {
        val former_range = revert(range).inflate_singularity
        // loaded files are represented by a single command spanning the whole file
        val (chunk_name, command_iterator) =
          load_commands match {
            case command :: _ => (Symbol.Text_Chunk.File(node_name.node), Iterator((command, 0)))
            case _ => (Symbol.Text_Chunk.Default, node.command_iterator(former_range))
          }
        val markup_index = Command.Markup_Index(status, chunk_name)
        (for {
          (command, command_start) <- command_iterator
          chunk <- command.chunks.get(chunk_name).iterator
          states = state.command_states(version, command)
          res = result(states)
          markup_range <- (former_range - command_start).try_restrict(chunk.range).iterator
          markup = Command.State.merge_markup(states, markup_index, markup_range, elements)
          Text.Info(r0, a) <- markup.cumulate[A](markup_range, info, elements,
            {
              case (a, Text.Info(r0, b)) => res(a, Text.Info(convert(r0 + command_start), b))
            }).iterator
          r1 <- convert(r0 + command_start).try_restrict(range).iterator
        } yield Text.Info(r1, a)).toList
      }
      // Like cumulate, but keeps only the first successful result per markup entry.
      def select[A](
        range: Text.Range,
        elements: Markup.Elements,
        result: List[Command.State] => Text.Markup => Option[A],
        status: Boolean = false): List[Text.Info[A]] =
      {
        def result1(states: List[Command.State]): (Option[A], Text.Markup) => Option[Option[A]] =
        {
          val res = result(states)
          (_: Option[A], x: Text.Markup) =>
            res(x) match {
              case None => None
              case some => Some(some)
            }
        }
        for (Text.Info(r, Some(x)) <- cumulate(range, None, elements, result1 _, status))
          yield Text.Info(r, x)
      }
      /* output */
      override def toString: String =
        "Snapshot(node = " + node_name.node + ", version = " + version.id +
          (if (is_outdated) ", outdated" else "") + ")"
    }
  }
}
}
| wneuper/libisabelle | pide/2014/src/main/scala/PIDE/document.scala | Scala | mit | 28,724 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.message
import java.nio._
import scala.math._
import kafka.utils._
/**
* Constants related to messages
*/
object Message {
  // On-disk/wire layout: [crc(4)][magic(1)][attributes(1)][key size(4)][key(K)][value size(4)][value(V)]
  /**
   * The current offset and size for all the fixed-length fields
   */
  val CrcOffset = 0
  val CrcLength = 4
  val MagicOffset = CrcOffset + CrcLength
  val MagicLength = 1
  val AttributesOffset = MagicOffset + MagicLength
  val AttributesLength = 1
  val KeySizeOffset = AttributesOffset + AttributesLength
  val KeySizeLength = 4
  val KeyOffset = KeySizeOffset + KeySizeLength
  val ValueSizeLength = 4
  /** The amount of overhead bytes in a message */
  // i.e. everything except the variable-length key and value themselves
  val MessageOverhead = KeyOffset + ValueSizeLength
  /**
   * The minimum valid size for the message header
   */
  val MinHeaderSize = CrcLength + MagicLength + AttributesLength + KeySizeLength + ValueSizeLength
  /**
   * The current "magic" value
   */
  val CurrentMagicValue: Byte = 0
  /**
   * Specifies the mask for the compression code. 3 bits to hold the compression codec.
   * 0 is reserved to indicate no compression
   */
  val CompressionCodeMask: Int = 0x07
  /**
   * Compression code for uncompressed messages
   */
  val NoCompression: Int = 0
}
/**
 * A message. The format of an N byte message is the following:
 *
 * 1. 4 byte CRC32 of the message
 * 2. 1 byte "magic" identifier to allow format changes, value is 0 currently
 * 3. 1 byte "attributes" identifier to allow annotations on the message independent of the version (e.g. compression enabled, type of codec used)
 * 4. 4 byte key length, containing length K
 * 5. K byte key
 * 6. 4 byte payload length, containing length V
 * 7. V byte payload
 *
 * Default constructor wraps an existing ByteBuffer with the Message object with no change to the contents.
 */
class Message(val buffer: ByteBuffer) {
  import kafka.message.Message._
  /**
   * A constructor to create a Message
   * @param bytes The payload of the message
   * @param compressionCodec The compression codec used on the contents of the message (if any)
   * @param key The key of the message (null, if none)
   * @param payloadOffset The offset into the payload array used to extract payload
   * @param payloadSize The size of the payload to use (negative means "use the rest of the array")
   */
  def this(bytes: Array[Byte],
           key: Array[Byte],
           codec: CompressionCodec,
           payloadOffset: Int,
           payloadSize: Int) = {
    this(ByteBuffer.allocate(Message.CrcLength +
                             Message.MagicLength +
                             Message.AttributesLength +
                             Message.KeySizeLength +
                             (if(key == null) 0 else key.length) +
                             Message.ValueSizeLength +
                             (if(bytes == null) 0
                             else if(payloadSize >= 0) payloadSize
                             else bytes.length - payloadOffset)))
    // skip crc, we will fill that in at the end
    buffer.position(MagicOffset)
    buffer.put(CurrentMagicValue)
    var attributes: Byte = 0
    if (codec.codec > 0)
      attributes = (attributes | (CompressionCodeMask & codec.codec)).toByte
    buffer.put(attributes)
    // key size of -1 encodes "no key"
    if(key == null) {
      buffer.putInt(-1)
    } else {
      buffer.putInt(key.length)
      buffer.put(key, 0, key.length)
    }
    // payload size of -1 encodes a null payload
    val size = if(bytes == null) -1
               else if(payloadSize >= 0) payloadSize
               else bytes.length - payloadOffset
    buffer.putInt(size)
    if(bytes != null)
      buffer.put(bytes, payloadOffset, size)
    buffer.rewind()
    // now compute the checksum and fill it in
    Utils.writeUnsignedInt(buffer, CrcOffset, computeChecksum)
  }
  def this(bytes: Array[Byte], key: Array[Byte], codec: CompressionCodec) =
    this(bytes = bytes, key = key, codec = codec, payloadOffset = 0, payloadSize = -1)
  def this(bytes: Array[Byte], codec: CompressionCodec) =
    this(bytes = bytes, key = null, codec = codec)
  def this(bytes: Array[Byte], key: Array[Byte]) =
    this(bytes = bytes, key = key, codec = NoCompressionCodec)
  def this(bytes: Array[Byte]) =
    this(bytes = bytes, key = null, codec = NoCompressionCodec)
  /**
   * Compute the checksum of the message from the message contents
   * (everything after the CRC field itself)
   */
  def computeChecksum(): Long =
    Utils.crc32(buffer.array, buffer.arrayOffset + MagicOffset, buffer.limit - MagicOffset)
  /**
   * Retrieve the previously computed CRC for this message
   */
  def checksum: Long = Utils.readUnsignedInt(buffer, CrcOffset)
  /**
   * Returns true if the crc stored with the message matches the crc computed off the message contents
   */
  def isValid: Boolean = checksum == computeChecksum
  /**
   * Throw an InvalidMessageException if isValid is false for this message
   */
  def ensureValid() {
    if(!isValid)
      throw new InvalidMessageException("Message is corrupt (stored crc = " + checksum + ", computed crc = " + computeChecksum() + ")")
  }
  /**
   * The complete serialized size of this message in bytes (including crc, header attributes, etc)
   */
  def size: Int = buffer.limit
  /**
   * The length of the key in bytes (-1 if the message has no key)
   */
  def keySize: Int = buffer.getInt(Message.KeySizeOffset)
  /**
   * Does the message have a key?
   */
  def hasKey: Boolean = keySize >= 0
  /**
   * The position where the payload size is stored
   */
  private def payloadSizeOffset = Message.KeyOffset + max(0, keySize)
  /**
   * The length of the message value in bytes (-1 if the payload is null)
   */
  def payloadSize: Int = buffer.getInt(payloadSizeOffset)
  /**
   * Is the payload of this message null
   */
  def isNull(): Boolean = payloadSize < 0
  /**
   * The magic version of this message
   */
  def magic: Byte = buffer.get(MagicOffset)
  /**
   * The attributes stored with this message
   */
  def attributes: Byte = buffer.get(AttributesOffset)
  /**
   * The compression codec used with this message
   */
  def compressionCodec: CompressionCodec =
    CompressionCodec.getCompressionCodec(buffer.get(AttributesOffset) & CompressionCodeMask)
  /**
   * A ByteBuffer containing the content of the message
   */
  def payload: ByteBuffer = sliceDelimited(payloadSizeOffset)
  /**
   * A ByteBuffer containing the message key
   */
  def key: ByteBuffer = sliceDelimited(KeySizeOffset)
  /**
   * Read a size-delimited byte buffer starting at the given offset
   * (returns null when the stored size is negative, i.e. null key/payload).
   * Works on a duplicate so the shared buffer's position/limit are not disturbed.
   */
  private def sliceDelimited(start: Int): ByteBuffer = {
    val size = buffer.getInt(start)
    if(size < 0) {
      null
    } else {
      var b = buffer.duplicate
      b.position(start + 4)
      b = b.slice()
      b.limit(size)
      b.rewind
      b
    }
  }
  override def toString(): String =
    "Message(magic = %d, attributes = %d, crc = %d, key = %s, payload = %s)".format(magic, attributes, checksum, key, payload)
  override def equals(any: Any): Boolean = {
    any match {
      case that: Message => this.buffer.equals(that.buffer)
      case _ => false
    }
  }
  override def hashCode(): Int = buffer.hashCode
}
| Parth-Brahmbhatt/kafka | core/src/main/scala/kafka/message/Message.scala | Scala | apache-2.0 | 7,886 |
// This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
package ducttape.syntax
import ducttape.util.AbstractTest
import ducttape.util.Files
import ducttape.syntax.GrammarParser.Parser
import java.io.File
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import scala.collection.mutable.Set
import scala.io.Source
@RunWith(classOf[JUnitRunner])
class BlocksTest extends AbstractTest("blocks", Grammar.elements(new File("tutorial"))) {

  /**
   * Reads every *.tape file in the tutorial directory; the full text of each
   * file becomes one success case for the grammar. Fails the test if no
   * tutorial files are found.
   */
  def successCases = {
    val tutorialDir = new File("tutorial")
    val set: Set[String] = Set.empty[String]
    Files.ls(tutorialDir).filter(_.getName.endsWith(".tape")).foreach(tapeFile => {
      val source = Source.fromFile(tapeFile)
      try {
        set.add(source.mkString)
      } finally {
        // close even when mkString throws, so we never leak the file handle
        source.close()
      }
    })
    if (set.isEmpty)
      fail("No tutorial files found in " + tutorialDir.getAbsolutePath)
    set
  }

  def failureCases = Set(
  )

  def errorCases = Set(
//    " ",
    //"""[funky] < in=foo > out
    //    bar {
    //  function die () {
    //    echo "$@" >&2
    //    exit 1
    //  }
    //
    //  # Now do it!
    //  die()
    //}""" ,
//      """[hello] {
//    echo "hello, world!"
//  }""",
    //"""[hello] {
    //  echo "hello, world!"
    //} yay""",
    //"""[hello] { // Comments are not allowed after opening { braces
    //  echo "hello, world!"
    //}""",
    //"""[hello] { # Comments are not allowed after opening { braces
    //  echo "hello, world!"
    //}""",
//      "A-variable_Name__",
//      "",
//      "> x y_txt",
//      "< a=/etc/passwd b=/etc/hosts",
//      "> x",
//      "< a=$x@first > x",
//      "tokenizer < in=(DataSet: train=a.txt tune=b.txt test=c.txt) > out",
//      "< in=$out@tokenize[DataSet:train] > model",
//      "< in=$out@tokenize[DataSet:tune] > weights",
//      "moses tokenizerr giza < in=$out@tokenize[DataSet:test] > hyps",
//      """moses tokenizerr giza
//         < in=$out@tokenize[DataSet:test] > hyps""",
//      """moses tokenizerr giza
//         // Do some inputs
//         < in=$out@tokenize[DataSet:test]
//         // Here's the result
//         > hyps""" ,
//      """// Package comments
//         moses tokenizerr giza
//         // Do some inputs
//         < in=$out@tokenize[DataSet:test]
//         // Here's the result
//         > hyps"""
  )
}
| jhclark/ducttape | src/test/scala/ducttape/syntax/BlocksTest.scala | Scala | mpl-2.0 | 2,378 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult.TypeCheckFailure
import org.apache.spark.sql.types._
/**
 * Represent the session window.
 *
 * @param timeColumn the start time of session window
 * @param gapDuration the duration of session gap, meaning the session will close if there is
 *                    no new element appeared within "the last element in session + gap".
 */
case class SessionWindow(timeColumn: Expression, gapDuration: Long) extends UnaryExpression
  with ImplicitCastInputTypes
  with Unevaluable
  with NonSQLExpression {

  //////////////////////////
  // SQL Constructors
  //////////////////////////

  def this(timeColumn: Expression, gapDuration: Expression) = {
    this(timeColumn, TimeWindow.parseExpression(gapDuration))
  }

  override def child: Expression = timeColumn
  override def inputTypes: Seq[AbstractDataType] = Seq(TimestampType)

  // Result schema: the [start, end) interval of the session window.
  override def dataType: DataType = new StructType()
    .add(StructField("start", TimestampType))
    .add(StructField("end", TimestampType))

  // This expression is replaced in the analyzer.
  override lazy val resolved = false

  /** Validate the inputs for the gap duration in addition to the input data type. */
  override def checkInputDataTypes(): TypeCheckResult = {
    val dataTypeCheck = super.checkInputDataTypes()
    // Only check the gap once the input type itself is valid; avoids the
    // non-idiomatic early `return` of the previous implementation.
    if (dataTypeCheck.isSuccess && gapDuration <= 0) {
      TypeCheckFailure(s"The window duration ($gapDuration) must be greater than 0.")
    } else {
      dataTypeCheck
    }
  }

  override protected def withNewChildInternal(newChild: Expression): Expression =
    copy(timeColumn = newChild)
}
object SessionWindow {
  // Marker used to tag session-window metadata.
  val marker = "spark.sessionWindow"

  /** Build a session window from a gap duration given as an interval string. */
  def apply(timeColumn: Expression, gapDuration: String): SessionWindow = {
    val gapMicros = TimeWindow.getIntervalInMicroSeconds(gapDuration)
    SessionWindow(timeColumn, gapMicros)
  }
}
| hvanhovell/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SessionWindow.scala | Scala | apache-2.0 | 2,794 |
/*
* This file is part of the sohva project.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gnieh.sohva
import mango._
import spray.json._
import akka.actor._
import akka.http.scaladsl.Http
import akka.http.scaladsl.settings._
import akka.http.scaladsl.model._
import akka.http.scaladsl.marshalling._
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.stream.scaladsl._
import akka.stream.{
KillSwitches,
UniqueKillSwitch
}
import akka.util._
import scala.util.{
Try,
Success
}
import scala.concurrent._
import scala.concurrent.duration.Duration
import java.util.concurrent.atomic.AtomicLong
/** A stream that represents a connection to the `_changes` stream of a database.
*
* @author Lucas Satabin
*/
class ChangeStream(database: Database) {
  import SohvaProtocol._
  import SprayJsonSupport._
  import database.couch.system
  import database.couch.ec
  // Translate the legacy `Option[Either[String, JsValue]]` encoding of the
  // `since` parameter into the typed `Since` ADT used by the new API.
  private def makeSince(old: Option[Either[String, JsValue]]) = old match {
    case Some(Left("now")) => Now
    case Some(Left(_)) => Origin
    case Some(Right(js)) => UpdateSequence(js)
    case None => Origin
  }
  /** Returns a one-shot view of changes for this database. */
  @deprecated("Use the `current` stream instead.", "Sohva 2.2.0")
  def once(
    docIds: Iterable[String] = Vector.empty[String],
    conflicts: Boolean = false,
    descending: Boolean = false,
    filter: Option[String] = None,
    selector: Option[Selector] = None,
    designOnly: Boolean = false,
    includeDocs: Boolean = false,
    attachments: Boolean = false,
    attEncodingInfo: Boolean = false,
    lastEventId: Option[Int] = None,
    limit: Option[Int] = None,
    since: Option[Either[String, JsValue]] = None,
    style: Option[String] = None,
    view: Option[String] = None): Future[Changes] =
    current(
      docIds = docIds,
      conflicts = conflicts,
      descending = descending,
      filter = filter,
      selector = selector,
      designOnly = designOnly,
      includeDocs = includeDocs,
      attachments = attachments,
      attEncodingInfo = attEncodingInfo,
      lastEventId = lastEventId,
      limit = limit,
      since = makeSince(since),
      style = style,
      view = view)
  /** Returns a one-shot view of changes for this database. */
  def current(
    docIds: Iterable[String] = Vector.empty[String],
    conflicts: Boolean = false,
    descending: Boolean = false,
    filter: Option[String] = None,
    selector: Option[Selector] = None,
    designOnly: Boolean = false,
    includeDocs: Boolean = false,
    attachments: Boolean = false,
    attEncodingInfo: Boolean = false,
    lastEventId: Option[Int] = None,
    limit: Option[Int] = None,
    since: Since = Origin,
    style: Option[String] = None,
    view: Option[String] = None): Future[Changes] = {
    // Only parameters that differ from the CouchDB defaults are sent.
    val parameters = List(
      if (conflicts) Some("conflicts" -> "true") else None,
      if (descending) Some("descending" -> "true") else None,
      filter.map(s => "filter" -> s),
      if (includeDocs) Some("include_docs" -> "true") else None,
      if (attachments) Some("attachments" -> "true") else None,
      if (attEncodingInfo) Some("att_encoding_info" -> "true") else None,
      lastEventId.map(n => "last-event-id" -> n.toString),
      limit.map(n => "limit" -> n.toString),
      since.option.map(s => "since" -> s),
      style.map(s => "style" -> s),
      view.map(v => "view" -> v)).flatten.toMap
    // A selector forces the `_selector` filter (POSTed as the request body);
    // otherwise doc ids / design-only selections use the corresponding built-in filters.
    val request = selector match {
      case Some(selector) =>
        for {
          entity <- Marshal(Map("selector" -> selector)).to[RequestEntity]
        } yield HttpRequest(HttpMethods.POST, uri = uri <<? parameters.updated("filter", "_selector"), entity = entity)
      case None =>
        if (docIds.isEmpty)
          Future.successful(HttpRequest(uri = uri <<? parameters))
        else if (designOnly)
          Future.successful(HttpRequest(uri = uri <<? parameters.updated("filter", "_design")))
        else
          for {
            entity <- Marshal(docIds).to[RequestEntity]
          } yield HttpRequest(HttpMethods.POST, uri = uri <<? parameters.updated("filter", "_doc_ids"), entity = entity)
    }
    for {
      req <- request
      resp <- database.http(req)
    } yield resp.convertTo[Changes]
  }
  /** Returns a continuous stream representing the changes in the database. Each change produces an element in the stream.
   *  The returned stream can be cancelled using the kill switch returned by materializing it.
   *  E.g. if you want to log the changes to the console and shut it down after a while, you can write
   *  {{{
   *  val stream = db.changes.stream()
   *  val killSwitch = stream.toMat(Sink.foreach(println _))(Keep.left).run()
   *  ...
   *  killSwitch.shutdown()
   *  }}}
   */
  @deprecated("Use the `all` stream instead.", "Sohva 2.2.0")
  def stream(
    docIds: Iterable[String] = Vector.empty[String],
    conflicts: Boolean = false,
    descending: Boolean = false,
    filter: Option[String] = None,
    selector: Option[Selector] = None,
    designOnly: Boolean = false,
    includeDocs: Boolean = false,
    attachments: Boolean = false,
    attEncodingInfo: Boolean = false,
    lastEventId: Option[Int] = None,
    limit: Option[Int] = None,
    since: Option[Either[String, JsValue]] = None,
    style: Option[String] = None,
    view: Option[String] = None): Source[Change, UniqueKillSwitch] =
    all(
      docIds = docIds,
      conflicts = conflicts,
      descending = descending,
      filter = filter,
      selector = selector,
      designOnly = designOnly,
      includeDocs = includeDocs,
      attachments = attachments,
      attEncodingInfo = attEncodingInfo,
      lastEventId = lastEventId,
      limit = limit,
      since = makeSince(since),
      style = style,
      view = view)
  /** Returns a continuous stream representing the changes in the database. Each change produces an element in the stream.
   *  The returned stream can be cancelled using the kill switch returned by materializing it.
   *  E.g. if you want to log the changes to the console and shut it down after a while, you can write
   *  {{{
   *  val stream = db.changes.stream()
   *  val killSwitch = stream.toMat(Sink.foreach(println _))(Keep.left).run()
   *  ...
   *  killSwitch.shutdown()
   *  }}}
   */
  def all(
    docIds: Iterable[String] = Vector.empty[String],
    conflicts: Boolean = false,
    descending: Boolean = false,
    filter: Option[String] = None,
    selector: Option[Selector] = None,
    designOnly: Boolean = false,
    includeDocs: Boolean = false,
    attachments: Boolean = false,
    attEncodingInfo: Boolean = false,
    lastEventId: Option[Int] = None,
    limit: Option[Int] = None,
    since: Since = Origin,
    style: Option[String] = None,
    view: Option[String] = None): Source[Change, UniqueKillSwitch] = {
    // `feed=continuous` keeps the connection open; the 5s heartbeat keeps
    // intermediaries from timing out an otherwise idle connection.
    val parameters = List(
      Some("heartbeat" -> "5000"),
      Some("feed" -> "continuous"),
      if (conflicts) Some("conflicts" -> "true") else None,
      if (descending) Some("descending" -> "true") else None,
      filter.map(s => "filter" -> s),
      if (includeDocs) Some("include_docs" -> "true") else None,
      if (attachments) Some("attachments" -> "true") else None,
      if (attEncodingInfo) Some("att_encoding_info" -> "true") else None,
      lastEventId.map(n => "last-event-id" -> n.toString),
      limit.map(n => "limit" -> n.toString),
      since.option.map(s => "since" -> s),
      style.map(s => "style" -> s),
      view.map(v => "view" -> v)).flatten.toMap
    val request = selector match {
      case Some(selector) =>
        for {
          entity <- Marshal(Map("selector" -> selector)).to[RequestEntity]
        } yield HttpRequest(HttpMethods.POST, uri = uri <<? parameters.updated("filter", "_selector"), entity = entity)
      case None =>
        if (docIds.isEmpty)
          Future.successful(HttpRequest(uri = uri <<? parameters))
        else if (designOnly)
          Future.successful(HttpRequest(uri = uri <<? parameters.updated("filter", "_design")))
        else
          for {
            entity <- Marshal(docIds).to[RequestEntity]
          } yield HttpRequest(HttpMethods.POST, uri = uri <<? parameters.updated("filter", "_doc_ids"), entity = entity)
    }
    // The continuous feed is newline-delimited JSON; empty lines are heartbeats
    // and are dropped. The kill switch lets the consumer shut the stream down.
    Source.fromFuture(
      for (req <- request)
        yield database.couch.prepare(req))
      .via(database.couch.connectionFlow)
      .flatMapConcat(_.entity.dataBytes)
      .via(Framing.delimiter(ByteString("\n"), Int.MaxValue))
      .mapConcat(bs => if (bs.isEmpty) collection.immutable.Seq() else collection.immutable.Seq(JsonParser(bs.utf8String).convertTo[Change]))
      .viaMat(KillSwitches.single)(Keep.right)
  }
  override def toString =
    uri.toString
  private val uri = database.uri / "_changes"
}
/** One entry of the `_changes` feed: the update sequence, the document id,
 *  its changed revisions, whether the document was deleted, and (when
 *  `include_docs` was requested) the document content itself. */
case class Change(seq: JsValue, id: String, changes: Vector[Rev], deleted: Boolean, doc: Option[JsObject])
/** A single document revision identifier as reported by the changes feed. */
case class Rev(rev: String)
/** The complete response of a one-shot `_changes` request. */
case class Changes(last_seq: JsValue, pending: Int, results: Vector[Change])
| gnieh/sohva | src/main/scala/gnieh/sohva/ChangeStream.scala | Scala | apache-2.0 | 9,525 |
/**
* Copyright 2015 Otto (GmbH & Co KG)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher
import java.io.{File, IOException}
import java.net.URLClassLoader
import java.util.concurrent.atomic.AtomicInteger
import org.apache.commons.io.FileUtils
import org.apache.spark.deploy.SparkSubmit
import org.apache.spark.launcher.CommandBuilderUtils._
import org.apache.spark.launcher.SparkAppHandle.{Listener, State}
import org.apache.spark.launcher.SparkLauncher.{DRIVER_EXTRA_CLASSPATH, EXECUTOR_EXTRA_CLASSPATH}
import org.schedoscope.dsl.transformations.SparkTransformation._
import scala.collection.JavaConversions._
/**
* We have to rig SparkLauncher so that it can launch Spark jobs in a local test mode as well.
* The normal SparkLauncher just starts the spark-submit shell script, which we do not have in a test environment.
*
* In that environment, we just want to start the Java class SparkSubmit (which is called by the spark-submit script)
* in a subprocess directly (SparkSubmitCommandBuilder offers a simple method to do that).
*
* Now this would all be very easy if only the method createBuilder in SparkLauncher wasn't private :-(.
*
* Sorry for this hack.
*/
class SparkSubmitLauncher extends SparkLauncher {
  // Fallback counter used to derive a unique child-logger name when no other
  // name (logger config, app name, main class, app resource) is available.
  val COUNTER: AtomicInteger = new AtomicInteger()
  // Adds an environment variable for the spawned child process; returns this for chaining.
  def setChildEnv(key: String, value: String): SparkSubmitLauncher = {
    CommandBuilderUtils.checkNotNull(key, "childEnv key")
    CommandBuilderUtils.checkNotNull(value, "childEnv value")
    builder.childEnv.put(key, value)
    this
  }
  def setScalaVersion(scalaVersion: String): SparkSubmitLauncher = {
    CommandBuilderUtils.checkNotNull(scalaVersion, "scalaVersion")
    builder.childEnv.put("SPARK_SCALA_VERSION", scalaVersion)
    this
  }
  def setAssemblyPath(assemblyPath: String): SparkSubmitLauncher = {
    CommandBuilderUtils.checkNotNull(assemblyPath, "assemblyPath")
    builder.childEnv.put("_SPARK_ASSEMBLY", assemblyPath)
    this
  }
  // Exports the current JVM's classpath (plus any driver/executor extra
  // classpath settings) as SPARK_DIST_CLASSPATH so the child sees the same classes.
  def addLocalClasspath(): SparkSubmitLauncher = {
    val cp = ClassLoader.getSystemClassLoader().asInstanceOf[URLClassLoader].getURLs.map(_.getFile).toList.mkString(File.pathSeparator)
    builder.childEnv.put("SPARK_DIST_CLASSPATH",
      cp + (
        if (builder.conf.containsKey(DRIVER_EXTRA_CLASSPATH))
          File.pathSeparator + builder.conf.get(DRIVER_EXTRA_CLASSPATH)
        else
          ""
      ) + (
        if (builder.conf.containsKey(EXECUTOR_EXTRA_CLASSPATH))
          File.pathSeparator + builder.conf.get(EXECUTOR_EXTRA_CLASSPATH)
        else
          ""
      )
    )
    // the extra-classpath entries are already folded into SPARK_DIST_CLASSPATH above
    builder.conf.remove(DRIVER_EXTRA_CLASSPATH)
    builder.conf.remove(EXECUTOR_EXTRA_CLASSPATH)
    this
  }
  // Creates the empty lib_managed/jars directory that Spark's launcher expects to exist.
  def createFakeLibManaged(): SparkSubmitLauncher = {
    val dir = new File(builder.getSparkHome, "lib_managed" + File.separator + "jars")
    FileUtils.forceMkdir(dir)
    this
  }
  // Convenience: configure everything needed to run SparkSubmit locally in tests.
  def setLocalTestMode(): SparkSubmitLauncher = {
    setSparkHome("target")
    setAssemblyPath(jarOf(SparkSubmit))
    addLocalClasspath()
    createFakeLibManaged()
    setScalaVersion("2.11")
    setMaster("local")
    this
  }
  def getConf(key: String): String = builder.conf.get(key)
  @throws[IOException]
  override def startApplication(listeners: Listener*) = {
    //
    // create LauncherServer app handle
    //
    val handle = ExitCodeAwareChildProcAppHandle(LauncherServer.newAppHandle())
    for (l <- listeners)
      handle.addListener(l)
    //
    // Set logger name
    //
    val childLoggerName: String =
      if (builder.getEffectiveConfig.get(SparkLauncher.CHILD_PROCESS_LOGGER_NAME) != null)
        builder.getEffectiveConfig.get(SparkLauncher.CHILD_PROCESS_LOGGER_NAME)
      else if (builder.appName != null)
        builder.appName
      else if (builder.mainClass != null) {
        // strip the package prefix from the main class name, when there is one
        val endOfPackage = builder.mainClass.lastIndexOf(".")
        if (endOfPackage >= 0 && endOfPackage < builder.mainClass.length() - 1)
          builder.mainClass.substring(endOfPackage + 1, builder.mainClass.length())
        else
          builder.mainClass
      } else if (builder.appResource != null)
        new File(builder.appResource).getName
      else
        s"${COUNTER.incrementAndGet()}"
    val fullLoggerName = s"${getClass.getPackage.getName}.app.$childLoggerName"
    //
    // Build either SparkSubmit or spark-submit call
    //
    val sparkSubmitCall: List[String] =
      if (builder.master.startsWith("local")) {
        //
        // Local mode => test framework
        //
        setChildEnv("SPARK_TESTING", "1")
        setChildEnv("SPARK_SQL_TESTING", "1")
        builder.buildCommand(Map[String, String]()).toList
      } else {
        //
        // Non-local mode => shell script
        //
        {
          List(
            join(File.separator, builder.getSparkHome, "bin", if (isWindows) "spark-submit.cmd" else "spark-submit")
          ) ++ builder.buildSparkSubmitArgs()
        }.map { arg =>
          if (isWindows) quoteForBatchScript(arg) else arg
        }
      }
    //
    // Create subprocess
    //
    val process = new ProcessBuilder(sparkSubmitCall)
    for ((k, v) <- builder.childEnv)
      process.environment().put(k, v)
    // port/secret let the child connect back to the in-process LauncherServer
    process.environment().put(LauncherProtocol.ENV_LAUNCHER_PORT, String.valueOf(LauncherServer.getServerInstance.getPort))
    process.environment().put(LauncherProtocol.ENV_LAUNCHER_SECRET, handle.getSecret)
    process.redirectErrorStream(true)
    //
    // Start process and return LauncherServer app handle to caller
    //
    try {
      handle.setChildProc(process.start(), fullLoggerName)
      handle
    } catch {
      case e: IOException =>
        handle.kill()
        throw e
    }
  }
}
/**
* Sad wrapper class required to get access to the process object within the handle for the SparkSubmit
* sub process. Sadly, the default implementation of ChildProcAppHandle does not return a failure when the exit
* code of the SparkSubmit sub process is > 0. And of course everything is private and has limited visibility.
*
* @param childProcAppHandle the wrapped child process handle
*/
case class ExitCodeAwareChildProcAppHandle(childProcAppHandle: ChildProcAppHandle) extends SparkAppHandle {
  // Handle on the spawned SparkSubmit subprocess, set once it has been started.
  var childProc: Option[Process] = None
  // Plain delegation to the wrapped handle for the standard SparkAppHandle API.
  override def stop(): Unit = childProcAppHandle.stop()
  override def disconnect(): Unit = childProcAppHandle.disconnect()
  override def kill(): Unit = childProcAppHandle.kill()
  override def getState: State = childProcAppHandle.getState
  override def addListener(listener: Listener): Unit = childProcAppHandle.addListener(listener)
  override def getAppId: String = childProcAppHandle.getAppId
  def getSecret = childProcAppHandle.getSecret
  // Remember the subprocess before forwarding it, so getExitCode can inspect it later.
  def setChildProc(childProc: Process, loggerName: String): Unit = {
    this.childProc = Some(childProc)
    childProcAppHandle.setChildProc(childProc, loggerName)
  }
  // The subprocess exit code: None while not started or still running.
  def getExitCode: Option[Int] =
    childProc match {
      case None => None
      case Some(proc) =>
        try Some(proc.exitValue())
        catch { case _: IllegalThreadStateException => None }
    }
}
package controllers.filters
import javax.inject.Inject
import play.api.mvc.{EssentialAction,EssentialFilter,RequestHeader}
import scala.concurrent.ExecutionContext
/** Adds Strict-Transport-Security to all SSL responses.
*
* https://www.owasp.org/index.php/HTTP_Strict_Transport_Security_Cheat_Sheet
*/
class StrictTransportSecurityFilter @Inject() (implicit ec: ExecutionContext) extends EssentialFilter {
  // Header appended to every response served over SSL.
  // We'd like to includeSubDomains, but for now we're using HTTP on jenkins-ci.overviewdocs.com
  private val Headers = Seq(
    "Strict-Transport-Security" -> "max-age=31536000"
  )

  def apply(nextFilter: EssentialAction) = new EssentialAction {
    def apply(requestHeader: RequestHeader) = {
      val action = nextFilter(requestHeader)
      if (!requestHeader.secure) action
      else action.map(_.withHeaders(Headers: _*))
    }
  }
}
| overview/overview-server | web/app/controllers/filters/StrictTransportSecurityFilter.scala | Scala | agpl-3.0 | 884 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.raster.iterators
import org.apache.accumulo.core.client.IteratorSetting
import org.geotools.process.vector.TransformProcess
import org.locationtech.geomesa.accumulo.index.IndexValueEncoder.IndexValueEncoderImpl
import org.locationtech.geomesa.features.SerializationType.SerializationType
import org.locationtech.geomesa.features._
import org.locationtech.geomesa.filter.factory.FastFilterFactory
import org.locationtech.geomesa.raster.iterators.IteratorExtensions._
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
/**
* Defines common iterator functionality in traits that can be mixed-in to iterator implementations
*/
trait IteratorExtensions {
  // Reads this mix-in's configuration out of the iterator options map.
  // Implementing traits chain their setup via `abstract override` and `super.init`.
  def init(featureType: SimpleFeatureType, options: OptionMap)
}
object IteratorExtensions {
  // Shape of the configuration map handed to `init`.
  type OptionMap = java.util.Map[String, String]
  // Option keys under which the mix-in traits in this file look up their settings.
  val ST_FILTER_PROPERTY_NAME = "geomesa.index.filter"
  val DEFAULT_CACHE_SIZE_NAME = "geomesa.index.cache-size"
  val FEATURE_ENCODING = "geomesa.feature.encoding"
  val GEOMESA_ITERATORS_SIMPLE_FEATURE_TYPE = "geomesa.iterators.aggregator-types"
  val GEOMESA_ITERATORS_SFT_NAME = "geomesa.iterators.sft-name"
  val GEOMESA_ITERATORS_SFT_INDEX_VALUE = "geomesa.iterators.sft.index-value-schema"
  val GEOMESA_ITERATORS_ECQL_FILTER = "geomesa.iterators.ecql-filter"
  val GEOMESA_ITERATORS_TRANSFORM = "geomesa.iterators.transform"
  val GEOMESA_ITERATORS_TRANSFORM_SCHEMA = "geomesa.iterators.transform.schema"
  val GEOMESA_ITERATORS_IS_DENSITY_TYPE = "geomesa.iterators.is-density-type"
  // Suffix used to namespace serialized user-data entries inside the options map.
  val USER_DATA = ".userdata."
  /**
   * Copy UserData entries taken from a SimpleFeatureType into an IteratorSetting for later transfer back into
   * a SimpleFeatureType
   *
   * This works around the fact that SimpleFeatureTypes.encodeType ignores the UserData
   *
   */
  def encodeUserData(cfg: IteratorSetting, userData: java.util.Map[AnyRef,AnyRef], keyPrefix: String): Unit = {
    import scala.collection.JavaConversions._
    val fullPrefix = keyPrefix + USER_DATA
    userData.foreach { case (k, v) => cfg.addOption(fullPrefix + k.toString, v.toString)}
  }
  /**
   * Copy UserData entries taken from an IteratorSetting/Options back into
   * a SimpleFeatureType
   *
   * This works around the fact that SimpleFeatureTypes.encodeType ignores the UserData
   *
   */
  def decodeUserData(sft: SimpleFeatureType, options: java.util.Map[String,String], keyPrefix:String): Unit = {
    import scala.collection.JavaConversions._
    val fullPrefix = keyPrefix + USER_DATA
    // Only entries namespaced with this prefix belong to the given key; strip the prefix back off.
    val ud = options.collect { case (k, v) if k.startsWith(fullPrefix) => k.stripPrefix(fullPrefix) -> v }
    sft.getUserData.putAll(ud)
  }
}
/**
 * We need a concrete class to mix the traits into. This way they can share a common 'init' method
 * that will be called for each trait. See http://stackoverflow.com/a/1836619
 */
class HasIteratorExtensions extends IteratorExtensions {
  // Terminates the stackable-trait chain: deliberately a no-op.
  override def init(featureType: SimpleFeatureType, options: OptionMap) = {}
}
/**
 * Provides a feature type based on the iterator config
 */
trait HasFeatureType {
  // Populated by initFeatureType; null until then.
  var featureType: SimpleFeatureType = null
  // feature type config
  def initFeatureType(options: OptionMap) = {
    // Fall back to the implementing class's simple name if no explicit SFT name was configured.
    val sftName = Option(options.get(GEOMESA_ITERATORS_SFT_NAME)).getOrElse(this.getClass.getSimpleName)
    featureType = SimpleFeatureTypes.createType(sftName, options.get(GEOMESA_ITERATORS_SIMPLE_FEATURE_TYPE))
    decodeUserData(featureType, options, GEOMESA_ITERATORS_SIMPLE_FEATURE_TYPE)
  }
}
/**
 * Provides an index value decoder
 */
trait HasIndexValueDecoder extends IteratorExtensions {
  // Index-value schema and serializer; populated in init, null until then.
  var indexSft: SimpleFeatureType = null
  var indexEncoder: SimpleFeatureSerializer = null
  // index value encoder/decoder
  abstract override def init(featureType: SimpleFeatureType, options: OptionMap) = {
    super.init(featureType, options)
    indexSft = SimpleFeatureTypes.createType(featureType.getTypeName,
      options.get(GEOMESA_ITERATORS_SFT_INDEX_VALUE))
    indexEncoder = new IndexValueEncoderImpl(featureType)
  }
}
/**
 * Provides a feature encoder and decoder
 */
trait HasFeatureDecoder extends IteratorExtensions {
  // Serializers for the source feature type; populated in init, null until then.
  var featureDecoder: SimpleFeatureSerializer = null
  var featureEncoder: SimpleFeatureSerializer = null
  // Used when the options don't name an encoding explicitly.
  val defaultEncoding = SerializationType.KRYO
  // feature encoder/decoder
  abstract override def init(featureType: SimpleFeatureType, options: OptionMap) = {
    super.init(featureType, options)
    // this encoder is for the source sft
    val encoding = Option(options.get(FEATURE_ENCODING)).map(SerializationType.withName).getOrElse(defaultEncoding)
    featureDecoder = SimpleFeatureDeserializers(featureType, encoding)
    featureEncoder = SimpleFeatureSerializers(featureType, encoding)
  }
}
/**
 * Provides a spatio-temporal filter (date and geometry only) if the iterator config specifies one
 */
trait HasSpatioTemporalFilter extends IteratorExtensions {
  import IteratorExtensions.ST_FILTER_PROPERTY_NAME
  // Parsed ST filter; stays null when none was configured or it reduced to Filter.INCLUDE.
  var stFilter: Filter = null
  // spatio-temporal filter config
  abstract override def init(featureType: SimpleFeatureType, options: OptionMap) = {
    super.init(featureType, options)
    if (options.containsKey(ST_FILTER_PROPERTY_NAME)) {
      val filter = FastFilterFactory.toFilter(featureType, options.get(ST_FILTER_PROPERTY_NAME))
      // Filter.INCLUDE matches everything, so treat it the same as "no filter".
      if (filter != Filter.INCLUDE) {
        stFilter = filter
      }
    }
  }
}
/**
* Provides an arbitrary filter if the iterator config specifies one
*/
trait HasFilter extends IteratorExtensions {
  // Parsed ECQL filter; stays null when none was configured or it reduced to Filter.INCLUDE.
  var filter: Filter = null

  abstract override def init(featureType: SimpleFeatureType, options: OptionMap) = {
    super.init(featureType, options)
    if (options.containsKey(GEOMESA_ITERATORS_ECQL_FILTER)) {
      val parsed = FastFilterFactory.toFilter(featureType, options.get(GEOMESA_ITERATORS_ECQL_FILTER))
      // Filter.INCLUDE matches everything, so keep the field null in that case.
      if (parsed != Filter.INCLUDE) filter = parsed
    }
  }
}
/**
 * Provides a feature type transformation if the iterator config specifies one
 */
trait HasTransforms extends IteratorExtensions {
  type TransformFunction = (SimpleFeature) => Array[Byte]
  // Serializing transform function; stays null when no transform was configured.
  var transform: TransformFunction = null
  // feature type transforms
  abstract override def init(featureType: SimpleFeatureType, options: OptionMap) = {
    super.init(featureType, options)
    // Both the target schema and the transform expressions must be present to enable transforms.
    if (options.containsKey(GEOMESA_ITERATORS_TRANSFORM_SCHEMA) &&
        options.containsKey(GEOMESA_ITERATORS_TRANSFORM)) {
      val transformSchema = options.get(GEOMESA_ITERATORS_TRANSFORM_SCHEMA)
      val targetFeatureType = SimpleFeatureTypes.createType(this.getClass.getCanonicalName, transformSchema)
      decodeUserData(targetFeatureType, options, GEOMESA_ITERATORS_TRANSFORM_SCHEMA)
      val transformString = options.get(GEOMESA_ITERATORS_TRANSFORM)
      val transformEncoding = Option(options.get(FEATURE_ENCODING)).map(SerializationType.withName)
          .getOrElse(SerializationType.KRYO)
      transform = TransformCreator.createTransform(targetFeatureType, transformEncoding, transformString)
    }
  }
}
/**
 * Provides deduplication if the iterator config specifies it
 */
trait HasInMemoryDeduplication extends IteratorExtensions {
  import IteratorExtensions.DEFAULT_CACHE_SIZE_NAME
  type CheckUniqueId = (String) => Boolean
  private var deduplicate: Boolean = false
  // each thread maintains its own (imperfect!) list of the unique identifiers it has seen
  private var maxInMemoryIdCacheEntries = 10000
  private var inMemoryIdCache: java.util.HashSet[String] = null
  /**
   * Returns a local estimate as to whether the current identifier
   * is likely to be a duplicate.
   *
   * Because we set a limit on how many unique IDs will be preserved in
   * the local cache, a TRUE response is always accurate, but a FALSE
   * response may not be accurate. (That is, this cache allows for false-
   * negatives, but no false-positives.) We accept this, because there is
   * a final, client-side filter that will eliminate all duplicate IDs
   * definitively. The purpose of the local cache is to reduce traffic
   * through the remainder of the iterator/aggregator pipeline as quickly as
   * possible.
   *
   * @return False if this identifier is in the local cache; True otherwise
   */
  var checkUniqueId: CheckUniqueId = null
  abstract override def init(featureType: SimpleFeatureType, options: OptionMap) = {
    super.init(featureType, options)
    // check for dedupe - we don't need to dedupe for density queries
    if (!options.containsKey(GEOMESA_ITERATORS_IS_DENSITY_TYPE)) {
      deduplicate = featureType.nonPoints
      if (deduplicate) {
        if (options.containsKey(DEFAULT_CACHE_SIZE_NAME)) {
          maxInMemoryIdCacheEntries = options.get(DEFAULT_CACHE_SIZE_NAME).toInt
        }
        inMemoryIdCache = new java.util.HashSet[String](maxInMemoryIdCacheEntries)
        // While the cache has room, record the id (add returns false for duplicates).
        // Once full, only ids already in the cache can be recognized as duplicates.
        checkUniqueId =
          (id: String) => if (inMemoryIdCache.size < maxInMemoryIdCacheEntries) {
            inMemoryIdCache.add(id)
          } else {
            !inMemoryIdCache.contains(id)
          }
      }
    }
  }
}
object TransformCreator {
  /**
   * Create a function to transform a feature from one sft to another...this will
   * result in a new feature instance being created and encoded.
   *
   * The function returned may NOT be ThreadSafe due to the fact it contains a
   * SimpleFeatureEncoder instance which is not thread safe, to optimize performance
   */
  def createTransform(targetFeatureType: SimpleFeatureType,
                      featureEncoding: SerializationType,
                      transformString: String): (SimpleFeature => Array[Byte]) = {
    import scala.collection.JavaConversions._
    val encoder = SimpleFeatureSerializers(targetFeatureType, featureEncoding)
    val defs = TransformProcess.toDefinition(transformString)
    // Single feature instance reused across invocations — another reason the
    // returned closure is not thread safe.
    val newSf = new ScalaSimpleFeature(targetFeatureType, "reusable")
    (feature: SimpleFeature) => {
      newSf.setId(feature.getIdentifier.getID)
      // Evaluate each transform expression against the source feature.
      defs.foreach { t => newSf.setAttribute(t.name, t.expression.evaluate(feature)) }
      encoder.serialize(newSf)
    }
  }
}
| elahrvivaz/geomesa | geomesa-accumulo/geomesa-accumulo-raster/src/main/scala/org/locationtech/geomesa/raster/iterators/IteratorExtensions.scala | Scala | apache-2.0 | 10,898 |
package com.cloudera.hue.livy.repl
import java.util.concurrent.TimeUnit
import com.cloudera.hue.livy.repl.scala.SparkSession
import org.json4s.JsonAST.JValue
import org.json4s.{DefaultFormats, Extraction}
import org.scalatest.{BeforeAndAfter, FunSpec, Matchers}
import _root_.scala.concurrent.Await
import _root_.scala.concurrent.duration.Duration
/** Behavioral spec for [[SparkSession]]: state transitions, expression evaluation,
  * stdout capture, error reporting, and access to the Spark context.
  * Expected values match the exact strings produced by the Scala REPL.
  */
class SparkSessionSpec extends FunSpec with Matchers with BeforeAndAfter {
  implicit val formats = DefaultFormats
  // Fresh interpreter session per test; torn down in `after`.
  var session: Session = null
  before {
    session = SparkSession.create()
  }
  after {
    session.close()
  }
  describe("A spark session") {
    it("should start in the starting or idle state") {
      session.state should (equal (Session.Starting()) or equal (Session.Idle()))
    }
    it("should eventually become the idle state") {
      session.waitForStateChange(Session.Starting(), Duration(10, TimeUnit.SECONDS))
      session.state should equal (Session.Idle())
    }
    it("should execute `1 + 2` == 3") {
      val result = Await.result(session.execute("1 + 2"), Duration.Inf)
      val expectedResult = Extraction.decompose(Map(
        "status" -> "ok",
        "execution_count" -> 0,
        "data" -> Map(
          "text/plain" -> "res0: Int = 3"
        )
      ))
      result should equal (expectedResult)
    }
    it("should execute `x = 1`, then `y = 2`, then `x + y`") {
      // execution_count increments with each statement within the same session.
      var result = Await.result(session.execute("val x = 1"), Duration.Inf)
      var expectedResult = Extraction.decompose(Map(
        "status" -> "ok",
        "execution_count" -> 0,
        "data" -> Map(
          "text/plain" -> "x: Int = 1"
        )
      ))
      result should equal (expectedResult)
      result = Await.result(session.execute("val y = 2"), Duration.Inf)
      expectedResult = Extraction.decompose(Map(
        "status" -> "ok",
        "execution_count" -> 1,
        "data" -> Map(
          "text/plain" -> "y: Int = 2"
        )
      ))
      result should equal (expectedResult)
      result = Await.result(session.execute("x + y"), Duration.Inf)
      expectedResult = Extraction.decompose(Map(
        "status" -> "ok",
        "execution_count" -> 2,
        "data" -> Map(
          "text/plain" -> "res0: Int = 3"
        )
      ))
      result should equal (expectedResult)
    }
    it("should capture stdout") {
      val result = Await.result(session.execute("""println("Hello World")"""), Duration.Inf)
      val expectedResult = Extraction.decompose(Map(
        "status" -> "ok",
        "execution_count" -> 0,
        "data" -> Map(
          "text/plain" -> "Hello World"
        )
      ))
      result should equal (expectedResult)
    }
    it("should report an error if accessing an unknown variable") {
      val result = Await.result(session.execute("""x"""), Duration.Inf)
      val expectedResult = Extraction.decompose(Map(
        "status" -> "error",
        "execution_count" -> 0,
        "ename" -> "Error",
        "evalue" ->
          """<console>:8: error: not found: value x
            | x
            | ^""".stripMargin
      ))
      result should equal (expectedResult)
    }
    it("should report an error if exception is thrown") {
      val result = Await.result(session.execute("""throw new Exception()"""), Duration.Inf)
      val resultMap = result.extract[Map[String, JValue]]
      // Manually extract the values since the line numbers in the exception could change.
      resultMap("status").extract[String] should equal ("error")
      resultMap("execution_count").extract[Int] should equal (0)
      resultMap("ename").extract[String] should equal ("Error")
      resultMap("evalue").extract[String] should include ("java.lang.Exception")
      resultMap.get("traceback") should equal (None)
    }
    it("should access the spark context") {
      val result = Await.result(session.execute("""sc"""), Duration.Inf)
      val resultMap = result.extract[Map[String, JValue]]
      // Manually extract the values since the line numbers in the exception could change.
      resultMap("status").extract[String] should equal ("ok")
      resultMap("execution_count").extract[Int] should equal (0)
      val data = resultMap("data").extract[Map[String, JValue]]
      data("text/plain").extract[String] should include ("res0: org.apache.spark.SparkContext = org.apache.spark.SparkContext")
    }
    it("should execute spark commands") {
      val result = Await.result(session.execute(
        """
          |sc.parallelize(0 to 1).map{i => i+1}.collect
          |""".stripMargin), Duration.Inf)
      val expectedResult = Extraction.decompose(Map(
        "status" -> "ok",
        "execution_count" -> 0,
        "data" -> Map(
          "text/plain" -> "res0: Array[Int] = Array(1, 2)"
        )
      ))
      result should equal (expectedResult)
    }
  }
}
| nvoron23/hue | apps/spark/java/livy-repl/src/test/scala/com/cloudera/hue/livy/repl/SparkSessionSpec.scala | Scala | apache-2.0 | 4,865 |
package io.udash
package rest
import com.avsystem.commons.meta.MacroInstances
/**
  * Base class for companions of REST API traits used only for REST clients to external services.
  * Injects `Future` as the wrapper for asynchronous responses and `GenCodec`/`GenKeyCodec` based serialization
  * for parameters and responses.
  * Client instances are materialized at compile time by the `MacroInstances` implicit.
  */
abstract class DefaultRestClientApiCompanion[Real](implicit
  inst: MacroInstances[DefaultRestImplicits, ClientInstances[Real]]
) extends RestClientApiCompanion[DefaultRestImplicits, Real](DefaultRestImplicits)
/**
  * Base class for companions of REST API traits used only for REST servers exposed to external world.
  * Injects `Future` as the wrapper for asynchronous responses and `GenCodec`/`GenKeyCodec` based serialization
  * for parameters and responses.
  * Also, forces derivation of [[io.udash.rest.openapi.OpenApiMetadata OpenApiMetadata]].
  * Server instances are materialized at compile time by the `MacroInstances` implicit.
  */
abstract class DefaultRestServerApiCompanion[Real](implicit
  inst: MacroInstances[DefaultRestImplicits, OpenApiServerInstances[Real]]
) extends RestServerOpenApiCompanion[DefaultRestImplicits, Real](DefaultRestImplicits)
/**
  * Base class for companions of REST API traits used for both REST clients and servers.
  * Injects `Future` as the wrapper for asynchronous responses and `GenCodec`/`GenKeyCodec` based serialization
  * for parameters and responses.
  * Also, forces derivation of [[io.udash.rest.openapi.OpenApiMetadata OpenApiMetadata]].
  * Both client and server instances are materialized at compile time by the `MacroInstances` implicit.
  */
abstract class DefaultRestApiCompanion[Real](implicit
  inst: MacroInstances[DefaultRestImplicits, OpenApiFullInstances[Real]]
) extends RestOpenApiCompanion[DefaultRestImplicits, Real](DefaultRestImplicits)
/**
  * Like [[DefaultRestServerApiCompanion]], but derives `OpenApiServerImplInstances` — presumably for
  * companions that also serve as the API implementation (NOTE(review): confirm against udash-rest docs).
  */
abstract class DefaultRestServerApiImplCompanion[Real](implicit
  inst: MacroInstances[DefaultRestImplicits, OpenApiServerImplInstances[Real]]
) extends RestServerOpenApiImplCompanion[DefaultRestImplicits, Real](DefaultRestImplicits)
| UdashFramework/udash-core | rest/src/main/scala/io/udash/rest/DefaultRestApiCompanion.scala | Scala | apache-2.0 | 1,888 |
/*
The MIT License (MIT)
Copyright (c) 2015-2016 Raymond Dodge
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package com.rayrobdod.divReduce
import java.nio.file.{Paths, Files}
import java.nio.file.StandardOpenOption.CREATE
import java.nio.charset.StandardCharsets.UTF_8
object Runner {
  /** Command-line entry point: reads a divreduce file and writes the rendered html file.
    *
    * @param args args(0) the input divreduce file to process;
    *             args(1) the location to write the output html file to
    */
  def main(args:Array[String]):Unit = {
    if (args.length < 2) {
      System.out.println("java -jar div-reduce.jar <inputfile> <outputfile>")
    } else {
      val inputFile = Paths.get(args(0));
      val outputFile = Paths.get(args(1));
      val input = Files.readAllLines(inputFile, UTF_8)
      val output = toHtml(parse(input))
      // Path.getParent is null when the output path has no directory component
      // (e.g. just "out.html"); only create parent directories when one exists,
      // avoiding a NullPointerException from Files.createDirectories(null).
      Option(outputFile.getParent).foreach(Files.createDirectories(_))
      Files.write(outputFile, java.util.Arrays.asList[String](output), UTF_8, CREATE)
    }
  }
}
| rayrobdod/div-reduce | console/src/main/scala/Runner.scala | Scala | mit | 1,885 |
package scala.test.junit
import org.junit.Test
class FirstFilterTest {
  // Prints "<fully-qualified class name>#<method name>" so the harness can see which tests ran.
  private def report(method: String): Unit = println(this.getClass.getName + "#" + method)

  @Test def method1(): Unit = report("method1")
  @Test def method2(): Unit = report("method2")
  @Test def method3(): Unit = report("method3")
}
class SecondFilterTest {
  // Prints "<fully-qualified class name>#<method name>" so the harness can see which tests ran.
  private def report(method: String): Unit = println(this.getClass.getName + "#" + method)

  @Test def method1(): Unit = report("method1")
  @Test def method2(): Unit = report("method2")
  @Test def method3(): Unit = report("method3")
}
class ThirdFilterTest {
  // Prints "<fully-qualified class name>#<method name>" so the harness can see which tests ran.
  private def report(method: String): Unit = println(this.getClass.getName + "#" + method)

  @Test def method1(): Unit = report("method1")
  @Test def method2(): Unit = report("method2")
  @Test def method3(): Unit = report("method3")
}
| sdtwigg/rules_scala | test/src/main/scala/scala/test/junit/FilterTest.scala | Scala | apache-2.0 | 742 |
// Copyright 2014-2018 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import sbt._
import Keys._
import com.twitter.scrooge.ScroogeSBT._
import sbtassembly.AssemblyPlugin.autoImport.assembly
import sbtunidoc.Plugin.{ScalaUnidoc, UnidocKeys}
import UnidocKeys.{unidoc, unidocProjectFilter}
import au.com.cba.omnia.uniform.core.standard.StandardProjectPlugin._
import au.com.cba.omnia.uniform.core.version.UniqueVersionPlugin._
import au.com.cba.omnia.uniform.dependency.UniformDependencyPlugin._
import au.com.cba.omnia.uniform.thrift.UniformThriftPlugin._
import au.com.cba.omnia.uniform.assembly.UniformAssemblyPlugin._
import au.com.cba.omnia.humbug.HumbugSBT._
object build extends Build {
  type Sett = Def.Setting[_]
  // Pinned versions of the omnia-internal libraries the subprojects depend on.
  val thermometerVersion = "1.6.11-20190730062717-f203e44"
  val ebenezerVersion = "0.24.9-20190730094137-20cd049"
  val beeswaxVersion = "0.2.11-20190730083634-78faba5"
  val omnitoolVersion = "1.15.9-20190730073144-b52646c"
  val permafrostVersion = "0.15.9-20190730083617-8bb13bc"
  val edgeVersion = "3.8.9-20190730094133-4be8b98"
  val humbugVersion = "0.8.8-20190730062733-7025390"
  val parlourVersion = "1.14.2-20190730073214-152deaa"
  // Settings shared by every subproject below.
  lazy val standardSettings: Seq[Sett] =
    Defaults.coreDefaultSettings ++
    uniformDependencySettings ++
    strictDependencySettings ++
    uniform.docSettings("https://github.com/CommBank/maestro") ++
    Seq(
      logLevel in assembly := Level.Error,
      updateOptions := updateOptions.value.withCachedResolution(true),
      // Run tests sequentially across the subprojects.
      concurrentRestrictions in Global := Seq(
        Tags.limit(Tags.Test, 1)
      )
    )
  // Aggregate/root project; example, schema and benchmark are excluded from unidoc.
  lazy val all = Project(
    id = "all"
  , base = file(".")
  , settings =
       standardSettings
    ++ uniform.project("maestro-all", "au.com.cba.omnia.maestro")
    ++ uniform.ghsettings
    ++ Seq[Sett](
         publishArtifact := false
       , addCompilerPlugin(depend.macroParadise())
       , unidocProjectFilter in (ScalaUnidoc, unidoc) := inAnyProject -- inProjects(example, schema, benchmark)
       )
  , aggregate = Seq(core, macros, scalding, api, test, schema)
  )
  // Public API facade combining core, macros and scalding.
  lazy val api = Project(
    id = "api"
  , base = file("maestro-api")
  , settings =
       standardSettings
    ++ uniform.project("maestro", "au.com.cba.omnia.maestro.api")
    ++ Seq[Sett](
         libraryDependencies ++= depend.hadoopClasspath ++ depend.hadoop() ++ depend.testing()
       )
  ).dependsOn(core)
   .dependsOn(macros)
   .dependsOn(scalding)
  lazy val core = Project(
    id = "core"
  , base = file("maestro-core")
  , settings =
       standardSettings
    ++ uniformThriftSettings
    ++ uniform.project("maestro-core", "au.com.cba.omnia.maestro.core")
    ++ humbugSettings
    ++ Seq[Sett](
         scroogeThriftSourceFolder in Test := sourceDirectory.value / "test" / "thrift" / "scrooge",
         humbugThriftSourceFolder in Test := sourceDirectory.value / "test" / "thrift" / "humbug",
         libraryDependencies ++=
           depend.scalaz()
        ++ depend.hadoopClasspath
        ++ depend.hadoop()
        ++ depend.shapeless() ++ depend.testing()
        ++ depend.omnia("beeswax", beeswaxVersion)
        ++ depend.omnia("ebenezer", ebenezerVersion)
        ++ depend.omnia("ebenezer-test", ebenezerVersion, "test")
        ++ depend.omnia("permafrost", permafrostVersion)
        ++ depend.omnia("edge", edgeVersion)
        ++ depend.omnia("humbug-core", humbugVersion)
        ++ depend.omnia("omnitool-time", omnitoolVersion)
        ++ depend.omnia("omnitool-file", omnitoolVersion)
        ++ depend.omnia("parlour", parlourVersion)
        ++ depend.scalikejdbc()
        ++ Seq(
             noHadoop("commons-validator" % "commons-validator" % "1.4.0"),
             "com.opencsv" % "opencsv" % "3.3"
               exclude ("org.apache.commons", "commons-lang3") // conflicts with hive
           ),
         parallelExecution in Test := false
       )
  )
  lazy val macros = Project(
    id = "macros"
  , base = file("maestro-macros")
  , settings =
       standardSettings
    ++ uniform.project("maestro-macros", "au.com.cba.omnia.maestro.macros")
    ++ Seq[Sett](
         libraryDependencies ++= Seq(
           "org.scala-lang" % "scala-reflect" % scalaVersion.value
         ) ++ depend.testing()
       , addCompilerPlugin(depend.macroParadise())
       )
  ).dependsOn(core)
   .dependsOn(test % "test")
  lazy val scalding = Project(
    id = "scalding"
  , base = file("maestro-scalding")
  , settings =
       standardSettings
    ++ uniformThriftSettings
    ++ uniform.project("maestro-scalding", "au.com.cba.omnia.maestro.scalding")
    ++ Seq[Sett](
         libraryDependencies ++=
           depend.scalaz()
        ++ depend.scalding()
        ++ depend.hadoopClasspath
        ++ depend.hadoop()
        ++ depend.parquet()
        ++ depend.testing()
        ++ depend.omnia("omnitool-core", omnitoolVersion, "test").map(_ classifier "tests")
        ++ depend.omnia("thermometer-hive", thermometerVersion, "test"),
         parallelExecution in Test := false
       )
  ).dependsOn(core % "compile->compile;test->test")
  lazy val schema = Project(
    id = "schema"
  , base = file("maestro-schema")
  , settings =
       standardSettings
    ++ uniform.project("maestro-schema", "au.com.cba.omnia.maestro.schema")
    ++ uniformAssemblySettings
    ++ Seq[Sett](
         libraryDependencies ++= Seq(
           "com.quantifind" %% "sumac" % "0.3.0"
         , "org.scala-lang" % "scala-reflect" % scalaVersion.value
         , "org.apache.commons" % "commons-lang3" % "3.1"
         ) ++ depend.scalding() ++ depend.hadoopClasspath ++ depend.hadoop()
       )
  )
  // Not aggregated into `all`; excluded from unidoc above.
  lazy val example = Project(
    id = "example"
  , base = file("maestro-example")
  , settings =
       standardSettings
    ++ uniform.project("maestro-example", "au.com.cba.omnia.maestro.example")
    ++ uniformAssemblySettings
    ++ uniformThriftSettings
    ++ Seq[Sett](
         libraryDependencies ++= depend.hadoopClasspath ++ depend.hadoop() ++ depend.parquet() ++
           depend.scalikejdbc().map(_.copy(configurations = Some("test")))
       , parallelExecution in Test := false
       , sources in doc in Compile := List()
       , addCompilerPlugin(depend.macroParadise())
       )
  ).dependsOn(core)
   .dependsOn(macros)
   .dependsOn(api)
   .dependsOn(test % "test")
  // ScalaMeter benchmarks; not aggregated into `all`.
  lazy val benchmark = Project(
    id = "benchmark"
  , base = file("maestro-benchmark")
  , settings =
       standardSettings
    ++ uniform.project("maestro-benchmark", "au.com.cba.omnia.maestro.benchmark")
    ++ humbugSettings
    ++ Seq[Sett](
         libraryDependencies ++= Seq(
           "com.storm-enroute" %% "scalameter" % "0.6"
             exclude("org.scala-lang.modules", "scala-parser-combinators_2.11")
             exclude("org.scala-lang.modules", "scala-xml_2.11")
         ) ++ depend.testing()
       , testFrameworks += new TestFramework("org.scalameter.ScalaMeterFramework")
       , parallelExecution in Test := false
       , logBuffered := false
       )
  ).dependsOn(core)
   .dependsOn(macros)
   .dependsOn(api)
  lazy val test = Project(
    id = "test"
  , base = file("maestro-test")
  , settings =
       standardSettings
    ++ uniform.project("maestro-test", "au.com.cba.omnia.maestro.test")
    ++ uniformThriftSettings
    ++ humbugSettings
    ++ Seq[Sett](
         scroogeThriftSourceFolder in Compile := sourceDirectory.value / "main" / "thrift" / "scrooge"
       , humbugThriftSourceFolder in Compile := sourceDirectory.value / "main" / "thrift" / "humbug"
       , libraryDependencies ++=
            depend.omnia("ebenezer-test", ebenezerVersion)
         ++ depend.hadoopClasspath ++ depend.hadoop()
         ++ depend.testing(configuration = "test")
       )
  ).dependsOn(core, scalding)
}
| CommBank/maestro | project/build.scala | Scala | apache-2.0 | 8,472 |
package lila.team
import reactivemongo.api._
import reactivemongo.bson._
import lila.db.dsl._
/** Mongo persistence for team memberships; ids are composite `team`/`user` pairs. */
object MemberRepo {
  // dirty — reaches directly into the module Env for the collection.
  private val coll = Env.current.colls.member
  import BSONHandlers._
  // Both team ids and user ids are plain strings.
  type ID = String
  /** Ids of all users belonging to the given team. */
  def userIdsByTeam(teamId: ID): Fu[Set[ID]] =
    coll.distinct[String, Set]("user", $doc("team" -> teamId).some)
  /** Ids of all teams the given user belongs to. */
  def teamIdsByUser(userId: ID): Fu[Set[ID]] =
    coll.distinct[String, Set]("team", $doc("user" -> userId).some)
  // NOTE(review): lower-case "team" in this name looks like a typo, but renaming would break callers.
  /** Delete every membership of the given team. */
  def removeByteam(teamId: ID): Funit =
    coll.remove(teamQuery(teamId)).void
  /** Delete every membership of the given user. */
  def removeByUser(userId: ID): Funit =
    coll.remove(userQuery(userId)).void
  /** Whether the given user is a member of the given team. */
  def exists(teamId: ID, userId: ID): Fu[Boolean] =
    coll.exists(selectId(teamId, userId))
  def add(teamId: String, userId: String): Funit =
    coll.insert(Member.make(team = teamId, user = userId)).void
  def remove(teamId: String, userId: String): Funit =
    coll.remove(selectId(teamId, userId)).void
  def countByTeam(teamId: String): Fu[Int] =
    coll.countSel(teamQuery(teamId))
  // Query helpers: by composite member id, by team, and by user.
  def selectId(teamId: ID, userId: ID) = $id(Member.makeId(teamId, userId))
  def teamQuery(teamId: ID) = $doc("team" -> teamId)
  def userQuery(userId: ID) = $doc("user" -> userId)
}
| clarkerubber/lila | modules/team/src/main/MemberRepo.scala | Scala | agpl-3.0 | 1,195 |
package com.github.pimterry.loglevel
import scala.scalajs.js
/**
 * Scala.js facade for a loglevel `Logger` instance; all members are implemented
 * by the underlying JavaScript library (`js.native`).
 *
 * @author steven
 *
 */
@js.native
trait Logger extends js.Object {
  // Level-specific logging entry points; extra arguments are passed through to the JS logger.
  def trace(msg: Any, addl: Any*): Unit = js.native
  def debug(msg: Any, addl: Any*): Unit = js.native
  def info(msg: Any, addl: Any*): Unit = js.native
  def warn(msg: Any, addl: Any*): Unit = js.native
  def error(msg: Any, addl: Any*): Unit = js.native
  // Minimum level to emit, either as a Level value or its string name.
  def setLevel(level: Level): Unit = js.native
  def setLevel(level: String): Unit = js.native
  def enableAll(): Unit = js.native
  def disableAll(): Unit = js.native
}
| CodeMettle/scalajs-loglevel | src/main/scala/com/github/pimterry/loglevel/Logger.scala | Scala | mit | 566 |
package com.bostontechnologies.amqp
import org.specs2.mutable.Specification
import akka.actor.{Props, ActorRef, ActorSystem}
import scalaz._
import Scalaz._
import shapeless.TypeOperators._
import com.bostontechnologies.amqp.ConnectionModel.{Disconnect, ConnectionConfig, ConnectionRef}
import com.bostontechnologies.amqp.ChannelModel.{QueueBinding, Queue, fanout, Exchange}
import com.bostontechnologies.amqp.ConsumerModel.AckableMessage
import com.rabbitmq.client.{ConnectionFactory, Address}
import Connection._
import util.Random
import com.bostontechnologies.amqp.ConnectionModel.ConnectionConfig
import com.bostontechnologies.amqp.ChannelModel.Exchange
import com.bostontechnologies.amqp.ChannelModel.QueueBinding
import com.bostontechnologies.amqp.ChannelModel.Queue
import java.util.concurrent.{CountDownLatch, TimeUnit}
import com.bostontechnologies.amqp.SenderModel.SendMessage
/**
* This test follows the pattern of:
*
* 1) Create Connection Object
* 2) Create Producer/Consumer given connection object
* 3) Actually "Connect" using Connection Object (i.e. make TCP/IP connection etc.)
*/
object EndToEnd extends Specification{
  val addressList: List[Address] = List(new Address("localhost"))
  // Randomized names so concurrent/repeated runs don't collide on the broker.
  val exchangeName = "testExchange" + Random.nextInt(10000)
  val routingKey = "routingKey" + Random.nextInt(10000)
  val exchange = Exchange(exchangeName, fanout, durable = true, autoDelete = false)
  val queue = Queue("tesla.quotes.queue", exchange.durable, exclusive = false, autoDelete = exchange.autoDelete)
  "Amqp Sender And Receiver" should {
    "work" in {
      implicit val amqpActorSystem = ActorSystem("AmqpActorSystem")
      val rabbitConnectionFactory = new com.rabbitmq.client.ConnectionFactory()
      rabbitConnectionFactory.setRequestedHeartbeat(2)
      // Counted down by the consumer callback when the published message arrives.
      val countdownLatch = new CountDownLatch(1)
      // Consumer: declare queue/exchange/binding, then connect; the fold's left side reports setup failure.
      (for {
        consumerConnection <- newConnection(ConnectionConfig(rabbitConnectionFactory, addressList), "TigerAmqpEntryPointConnection")
        _ <- Connection.autoAckingConsumerOf({(_,_) => countdownLatch.countDown() }, Set(queue), Set(exchange), Set(QueueBinding(queue.name, exchangeName, routingKey)), "Consumer")(consumerConnection)
        _ <- Connection.connect()(consumerConnection)
      } yield consumerConnection).fold(
        throwable => {
          throwable.printStackTrace()
          false
        }, consumerConnection => {
          // Producer: separate connection plus a sender bound to the same exchange.
          (for {
            producerConnection <- newConnection(ConnectionConfig(new ConnectionFactory(), addressList), "s")
            producerSender <- senderOf(exchange.some, "s")(producerConnection)
            _ <- connect()(producerConnection)
          } yield (producerConnection, producerSender)).fold(
            throwable => {
              throwable.printStackTrace()
              false
            },{
              case (producerConnection, producerSender) => {
                producerSender ! SendMessage(routingKey, "hi".getBytes(), exchange.some)
                // Success iff the consumer saw the message within 5 seconds.
                val result = countdownLatch.await(5, TimeUnit.SECONDS)
                producerConnection ! Disconnect
                consumerConnection ! Disconnect
                result
              }
            }
          )
        })
      }
  }
}
| Forexware/scala-amqp | src/test/scala/com/bostontechnologies/amqp/EndToEnd.scala | Scala | apache-2.0 | 3,208 |
/*
* Copyright 2014 Treode, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.treode.async.stubs
import java.util.concurrent.Executors
import scala.util.Random
import com.treode.async.Scheduler
import org.scalatest.{Informing, Suite}
trait AsyncChecks {
  this: Suite with Informing =>

  /** The number of times to run a pseudo-random test. Uses `NSEEDS` from the environment, or
    * defaults to 1.
    */
  val nseeds: Int =
    Option(System.getenv("NSEEDS")).fold(1)(s => Integer.parseInt(s))

  /** Run the test with `seed`; add `seed` to the test info on failure. */
  def forSeed(seed: Long)(test: Long => Any) {
    try test(seed)
    catch {
      case t: Throwable =>
        // Report the seed so a failing run can be reproduced deterministically.
        info(s"Test failed; seed = ${seed}L")
        throw t
    }
  }

  /** Run a test many times, each time with a different seed. Use `NSEEDS` from the environment to
    * determine how many times, or defaults to 1.
    */
  def forAllSeeds(test: Long => Any) {
    (0 until nseeds).foreach(_ => forSeed(Random.nextLong)(test))
  }

  /** Run a pseudo-random test many times, each time with a PRNG seeded differently. Use `NSEEDS`
    * from the environment to determine how many times, or defaults to 1.
    */
  def forAllRandoms(test: Random => Any): Unit =
    forAllSeeds { seed => test(new Random(seed)) }
}
| Treode/store | core/stub/com/treode/async/stubs/AsyncChecks.scala | Scala | apache-2.0 | 1,889 |
/*
* Copyright 2013 - 2020 Outworkers Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.outworkers.phantom.finagle
import com.outworkers.phantom.PhantomSuite
class CreateQueryFinagleTests extends PhantomSuite with TwitterFutures {

  it should "execute a simple query with secondary indexes with Twitter futures" in {
    val creationQuery = database.secondaryIndexTable.create.ifNotExists().future()

    whenReady(creationQuery) { results =>
      info("The creation query of secondary indexes should execute successfully")
      // Every statement in the creation batch must have been applied.
      results.forall(_.wasApplied()) shouldEqual true
    }
  }
}
| outworkers/phantom | phantom-finagle/src/test/scala/com/outworkers/phantom/finagle/CreateQueryFinagleTests.scala | Scala | apache-2.0 | 1,085 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.backend.api.rdds
import ai.h2o.sparkling.utils.SparkSessionUtils
import water.exceptions.H2ONotFoundArgumentException
trait RDDCommons {

  /** Throws [[H2ONotFoundArgumentException]] when no persistent RDD with the given id exists. */
  def validateRDDId(rddId: Int): Unit = {
    val persistentRDDs = SparkSessionUtils.active.sparkContext.getPersistentRDDs
    if (!persistentRDDs.contains(rddId)) {
      throw new H2ONotFoundArgumentException(s"RDD with ID '$rddId' does not exist!")
    }
  }
}
| h2oai/sparkling-water | core/src/main/scala/ai/h2o/sparkling/backend/api/rdds/RDDCommons.scala | Scala | apache-2.0 | 1,178 |
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2014 Alexey Aksenov ezh@ezh.msk.ru
*
* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS»,
* Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU Affero General Public License for more details.
 * You should have received a copy of the GNU Affero General Public License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
 * Section 5 of the GNU Affero General Public License.
*
 * In accordance with Section 7(b) of the GNU Affero General Public License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: ezh@ezh.msk.ru
*/
package org.digimead.tabuddy.desktop.id
import com.google.common.io.Files
import java.io.File
import org.bouncycastle.openpgp.{ PGPPublicKey, PGPPublicKeyRing, PGPSecretKey, PGPSecretKeyRing }
import org.digimead.digi.lib.api.XDependencyInjection
import org.digimead.tabuddy.desktop.core.support.App
import scala.language.implicitConversions
/**
* TABuddy identification information.
*/
class ID extends ID.Interface with Someone with Universe with Guest {
  /** Someone's secret key ring. */
  val thisPlainSecretKeyRing: String =
    if (thisPlainPublicKeyRing == guestPlainPublicKeyRing) {
      // Unregistered version: fall back to the guest key as Someone's own.
      guestPlainSecretKeyRing
    } else {
      val location = ID.secretKeyRingLocation
      if (!location.exists())
        throw new RuntimeException("Unable to find secret key at " + location.getAbsolutePath())
      Files.toString(location, io.Codec.UTF8.charSet)
    }
}
/**
* Identificator for the fuzzy group of users.
*/
object ID {
  // Lets callers use the `ID` companion object as if it were the configured
  // instance (e.g. `ID.thisPublicKeyRing`), by forwarding to `inner`.
  implicit def ID2implementation(i: ID.type): ID = i.inner

  /** Get ID implementation. */
  def inner = DI.implementation
  /** Get secret key location. */
  def secretKeyRingLocation = DI.secretKeyRingLocation

  /** ID interface. */
  trait Interface {
    /** Universe public key. */
    def thatPublicKey: PGPPublicKey
    /** Encryption public key of fuzzy group.*/
    def thisPublicEncryptionKey: PGPPublicKey
    /** Public key ring of fuzzy group. */
    def thisPublicKeyRing: PGPPublicKeyRing
    /** Signing public key of fuzzy group.*/
    def thisPublicSigningKey: PGPPublicKey
    /** Encryption secret key of fuzzy group.*/
    def thisSecretEncryptionKey: PGPSecretKey
    /** Secret key ring of fuzzy group. */
    def thisSecretKeyRing: PGPSecretKeyRing
    /** Signing secret key of fuzzy group.*/
    def thisSecretSigningKey: PGPSecretKey
  }
  /**
   * Dependency injection routines
   */
  private object DI extends XDependencyInjection.PersistentInjectable {
    /** ID implementation. Lazily resolved; falls back to a plain `new ID` if nothing is injected. */
    lazy val implementation = injectOptional[ID] getOrElse new ID
    /** Someone's secret key ring location. Defaults to `tabuddy.key` under the application data dir. */
    lazy val secretKeyRingLocation = injectOptional[File]("My.Key.Location") getOrElse new File(App.data, "tabuddy.key")
  }
}
| digimead/digi-TABuddy-desktop | part-id/src/main/scala/org/digimead/tabuddy/desktop/id/ID.scala | Scala | agpl-3.0 | 4,502 |
package com.twitter.util
import com.twitter.conversions.DurationOps._
import scala.collection.mutable
import org.scalatest.funsuite.AnyFunSuite
class SlowProbeProxyTimerTest extends AnyFunSuite {
  private type Task = () => Unit
  private val NullTask = new TimerTask { def cancel(): Unit = () }

  // Tasks running strictly longer than this should trigger the slow-task callbacks.
  private val maxRuntime = 20.milliseconds

  /** Test double that records scheduled work and the slow-task callback invocations. */
  private class TestSlowProbeProxyTimer extends SlowProbeProxyTimer(maxRuntime) {
    // Work handed to the underlying timer; dequeue and invoke a task to simulate execution.
    val scheduledTasks: mutable.Queue[Task] = new mutable.Queue[Task]()
    // Elapsed time reported via `slowTaskCompleted`, if it fired.
    var slowTaskDuration: Option[Duration] = None
    // Elapsed time reported via `slowTaskExecuting` (new work scheduled during a slow task), if it fired.
    var slowTaskExecuting: Option[Duration] = None

    protected def slowTaskCompleted(elapsed: Duration): Unit = { slowTaskDuration = Some(elapsed) }
    protected def slowTaskExecuting(elapsed: Duration): Unit = { slowTaskExecuting = Some(elapsed) }

    // Underlying timer stub: captures scheduled closures instead of running them.
    protected val self: Timer = new Timer {
      protected def scheduleOnce(when: Time)(f: => Unit): TimerTask = {
        scheduledTasks.enqueue(() => f)
        NullTask
      }
      protected def schedulePeriodically(when: Time, period: Duration)(f: => Unit): TimerTask =
        schedule(when)(f)
      def stop(): Unit = ()
    }
  }

  test("tasks that don't exceed the deadline are not counted and the slow-task hook is not fired") {
    val meteredTimer = new TestSlowProbeProxyTimer
    val now = Time.now
    Time.withTimeFunction(now) { control =>
      // Task runs for exactly maxRuntime — not *longer* than the deadline, so no callback.
      meteredTimer.schedule(Time.now) {
        control.advance(maxRuntime)
      }
      assert(meteredTimer.slowTaskDuration.isEmpty)
      assert(meteredTimer.slowTaskExecuting.isEmpty)

      val task = meteredTimer.scheduledTasks.dequeue()
      task() // execute the task
      assert(meteredTimer.slowTaskDuration.isEmpty)
      assert(meteredTimer.slowTaskExecuting.isEmpty) // no work was scheduled
    }
  }

  test("slow tasks are counted even if other work is not scheduled") {
    val meteredTimer = new TestSlowProbeProxyTimer
    val now = Time.now
    val taskDuration = maxRuntime + 1.millisecond
    Time.withTimeFunction(now) { control =>
      meteredTimer.schedule(Time.now) {
        control.advance(taskDuration)
      }
      assert(meteredTimer.slowTaskDuration.isEmpty)
      assert(meteredTimer.slowTaskExecuting.isEmpty)

      val task = meteredTimer.scheduledTasks.dequeue()
      task() // execute the task
      assert(meteredTimer.slowTaskDuration == Some(taskDuration))
      assert(meteredTimer.slowTaskExecuting.isEmpty) // no work was scheduled
    }
  }

  test("scheduling work during a slow task fires the slow-task hook") {
    val meteredTimer = new TestSlowProbeProxyTimer
    val now = Time.now
    val taskDuration = maxRuntime + 1.millisecond
    Time.withTimeFunction(now) { control =>
      meteredTimer.schedule(Time.now) {
        // A task that takes 21 milliseconds to schedule more work.
        control.advance(taskDuration)
        meteredTimer.schedule(Time.now) { () /* Boring task :/ */ }
      }
      assert(meteredTimer.slowTaskDuration.isEmpty)
      assert(meteredTimer.slowTaskExecuting.isEmpty)

      val task = meteredTimer.scheduledTasks.dequeue()
      task() // execute the task
      assert(meteredTimer.slowTaskDuration == Some(taskDuration))
      assert(meteredTimer.slowTaskExecuting == Some(taskDuration))
    }
  }
}
| twitter/util | util-core/src/test/scala/com/twitter/util/SlowProbeProxyTimerTest.scala | Scala | apache-2.0 | 3,270 |
/*
* Copyright 2016 Coursera Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.coursera.naptime.actions
import java.lang.Iterable
import java.util.Map.Entry
import com.fasterxml.jackson.core.JsonGenerator
import com.fasterxml.jackson.core.util.DefaultPrettyPrinter
import com.linkedin.data.Data
import com.linkedin.data.DataList
import com.linkedin.data.DataMap
import com.linkedin.data.codec.JacksonDataCodec
import com.linkedin.data.codec.JacksonDataCodec.JsonTraverseCallback
import org.coursera.pegasus.TypedDefinitionDataCoercer
import org.coursera.naptime.AllFields
import org.coursera.naptime.DelegateFields
import org.coursera.naptime.RestError
import org.coursera.naptime.FacetField
import org.coursera.naptime.Fields
import org.coursera.naptime.Ok
import org.coursera.naptime.QueryIncludes
import org.coursera.naptime.Redirect
import org.coursera.naptime.RequestFields
import org.coursera.naptime.RequestPagination
import org.coursera.naptime.ResourceName
import org.coursera.naptime.RestResponse
import play.api.http.ContentTypes
import play.api.http.HeaderNames
import play.api.http.Status
import play.api.mvc.RequestHeader
import play.api.mvc.Result
import play.api.mvc.Results
import java.io.IOException
import org.coursera.naptime.RequestIncludes
import org.coursera.naptime.actions.util.DataMapUtils
import org.coursera.common.stringkey.StringKey
import org.coursera.naptime.ETag
import org.coursera.naptime.ResponsePagination
import org.coursera.naptime.ari.TopLevelRequest
import org.coursera.naptime.ari.TopLevelResponse
import org.coursera.naptime.ari.{Response => AriResponse}
import org.coursera.naptime.model.KeyFormat
import org.coursera.naptime.model.Keyed
import scala.collection.JavaConversions._
import scala.concurrent.Future
/** Importable bundle of the second-generation engine implicits; see [[RestActionCategoryEngine2Impls]]. */
object RestActionCategoryEngine2 extends RestActionCategoryEngine2Impls
/**
 * A [[RestActionCategoryEngine]] that can additionally construct an ARI `Response`
 * (aliased here as `AriResponse`) for the local fetcher, instead of only a Play `Result`.
 */
trait RestActionCategoryEngine2[Category, Key, Resource, Response]
  extends RestActionCategoryEngine[Category, Key, Resource, Response] {

  /**
   * Engine2's support this additional method for response construction to support the
   * local fetcher for ARI.
   *
   * @param requestFields fields requested by the client (filtering the serialized output)
   * @param requestIncludes related resources the client asked to be included
   * @param response the resource-layer response to convert; errors become failed futures
   * @param topLevelRequest the ARI request this response answers, used as the response key
   */
  def mkResponse(request: RequestHeader,
      resourceFields: Fields[Resource],
      requestFields: RequestFields,
      requestIncludes: QueryIncludes,
      pagination: RequestPagination,
      response: RestResponse[Response],
      resourceName: ResourceName,
      topLevelRequest: TopLevelRequest): Future[AriResponse]
}
/**
* 2nd generation engines with Pegasus DataMaps at the core. To use, import them at the top of your
* file.
*/
/**
 * 2nd generation engines with Pegasus DataMaps at the core. To use, import them at the top of your
 * file.
 */
trait RestActionCategoryEngine2Impls {

  /** Dispatches on the response kind: runs `fn` for `Ok`, otherwise returns the error's or
    * redirect's pre-built Play `Result`. (The `Ok[T]` match is unchecked under erasure; only
    * the runtime class matters here.) */
  private[this] def mkOkResult[T](r: RestResponse[T])(fn: Ok[T] => Result) = {
    r match {
      case ok: Ok[T] => fn(ok)
      case error: RestError => error.error.result
      case redirect: Redirect => redirect.result
    }
  }

  /** Async variant of [[mkOkResult]] for ARI responses: errors become failed futures and
    * redirects are rejected, since the ARI layer has no redirect representation. */
  private[this] def mkOkResponse[T](r: RestResponse[T])
      (fn: Ok[T] => Future[AriResponse]): Future[AriResponse] = {
    r match {
      case ok: Ok[T] => fn(ok)
      case error: RestError => Future.failed(error.error)
      case redirect: Redirect =>
        Future.failed(new IllegalArgumentException("ARI Response cannot handle redirect responses"))
    }
  }

  /** Builds an ARI [[AriResponse]] from a collection of elements.
    *
    * The schema is taken from the first element; if no schema can be computed and the
    * collection is non-empty, the future fails. Each element is serialized to a DataMap
    * whose "id" entry is collected into the top-level id list.
    */
  private[this] def buildOkResponse[K, V](
      things: scala.collection.Iterable[Keyed[K, V]],
      ok: Ok[_],
      keyFormat: KeyFormat[K],
      serializer: NaptimeSerializer[V],
      requestFields: RequestFields,
      requestIncludes: RequestIncludes,
      fields: Fields[V],
      pagination: RequestPagination,
      resourceName: ResourceName,
      topLevelRequest: TopLevelRequest,
      uri: String): Future[AriResponse] = {
    val schemaOpt = for {
      headElem <- things.headOption
      schema <- serializer.schema(headElem.value)
    } yield schema

    schemaOpt.map { schema =>
      val wireConverter = Some(new TypedDefinitionDataCoercer(schema))
      val topLevelDataList = new DataList()
      val serialized = for (elem <- things) yield {
        val dataMap = new DataMap()
        serializeItem(dataMap, elem, keyFormat, serializer, wireConverter)
        topLevelDataList.add(dataMap.get("id"))
        dataMap
      }
      // Index serialized elements by their "id" entry for lookup by the ARI layer.
      val resourceMap = serialized.map { dataMap =>
        dataMap.get("id") -> dataMap
      }.toMap
      // TODO: serialized related ones too!
      val topLevelResponseMap = Map(topLevelRequest ->
        TopLevelResponse(topLevelDataList, ok.pagination.getOrElse(ResponsePagination.empty)))
      val responseDataMap = Map(resourceName -> resourceMap)
      Future.successful(AriResponse(topLevelResponseMap, responseDataMap))
    }.getOrElse {
      if (things.isEmpty) {
        Future.successful(AriResponse.empty)
      } else {
        Future.failed(new IllegalArgumentException("Could not compute schema for resource value."))
      }
    }
  }

  /** Helpers for building the `ETag` response header, either from a caller-provided
    * value or by hashing the serialized response body. */
  private[this] object ETagHelpers {
    private[this] def constructEtagHeader(etag: ETag): (String, String) = {
      HeaderNames.ETAG -> StringKey(etag).key
    }

    /** Uses the ETag explicitly set on the `Ok`, if any. */
    private[naptime] def addProvidedETag[T](ok: Ok[T]): Option[(String, String)] = {
      ok.eTag.map { eTag =>
        constructEtagHeader(eTag)
      }
    }

    /** Derives an ETag from the response body and pagination hash codes. */
    private[naptime] def computeETag(dataMap: DataMap,
        pagination: RequestPagination): (String, String) = {
      // For now, use Play!'s built-in hashcode implementations. This should be good enough for now.
      // Note: upgrading Play! versions (even point releases) could change the output of hashCode.
      // Because ETags are just an optimization, we are okay with that for now.
      // Note: for pagination, we explicitly call the eTagHashCode that excludes some fields.
      val hashCode = Set(dataMap.hashCode(), pagination.eTagHashCode()).hashCode()
      constructEtagHeader(ETag(hashCode.toString))
    }
  }

  /** ETag header for a response that always has a body: provided tag wins, else computed. */
  private[naptime] def mkETagHeader[T](pagination: RequestPagination, ok: Ok[T],
      jsRepresentation: DataMap): (String, String) =
    ETagHelpers.addProvidedETag(ok).getOrElse(ETagHelpers.computeETag(jsRepresentation, pagination))

  /** ETag header for a response that may have no body (e.g. 204 No Content). */
  private[naptime] def mkETagHeaderOpt[T](pagination: RequestPagination, ok: Ok[T],
      jsRepresentation: Option[DataMap]): Option[(String, String)] =
    ETagHelpers.addProvidedETag(ok).orElse(
      jsRepresentation.map(ETagHelpers.computeETag(_, pagination)))

  /** Serializes a single keyed element into `into`.
    *
    * Value fields are copied first, then the typed-definition coercer is applied, then key
    * fields are copied on top (so key fields win on collision), and finally an "id" entry is
    * added if the key did not already provide one.
    *
    * @return the set of field names contributed by the key (used to avoid filtering them out)
    */
  private[naptime] def serializeItem[K, V](
      into: DataMap,
      thing: Keyed[K, V],
      keyFormat: KeyFormat[K],
      serializer: NaptimeSerializer[V],
      wireConverter: Option[TypedDefinitionDataCoercer]): scala.collection.Set[String] = {
    val key = keyFormat.format.writes(thing.key)
    // Make a new dataMap and copy other things into it, because the others are locked.
    val valueDataMap = serializer.serialize(thing.value)
    val keyMap = NaptimeSerializer.PlayJson.serialize(key)
    // Insert all the value entries into the data map.
    for (elem <- valueDataMap.entrySet) {
      into.put(elem.getKey, DataMapUtils.ensureMutable(elem.getValue))
    }
    wireConverter.foreach { converter =>
      converter.convertUnionToTypedDefinitionInPlace(into)
    }
    // Insert all the key entries into the data map, overriding any previously set values.
    for (elem <- keyMap.entrySet()) {
      into.put(elem.getKey, elem.getValue)
    }
    // Include the id field if it hasn't been included already.
    if (!into.containsKey("id")) {
      into.put("id", keyFormat.stringKeyFormat.writes(thing.key).key)
    }
    key.keys
  }

  /**
   * Serializes a collection of Keyed resources into the provided DataList
   *
   * Note: be sure to pre-insert the dataList into the larger response before calling this function
   * in order to avoid expensive and unnecessary cycle checks.
   *
   * @return The complete collection of fields that should be included in the response (includes the
   *         key fields)
   */
  private[naptime] def serializeCollection[K, V](
      dataList: DataList,
      things: scala.collection.Iterable[Keyed[K, V]],
      keyFormat: KeyFormat[K],
      serializer: NaptimeSerializer[V],
      requestFields: RequestFields,
      fields: Fields[V]): RequestFields = {
    // Compute the set of field names provided by the Key type to avoid filtering them out in the
    // response serializer. (This is to maintain backwards compatibility with the legacy Rest
    // Engines.)
    val wireConverter = for {
      first <- things.headOption
      schema <- serializer.schema(first.value)
    } yield new TypedDefinitionDataCoercer(schema)
    // TODO: Verify this is a performant way of computing this. (i.e. consider mutability)
    var keyFields = Set("id")
    for (elem <- things) {
      // Make a new dataMap and copy other things into it, because the others are locked.
      val dataMap = new DataMap()
      dataList.add(dataMap) // Eagerly insert.
      keyFields ++= serializeItem(dataMap, elem, keyFormat, serializer, wireConverter)
    }
    requestFields.mergeWithDefaults(keyFields ++ fields.defaultFields)
  }

  /**
   * Call this after calling [[serializeCollection()]], passing in the returned [[RequestFields]]
   *
   * Serializes related (included) resources into `linked`, covering both first-hop relations of
   * this resource and relations reachable through already-included resources.
   *
   * @return Pass the returned RequestFields to construct the [[FlattenedFilteringJacksonDataCodec]]
   */
  private[this] def serializeRelated[T](
      linked: DataMap,
      response: Ok[T],
      resourceFields: Fields[_],
      requestIncludes: RequestIncludes,
      requestFields: RequestFields): RequestFields = {
    val firstHopModelsToInclude = resourceFields.relations.filter { relationTuple =>
      requestIncludes.includeFieldsRelatedResource(relationTuple._1)
    }.values
    val multiHopModelsToInclude = for {
      (resourceName, relation) <- response.related
      queryIncludes <- requestIncludes.forResource(resourceName).toIterable
      hopRelation <- relation.fields.relations
      if queryIncludes.includeFieldsRelatedResource(hopRelation._1)
    } yield hopRelation._2
    val modelsToInclude = firstHopModelsToInclude.toSet ++ multiHopModelsToInclude
    val updatedRelatedFields = for {
      relationName <- modelsToInclude
      relation <- response.related.get(relationName)
    } yield {
      val dataList = new DataList()
      linked.put(relationName.identifier, dataList) // pre-insert to avoid cycle checks
      val relationFields = requestFields.forResource(relationName).getOrElse(RequestFields.empty)
      relationName -> relation.toPegasus(relationFields, dataList)
    }
    DelegateFields(requestFields, updatedRelatedFields.toMap)
  }

  /** Builds the standard response skeleton: { elements: [], paging: {}, linked: {} }. */
  private[this] def mkDataCollections() = {
    val response = new DataMap()
    val elements = new DataList()
    response.put("elements", elements)
    val paging = new DataMap()
    response.put("paging", paging)
    val linked = new DataMap()
    response.put("linked", linked)
    (response, elements, paging, linked)
  }

  /** Fully-serialized response plus the codec and ETag needed to emit it as a Play result. */
  case class ProcessedResponse(
      response: DataMap,
      codec: FlattenedFilteringJacksonDataCodec,
      etag: (String, String)) {

    def elements: DataMap = response.get("elements").asInstanceOf[DataMap]
    def paging: DataMap = response.get("paging").asInstanceOf[DataMap]
    def linked: DataMap = response.get("linked").asInstanceOf[DataMap]

    /** Emits the response, answering 304 Not Modified when the client's If-None-Match matches. */
    def playResponse(code: Int, ifNoneMatchHeader: Option[String]): Result = {
      if (ifNoneMatchHeader.contains(etag._2)) {
        Results.NotModified.withHeaders(etag)
      } else {
        Results.Status(code)(codec.mapToBytes(response)).as(ContentTypes.JSON).withHeaders(etag)
      }
    }
  }

  /** Serializes search facets into `dataMap`; facets with neither entries nor a cardinality
    * are skipped entirely. */
  private[this] def serializeFacets(dataMap: DataMap, facets: Map[String, FacetField]): Unit = {
    for {
      (key, value) <- facets
      if value.facetEntries.nonEmpty || value.fieldCardinality.isDefined
    } {
      val facetArray = new DataList()
      val facetMap = new DataMap()
      facetMap.put("facetEntries", facetArray)
      dataMap.put(key, facetMap)
      value.fieldCardinality.foreach { cardinality =>
        facetMap.put("fieldCardinality", new java.lang.Long(cardinality))
      }
      for (facetEntry <- value.facetEntries) {
        val facetEntryDataMap = new DataMap()
        facetArray.add(facetEntryDataMap)
        facetEntryDataMap.put("id", facetEntry.id)
        facetEntryDataMap.put("count", new java.lang.Long(facetEntry.count))
        facetEntry.name.foreach { name =>
          facetEntryDataMap.put("name", name)
        }
      }
    }
  }

  /** Adds the "links" section (relation descriptors) when the client asked for the
    * pseudo-resource "_links" via includes. */
  private[this] def addLinks(
      response: DataMap,
      request: RequestIncludes,
      requestFields: RequestFields,
      fields: Fields[_],
      ok: Ok[_]): Unit = {
    if (request.includeFieldsRelatedResource("_links")) {
      val links = new DataMap()
      response.put("links", links)
      val visibleIncludes = ok.related.filterKeys(requestFields.forResource(_).isDefined)
      visibleIncludes.foreach { case (name, related) =>
        related.fields.makeLinksRelationsMap(
          links,
          name.identifier,
          request.forResource(name).getOrElse(QueryIncludes.empty))
      }
      fields.makeLinksRelationsMap(links, "elements", request)
    }
  }

  /** Assembles the complete Play-facing response: elements, paging (next/total/facets),
    * linked resources, links section, ETag, and the field-filtering codec. */
  private[this] def buildOkResult[K, V](
      things: scala.collection.Iterable[Keyed[K, V]],
      ok: Ok[_],
      keyFormat: KeyFormat[K],
      serializer: NaptimeSerializer[V],
      requestFields: RequestFields,
      requestIncludes: RequestIncludes,
      fields: Fields[V],
      pagination: RequestPagination): ProcessedResponse = {
    val (response, elements, paging, linked) = mkDataCollections()
    val elementsFields = serializeCollection(
      elements, things, keyFormat, serializer, requestFields, fields)
    ok.pagination.foreach { pagination =>
      pagination.next.foreach { next =>
        paging.put("next", next)
      }
      pagination.total.foreach { total =>
        paging.put("total", new java.lang.Long(total))
      }
      pagination.facets.foreach { facets =>
        val facetsMap = new DataMap()
        paging.put("facets", facetsMap)
        serializeFacets(facetsMap, facets)
      }
    }
    val newFields = serializeRelated(linked, ok, fields, requestIncludes, elementsFields)
    val codec = new FlattenedFilteringJacksonDataCodec(newFields)
    // ETag is computed over the full response body plus pagination.
    val etag = mkETagHeader(pagination, ok, response)
    addLinks(response, requestIncludes, newFields, fields, ok)
    ProcessedResponse(response, codec, etag)
  }

  /** Engine for GET: serializes the single fetched element; honors If-None-Match. */
  implicit def getActionCategoryEngine[Key, Resource](
      implicit naptimeSerializer: NaptimeSerializer[Resource], keyFormat: KeyFormat[Key]):
    RestActionCategoryEngine2[GetRestActionCategory, Key, Resource, Keyed[Key, Resource]] = {
    new RestActionCategoryEngine2[GetRestActionCategory, Key, Resource, Keyed[Key, Resource]] {
      override def mkResult(
          request: RequestHeader,
          resourceFields: Fields[Resource],
          requestFields: RequestFields,
          requestIncludes: QueryIncludes,
          pagination: RequestPagination,
          response: RestResponse[Keyed[Key, Resource]]): Result = {
        mkOkResult(response) { ok =>
          val response = buildOkResult(List(ok.content), ok, keyFormat, naptimeSerializer,
            requestFields, requestIncludes, resourceFields, pagination)
          response.playResponse(Status.OK, request.headers.get(HeaderNames.IF_NONE_MATCH))
        }
      }

      override def mkResponse(
          request: RequestHeader,
          resourceFields: Fields[Resource],
          requestFields: RequestFields,
          requestIncludes: QueryIncludes,
          pagination: RequestPagination,
          response: RestResponse[Keyed[Key, Resource]],
          resourceName: ResourceName,
          topLevelRequest: TopLevelRequest): Future[AriResponse] = {
        mkOkResponse(response) { ok =>
          buildOkResponse(List(ok.content), ok, keyFormat, naptimeSerializer, requestFields,
            requestIncludes, resourceFields, pagination, resourceName, topLevelRequest, request.uri)
        }
      }
    }
  }

  /** Engine for CREATE: answers 201 with Location and X-Coursera-Id headers; body is
    * included only when the action returned the created value. */
  implicit def createActionCategoryEngine[Key, Resource](
      implicit naptimeSerializer: NaptimeSerializer[Resource], keyFormat: KeyFormat[Key]):
    RestActionCategoryEngine[CreateRestActionCategory, Key, Resource,
      Keyed[Key, Option[Resource]]] = {
    new RestActionCategoryEngine[CreateRestActionCategory, Key, Resource,
      Keyed[Key, Option[Resource]]] {
      override def mkResult(
          request: RequestHeader,
          resourceFields: Fields[Resource],
          requestFields: RequestFields,
          requestIncludes: QueryIncludes,
          pagination: RequestPagination,
          response: RestResponse[Keyed[Key, Option[Resource]]]): Result = {
        mkOkResult(response) { ok =>
          val key = keyFormat.stringKeyFormat.writes(ok.content.key).key
          val newLocation = if (request.path.endsWith("/")) {
            request.path + key
          } else {
            request.path + "/" + key
          }
          val baseHeaders = List(HeaderNames.LOCATION -> newLocation, "X-Coursera-Id" -> key)
          ok.content.value.map { value =>
            val response = buildOkResult(List(Keyed(ok.content.key, value)), ok, keyFormat,
              naptimeSerializer, requestFields, requestIncludes, resourceFields, pagination)
            response.playResponse(Status.CREATED, None).withHeaders(baseHeaders: _*)
          }.getOrElse {
            // No body, just a 201 Created.
            Results.Created.withHeaders(mkETagHeaderOpt(pagination, ok, None).toList ++
              baseHeaders: _*)
          }
        }
      }
    }
  }

  /** Engine for UPDATE (PUT): 200 with body when a value is returned, else 204 No Content. */
  implicit def updateActionCategoryEngine[Key, Resource](
      implicit naptimeSerializer: NaptimeSerializer[Resource], keyFormat: KeyFormat[Key]):
    RestActionCategoryEngine[UpdateRestActionCategory, Key, Resource,
      Option[Keyed[Key, Resource]]] = {
    new RestActionCategoryEngine[UpdateRestActionCategory, Key, Resource,
      Option[Keyed[Key, Resource]]] {
      override def mkResult(
          request: RequestHeader,
          resourceFields: Fields[Resource],
          requestFields: RequestFields,
          requestIncludes: QueryIncludes,
          pagination: RequestPagination,
          response: RestResponse[Option[Keyed[Key, Resource]]]): Result = {
        mkOkResult(response) { ok =>
          ok.content.map { result =>
            val response = buildOkResult(List(result), ok, keyFormat,
              naptimeSerializer, requestFields, requestIncludes, resourceFields, pagination)
            response.playResponse(Status.OK, None)
          }.getOrElse {
            Results.NoContent.withHeaders(mkETagHeaderOpt(pagination, ok, None).toList: _*)
          }
        }
      }
    }
  }

  /** Engine for PATCH: always answers 200 with the patched element. */
  implicit def patchActionCategoryEngine[Key, Resource](
      implicit naptimeSerializer: NaptimeSerializer[Resource], keyFormat: KeyFormat[Key]):
    RestActionCategoryEngine[PatchRestActionCategory, Key, Resource, Keyed[Key, Resource]] = {
    new RestActionCategoryEngine[PatchRestActionCategory, Key, Resource, Keyed[Key, Resource]] {
      override def mkResult(
          request: RequestHeader,
          resourceFields: Fields[Resource],
          requestFields: RequestFields,
          requestIncludes: QueryIncludes,
          pagination: RequestPagination,
          response: RestResponse[Keyed[Key, Resource]]): Result = {
        mkOkResult(response) { ok =>
          val response = buildOkResult(List(ok.content), ok, keyFormat, naptimeSerializer,
            requestFields, requestIncludes, resourceFields, pagination)
          response.playResponse(Status.OK, None)
        }
      }
    }
  }

  /** Engine for DELETE: always answers 204 No Content. */
  implicit def deleteActionCategoryEngine[Key, Resource](
      implicit naptimeSerializer: NaptimeSerializer[Resource], keyFormat: KeyFormat[Key]):
    RestActionCategoryEngine[DeleteRestActionCategory, Key, Resource, Unit] = {
    new RestActionCategoryEngine[DeleteRestActionCategory, Key, Resource, Unit] {
      override def mkResult(
          request: RequestHeader,
          resourceFields: Fields[Resource],
          requestFields: RequestFields,
          requestIncludes: QueryIncludes,
          pagination: RequestPagination,
          response: RestResponse[Unit]): Result = {
        mkOkResult(response) { ok =>
          Results.NoContent.withHeaders(mkETagHeaderOpt(pagination, ok, None).toList: _*)
        }
      }
    }
  }

  /** Engine for MULTI_GET: serializes the fetched elements; honors If-None-Match. */
  implicit def multiGetActionCategoryEngine[Key, Resource](
      implicit naptimeSerializer: NaptimeSerializer[Resource], keyFormat: KeyFormat[Key]):
    RestActionCategoryEngine2[
      MultiGetRestActionCategory, Key, Resource, Seq[Keyed[Key, Resource]]] = {
    new RestActionCategoryEngine2[
      MultiGetRestActionCategory, Key, Resource, Seq[Keyed[Key, Resource]]] {
      override def mkResult(
          request: RequestHeader,
          resourceFields: Fields[Resource],
          requestFields: RequestFields,
          requestIncludes: QueryIncludes,
          pagination: RequestPagination,
          response: RestResponse[Seq[Keyed[Key, Resource]]]): Result = {
        mkOkResult(response) { ok =>
          val response = buildOkResult(ok.content, ok, keyFormat, naptimeSerializer, requestFields,
            requestIncludes, resourceFields, pagination)
          response.playResponse(Status.OK, request.headers.get(HeaderNames.IF_NONE_MATCH))
        }
      }

      override def mkResponse(
          request: RequestHeader,
          resourceFields: Fields[Resource],
          requestFields: RequestFields,
          requestIncludes: QueryIncludes,
          pagination: RequestPagination,
          response: RestResponse[Seq[Keyed[Key, Resource]]],
          resourceName: ResourceName,
          topLevelRequest: TopLevelRequest): Future[AriResponse] = {
        mkOkResponse(response) { ok =>
          buildOkResponse(ok.content, ok, keyFormat, naptimeSerializer, requestFields,
            requestIncludes, resourceFields, pagination, resourceName, topLevelRequest, request.uri)
        }
      }
    }
  }

  /** Engine for GET_ALL: identical shape to MULTI_GET's engine. */
  implicit def getAllActionCategoryEngine[Key, Resource](
      implicit naptimeSerializer: NaptimeSerializer[Resource], keyFormat: KeyFormat[Key]):
    RestActionCategoryEngine2[GetAllRestActionCategory, Key, Resource,
      Seq[Keyed[Key, Resource]]] = {
    new RestActionCategoryEngine2[GetAllRestActionCategory, Key, Resource,
      Seq[Keyed[Key, Resource]]] {
      override def mkResult(
          request: RequestHeader,
          resourceFields: Fields[Resource],
          requestFields: RequestFields,
          requestIncludes: QueryIncludes,
          pagination: RequestPagination,
          response: RestResponse[Seq[Keyed[Key, Resource]]]): Result = {
        mkOkResult(response) { ok =>
          val response = buildOkResult(ok.content, ok, keyFormat, naptimeSerializer, requestFields,
            requestIncludes, resourceFields, pagination)
          response.playResponse(Status.OK, request.headers.get(HeaderNames.IF_NONE_MATCH))
        }
      }

      override def mkResponse(
          request: RequestHeader,
          resourceFields: Fields[Resource],
          requestFields: RequestFields,
          requestIncludes: QueryIncludes,
          pagination: RequestPagination,
          response: RestResponse[Seq[Keyed[Key, Resource]]],
          resourceName: ResourceName,
          topLevelRequest: TopLevelRequest): Future[AriResponse] = {
        mkOkResponse(response) { ok =>
          buildOkResponse(ok.content, ok, keyFormat, naptimeSerializer, requestFields,
            requestIncludes, resourceFields, pagination, resourceName, topLevelRequest, request.uri)
        }
      }
    }
  }

  /** Engine for FINDER actions: same serialization shape as GET_ALL. */
  implicit def finderActionCategoryEngine[Key, Resource](
      implicit naptimeSerializer: NaptimeSerializer[Resource], keyFormat: KeyFormat[Key]):
    RestActionCategoryEngine2[FinderRestActionCategory, Key, Resource, Seq[Keyed[Key, Resource]]] = {
    new RestActionCategoryEngine2[
      FinderRestActionCategory, Key, Resource, Seq[Keyed[Key, Resource]]] {
      override def mkResult(
          request: RequestHeader,
          resourceFields: Fields[Resource],
          requestFields: RequestFields,
          requestIncludes: QueryIncludes,
          pagination: RequestPagination,
          response: RestResponse[Seq[Keyed[Key, Resource]]]): Result = {
        mkOkResult(response) { ok =>
          val response = buildOkResult(ok.content, ok, keyFormat, naptimeSerializer, requestFields,
            requestIncludes, resourceFields, pagination)
          response.playResponse(Status.OK, request.headers.get(HeaderNames.IF_NONE_MATCH))
        }
      }

      override def mkResponse(
          request: RequestHeader,
          resourceFields: Fields[Resource],
          requestFields: RequestFields,
          requestIncludes: QueryIncludes,
          pagination: RequestPagination,
          response: RestResponse[Seq[Keyed[Key, Resource]]],
          resourceName: ResourceName,
          topLevelRequest: TopLevelRequest): Future[AriResponse] = {
        mkOkResponse(response) { ok =>
          buildOkResponse(ok.content, ok, keyFormat, naptimeSerializer, requestFields,
            requestIncludes, resourceFields, pagination, resourceName, topLevelRequest, request.uri)
        }
      }
    }
  }

  /** Engine for custom ACTION endpoints: the handler's own serializer decides the body and
    * content type; an empty body becomes 204 No Content. */
  implicit def actionActionCategoryEngine[Key, Resource, Response](
      implicit responseWrites: NaptimeActionSerializer[Response]):
    RestActionCategoryEngine[ActionRestActionCategory, Key, Resource, Response] = {
    new RestActionCategoryEngine[ActionRestActionCategory, Key, Resource, Response] {
      override def mkResult(
          request: RequestHeader,
          resourceFields: Fields[Resource],
          requestFields: RequestFields,
          requestIncludes: QueryIncludes,
          pagination: RequestPagination,
          response: RestResponse[Response]): Result = {
        mkOkResult(response) { ok =>
          val responseBody = responseWrites.serialize(ok.content)
          if (responseBody.isEmpty) {
            Results.NoContent
          } else {
            Results.Ok(responseBody).as(responseWrites.contentType(ok.content))
          }
        }
      }
    }
  }

  /** A Jackson codec that applies `fields` filtering while streaming JSON out:
    * filters element bodies under "elements", and resources/fields under "linked". */
  private[naptime] class FlattenedFilteringJacksonDataCodec(fields: RequestFields)
    extends JacksonDataCodec {

    override def writeObject(`object`: scala.Any, generator: JsonGenerator): Unit = {
      try {
        val callback = new FilteringJsonTraverseCallback(generator)
        Data.traverse(`object`, callback)
        generator.flush()
      } catch {
        case e: IOException => throw e
      } finally {
        try {
          generator.close()
        } catch {
          case e: IOException => // pass
        }
      }
    }

    override def objectToJsonGenerator(`object`: scala.Any, generator: JsonGenerator): Unit = {
      val callback = new FilteringJsonTraverseCallback(generator)
      Data.traverse(`object`, callback)
    }

    /** Tracks traversal depth and whether we are inside "elements" or "linked" so that
      * field filtering is applied only at the right nesting levels. */
    private[this] class FilteringJsonTraverseCallback(jsonGenerator: JsonGenerator)
      extends JsonTraverseCallback(jsonGenerator) {
      private[this] var inElements = false
      private[this] var levelsDeep = 0
      private[this] var inLinked = false
      private[this] var linkedResourceName: String = null
      private[this] var linkedFieldsFilter: Option[RequestFields] = None

      override def orderMap(map: DataMap): Iterable[Entry[String, AnyRef]] = {
        import scala.collection.JavaConverters._
        if (inElements && levelsDeep == 2) {
          // An element body: keep only the requested fields.
          val unfiltered = super.orderMap(map)
          // Use Scala's filtering, as by code inspection it is very efficient.
          unfiltered.asScala.filter { entry =>
            fields.hasField(entry.getKey)
          }.asJava
        } else if (inLinked && levelsDeep == 2) {
          // The "linked" map itself: keep only resources the request asked for.
          val unfiltered = super.orderMap(map)
          unfiltered.asScala.filter { entry =>
            ResourceName.parse(entry.getKey).exists { resourceName =>
              fields.forResource(resourceName).isDefined
            }
          }.asJava
        } else if (inLinked && levelsDeep == 3 && linkedResourceName != null) {
          // A linked element body: filter by that resource's requested fields, if known.
          val unfiltered = super.orderMap(map)
          linkedFieldsFilter.map { fields =>
            // Use Scala's filtering, as by code inspection it is very efficient.
            unfiltered.asScala.filter { entry =>
              fields.hasField(entry.getKey)
            }.asJava
          }.getOrElse {
            unfiltered
          }
        } else {
          super.orderMap(map)
        }
      }

      override def startMap(map: DataMap): Unit = {
        levelsDeep += 1
        super.startMap(map)
      }

      override def endMap(): Unit = {
        levelsDeep -= 1
        if (levelsDeep == 0) {
          // Reset because we're top-level now.
          inElements = false
          inLinked = false
        } else if (levelsDeep == 1 && inLinked) {
          linkedResourceName = null
          linkedFieldsFilter = None
        }
        super.endMap()
      }

      override def key(key: String): Unit = {
        if (levelsDeep == 1) {
          inElements = "elements" == key
          inLinked = "linked" == key
        } else if (levelsDeep == 2 && inLinked) {
          linkedResourceName = key
          linkedFieldsFilter = ResourceName.parse(linkedResourceName).flatMap { resourceName =>
            fields.forResource(resourceName)
          }
        }
        super.key(key)
      }
    }
  }
}
| vkuo-coursera/naptime | naptime/src/main/scala/org/coursera/naptime/actions/RestActionCategoryEngine2.scala | Scala | apache-2.0 | 29,782 |
/*
* Copyright 2001-2005 Stephen Colebourne
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.joda.time.format
import java.util.Locale
import org.joda.time.ReadWritablePeriod
/**
* Internal interface for parsing textual representations of time periods.
* <p>
* Application users will rarely use this class directly. Instead, you
* will use one of the factory classes to create a {@link PeriodFormatter}.
* <p>
* The factory classes are:<br />
* - {@link PeriodFormatterBuilder}<br />
* - {@link PeriodFormat}<br />
* - {@link ISOPeriodFormat}<br />
*
* @author Brian S O'Neill
* @author Stephen Colebourne
* @since 1.0
* @see PeriodFormatter
* @see PeriodFormatterBuilder
* @see PeriodFormat
*/
trait PeriodParser {
  /**
   * Parses a period from the given text, starting at the given position, and saves the
   * result into the fields of the given ReadWritablePeriod. If the parse succeeds, the
   * return value is the new text position. Note that the parse may succeed without
   * fully reading the text.
   * <p>
   * If it fails, the return value is negative, but the period may still have been
   * modified. To determine the position where the parse failed, apply the
   * one's complement operator (~) to the return value.
   *
   * @param period a period that will be modified
   * @param periodStr text to parse
   * @param position position to start parsing from
   * @param locale the locale to use for parsing
   * @return the new position; if negative, the parse failed — apply the complement
   *         operator (~) to obtain the position of failure
   * @throws IllegalArgumentException if any field is out of range
   */
  def parseInto(period: ReadWritablePeriod, periodStr: String, position: Int, locale: Locale): Int
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.physical.batch
import org.apache.flink.table.JArrayList
import org.apache.flink.table.api.{AggPhaseEnforcer, PlannerConfigOptions, TableConfig, TableException}
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.dataformat.BinaryRow
import org.apache.flink.table.functions.aggfunctions.DeclarativeAggregateFunction
import org.apache.flink.table.functions.utils.UserDefinedFunctionUtils._
import org.apache.flink.table.functions.{AggregateFunction, UserDefinedFunction}
import org.apache.flink.table.plan.util.{AggregateUtil, FlinkRelOptUtil}
import org.apache.flink.table.types.LogicalTypeDataTypeConverter.fromDataTypeToLogicalType
import org.apache.flink.table.types.logical.LogicalType
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.Aggregate
import org.apache.calcite.rel.{RelCollation, RelCollations, RelFieldCollation}
import org.apache.calcite.util.Util
import scala.collection.JavaConversions._
/**
 * Shared helpers for batch aggregate physical rules: local-aggregate type inference,
 * one-phase vs. two-phase workability checks, and agg-buffer/collation utilities.
 */
trait BatchExecAggRuleBase {

  /**
   * Infers the row type produced by a local (partial) aggregate for `agg`.
   * Aggregate call names are taken from the aggregate's row type, skipping the
   * group and auxiliary-group fields that precede them.
   */
  protected def inferLocalAggType(
      inputRowType: RelDataType,
      agg: Aggregate,
      groupSet: Array[Int],
      auxGroupSet: Array[Int],
      aggFunctions: Array[UserDefinedFunction],
      aggBufferTypes: Array[Array[LogicalType]]): RelDataType = {
    val typeFactory = agg.getCluster.getTypeFactory.asInstanceOf[FlinkTypeFactory]
    val aggCallNames = Util.skip(
      agg.getRowType.getFieldNames, groupSet.length + auxGroupSet.length).toList.toArray[String]
    inferLocalAggType(
      inputRowType, typeFactory, aggCallNames, groupSet, auxGroupSet, aggFunctions, aggBufferTypes)
  }

  /**
   * Infers the local-aggregate row type: group fields, then auxiliary group fields,
   * then one field per aggregate-buffer attribute.
   */
  protected def inferLocalAggType(
      inputRowType: RelDataType,
      typeFactory: FlinkTypeFactory,
      aggCallNames: Array[String],
      groupSet: Array[Int],
      auxGroupSet: Array[Int],
      aggFunctions: Array[UserDefinedFunction],
      aggBufferTypes: Array[Array[LogicalType]]): RelDataType = {
    // Buffer field names: imperative AggregateFunctions expose a single buffer named after
    // the call; declarative functions expose one field per buffer attribute, suffixed with a
    // running index to keep names unique.
    val aggBufferFieldNames = new Array[Array[String]](aggFunctions.length)
    var index = -1
    aggFunctions.zipWithIndex.foreach {
      case (udf, aggIndex) =>
        aggBufferFieldNames(aggIndex) = udf match {
          case _: AggregateFunction[_, _] =>
            Array(aggCallNames(aggIndex))
          case agf: DeclarativeAggregateFunction =>
            agf.aggBufferAttributes.map { attr =>
              index += 1
              s"${attr.getName}$$$index"
            }
          case _: UserDefinedFunction =>
            throw new TableException(s"Don't get localAgg merge name")
        }
    }
    // local agg output order: groupSet + auxGroupSet + aggCalls
    // NOTE(review): the original code computed `!FlinkTypeFactory.isTimeIndicatorType(t)`
    // into an unused local named `nullable`. If nullability was meant to be applied to the
    // field type here, it must be passed to the factory — TODO confirm intent; the dead
    // computation has been removed without changing behavior.
    val aggBufferSqlTypes = aggBufferTypes.flatten.map { t =>
      typeFactory.createFieldTypeFromLogicalType(t)
    }
    val localAggFieldTypes = (
      groupSet.map(inputRowType.getFieldList.get(_).getType) ++ // groupSet
        auxGroupSet.map(inputRowType.getFieldList.get(_).getType) ++ // auxGroupSet
        aggBufferSqlTypes // aggCalls
      ).toList
    val localAggFieldNames = (
      groupSet.map(inputRowType.getFieldList.get(_).getName) ++ // groupSet
        auxGroupSet.map(inputRowType.getFieldList.get(_).getName) ++ // auxGroupSet
        aggBufferFieldNames.flatten.toArray[String] // aggCalls
      ).toList
    typeFactory.createStructType(localAggFieldTypes, localAggFieldNames)
  }

  /**
   * Two-phase (local + global) aggregation is workable unless ONE_PHASE is enforced
   * or some aggregate function cannot merge partial results.
   */
  protected def isTwoPhaseAggWorkable(
      aggFunctions: Array[UserDefinedFunction],
      tableConfig: TableConfig): Boolean = {
    getAggEnforceStrategy(tableConfig) match {
      case AggPhaseEnforcer.ONE_PHASE => false
      case _ => doAllSupportMerge(aggFunctions)
    }
  }

  /**
   * One-phase aggregation is workable when enforced, when merging is impossible, or —
   * under NONE — when the group-key NDV is known so the choice can be left to the CBO.
   */
  protected def isOnePhaseAggWorkable(
      agg: Aggregate,
      aggFunctions: Array[UserDefinedFunction],
      tableConfig: TableConfig): Boolean = {
    getAggEnforceStrategy(tableConfig) match {
      case AggPhaseEnforcer.ONE_PHASE => true
      case AggPhaseEnforcer.TWO_PHASE => !doAllSupportMerge(aggFunctions)
      case AggPhaseEnforcer.NONE =>
        if (!doAllSupportMerge(aggFunctions)) {
          true
        } else {
          // if ndv of group key in aggregate is Unknown and all aggFunctions are splittable,
          // use two-phase agg.
          // else whether choose one-phase agg or two-phase agg depends on CBO.
          val mq = agg.getCluster.getMetadataQuery
          mq.getDistinctRowCount(agg.getInput, agg.getGroupSet, null) != null
        }
    }
  }

  /**
   * True when every aggregate function supports merging partial results
   * (declarative functions always do; imperative ones need a `merge` method).
   */
  protected def doAllSupportMerge(aggFunctions: Array[UserDefinedFunction]): Boolean = {
    val supportLocalAgg = aggFunctions.forall {
      case _: DeclarativeAggregateFunction => true
      case a => ifMethodExistInFunction("merge", a)
    }
    // it means grouping without aggregate functions
    aggFunctions.isEmpty || supportLocalAgg
  }

  protected def isEnforceOnePhaseAgg(tableConfig: TableConfig): Boolean = {
    getAggEnforceStrategy(tableConfig) == AggPhaseEnforcer.ONE_PHASE
  }

  protected def isEnforceTwoPhaseAgg(tableConfig: TableConfig): Boolean = {
    getAggEnforceStrategy(tableConfig) == AggPhaseEnforcer.TWO_PHASE
  }

  /** Reads the configured agg-phase enforcer, rejecting anything but NONE/ONE_PHASE/TWO_PHASE. */
  protected def getAggEnforceStrategy(tableConfig: TableConfig): AggPhaseEnforcer.Value = {
    val aggPrefConfig = tableConfig.getConf.getString(
      PlannerConfigOptions.SQL_OPTIMIZER_AGG_PHASE_ENFORCER)
    AggPhaseEnforcer.values.find(_.toString.equalsIgnoreCase(aggPrefConfig))
      .getOrElse(throw new IllegalArgumentException(
        "Agg phase enforcer can only set to be: NONE, ONE_PHASE, TWO_PHASE!"))
  }

  /** True when all agg-buffer attribute types of `agg` are fixed-length (BinaryRow-mutable). */
  protected def isAggBufferFixedLength(agg: Aggregate): Boolean = {
    val (_, aggCallsWithoutAuxGroupCalls) = AggregateUtil.checkAndSplitAggCalls(agg)
    val (_, aggBufferTypes, _) = AggregateUtil.transformToBatchAggregateFunctions(
      aggCallsWithoutAuxGroupCalls, agg.getInput.getRowType)
    isAggBufferFixedLength(aggBufferTypes.map(_.map(fromDataTypeToLogicalType)))
  }

  protected def isAggBufferFixedLength(aggBufferTypes: Array[Array[LogicalType]]): Boolean = {
    val aggBuffAttributesTypes = aggBufferTypes.flatten
    val isAggBufferFixedLength = aggBuffAttributesTypes.forall(
      t => BinaryRow.isMutable(t))
    // it means grouping without aggregate functions
    aggBuffAttributesTypes.isEmpty || isAggBufferFixedLength
  }

  /** Builds a collation requiring the given group fields, in order, with default directions. */
  protected def createRelCollation(groupSet: Array[Int]): RelCollation = {
    val fields = new JArrayList[RelFieldCollation]()
    for (field <- groupSet) {
      fields.add(FlinkRelOptUtil.ofRelFieldCollation(field))
    }
    RelCollations.of(fields)
  }
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/rules/physical/batch/BatchExecAggRuleBase.scala | Scala | apache-2.0 | 7,392 |
/*
* -╥⌐⌐⌐⌐ -⌐⌐⌐⌐-
* ≡╢░░░░⌐\\░░░φ ╓╝░░░░⌐░░░░╪╕
* ╣╬░░` `░░░╢┘ φ▒╣╬╝╜ ░░╢╣Q
* ║╣╬░⌐ ` ╤▒▒▒Å` ║╢╬╣
* ╚╣╬░⌐ ╔▒▒▒▒`«╕ ╢╢╣▒
* ╫╬░░╖ .░ ╙╨╨ ╣╣╬░φ ╓φ░╢╢Å
* ╙╢░░░░⌐"░░░╜ ╙Å░░░░⌐░░░░╝`
* ``˚¬ ⌐ ˚˚⌐´
*
* Copyright © 2016 Flipkart.com
*/
package com.flipkart.connekt.commons.dao
import com.flipkart.connekt.commons.entities.{Bucket, Stencil, StencilsEnsemble}
/**
 * Data-access contract for stencils, stencil buckets and stencil ensembles.
 */
trait TStencilDao extends Dao {
  // Stencil lookups. `version` semantics are not visible here — presumably None selects
  // the latest/current version; confirm against the concrete implementation.
  def getStencils(id: String, version: Option[String] = None): List[Stencil]
  def getStencilsByName(name: String, version: Option[String] = None): List[Stencil]
  def getStencilsByBucket(name: String): List[Stencil]
  // Persists the given stencils; whether this inserts or upserts is implementation-defined.
  def writeStencils(stencil: List[Stencil]): Unit
  // Deletion by (name, id) pair vs. by id alone.
  def deleteStencilByName(name: String, id:String): Unit
  def deleteStencil(id:String): Unit
  // Bucket access.
  def getBucket(name: String): Option[Bucket]
  def writeBucket(bucket: Bucket): Unit
  // Stencil-ensemble access, by name or by id.
  def getStencilsEnsembleByName(name: String): Option[StencilsEnsemble]
  def getStencilsEnsemble(id: String): Option[StencilsEnsemble]
  def writeStencilsEnsemble(stencilComponents: StencilsEnsemble): Unit
  def getAllEnsemble(): List[StencilsEnsemble]
}
| Flipkart/connekt | commons/src/main/scala/com/flipkart/connekt/commons/dao/TStencilDao.scala | Scala | mit | 1,481 |
package spire.math.prime
import spire.math.{SafeLong, log, max}
import SieveUtil._
/**
* Segmented Stream of Eratosthenes implementation
*
* This section really needs some good comments.
*
* Some future optimizations:
*
* 0. Consider an option to use multiple threads
* 1. Faster heap/priority queue
* 2. Tune chunkSize
* 3. Use Long internally until we have to switch to SafeLong.
* 4. Compress the amount of space our heaps take up.
* 5. Read more efficient segmented sieves to get other ideas.
* 6. Try using a delta-encoded prime log
*
* Obviously we are trying to be a bit more flexible than a
* traditional prime finder that knows ahead of time what range it
* will be operating over, which will hurt performance a bit. Also,
* it's not written in C/assembly. So it will probably never be truly
* competitive, but I'd like us to do as well as possible.
*/
/**
* The Siever manages the segmented sieve process.
*
* At any given time, it holds onto a single sieve segment. Thus, the
* siever should be used for a single lookup or traversal.
*
* Sievers are built using 'chunkSize' and 'cutoff' parameters. These
* are passed along to any sieve segments they create. When possible,
* it's probably better to use methods on the companion object, which
* will instantiate a Siever for you with reasonable parameters.
*/
/**
 * Drives the segmented Sieve of Eratosthenes over successive chunks of size `chunkSize`.
 * A Siever holds exactly one live segment at a time, so an instance should be used for a
 * single lookup or traversal. `cutoff` bounds how far the sieve may advance: sieving past
 * cutoff² is an error.
 */
case class Siever(chunkSize: Int, cutoff: SafeLong) {
  require(chunkSize % 480 == 0, "chunkSize must be a multiple of 480")

  // Bit set backing the current segment; cleared and re-sieved for each new chunk.
  val arr = BitSet.alloc(chunkSize)
  // [start, limit) is the range covered by the current segment.
  var start: SafeLong = SafeLong(0)
  var limit: SafeLong = start + chunkSize
  // Factor queues shared across segments (small factors fast-path, large factors heap).
  val fastq: FastFactors = FastFactors.empty
  val slowq: FactorHeap = new FactorHeap
  var sieve: SieveSegment = SieveSegment(start, arr, cutoff)
  sieve.init(fastq, slowq)

  /**
   * Returns the largest prime strictly below `n`.
   *
   * @throws IllegalArgumentException if n < 3 (there is no prime below 2)
   */
  def largestBelow(n: SafeLong): SafeLong = {
    if (n < 3) throw new IllegalArgumentException("invalid argument: %s" format n)
    if (n == 3) return SafeLong(2)

    // Fix: the original kept dead locals (`var i = 3`, `var k = n - 1`, and a trailing
    // `i = 1` write) that were never read — removed without behavior change.
    var last = SafeLong(2)
    while (true) {
      val primes = sieve.primes
      val len = primes.length
      if (n - start < len) {
        // n falls inside the current segment: scan odd offsets below n's offset.
        var i = 1
        val goal = (n - start).toInt
        while (i < goal) {
          if (primes(i)) last = start + i
          i += 2
        }
        return last
      } else {
        // n lies beyond this segment: remember this segment's largest prime, then advance.
        var i = len - 1
        while (1 <= i && !primes(i)) i -= 2
        if (1 <= i) last = start + i
      }
      initNextSieve()
    }
    SafeLong(0) // unreachable; present only to satisfy the result type
  }

  /** Returns the n-th prime (1-based: nth(1) == 2). */
  def nth(n: Long): SafeLong = {
    if (n == 1) return SafeLong(2)
    var i = 3
    var k = n - 1
    while (true) {
      val primes = sieve.primes
      val len = primes.length
      while (i < len) {
        if (primes(i)) {
          k -= 1
          if (k < 1) return sieve.start + i
        }
        i += 2
      }
      initNextSieve()
      i = 1
    }
    SafeLong(0) // unreachable; present only to satisfy the result type
  }

  // Advances to the next chunk: shifts the window, checks the cutoff² bound,
  // clears the shared bit set and re-initializes the segment from the factor queues.
  private def initNextSieve(): Unit = {
    start += chunkSize
    limit += chunkSize
    val csq = cutoff ** 2
    if (limit >= csq) sys.error("too big: %s > %s (%s)" format (limit, csq, cutoff))
    arr.clear()
    sieve = SieveSegment(start, arr, cutoff)
    sieve.init(fastq, slowq)
  }

  /** Returns the smallest prime strictly greater than `n`, advancing segments as needed. */
  def nextAfter(n: SafeLong): SafeLong = {
    var nn = sieve.nextAfter(n)
    while (nn == -1L) {
      initNextSieve()
      nn = sieve.nextAfter(start - 1)
    }
    nn
  }

  /** Lazily streams all primes strictly greater than `p0`, in increasing order. */
  def streamAfter(p0: SafeLong): Stream[SafeLong] = {
    val p = nextAfter(p0)
    p #:: streamAfter(p)
  }

  /** Returns `size` consecutive primes starting at `p` (p itself is the first element). */
  def arrayAt(p: SafeLong, size: Int): Array[SafeLong] = {
    // Fix: the local was named `arr`, shadowing the bit-set member above; renamed.
    val out = new Array[SafeLong](size)
    def loop(i: Int, p: SafeLong): Unit =
      if (i < out.length) {
        out(i) = p
        loop(i + 1, nextAfter(p))
      }
    loop(0, p)
    out
  }
}
| kevinmeredith/spire | core/shared/src/main/scala/spire/math/prime/Siever.scala | Scala | mit | 3,729 |
package tv.camfire.media_server.config.factory
import akka.actor.ActorRef
import tv.camfire.media_server.signal.SignalHelper
import tv.camfire.media_server.webrtc.PeerConnectionObserver
import tv.camfire.media_server.serialization.SerializationHelper
/**
* User: jonathan
* Date: 7/23/13
* Time: 3:10 AM
*/
// Factory-of-factories for PeerConnectionObserver instances, capturing the shared
// collaborators (media service actor, serialization/signal helpers) once.
//
// NOTE(review): the inner `create` method's `sessionId` parameter shadows the outer
// constructor parameter of the same name, which is therefore never used here — confirm
// whether the constructor parameter is intentional. Also, `case class create()` uses a
// lowercase name (violates Scala naming conventions) and the implicit `resourceUuid`
// parameter is unused in the body; both are left untouched to preserve the public API.
class PeerConnectionObserverFactoryFactory(mediaService: ActorRef,
                                           serializationHelper: SerializationHelper,
                                           signalHelper: SignalHelper,
                                           sessionId: String) {
  case class create() {
    // Builds an observer for the given session, forwarding the captured collaborators.
    def create(sessionId: String)(implicit resourceUuid: String): PeerConnectionObserver = {
      new PeerConnectionObserver(mediaService, serializationHelper, signalHelper, sessionId)
    }
  }
}
| jgrowl/camfire-signaling | signaling-server/src/main/scala/tv/camfire/media_server/config/factory/PeerConnectionObserverFactoryFactory.scala | Scala | mit | 824 |
package psyco.util
/**
* Created by lipeng on 15/8/25.
*/
/**
 * Conversions between camelCase and under_score identifier styles, plus helpers to
 * upper/lower-case the first character of a camelCase name.
 *
 * Note: extends App with demo printlns at the bottom, preserved from the original.
 */
object CaseUtil extends App {

  /** Inserts an underscore before each matched character and lower-cases it. */
  def camel2underscore(s: String) = {
    val boundary = "[A-Z\\\\d]".r
    boundary.replaceAllIn(s, m => "_" + m.group(0).toLowerCase())
  }

  /** Drops each underscore and upper-cases the letter that follows it (if any). */
  def underscore2camel(s: String) = {
    val underscored = "_([a-zA-Z]?)".r
    underscored.replaceAllIn(s, m => m.group(1).toUpperCase())
  }

  /** under_score -> UpperCamelCase. */
  def underscore2camelUppercase(s: String): String = camelUpper(underscore2camel(s))

  /** Upper-cases the first character (whole string when it has length 1). */
  def camelUpper(name: String): String =
    name.length match {
      case 1 => name.toUpperCase()
      case _ => name.substring(0, 1).toUpperCase + name.substring(1)
    }

  /** Lower-cases the first character (whole string when it has length 1). */
  def camelLower(name: String): String =
    name.length match {
      case 1 => name.toLowerCase()
      case _ => name.substring(0, 1).toLowerCase() + name.substring(1)
    }

  println(camel2underscore("aFuckShit"))
  println(underscore2camel("a_fuck_shit"))
  println(underscore2camelUppercase(camel2underscore("aFuckShit")))
}
| psyco4j/mybatis-gen-scala | src/main/scala/psyco/util/CaseUtil.scala | Scala | mit | 816 |
package debop4s.data.slick.examples
import debop4s.data.slick.AbstractSlickFunSuite
import debop4s.data.slick.SlickExampleDatabase._
import debop4s.data.slick.SlickExampleDatabase.driver.simple._
import scala.util.Try
/**
* MainFunSuite
* @author sunghyouk.bae@gmail.com 15. 3. 25.
*/
/**
 * End-to-end Slick example suite exercising table definitions, inserts, projections,
 * joins, correlated subqueries, compiled queries, updates and deletes against the
 * example database. Statement order matters throughout: later queries depend on rows
 * inserted/deleted earlier in the same session.
 */
class MainFunSuite extends AbstractSlickFunSuite {

  // Domain object used when mapping query tuples below.
  case class User(id: Int, first: String, last: String)

  // "main_users": auto-increment PK, first name, nullable last name.
  class Users(tag: Tag) extends Table[(Int, String, Option[String])](tag, "main_users") {
    def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
    def first = column[String]("first", O.Length(64, true))
    def last = column[Option[String]]("last", O.Length(254, true))
    def * = (id, first, last)
    def orders = Orders.filter(_.userId === id)
  }
  object Users extends TableQuery(new Users(_)) {
    val byId = this.findBy(_.id)
  }

  // "main_orders": note the default projection puts userId before orderId.
  class Orders(tag: Tag) extends Table[(Int, Int, String, Boolean, Option[Boolean])](tag, "main_orders") {
    def orderId = column[Int]("orderId", O.PrimaryKey, O.AutoInc)
    def userId = column[Int]("userId")
    def product = column[String]("product")
    def shipped = column[Boolean]("shipped")
    def rebate = column[Option[Boolean]]("rebate")
    def * = (userId, orderId, product, shipped, rebate)
  }
  object Orders extends TableQuery(new Orders(_)) {
    val byId = this.findBy(_.orderId)
    val byUserId = this.findBy(_.userId)
  }

  test("main test") {
    val ddl = Users.ddl ++ Orders.ddl
    ddl.createStatements foreach println
    withSession { implicit session =>
      // Recreate the schema from scratch; drop may fail on first run.
      Try { ddl.drop }
      ddl.create

      // Insert seven users via single insert, batch insertAll, and a single-column insert.
      val insQuery = Users.map(u => (u.first, u.last))
      LOG.debug(s"Insert SQL: ${ insQuery.insertStatement }")
      val ins1 = insQuery.insert("Homer", Some("Simpson"))
      val ins2 = insQuery.insertAll(("Marge", Some("Simpson")),
        ("Apu", Some("Nahasapeemapetilon")),
        ("Carl", Some("Carlson")),
        ("Lenny", Some("Leonard")))
      val ins3 = Users.map(u => u.first).insertAll("Santa's Little Helper", "Snowball")
      val total: Option[Int] = for (i2 <- ins2; i3 <- ins3) yield ins1 + i2 + i3
      LOG.debug(s"Inserted ${ total.getOrElse("<unknown>") } users")
      total shouldEqual Some(7)

      // Project all users as tuples and also map them into User objects.
      val q1 = Users.map(x => (x.id, x.first, x.last))
      LOG.debug(s"q1: ${ q1.selectStatement }")
      q1.run foreach { x => LOG.debug(s"User tuple: $x") }
      val allUsers = q1.mapResult { case (id, f, l) => User(id, f, l.orNull) }.list
      allUsers foreach { u => LOG.debug(s"User object: $u") }
      val expectedUserTuples = Seq(
        (1, "Homer", Some("Simpson")),
        (2, "Marge", Some("Simpson")),
        (3, "Apu", Some("Nahasapeemapetilon")),
        (4, "Carl", Some("Carlson")),
        (5, "Lenny", Some("Leonard")),
        (6, "Santa's Little Helper", None),
        (7, "Snowball", None)
      )
      q1.list shouldEqual expectedUserTuples
      allUsers shouldEqual expectedUserTuples.map { case (id, f, l) => User(id, f, l.orNull) }

      // SQL CASE expression via Slick's Case DSL.
      // select x2."id",
      //        x2."first",
      //        x2."last",
      //        (case when (x2."id" < 3) then 'low' when (x2."id" < 6) then 'medium' else 'high' end)
      //   from "main_users" x2
      val q1b =
        for {u <- Users}
          yield (u.id,
            u.first.?,
            u.last,
            Case If u.id < 3 Then "low" If u.id < 6 Then "medium" Else "high")
      LOG.debug(s"case statement: ${ q1b.selectStatement }")
      val r1b = q1b.run
      r1b foreach { u => LOG.debug(s"With options and sequence: $u") }
      r1b shouldEqual expectedUserTuples.map {
        case (id, f, l) => (id, Some(f), l, if (id < 3) "low" else if (id < 6) "medium" else "high")
      }

      // Filter with a bound parameter.
      val q2 = for (u <- Users if u.first === "Apu".bind) yield (u.last, u.id)
      LOG.debug(s"q2: ${ q2.selectStatement }")
      LOG.debug(s"Apu's last name and id are: ${ q2.first }")
      q2.first shouldEqual(Some("Nahasapeemapetilon"), 3)

      // TODO: verifyable non-random test
      for (u <- allUsers if u.first != "Apu" && u.first != "Snowball"; i <- 1 to 2)
        Orders.map(o => (o.userId, o.product, o.shipped, o.rebate))
          .insert(u.id, s"Gizmo ${ ( math.random * 10 ).toInt }", i == 2, Some(u.first == "Marge"))

      // Implicit join of users (sorted, with last name) to their orders.
      // H2:
      // select x2."first", x2."last", x3."orderId", x3."product", x3."shipped", x3."rebate"
      // from (select x4."id" as "id", x4."first" as "first", x4."last" as "last"
      //       from "main_users" x4
      //       order by x4."first"
      //       ) x2,
      //      "main_orders" x3
      // where (x2."last" is not null)
      //   and (x3."userId" = x2."id")
      val q3 = for {
        u <- Users.sortBy(_.first) if u.last.isDefined
        o <- u.orders
      } yield (u.first, u.last, o.orderId, o.product, o.shipped, o.rebate)
      LOG.debug(s"q3=${ q3.selectStatement }")
      LOG.debug("All orders by Users with a last name by first name:")
      q3.list.foreach { o => LOG.debug("\\t" + o) }

      // Correlated subquery: latest order per user.
      // H2:
      // select x2."first", x3."orderId"
      // from "main_users" x2, "main_orders" x3
      // where (x3."userId" = x2."id")
      //   and (x3."orderId" = (select max(x4."orderId") from "main_orders" x4 where x4."userId" = x3."userId"))
      val q4 = for {
        u <- Users
        o <- u.orders if o.orderId === ( for {o2 <- Orders.filter(_.userId === o.userId)} yield o2.orderId ).max
      } yield (u.first, o.orderId)
      LOG.debug(s"q4=${ q4.selectStatement }")
      LOG.debug("Latest Order per User: ")
      q4.list foreach { x => LOG.debug("\\t" + x) }
      q4.list.toSet shouldEqual Set(("Homer", 2), ("Marge", 4), ("Carl", 6), ("Lenny", 8), ("Santa's Little Helper", 10))

      // custom function (reusable "max of m, grouped by p" query builder)
      def maxOfPer[T <: Table[_], C[_]](c: Query[T, _, C])(m: T => Column[Int], p: T => Column[Int]) = {
        c filter { o => m(o) === ( for (o2 <- c if p(o) === p(o2)) yield m(o2) ).max }
      }

      // Same "latest order per user" result, via the helper above.
      // H2:
      // select x2."first", x3."orderId"
      // from "main_users" x2, "main_orders" x3
      // where (x3."orderId" = (select max(x4."orderId") from "main_orders" x4 where x3."userId" = x4."userId"))
      //   and (x3."userId" = x2."id")
      val q4b = for {
        u <- Users
        o <- maxOfPer(Orders)(_.orderId, _.userId) if o.userId === u.id
      } yield (u.first, o.orderId)
      LOG.debug(s"q4b: ${ q4b.selectStatement }")
      q4b.foreach(o => LOG.debug("  " + o))
      q4b.list.toSet shouldEqual Set(("Homer", 2), ("Marge", 4), ("Carl", 6), ("Lenny", 8), ("Santa's Little Helper", 10))

      // inSetBind with computed columns in the projection.
      // H2:
      // select x2.x3, x2.x4, x2.x5, x2.x6
      // from (select x7."first" as x3, 1 + x8."orderId" as x4, 1 as x5, x8."product" as x6
      //       from "main_users" x7, "main_orders" x8
      //       where (x7."first" in (?, ?))
      //         and (x8."userId" = x7."id")) x2
      val q4d = for {
        u <- Users if u.first inSetBind Seq("Homer", "Marge")
        o <- Orders if o.userId === u.id
      } yield (u.first, (LiteralColumn(1) + o.orderId, 1), o.product)
      LOG.debug(s"q4d: ${ q4d.selectStatement }")
      LOG.debug("orders for Homer and Marge:")
      q4d.run.foreach { o => LOG.debug("  " + o) }

      // && is translated to SQL AND, || to SQL OR; mixing Boolean and Option[Boolean] columns.
      val b1 = Orders.filter(o => o.shipped && o.shipped).map(o => o.shipped && o.shipped)
      val b2 = Orders.filter(o => o.shipped && o.rebate).map(o => o.shipped && o.rebate)
      val b3 = Orders.filter(o => o.rebate && o.shipped).map(o => o.rebate && o.shipped)
      val b4 = Orders.filter(o => o.rebate && o.rebate).map(o => o.rebate && o.rebate)
      val b5 = Orders.filter(o => !o.shipped).map(o => !o.shipped)
      val b6 = Orders.filter(o => !o.rebate).map(o => !o.rebate)
      val b7 = Orders.map(o => o.shipped === o.shipped)
      val b8 = Orders.map(o => o.rebate === o.shipped)
      val b9 = Orders.map(o => o.shipped === o.rebate)
      val b10 = Orders.map(o => o.rebate === o.rebate)
      LOG.debug("b1: " + b1.selectStatement)
      LOG.debug("b2: " + b2.selectStatement)
      LOG.debug("b3: " + b3.selectStatement)
      LOG.debug("b4: " + b4.selectStatement)
      LOG.debug("b5: " + b5.selectStatement)
      LOG.debug("b6: " + b6.selectStatement)
      LOG.debug("b7: " + b7.selectStatement)
      LOG.debug("b8: " + b8.selectStatement)
      LOG.debug("b9: " + b9.selectStatement)
      LOG.debug("b10: " + b10.selectStatement)

      // Users without any order (NOT IN subquery), then delete them.
      // H2:
      // select x2."id", x2."first", x2."last"
      // from "main_users" x2
      // where not (x2."id" in (select x3."userId" from "main_orders" x3))
      val q5 = Users filterNot { _.id in Orders.map(_.userId) }
      LOG.debug(s"q5 = ${ q5.selectStatement }")
      LOG.debug("Order가 없는 사용자:")
      q5.run foreach { u => LOG.debug("  " + u) }
      q5.run shouldEqual Seq((3, "Apu", Some("Nahasapeemapetilon")), (7, "Snowball", None))
      LOG.debug(s"q5 delete: ${ q5.deleteStatement }")
      LOG.debug("delete users...")
      val deleted = q5.delete
      LOG.debug(s"Deleted $deleted users")
      deleted shouldEqual 2
      val q6 = Query(q5.length)
      LOG.debug(s"q6: ${ q6.selectStatement }")
      LOG.debug("Order가 없는 사용자:" + q6.first)
      q6.first shouldEqual 0

      // Compiled (precompiled, parameterized) queries used for update/delete/exists.
      // H2 :
      // update "main_users" set "first" = ? where "main_users"."first" = ?
      val q7 = Compiled { (s: Column[String]) => Users.filter(_.first === s).map(_.first) }
      LOG.debug("q7: " + q7("Homer").updateStatement)
      val updated1 = q7("Homer").update("Homer Jay")
      LOG.debug(s"Updated $updated1 row(s)")
      updated1 shouldEqual 1
      val q7b = Compiled { Users.filter(_.first === "Homer Jay").map(_.first) }
      LOG.debug("q7b: " + q7b.updateStatement)
      val updated1b = q7b.update("Homer")
      LOG.debug(s"Updated $updated1b row(s)")
      updated1b shouldEqual 1
      // H2: select x2.x3 from (select count(1) as x3 from (select x4."first" as x5 from "main_users" x4 where x4."first" = 'Marge') x6) x2
      q7("Marge").map(_.length).run shouldEqual 1
      q7("Marge").map(_.exists).run shouldEqual true
      q7("Marge").delete
      q7("Marge").map(_.length).run shouldEqual 0
      q7("Marge").map(_.exists).run shouldEqual false

      // Multi-column update of rows with no last name.
      val q8 = for (u <- Users if u.last.isEmpty) yield (u.first, u.last)
      LOG.debug("q8: " + q8.updateStatement)
      val updated2 = q8.update("n/a", Some("n/a"))
      LOG.debug(s"Updated $updated2 row(s)")
      updated2 shouldEqual 1

      // Row count after the deletions above (7 inserted - 2 - 1 = 4).
      // H2:
      // select x2.x3 from (select count(1) as x3 from (select x4."id" as x5, x4."first" as x6, x4."last" as x7 from "main_users" x4) x8) x2
      Users.list
      val q9 = Users.length
      q9.run shouldEqual 4

      // inSetBind with an empty set compiles to a constantly-false predicate.
      // H2:
      // select x2."first", x2."last" from "main_users" x2 where false
      val q10 = Users.filter(_.last inSetBind Seq()).map(u => (u.first, u.last))
      q10.run shouldEqual Nil
    }
  }
}
| debop/debop4s | debop4s-data-slick/src/test/scala/debop4s/data/slick/examples/MainFunSuite.scala | Scala | apache-2.0 | 11,194 |
package de.tu_berlin.formic.common.datastructure.client
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import de.tu_berlin.formic.common.datastructure.client.AbstractClientDataStructure.RemoteInstantiation
import de.tu_berlin.formic.common.datastructure.client.AbstractClientDataStructureFactory.{LocalCreateRequest, NewDataStructureCreated, WrappedCreateRequest}
import de.tu_berlin.formic.common.datastructure.{DataStructureName, FormicDataStructure}
import de.tu_berlin.formic.common.message.CreateRequest
import de.tu_berlin.formic.common.{ClientId, DataStructureInstanceId, OperationId}
import scala.reflect.ClassTag
/**
* @author Ronny Bräunlich
*/
//Why the ClassTag? See http://stackoverflow.com/questions/18692265/no-classtag-available-for-t-not-for-array
/**
 * Actor factory for client-side data structures. Handles two creation paths:
 * server-driven remote instantiation (WrappedCreateRequest) and purely local
 * instantiation (LocalCreateRequest), replying with NewDataStructureCreated in both cases.
 */
//Why the ClassTag? See http://stackoverflow.com/questions/18692265/no-classtag-available-for-t-not-for-array
abstract class AbstractClientDataStructureFactory[T <: AbstractClientDataStructure : ClassTag, S <: FormicDataStructure : ClassTag] extends Actor with ActorLogging {

  override def receive: Receive = {
    case WrappedCreateRequest(outgoingConnection, data, lastOperationId, req, localClientId) =>
      log.debug(s"Factory for $name received CreateRequest: $req from sender: $sender")
      val instanceId: DataStructureInstanceId = req.dataStructureInstanceId
      // Treat both null and "" as "no initial data".
      val initialData = Option(data).filter(_.nonEmpty)
      val structureActor =
        context.actorOf(Props(createDataStructure(instanceId, outgoingConnection, initialData, lastOperationId)), instanceId.id)
      val wrapperInstance = createWrapper(instanceId, structureActor, localClientId)
      // Mark the new actor as remotely instantiated before answering the requester.
      structureActor ! RemoteInstantiation
      sender ! NewDataStructureCreated(instanceId, structureActor, wrapperInstance)

    case local: LocalCreateRequest =>
      // The client created the wrapper itself, so only the backing actor is needed here.
      log.debug(s"Factory for $name received LocalCreateRequest: $local from sender: $sender")
      val instanceId: DataStructureInstanceId = local.dataStructureInstanceId
      val structureActor =
        context.actorOf(Props(createDataStructure(instanceId, local.outgoingConnection, Option.empty, Option.empty)), instanceId.id)
      sender ! NewDataStructureCreated(instanceId, structureActor, null)
  }

  /**
   * Creates a new data type.
   *
   * @param dataStructureInstanceId the id of the data type
   * @param outgoingConnection the connection to send messages to the server
   * @param data the initial data as JSON, might be empty
   * @param lastOperationId the operation id the data is based on, might be empty
   * @return the data structure actor implementation
   */
  def createDataStructure(dataStructureInstanceId: DataStructureInstanceId, outgoingConnection: ActorRef, data: Option[String], lastOperationId: Option[OperationId]): T

  /** Creates the client-facing wrapper around the given data structure actor. */
  def createWrapper(dataStructureInstanceId: DataStructureInstanceId, dataType: ActorRef, localClientId: ClientId): S

  /** The data structure name this factory is responsible for. */
  val name: DataStructureName
}
object AbstractClientDataStructureFactory {

  /**
   * Local means that a client created the FormicDataType itself by calling new and using
   * FormicSystem.init(). Therefore no wrapper data type needs to be created.
   */
  case class LocalCreateRequest(outgoingConnection: ActorRef, dataStructureInstanceId: DataStructureInstanceId)

  // Reply sent once the data structure actor (and, for remote instantiation, its wrapper)
  // exists. `wrapper` is null for local creations — see the LocalCreateRequest handler.
  case class NewDataStructureCreated(dataStructureInstanceId: DataStructureInstanceId, dataStructureActor: ActorRef, wrapper: FormicDataStructure)

  /**
   * To be able to pass the outgoing connection and the initial data to the factory, the CreateRequest has to be wrapped.
   */
  case class WrappedCreateRequest(outgoingConnection: ActorRef, data: String, lastOperationId: Option[OperationId], createRequest: CreateRequest, localClientId: ClientId)
}
| rbraeunlich/formic | common/shared/src/main/scala/de/tu_berlin/formic/common/datastructure/client/AbstractClientDataStructureFactory.scala | Scala | apache-2.0 | 3,462 |
package synereo.client.facades
import scala.scalajs.js
/**
* Created by mandar.k on 8/31/2016.
*/
// Scala.js facade for a JavaScript selectize helper.
// NOTE(review): `initilizeSelectize` is declared to return `js.native`, which is a value,
// not a type — this looks like a typo for `js.Any` or `Unit`; confirm against the JS side.
// The "initilize" spelling must match the JS function name, so it cannot be fixed here alone.
@js.native
object SynereoSelectizeFacade extends js.Object {
  // Presumably adds an option (display text + value) to the control `selectizeId` — confirm in JS.
  def addOption(selectizeId: String, text: String, value: String): js.Any = js.native
  // Presumably initializes the control; `allowCreate` toggles free-form entries — confirm in JS.
  def initilizeSelectize(selectizeId: String, maximumItems: Int, maxCharLimit: Int, allowCreate: Boolean = false): js.native = js.native
}
| LivelyGig/ProductWebUI | sclient/src/main/scala/synereo/client/facades/SynereoSelectizeFacade.scala | Scala | apache-2.0 | 392 |
package hu.bme.mit.ire.nodes.binary
import akka.actor.{Actor, Stash}
import hu.bme.mit.ire._
import hu.bme.mit.ire.messages._
/**
 * Base actor for Rete nodes with two inputs. Messages arrive wrapped as Primary or
 * Secondary; each side tracks its own Pause and stashes unrelated messages while paused.
 * Subclasses implement onPrimary/onSecondary for change sets and onSizeRequest for size
 * queries. The primary and secondary branches below are intentionally symmetric.
 */
abstract class BinaryNode(val expectedTerminatorCount: Int = 2) extends Actor with Forwarder with Stash with TerminatorHandler {
  val name = self.path.name
  // Currently active pause per input side; None means the side processes messages normally.
  var primaryPause: Option[Pause] = None
  var secondaryPause: Option[Pause] = None

  def onPrimary(changeSet: ChangeSet)

  def onSecondary(changeSet: ChangeSet)

  override def receive: Actor.Receive = {
    case Primary(reteMessage: ReteMessage) => {
      primaryPause match {
        case Some(pause) => {
          // Paused: only the matching Resume, and terminators for this pause, get through.
          reteMessage match {
            case resume: Resume => {
              if (resume.messageID == pause.messageID)
                primaryPause = None
              // NOTE(review): this unstashAll runs whenever the secondary side is not
              // paused, even if the Resume's id did NOT match and primaryPause is still
              // set — stashed messages would then be replayed while still paused.
              // Confirm whether the check should also require primaryPause.isEmpty.
              if (secondaryPause.isEmpty)
                unstashAll()
            }
            case terminator: TerminatorMessage => {
              if (terminator.messageID == pause.messageID)
                handleTerminator(terminator)
              else
                stash()
            }
            case other => stash()
          }
        }
        case None => reteMessage match {
          case pause: Pause => primaryPause = Some(pause)
          case cs: ChangeSet => onPrimary(cs); // printForwarding(cs)
          case t: TerminatorMessage => handleTerminator(t)
        }
      }
    }
    case Secondary(reteMessage: ReteMessage) => {
      // Mirror of the Primary branch, operating on secondaryPause/onSecondary.
      secondaryPause match {
        case Some(pause) => {
          reteMessage match {
            case resume: Resume => {
              if (resume.messageID == pause.messageID)
                secondaryPause = None
              if (primaryPause.isEmpty)
                unstashAll()
            }
            case terminator: TerminatorMessage => {
              if (terminator.messageID == pause.messageID)
                handleTerminator(terminator)
              else
                stash()
            }
            case other => stash()
          }
        }
        case None => reteMessage match {
          case pause: Pause => secondaryPause = Some(pause)
          case cs: ChangeSet => onSecondary(cs); //printForwarding(cs)
          case t: TerminatorMessage => handleTerminator(t)
        }
      }
    }
    case _: SizeRequest => sender() ! onSizeRequest()
    // Raw (unwrapped) messages indicate a wiring error upstream.
    case other: ReteMessage =>
      throw new UnsupportedOperationException(
        s"$name received raw message, needs to be wrapped as Primary or Secondary")
  }

  def onSizeRequest(): Long
}
| FTSRG/ire | src/main/scala/hu/bme/mit/ire/nodes/binary/BinaryNode.scala | Scala | epl-1.0 | 2,489 |
package ro.redeul.katas.trees
import scala.annotation.tailrec
/**
 * Algorithms over binary [[Tree]]s (fields: `data: Int`, `left`, `right`,
 * with `null` marking absent children).
 *
 * Fix: `nonRecursiveTraversal` now returns `Nil` for a `null` root, matching
 * the behaviour of `traverse`; previously it threw a `MatchError`.
 */
object TreeUtilities {
  /**
   * Counts downward paths starting at `node` whose node values sum to `target`.
   * A path may end at any node along the way (each prefix that hits the target
   * counts once).
   */
  def countPaths(node: Tree, target: Int): Int = {
    // Remaining sum after consuming this node's value.
    val delta = target - node.data
    (if (delta == 0) 1 else 0) + (node match {
      case Tree(_, null, null) => 0
      case Tree(_, l, null) => countPaths(l, delta)
      case Tree(_, null, r) => countPaths(r, delta)
      case Tree(_, l, r) => countPaths(l, delta) + countPaths(r, delta)
    })
  }
  /**
   * Like [[countPaths]], but returns the actual value sequences of all
   * downward paths starting at `n` that sum to `target`.
   */
  def findPaths(n: Tree, target: Int): Seq[Seq[Int]] = {
    def _findPaths(n: Tree, target: Int, path: Seq[Int]): Seq[Seq[Int]] = {
      val newPath = path :+ n.data
      val delta = target - n.data
      (if (delta == 0) Seq(newPath) else Nil) ++ (n match {
        case Tree(_, null, null) => Nil
        case Tree(_, l, null) => _findPaths(l, delta, newPath)
        case Tree(_, null, r) => _findPaths(r, delta, newPath)
        case Tree(_, l, r) => _findPaths(l, delta, newPath) ++ _findPaths(r, delta, newPath)
      })
    }
    _findPaths(n, target, Nil)
  }
  /**
   * Rebalances a "mobile" tree by increasing the lighter subtree's root weight
   * until both sides weigh the same. Returns `None` if any node has exactly
   * one child (such a mobile cannot be balanced by this scheme).
   *
   * @param plateWeight fixed weight added per hanger when propagating totals
   */
  def balanceTree(t: Tree, plateWeight: Int): Option[Tree] = {
    // Returns (total weight of the subtree, balanced subtree), or None.
    def _balance(n: Tree): Option[(Int, Tree)] = {
      n match {
        case Tree(w, null, null) => Some(w -> n)
        case Tree(w, _l, _r) if _l != null && _r != null =>
          _balance(_l) flatMap {
            case (lWeight, lNode) =>
              _balance(_r) map {
                case (rWeight, rNode) =>
                  // Pad the lighter side's root so both subtrees weigh the same.
                  if (lWeight < rWeight)
                    (rWeight, new Tree(lNode.data + (rWeight - lWeight), lNode.left, lNode.right), rNode)
                  else
                    (lWeight, lNode, new Tree(rNode.data + (lWeight - rWeight), rNode.left, rNode.right))
              }
          } map {
            case (maxChildWeight, lNode, rNode) =>
              // Total: own weight plus both (equalized) children and plates.
              w + 2 * (maxChildWeight + plateWeight) -> new Tree(w, lNode, rNode)
          }
        // A node with exactly one child cannot be balanced.
        case _ => None
      }
    }
    _balance(t) map { case (w, tree) => tree }
  }
  /** Recursive in-order traversal (left, node, right); `null` yields `Nil`. */
  def traverse(node: Tree): Seq[Int] = {
    node match {
      case null => Nil
      case Tree(data, left, right) => (traverse(left) :+ data) ++ traverse(right)
    }
  }
  /**
   * Tail-recursive in-order traversal using an explicit work queue.
   * Produces the same sequence as [[traverse]].
   */
  def nonRecursiveTraversal(node: Tree): Seq[Int] = {
    @tailrec
    def _traverse(node: Tree, queue: Seq[Tree], out: Seq[Int]): Seq[Int] = {
      node match {
        case Tree(d, null, null) =>
          // Leaf (or re-queued node): emit and pop the next pending node.
          queue match {
            case Nil => out :+ d
            case h :: t => _traverse(h, t, out :+ d)
          }
        case Tree(d, null, r) => _traverse(r, queue, out :+ d)
        // Descend left, re-queue this node with its left child detached.
        case Tree(d, l, r) => _traverse(l, node.copy(left = null) +: queue, out)
      }
    }
    // Consistent with traverse: an empty tree yields an empty sequence.
    if (node == null) Nil else _traverse(node, Seq(), Seq())
  }
}
| mtoader/katas | trees/src/main/scala/ro/redeul/katas/trees/TreeUtilities.scala | Scala | mit | 2,682 |
package com.github.chaabaj.openid.apis.google
import akka.http.scaladsl.model.{HttpRequest, StatusCodes}
import com.github.chaabaj.openid.HttpClient
import com.github.chaabaj.openid.exceptions.{OAuthException, WebServiceException}
import com.github.chaabaj.openid.oauth.{AccessTokenError, AccessTokenRequest, AccessTokenSuccess}
import org.specs2.mock.Mockito
import org.specs2.mutable.Specification
import spray.json._
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
/** Unit tests for [[GoogleOAuthClient]] token issuing, using a mocked HTTP layer. */
class GoogleOAuthClientSpec extends Specification with Mockito {
  // Builds a client whose httpClient is a mock; getUserInfo is not exercised here.
  private def createService(): GoogleOAuthClient =
    new GoogleOAuthClient {
      override val httpClient: HttpClient = smartMock[HttpClient]
      override protected def accessTokenUrl: String = "http://example.com"
      override def getUserInfo(token: AccessTokenSuccess)(implicit exc: ExecutionContext): Future[UserInfo] = ???
    }
  // Maximum time to wait on the (mocked, already-completed) futures.
  private val duration = 10.seconds
  import scala.concurrent.ExecutionContext.Implicits.global
  "#should retrieve token" >> {
    val service = createService()
    // Canned Google token endpoint response.
    val response =
      """
        |{
        | "access_token": "ya29.Ci_OA_Y9x0Bm19gpLdkw0nAdeE4oGrO5zC_9GgO8Xif77cPCqPYM0pi2YVby7BrZMw",
        | "token_type": "Bearer",
        | "expires_in": 3600,
        | "refresh_token": "1/u_yqwtrZepXQSiX3pWB-m5WxXFp6TaW1Jybu83rJlBbZ4W-rkFhlkCeTmfs-4SGy"
        |}
      """.stripMargin.parseJson
    service.httpClient.request(any[HttpRequest])(any[ExecutionContext]) returns Future.successful(response)
    val token = Await.result(service.issueOAuthToken(AccessTokenRequest("test", "http://test.com", "id", "")), duration)
    // The client should parse the JSON into the same AccessTokenSuccess.
    val expectedToken = response.convertTo[AccessTokenSuccess]
    token must equalTo(expectedToken)
  }
  "should fails to retreive a token with an OAuthException" >> {
    val service = createService()
    // Canned OAuth error payload; returned with HTTP 400.
    val response =
      """
        | {
        |   "error": "invalid_grant",
        |   "error_description": "Invalid grant"
        | }
      """.stripMargin.parseJson
    val error = WebServiceException(StatusCodes.BadRequest, response)
    service.httpClient.request(any[HttpRequest])(any[ExecutionContext]) returns Future.failed(error)
    // A web-service failure with an OAuth body must surface as OAuthException.
    Await.result(service.issueOAuthToken(AccessTokenRequest("test", "http://test.com", "id", "")), duration) must throwA[OAuthException[AccessTokenError]]
  }
  "should fails with a RuntimeException" >> {
    val service = createService()
    // Non-OAuth failures must propagate unchanged.
    service.httpClient.request(any[HttpRequest])(any[ExecutionContext]) returns Future.failed(new RuntimeException)
    Await.result(service.issueOAuthToken(AccessTokenRequest("test", "http://test.com", "id", "")), duration) must throwA[RuntimeException]
  }
}
| chaabaj/openid-scala | src/test/scala/com/github/chaabaj/openid/apis/google/GoogleOAuthClientSpec.scala | Scala | mit | 2,721 |
package org.littlewings.javaee7.beanvalidation
import javax.validation.{ConstraintValidator, ConstraintValidatorContext}
import org.jboss.logging.Logger
/**
 * Validator for the custom @MySize constraint: checks that a String's length
 * lies within the annotation's [min, max] bounds.
 */
class MySizeValidator extends ConstraintValidator[MySize, String] {
  // Created once per validator instance instead of on every isValid call.
  private val logger = Logger.getLogger(getClass)
  // Bounds copied from the annotation in initialize().
  var min: Int = _
  var max: Int = _
  /** Captures the min/max bounds declared on the @MySize annotation. */
  override def initialize(constraintAnnotation: MySize): Unit = {
    min = constraintAnnotation.min
    max = constraintAnnotation.max
  }
  /**
   * Returns true when `value` is non-null and its length is within [min, max].
   *
   * NOTE(review): this treats null as invalid; the Bean Validation convention
   * is for size-style constraints to accept null (leaving null checks to
   * @NotNull) — confirm the deviation is intentional.
   */
  override def isValid(value: String, context: ConstraintValidatorContext): Boolean = {
    // asInstanceOf[Any] disambiguates the varargs overload of infof.
    logger.infof("Constraint[%s], property[%s]", classOf[MySize].getSimpleName, value.asInstanceOf[Any])
    value != null && value.size >= min && value.size <= max
  }
}
| kazuhira-r/javaee7-scala-examples | bean-validation-list/src/main/scala/org/littlewings/javaee7/beanvalidation/MySizeValidator.scala | Scala | mit | 708 |
package org.randi3.utility
import org.scalatest.FunSpec
import org.scalatest.matchers.{ShouldMatchers, MustMatchers}
import org.randi3.model.TrialSubject
/** Checks that the mail utility produces non-empty localized mail bodies. */
class UtilityMailSpec extends FunSpec with MustMatchers {
  val utilityMail = TestingEnvironment.utilityMail
  describe("The randomization mail method") {
    it("should return the i18n mail content") {
      val trial = TestingEnvironment.createTrial
      val treatmentArm = trial.treatmentArms.head
      // Build a subject fixture; a Left from validation fails the test.
      val trialSubject = TrialSubject(identifier = "abc", investigatorUserName = "username", trialSite = TestingEnvironment.createTrialSite, properties = Nil).toEither match {
        case Left(x) => fail(x.toString())
        case Right(subject) => subject
      }
      utilityMail.getRandomizedMailContent(trial, treatmentArm, trialSubject) must not be('Empty)
    }
  }
  describe("The registration mail method") {
    it("should return the i18n mail content") {
      val user = TestingEnvironment.createUser
      utilityMail.getRegisteredMailContent(user) must not be('Empty)
    }
  }
}
| dschrimpf/randi3-core | src/test/scala/org/randi3/utility/UtilityMailSpec.scala | Scala | gpl-3.0 | 1,058 |
package com.sksamuel.elastic4s
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse
import org.elasticsearch.client.Client
import org.elasticsearch.common.unit.TimeValue
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
/**
 * Implicit executors turning task-management definitions into requests against
 * the Elasticsearch cluster admin client.
 */
trait TaskApi {

  /** Executes a [[ListTasksDefinition]], yielding the raw [[ListTasksResponse]]. */
  implicit object ListTasksDefinitionExecutable
    extends Executable[ListTasksDefinition, ListTasksResponse, ListTasksResponse] {
    override def apply(client: Client, d: ListTasksDefinition): Future[ListTasksResponse] = {
      val request = client.admin().cluster().prepareListTasks(d.nodeIds: _*)
      // Optional flags are only applied when the definition sets them.
      d.waitForCompletion.foreach(w => request.setWaitForCompletion(w))
      d.detailed.foreach(det => request.setDetailed(det))
      injectFuture(request.execute)
    }
  }

  /** Executes a [[CancelTasksDefinition]], yielding the raw [[CancelTasksResponse]]. */
  implicit object CancelTasksDefinitionExecutable
    extends Executable[CancelTasksDefinition, CancelTasksResponse, CancelTasksResponse] {
    override def apply(client: Client, d: CancelTasksDefinition): Future[CancelTasksResponse] = {
      val request = client.admin().cluster().prepareCancelTasks(d.nodeIds: _*)
      // Timeout is carried as a FiniteDuration; convert to an ES TimeValue.
      d.timeout.foreach(t => request.setTimeout(TimeValue.timeValueNanos(t.toNanos)))
      request.setActions(d.actions: _*)
      injectFuture(request.execute)
    }
  }

  /** Executes a [[PendingClusterTasksDefinition]], yielding the raw response. */
  implicit object PendingClusterTasksDefinitionExecutable
    extends Executable[PendingClusterTasksDefinition, PendingClusterTasksResponse, PendingClusterTasksResponse] {
    override def apply(client: Client, d: PendingClusterTasksDefinition): Future[PendingClusterTasksResponse] = {
      val request = client.admin().cluster().preparePendingClusterTasks()
      request.setLocal(d.local)
      d.masterNodeTimeout.foreach(t => request.setMasterNodeTimeout(TimeValue.timeValueNanos(t.toNanos)))
      injectFuture(request.execute)
    }
  }
}
/** Parameters for a cancel-tasks request, passed through to `prepareCancelTasks`. */
case class CancelTasksDefinition(nodeIds: Seq[String],
                                 timeout: Option[FiniteDuration] = None,
                                 actions: Seq[String] = Nil)
/** Parameters for a pending-cluster-tasks request; `local` is always applied. */
case class PendingClusterTasksDefinition(local: Boolean,
                                         masterNodeTimeout: Option[FiniteDuration] = None)
/** Parameters for a list-tasks request; optional flags applied only when set. */
case class ListTasksDefinition(nodeIds: Seq[String],
                               detailed: Option[Boolean] = None,
                               waitForCompletion: Option[Boolean] = None)
| muuki88/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/TaskApi.scala | Scala | apache-2.0 | 2,516 |
package org.bitcoins.core.serializers.p2p.messages
import org.bitcoins.core.crypto.DoubleSha256Digest
import org.bitcoins.core.gcs.FilterType
import org.bitcoins.core.p2p.CompactFilterCheckPointMessage
import org.bitcoins.testkit.util.BitcoinSUnitTest
import scodec.bits._
/**
 * Serialization round-trip tests for [[CompactFilterCheckPointMessage]].
 * Wire layout exercised below: 1 type byte, 32-byte stop hash, varint header
 * count, then 32 bytes per filter header.
 */
class RawCompactFilterCheckPointMessageSerializerTest extends BitcoinSUnitTest {
  it must "parse a message" in {
    // cribbed from a P2P log dump with Bitcoin-S node
    val bytes = hex"00" ++ // type
      hex"6f0ee334fbba823804e14042c33bc5dfa5126e5076d8dcff02d4a045f266f427" ++ // stop hash
      hex"03" ++ // num filter headers
      hex"93daaed620ff44fb7a760860ee8084f7f8c722d6c7ea9d1e1a35059253f876e6" ++ // filter headers
      hex"2c2faad9d5e25594914772dc815e157debf385cbe5de5a0aea59d15af42b19ad" ++
      hex"dd9cc1baf1453d682d27958c0f64c97a69249d655151c3b25b1ef1a993ec4f4f"
    val message = CompactFilterCheckPointMessage.fromBytes(bytes)
    assert(message.filterType == FilterType.Basic)
    assert(
      message.stopHash == DoubleSha256Digest.fromHex(
        "6f0ee334fbba823804e14042c33bc5dfa5126e5076d8dcff02d4a045f266f427"))
    assert(
      message.filterHeaders == Vector(
        DoubleSha256Digest.fromHex(
          "93daaed620ff44fb7a760860ee8084f7f8c722d6c7ea9d1e1a35059253f876e6"),
        DoubleSha256Digest.fromHex(
          "2c2faad9d5e25594914772dc815e157debf385cbe5de5a0aea59d15af42b19ad"),
        DoubleSha256Digest.fromHex(
          "dd9cc1baf1453d682d27958c0f64c97a69249d655151c3b25b1ef1a993ec4f4f")
      ))
  }
  it must "have serialization symmetry" in {
    // Same fixture as above: parse then re-serialize must be byte-identical.
    val bytes = hex"00" ++
      hex"6f0ee334fbba823804e14042c33bc5dfa5126e5076d8dcff02d4a045f266f427" ++
      hex"03" ++
      hex"93daaed620ff44fb7a760860ee8084f7f8c722d6c7ea9d1e1a35059253f876e6" ++
      hex"2c2faad9d5e25594914772dc815e157debf385cbe5de5a0aea59d15af42b19ad" ++
      hex"dd9cc1baf1453d682d27958c0f64c97a69249d655151c3b25b1ef1a993ec4f4f"
    val message = CompactFilterCheckPointMessage.fromBytes(bytes)
    assert(message.bytes == bytes)
    // Stress case: 20000 headers exercises the multi-byte varint count.
    val biggerMessage = CompactFilterCheckPointMessage(
      filterType = FilterType.Basic,
      stopHash = DoubleSha256Digest.fromHex(
        "0000000000000000000000000000000000000000000000000000000000000001"),
      filterHeaders = 1.to(20000).toVector.map(_ => DoubleSha256Digest.empty)
    )
    val biggerBytes = biggerMessage.bytes
    assert(
      biggerBytes.size == 1 + // type size
        32 + // stop hash size
        3 + // num filter headers size
        20000 * 32) // filter headers size
    val parsedBiggerMessage =
      CompactFilterCheckPointMessage.fromBytes(biggerBytes)
    assert(biggerMessage == parsedBiggerMessage)
    assert(biggerBytes == parsedBiggerMessage.bytes)
  }
}
| bitcoin-s/bitcoin-s-core | core-test/src/test/scala/org/bitcoins/core/serializers/p2p/messages/RawCompactFilterCheckPointMessageSerializerTest.scala | Scala | mit | 2,748 |
/**
* Created by fqc on 5/25/16.
*/
//class Point(xc: Int, yc: Int) {
//
// var x: Int = xc
// var y: Int = yc
//
// def move(xc: Int, yc: Int): Unit = {
// x = xc
// y = yc
// println(x)
// println(y)
//
// }
//
//}
/**
 * A mutable 2-D point. The constructor arguments are kept as read-only
 * `xc`/`yc`, while `x`/`y` track the current (movable) position.
 */
class Point(val xc: Int, val yc: Int) {
  /** Current x coordinate; starts at the constructor value. */
  var x: Int = xc
  /** Current y coordinate; starts at the constructor value. */
  var y: Int = yc

  /** Translates this point by (dx, dy) and prints the new coordinates. */
  def move(dx: Int, dy: Int): Unit = {
    x += dx
    y += dy
    println("x 的坐标点 : " + x)
    println("y 的坐标点 : " + y)
  }
}
/** Demo entry point: creates a point at (1, 2) and moves it by (1, 2). */
object Point {
  def main(args: Array[String]): Unit =
    new Point(1, 2).move(1, 2)
}
| fqc/Scala_sidepro | src/mytest/Point.scala | Scala | mit | 567 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.logical
import java.util.{List => JList}
import org.apache.calcite.plan._
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.core.{Aggregate, AggregateCall}
import org.apache.calcite.rel.logical.LogicalAggregate
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.sql.SqlKind
import org.apache.calcite.util.ImmutableBitSet
import org.apache.flink.table.plan.nodes.FlinkConventions
import scala.collection.JavaConversions._
/**
 * Calcite [[Aggregate]] in Flink's LOGICAL convention. Construction is
 * delegated entirely to the parent class; this node only contributes its
 * copy semantics and a cost model.
 */
class FlinkLogicalAggregate(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    child: RelNode,
    indicator: Boolean,
    groupSet: ImmutableBitSet,
    groupSets: JList[ImmutableBitSet],
    aggCalls: JList[AggregateCall])
  extends Aggregate(cluster, traitSet, child, indicator, groupSet, groupSets, aggCalls)
  with FlinkLogicalRel {

  /** Re-creates this aggregate with the given traits, input and calls. */
  override def copy(
      traitSet: RelTraitSet,
      input: RelNode,
      indicator: Boolean,
      groupSet: ImmutableBitSet,
      groupSets: JList[ImmutableBitSet],
      aggCalls: JList[AggregateCall]): Aggregate =
    new FlinkLogicalAggregate(cluster, traitSet, input, indicator, groupSet, groupSets, aggCalls)

  /** Cost model: CPU scales with rows x aggregate calls, I/O with rows x row size. */
  override def computeSelfCost(planner: RelOptPlanner, metadata: RelMetadataQuery): RelOptCost = {
    val input = this.getInput
    val rowCount = metadata.getRowCount(input)
    val rowSize = this.estimateRowSize(input.getRowType)
    val aggCount = this.getAggCallList.size
    planner.getCostFactory.makeCost(rowCount, rowCount * aggCount, rowCount * rowSize)
  }
}
/**
 * Converts a [[LogicalAggregate]] into a [[FlinkLogicalAggregate]].
 * Matches only aggregates without DISTINCT calls and without functions that
 * must first be decomposed by Calcite's AggregateReduceFunctionsRule.
 */
private class FlinkLogicalAggregateConverter
  extends ConverterRule(
    classOf[LogicalAggregate],
    Convention.NONE,
    FlinkConventions.LOGICAL,
    "FlinkLogicalAggregateConverter") {

  // Functions we cannot translate natively; they are rewritten by
  // AggregateReduceFunctionsRule before this rule can fire.
  private val unsupportedKinds: Set[SqlKind] =
    Set(SqlKind.STDDEV_POP, SqlKind.STDDEV_SAMP, SqlKind.VAR_POP, SqlKind.VAR_SAMP)

  override def matches(call: RelOptRuleCall): Boolean = {
    val agg = call.rel(0).asInstanceOf[LogicalAggregate]
    val allSupported = agg.getAggCallList.forall { aggCall =>
      !unsupportedKinds.contains(aggCall.getAggregation.getKind)
    }
    !agg.containsDistinctCall() && allSupported
  }

  override def convert(rel: RelNode): RelNode = {
    val agg = rel.asInstanceOf[LogicalAggregate]
    val convertedInput = RelOptRule.convert(agg.getInput, FlinkConventions.LOGICAL)
    new FlinkLogicalAggregate(
      rel.getCluster,
      rel.getTraitSet.replace(FlinkConventions.LOGICAL),
      convertedInput,
      agg.indicator,
      agg.getGroupSet,
      agg.getGroupSets,
      agg.getAggCallList)
  }
}
object FlinkLogicalAggregate {
  /** Singleton rule converting LogicalAggregate into FlinkLogicalAggregate. */
  val CONVERTER: ConverterRule = new FlinkLogicalAggregateConverter()
}
| haohui/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/logical/FlinkLogicalAggregate.scala | Scala | apache-2.0 | 3,631 |
package com.github.probe.android
import org.joda.time.Duration
import android.os.{ Message, Looper, Handler }
/**
 * Android [[Handler]] with actor-style `!` send operators and a
 * joda-time [[Duration]] variant of `postDelayed`.
 */
abstract class RichHandler(looper: Looper) extends Handler(looper) {
  // Each sender returns the boolean result of the underlying Handler call
  // (true if the message was placed into the queue).
  final def !(what: Int) = sendEmptyMessage(what)
  final def !(what: Int, obj: AnyRef) = sendMessage(obtainMessage(what, obj))
  final def !(what: Int, arg1: Int, arg2: Int) = sendMessage(obtainMessage(what, arg1, arg2))
  final def !(what: Int, arg1: Int, arg2: Int, obj: AnyRef) = sendMessage(obtainMessage(what, arg1, arg2, obj))

  /** Schedules the by-name block `f` to run after `delay`; result is discarded. */
  final def postDelayed[T](f: => T, delay: Duration): Boolean = {
    postDelayed(new Runnable {
      def run() {
        f
      }
    }, delay.getMillis)
  }
}
/**
 * [[RichHandler]] whose message codes come from the enumeration `whats`:
 * each enum value's `id` is used as the raw `what` code.
 */
abstract class EnumHandler[W <: Enumeration](looper: Looper, whats: W) extends RichHandler(looper) {
  // Path-dependent type of the enumeration's values.
  private type WhatT = whats.Value
  final def !(what: WhatT): Boolean = this ! (what.id)
  final def !(what: WhatT, obj: AnyRef): Boolean = this ! (what.id, obj)
  final def !(what: WhatT, arg1: Int, arg2: Int): Boolean = this ! (what.id, arg1, arg2)
  final def !(what: WhatT, arg1: Int, arg2: Int, obj: AnyRef): Boolean = this ! (what.id, arg1, arg2, obj)
}
/**
 * Handler restricted to messages of type `T`. Messages sent via `!` are
 * tagged with the sentinel pair (magicArg1, magicArg2) plus the identity hash
 * of the payload's runtime class as the `what` code; anything else arriving
 * in handleMessage is rejected with an exception.
 */
trait RestrictedHandler[T <: AnyRef] extends RichHandler {
  val magicArg1 = 42
  val magicArg2 = 653643

  // NOTE(review): identityHashCode of distinct Class objects is not guaranteed
  // unique — a collision would conflate two message types; confirm acceptable.
  private final def msgClassId[A <: T](msgClass: Class[A]) = System.identityHashCode(msgClass)

  final def !(msg: T): Boolean = this ! (msgClassId(msg.getClass), magicArg1, magicArg2, msg)

  /** True if a message of class `msgClass` (sent via `!`) is still queued. */
  final def hasMessages[A <: T](msgClass: Class[A]): Boolean = hasMessages(msgClassId(msgClass))

  final override def handleMessage(msg: Message) {
    if (msg.arg1 == magicArg1 && msg.arg2 == magicArg2) {
      handleMessage(msg.obj.asInstanceOf[T])
    } else {
      throw new IllegalArgumentException("Unexpected message: %s" format msg)
    }
  }

  /** Typed handler implemented by concrete subclasses. */
  def handleMessage(msg: T)
}
/* *\\
** \\ \\ / _) \\ \\ / \\ | **
** \\ \\ / | __ \\ _ \\ __| \\ \\ / |\\/ | **
** \\ \\ / | | | __/ | \\ \\ / | | **
** \\_/ _| .__/ \\___| _| \\_/ _| _| **
** _| **
** **
** ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **
** **
** http://www.vipervm.org **
** GPLv3 **
\\* */
package org.vipervm.platform.opencl
import org.vipervm.platform.{Event,EventPollingThread}
import org.vipervm.bindings.{opencl => cl}
/**
 * [[Event]] backed by a native OpenCL event. On construction the event
 * registers itself with [[EventPollingThread]], which polls `test` until
 * completion.
 */
class OpenCLEvent(val peer:cl.Event) extends Event {
  /** Blocks until the underlying OpenCL event has completed. */
  override def syncWait: Unit = peer.syncWait

  /** Polls the native event; marks this Event complete when it has finished. */
  override def test:Boolean = {
    if (peer.completed)
      complete
    completed
  }

  //TODO:we should only use polling with OpenCL 1.0. Callbacks are supported as of OpenCL 1.1
  //TODO: ATI CPU implementation doesn't perform asynchronous data transfers if no synchronous wait is performed...
  EventPollingThread.monitorEvent(this)
}
| hsyl20/Scala_ViperVM | src/main/scala/org/vipervm/platform/opencl/Event.scala | Scala | gpl-3.0 | 1,239 |
package rtmp.amf
import akka.util.ByteStringBuilder
/**
 * Serializer for objects of type `T` into the AMF wire format, appending
 * the encoded bytes to the supplied [[ByteStringBuilder]].
 */
abstract class AmfObjectWriter[T] {
  def write(builder:ByteStringBuilder, obj:T)
}
| vimvim/AkkaTest | src/main/scala/rtmp/amf/AmfObjectWriter.scala | Scala | agpl-3.0 | 151 |
/**
* Copyright 2016, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.doperables.serialization
import org.apache.spark.SparkContext
import io.deepsense.commons.serialization.Serialization
import io.deepsense.commons.utils.Logging
/**
 * Persists arbitrary serializable objects through Spark: the serialized bytes
 * are written as a single-partition text file (one byte's decimal string per
 * line) and read back via the inverse transformation.
 */
object CustomPersistence extends Logging {
  /** Serializes `instance` and stores it under `path` via Spark. */
  def save[T](sparkContext: SparkContext, instance: T, path: String): Unit = {
    val serialized: Array[Byte] = Serialization.serialize(instance)
    // One partition keeps the byte order intact on the read side.
    sparkContext.parallelize(serialized, 1).saveAsTextFile(path)
  }

  /** Loads and deserializes an object previously written by [[save]]. */
  def load[T](sparkContext: SparkContext, path: String): T = {
    logger.debug("Reading objects from: {}", path)
    val bytes: Array[Byte] = sparkContext.textFile(path).map(_.toByte).collect()
    Serialization.deserialize(bytes)
  }
}
| deepsense-io/seahorse-workflow-executor | deeplang/src/main/scala/io/deepsense/deeplang/doperables/serialization/CustomPersistence.scala | Scala | apache-2.0 | 1,314 |
/*
* Graph.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.fscape
import de.sciss.fscape.graph.{Constant, ConstantD, ConstantI, ConstantL}
import de.sciss.serial
import de.sciss.serial.{DataInput, DataOutput}
import java.net.URI
import scala.annotation.switch
import scala.collection.immutable.{IndexedSeq => Vec}
import scala.collection.mutable
object Graph {
  /** Collects [[Lazy]] graph elements while a graph body is being evaluated. */
  trait Builder {
    def addLazy(x: Lazy): Unit
    def removeLazy(x: Lazy): Unit
  }
  /** This is analogous to `SynthGraph.Builder` in ScalaCollider. */
  def builder: Builder = builderRef.get()
  /** Installs a custom graph builder on the current thread,
    * during the invocation of a closure. This method is typically
    * called from other libraries which wish to provide a graph
    * builder other than the default.
    *
    * When the method returns, the previous graph builder has automatically
    * been restored. During the execution of the `body`, calling
    * `Graph.builder` will return the given `builder` argument.
    *
    * @param builder   the builder to install on the current thread
    * @param body      the body which is executed with the builder found through `Graph.builder`
    * @tparam A        the result type of the body
    * @return          the result of executing the body
    */
  def use[A](builder: Builder)(body: => A): A = {
    val old = builderRef.get()
    builderRef.set(builder)
    try {
      body
    } finally {
      builderRef.set(old)
    }
  }
  // Per-thread current builder; defaults to the no-op dummy outside Graph { }.
  private[this] val builderRef: ThreadLocal[Builder] = new ThreadLocal[Builder] {
    override protected def initialValue: Builder = BuilderDummy
  }
  // No-op builder used when no Graph { } body is being evaluated.
  private[this] object BuilderDummy extends Builder {
    def addLazy   (x: Lazy): Unit = ()
    def removeLazy(x: Lazy): Unit = ()
  }
  /** Evaluates `thunk` with a fresh builder installed and returns the
    * resulting graph of all lazy elements created inside.
    */
  def apply(thunk: => Any): Graph = {
    val b   = new BuilderImpl
    val old = builderRef.get()
    builderRef.set(b)
    try {
      thunk
      b.build
    } finally {
      builderRef.set(old) // BuilderDummy
    }
  }
  /** Deserializer for one graph-element type, keyed by its `productPrefix`. */
  trait ProductReader[+A] {
    def read(in: RefMapIn, key: String, arity: Int): A
  }
  // Registry of element readers, keyed by (package-stripped) class name.
  private val mapRead = mutable.Map.empty[String, ProductReader[Product]]
  final val DefaultPackage = "de.sciss.fscape.graph"
  /** Derives the `productPrefix` served by the reader by the reader's class name itself. */
  def addProductReaderSq(xs: Iterable[ProductReader[Product]]): Unit = {
    val m = mapRead
    m.synchronized {
      xs.foreach { value =>
        val cn    = value.getClass.getName
        val nm    = cn.length - 1
        // Companion objects end in '$'; strip it from the key.
        val isObj = cn.charAt(nm) == '$'
        val j     = cn.lastIndexOf('.')
        val pkg   = cn.substring(0, j)
        // Classes in the default package are keyed by simple name only.
        val i     = if (pkg == DefaultPackage) DefaultPackage.length + 1 else 0
        val key   = if (isObj) cn.substring(i, nm) else cn.substring(i)
        m += ((key, value))
      }
    }
  }
  // Version byte written after the 'u' cookie for serialized URIs.
  private final val URI_SER_VERSION = 2
  /** Serializer for graph elements; each element is tagged with a one-byte cookie
    * ('C' constant, 'Y' graph, 'u' URI) ahead of its payload.
    */
  final class RefMapOut(out0: DataOutput) extends serial.RefMapOut(out0) {
    override protected def isDefaultPackage(pck: String): Boolean =
      pck == DefaultPackage
    override def writeElem(e: Any): Unit = e match {
      case c: Constant =>
        // 'C' cookie, then a sub-cookie for the numeric type.
        out.writeByte('C')
        if (c.isDouble) {
          out.writeByte('d')
          out.writeDouble(c.doubleValue)
        } else if (c.isInt) {
          out.writeByte('i')
          out.writeInt(c.intValue)
        } else if (c.isLong) {
          out.writeByte('l')
          out.writeLong(c.longValue)
        }
      case y: Graph =>  // important to handle Graph explicitly, as `apply` is overloaded!
        out.writeByte('Y')
        writeIdentifiedGraph(y)
      case _ => super.writeElem(e)
    }
    /** Writes a graph's sources without the leading 'Y' cookie. */
    def writeIdentifiedGraph(y: Graph): Unit =
      writeVec(y.sources, writeElem)
    override protected def writeCustomElem(e: Any): Any =
      e match {
        //      case f: File =>
        //        out.writeByte('f')
        //        out.writeUTF(f.getPath)
        //        ref0
        case u: URI =>
          out.writeByte('u')
          out.writeByte(URI_SER_VERSION)
          out.writeUTF(u.toString)
        case _ => super.writeCustomElem(e)
      }
  }
  /** Deserializer counterpart of [[RefMapOut]]. */
  final class RefMapIn(in0: DataInput) extends serial.RefMapIn[RefMapIn](in0) {
    type Const = Constant
    type Y = Graph
    type U = URI
    override protected def readProductWithKey(key: String, arity: Int): Product = {
      // Look up the element reader registered via addProductReaderSq.
      val r = mapRead.getOrElse(key, throw new NoSuchElementException(s"Unknown element '$key'"))
      r.read(this, key, arity)
    }
    override protected def readIdentifiedConst(): Constant =
      (in.readByte().toChar: @switch) match {
        case 'd' => ConstantD(in.readDouble())
        case 'i' => ConstantI(in.readInt())
        case 'l' => ConstantL(in.readLong())
      }
    //    override protected def readCustomElem(cookie: Char): Any =
    //      if (cookie == 'f') { // backwards compatibility
    //        val path = in.readUTF()
    //        fileToURI(path)
    //      } else {
    //        super.readCustomElem(cookie)
    //      }
    /** Reads a 'u'-tagged URI, failing on any other cookie. */
    def readURI(): URI = {
      val cookie = in0.readByte().toChar
      if (cookie != 'u') unexpectedCookie(cookie, 'u')
      readIdentifiedU()
    }
    override protected def readIdentifiedU(): U = {
      // Artifact.Value.read(in)
      // XXX TODO: copy from Lucre. Not nice, but we do not want to depend on it in `core`
      val ver = in.readByte()
      if (ver != URI_SER_VERSION) {
        //        if (ver == 1) {   // old school plain path
        //          val filePath = in.readUTF()
        //          return fileToURI(filePath)
        //        }
        sys.error(s"Unexpected serialization version ($ver != $URI_SER_VERSION)")
      }
      val str = in.readUTF()
      // An empty string encodes the "empty" URI value.
      if (str.isEmpty) /*Value.empty*/ new URI(null, "", null) else new URI(str)
    }
    override def readIdentifiedY(): Graph = readIdentifiedGraph()
    /** Reads a 'Y'-tagged graph, failing on any other cookie. */
    def readGraph(): Graph = {
      val cookie = in0.readByte().toChar
      if (cookie != 'Y') unexpectedCookie(cookie, 'Y')
      readIdentifiedGraph()
    }
    /** Reads a graph's sources without the leading 'Y' cookie. */
    def readIdentifiedGraph(): Graph = {
      val sources = readVec(readProductT[Lazy]())
      Graph(sources /* , controls.result() */)
    }
    def readGE(): GE =
      readProduct().asInstanceOf[GE]
  }
  // Default builder used by Graph.apply.
  private[this] final class BuilderImpl extends Builder {
    private var lazies = Vector.empty[Lazy]
    override def toString = s"fscape.Graph.Builder@${hashCode.toHexString}"
    def build: Graph = Graph(lazies)
    def addLazy(g: Lazy): Unit = lazies :+= g
    // Fast path when removing the most recently added element.
    def removeLazy(g: Lazy): Unit =
      lazies = if (lazies.last == g) lazies.init else lazies.filterNot(_ == g)
  }
}
/** A data-flow graph: the lazy elements collected by a [[Graph.Builder]]. */
final case class Graph(sources: Vec[Lazy] /* , controlProxies: Set[ControlProxyLike] */) {
  /** `true` if the graph contains no elements. */
  def isEmpty : Boolean  = sources.isEmpty // && controlProxies.isEmpty
  def nonEmpty: Boolean  = !isEmpty
  //  def expand(implicit ctrl: stream.Control): UGenGraph = UGenGraph.build(this)
}
| Sciss/FScape-next | core/shared/src/main/scala/de/sciss/fscape/Graph.scala | Scala | agpl-3.0 | 7,092 |
package org.bitcoins.core.wallet.rescan
/** States a wallet rescan can be in. */
sealed trait RescanState
object RescanState {
  /** Finished a rescan */
  case object RescanDone extends RescanState
  /** A rescan has already been started */
  case object RescanInProgress extends RescanState
}
| bitcoin-s/bitcoin-s | core/src/main/scala/org/bitcoins/core/wallet/rescan/RescanState.scala | Scala | mit | 259 |
package plugin
import plugin.PluginSystem.{Action, GlobalMenu, RepositoryMenu}
/**
 * Contract implemented by plug-ins: identification metadata plus the menus
 * and actions the plug-in contributes at repository and global scope.
 */
trait Plugin {
  // Identification metadata shown for the plug-in.
  val id: String
  val version: String
  val author: String
  val url: String
  val description: String

  /** Menu entries added to each repository's menu. */
  def repositoryMenus : List[RepositoryMenu]
  /** Menu entries added to the global menu. */
  def globalMenus     : List[GlobalMenu]
  /** Actions available in repository context. */
  def repositoryActions : List[Action]
  /** Actions available globally. */
  def globalActions : List[Action]
}
| campolake/gitbucketV2.1 | src/main/scala/plugin/Plugin.scala | Scala | apache-2.0 | 371 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.test.integration
import java.time.Duration
import java.util
import java.util.{Collections, Properties}
import java.util.concurrent.{CountDownLatch, TimeUnit}
import javax.security.auth.login.Configuration
import kafka.server.{KafkaConfig, KafkaServer}
import kafka.utils.{CoreUtils, TestUtils}
import kafka.zk.EmbeddedZookeeper
import org.apache.kafka.clients.admin.{AdminClient, NewTopic}
import org.apache.kafka.clients.consumer.{ConsumerRecord, KafkaConsumer}
import org.apache.kafka.clients.producer.{KafkaProducer, Producer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.samza.Partition
import org.apache.samza.checkpoint.Checkpoint
import org.apache.samza.config._
import org.apache.samza.container.TaskName
import org.apache.samza.context.Context
import org.apache.samza.job.local.ThreadJobFactory
import org.apache.samza.job.model.{ContainerModel, JobModel}
import org.apache.samza.job.{ApplicationStatus, JobRunner, StreamJob}
import org.apache.samza.metrics.MetricsRegistryMap
import org.apache.samza.storage.ChangelogStreamManager
import org.apache.samza.system.{IncomingMessageEnvelope, SystemStreamPartition}
import org.apache.samza.task._
import org.apache.samza.util.ScalaJavaUtil.JavaOptionals
import org.junit.Assert._
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, Buffer, HashMap, SynchronizedMap}
/*
* This creates an singleton instance of TestBaseStreamTask and implement the helper functions to
* 1. start the local ZooKeeper server
* 2. start the local Kafka brokers
* 3. create and validate test topics
* 4. shutdown servers and cleanup test directories and files
*/
/**
 * Shared, static test fixture for StreamTask integration tests: an embedded
 * Zookeeper, a 3-broker Kafka cluster, a byte-array producer, an admin client,
 * and the default Samza job configuration. Tests mutate this state through
 * apply() and the beforeSetupServers/afterCleanLogDirs lifecycle hooks.
 */
object StreamTaskTestUtil {
  val INPUT_TOPIC = "input"
  val TOTAL_TASK_NAMES = 1
  val REPLICATION_FACTOR = 3

  val zkConnectionTimeout = 6000
  val zkSessionTimeout = 6000

  // Cluster handles; initialized in beforeSetupServers, torn down in afterCleanLogDirs.
  var zookeeper: EmbeddedZookeeper = null
  var brokers: String = null
  def zkPort: Int = zookeeper.port
  def zkConnect: String = s"127.0.0.1:$zkPort"

  var producer: Producer[Array[Byte], Array[Byte]] = null
  var adminClient: AdminClient = null

  // Sample checkpoints available to tests.
  val cp1 = new Checkpoint(Map(new SystemStreamPartition("kafka", "topic", new Partition(0)) -> "123").asJava)
  val cp2 = new Checkpoint(Map(new SystemStreamPartition("kafka", "topic", new Partition(0)) -> "12345").asJava)

  /*
   * This is the default job configuration. Each test class can override the default configuration below.
   */
  var jobConfig = Map(
    "job.factory.class" -> classOf[ThreadJobFactory].getCanonicalName,
    "job.coordinator.system" -> "kafka",
    ApplicationConfig.PROCESSOR_ID -> "1",
    "task.inputs" -> "kafka.input",
    "serializers.registry.string.class" -> "org.apache.samza.serializers.StringSerdeFactory",
    "systems.kafka.samza.factory" -> "org.apache.samza.system.kafka.KafkaSystemFactory",
    // Always start consuming at offset 0. This avoids a race condition between
    // the producer and the consumer in this test (SAMZA-166, SAMZA-224).
    "systems.kafka.samza.offset.default" -> "oldest", // applies to a nonempty topic
    "systems.kafka.consumer.auto.offset.reset" -> "smallest", // applies to an empty topic
    "systems.kafka.samza.msg.serde" -> "string",
    // Since using state, need a checkpoint manager
    "task.checkpoint.factory" -> "org.apache.samza.checkpoint.kafka.KafkaCheckpointManagerFactory",
    "task.checkpoint.system" -> "kafka",
    "task.checkpoint.replication.factor" -> "1",
    // However, don't have the inputs use the checkpoint manager
    // since the second part of the test expects to replay the input streams.
    "systems.kafka.streams.input.samza.reset.offset" -> "false")

  /** Merges test-specific overrides into the default job config and resets TestTask registration state. */
  def apply(map: Map[String, String]): Unit = {
    jobConfig ++= map
    TestTask.reset()
  }

  var servers: Buffer[KafkaServer] = null

  /**
   * Starts Zookeeper and a 3-broker Kafka cluster, points the job config at it,
   * builds the producer/admin client, and creates + validates the input topic.
   */
  def beforeSetupServers {
    zookeeper = new EmbeddedZookeeper()
    val props = TestUtils.createBrokerConfigs(3, zkConnect, true)
    val configs = props.map(p => {
      // Topics are created explicitly below; auto-creation would race with the test.
      p.setProperty("auto.create.topics.enable","false")
      KafkaConfig.fromProps(p)
    })
    servers = configs.map(TestUtils.createServer(_)).toBuffer
    brokers = TestUtils.getBrokerListStrFromServers(servers, SecurityProtocol.PLAINTEXT)
    // setup the zookeeper and bootstrap servers for local kafka cluster
    jobConfig ++= Map("systems.kafka.consumer.zookeeper.connect" -> zkConnect,
      "systems.kafka.producer.bootstrap.servers" -> brokers)
    val config = new util.HashMap[String, String]()
    config.put("bootstrap.servers", brokers)
    config.put("request.required.acks", "-1")
    config.put("serializer.class", "kafka.serializer.StringEncoder")
    // Cap in-flight requests at 1 and retry (nearly) forever so test sends stay ordered and durable.
    config.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1")
    config.put(ProducerConfig.RETRIES_CONFIG, (Integer.MAX_VALUE - 1).toString)
    config.put(ProducerConfig.LINGER_MS_CONFIG, "0")
    val producerConfig = new KafkaProducerConfig("kafka", "i001", config)
    adminClient = AdminClient.create(config.asInstanceOf[util.Map[String, Object]])
    producer = new KafkaProducer[Array[Byte], Array[Byte]](producerConfig.getProducerProperties)
    createTopics
    validateTopics
  }

  /** Creates the input topic with the configured partition count and replication factor. */
  def createTopics {
    adminClient.createTopics(Collections.singleton(new NewTopic(INPUT_TOPIC, TOTAL_TASK_NAMES, REPLICATION_FACTOR.shortValue())))
  }

  /**
   * Polls the admin client until the input topic reports the expected partition
   * count, retrying up to 10 times. Fails the test if the topic never validates.
   */
  def validateTopics {
    var done = false
    var attempts = 0
    // Count every attempt (including ones that throw) so an unhealthy cluster
    // cannot spin this loop forever.
    while (!done && attempts < 10) {
      attempts += 1
      try {
        val topicDescriptionFutures = adminClient.describeTopics(Collections.singleton(INPUT_TOPIC)).all()
        val topicDescription = topicDescriptionFutures.get(500, TimeUnit.MILLISECONDS)
          .get(INPUT_TOPIC)
        done = topicDescription.partitions().size() == TOTAL_TASK_NAMES
      } catch {
        case e: Exception =>
          System.err.println("Exception while validating test topics: " + e)
      }
    }
    if (!done) {
      fail("Unable to successfully create topics. Tried to validate %s times." format attempts)
    }
  }

  /** Shuts down brokers, deletes their log dirs, and closes the admin client and Zookeeper. */
  def afterCleanLogDirs {
    servers.foreach(_.shutdown())
    servers.foreach(server => CoreUtils.delete(server.config.logDirs))
    if (adminClient != null)
      CoreUtils.swallow(adminClient.close(), null)
    if (zookeeper != null)
      CoreUtils.swallow(zookeeper.shutdown(), null)
    Configuration.setConfiguration(null)
  }
}
/* This class implement the base utility to implement an integration test for StreamTask
* It implements helper functions to start/stop the job, send messages to a task, and read all messages from a topic
*/
class StreamTaskTestUtil {
  import StreamTaskTestUtil._

  /**
   * Start a job for TestTask, and do some basic sanity checks around startup
   * time, number of partitions, etc.
   *
   * @return the running job together with the single registered TestTask instance.
   */
  def startJob = {
    // Start task.
    val jobRunner = new JobRunner(new MapConfig(jobConfig.asJava))
    val job = jobRunner.run()
    createStreams
    // Wait up to 60s for the job to report Running before touching its tasks.
    assertEquals(ApplicationStatus.Running, job.waitForStatus(ApplicationStatus.Running, 60000))
    TestTask.awaitTaskRegistered
    val tasks = TestTask.tasks
    assertEquals("Should only have a single partition in this task", 1, tasks.size)
    val task = tasks.values.toList.head
    // Block until the task's init() hook has completed.
    task.initFinished.await(60, TimeUnit.SECONDS)
    assertEquals(0, task.initFinished.getCount)
    (job, task)
  }

  /**
   * Kill a job, and wait for an unsuccessful finish (since this throws an
   * interrupt, which is forwarded on to ThreadJob, and marked as a failure).
   */
  def stopJob(job: StreamJob) {
    // make sure we don't kill the job before it was started.
    // eventProcesses guarantees all the consumers have been initialized
    val tasks = TestTask.tasks
    val task = tasks.values.toList.head
    task.eventProcessed.await(60, TimeUnit.SECONDS)
    assertEquals(0, task.eventProcessed.getCount)
    // Shutdown task.
    job.kill
    val status = job.waitForFinish(60000)
    assertEquals(ApplicationStatus.UnsuccessfulFinish, status)
  }

  /**
   * Send a message to the input topic, and validate that it gets to the test task.
   */
  def send(task: TestTask, msg: String) {
    // .get() blocks until the broker acks the send, so the message is durable
    // before we start waiting on the task side.
    producer.send(new ProducerRecord(INPUT_TOPIC, msg.getBytes)).get()
    task.awaitMessage
    assertEquals(msg, task.received.last)
  }

  /**
   * Read all messages from a topic starting from last saved offset for group.
   * To read all from offset 0, specify a unique, new group string.
   */
  def readAll(topic: String, maxOffsetInclusive: Int, group: String): List[String] = {
    val props = new Properties
    props.put("bootstrap.servers", brokers)
    props.put("group.id", group)
    props.put("auto.offset.reset", "earliest")
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer")
    val consumerConnector = new KafkaConsumer(props)
    consumerConnector.subscribe(Set(topic).asJava)
    var stream = consumerConnector.poll(Duration.ofMillis(10000)).iterator()
    var message: ConsumerRecord[Nothing, Nothing] = null
    var messages = ArrayBuffer[String]()
    // Keep polling until the record at offset maxOffsetInclusive has been read;
    // null message values are preserved as nulls in the result.
    while (message == null || message.offset < maxOffsetInclusive) {
      if (stream.hasNext) {
        message = stream.next
        if (message.value() == null) {
          messages += null
        } else {
          // NOTE(review): assumes UTF-8 encoded payloads — TODO confirm against producer serde.
          messages += new String(message.value, "UTF-8")
        }
        System.out.println("StreamTaskTestUtil.readAll(): offset=%s, message=%s" format (message.offset, messages.last))
      } else {
        // Batch exhausted before reaching the target offset; poll for more records.
        stream = consumerConnector.poll(Duration.ofMillis(100)).iterator()
      }
    }
    messages.toList
  }

  /**
   * Creates the checkpoint and changelog streams for the job before it starts,
   * using a JobModel built from the test's job configuration.
   */
  def createStreams {
    val mapConfig = new MapConfig(jobConfig.asJava)
    // An empty container map suffices here: only the config and the partition
    // count are consulted when creating the streams.
    val containers = new util.HashMap[String, ContainerModel]()
    val jobModel = new JobModel(mapConfig, containers)
    jobModel.maxChangeLogStreamPartitions = 1
    val taskConfig = new TaskConfig(jobModel.getConfig)
    val checkpointManagerOption = JavaOptionals.toRichOptional(taskConfig.getCheckpointManager(new MetricsRegistryMap(),
      getClass.getClassLoader)).toOption
    checkpointManagerOption match {
      case Some(checkpointManager) =>
        checkpointManager.createResources()
        checkpointManager.stop()
      case _ => throw new ConfigException("No checkpoint manager factory configured")
    }
    ChangelogStreamManager.createChangelogStreams(jobModel.getConfig, jobModel.maxChangeLogStreamPartitions)
  }
}
object TestTask {
  // Registry of live test task instances, keyed by task name.
  // NOTE(review): HashMap with SynchronizedMap is deprecated in newer Scala
  // versions; consider a concurrent map if this code is ever modernized.
  val tasks = new HashMap[TaskName, TestTask] with SynchronizedMap[TaskName, TestTask]
  // Expected number of tasks for the current job; reset() syncs it with the fixture.
  var totalTasks = 1
  // Latch released once all expected tasks have registered; re-created per job start.
  @volatile var allTasksRegistered = new CountDownLatch(totalTasks)

  // Clears registration state so a fresh job start can be awaited.
  def reset(): Unit = {
    TestTask.totalTasks = StreamTaskTestUtil.TOTAL_TASK_NAMES
    TestTask.allTasksRegistered = new CountDownLatch(TestTask.totalTasks)
  }

  /**
   * Static method that tasks can use to register themselves with. Useful so
   * we don't have to sneak into the ThreadJob/SamzaContainer to get our test
   * tasks.
   */
  def register(taskName: TaskName, task: TestTask) {
    tasks += taskName -> task
    allTasksRegistered.countDown
  }

  // Blocks (up to 60s) until every expected task has registered, then re-arms
  // the latch for the next job start.
  def awaitTaskRegistered {
    allTasksRegistered.await(60, TimeUnit.SECONDS)
    assertEquals(0, allTasksRegistered.getCount)
    assertEquals(totalTasks, tasks.size)
    // Reset the registered latch, so we can use it again every time we start a new job.
    TestTask.allTasksRegistered = new CountDownLatch(TestTask.totalTasks)
  }
}
/**
* This class defines the base class for StreamTask used in integration test
* It implements some basic hooks for synchronization between the test class and the tasks
*/
abstract class TestTask extends StreamTask with InitableTask {
  // All messages this task has processed, in arrival order.
  var received = ArrayBuffer[String]()
  // Released once init() has completed.
  val initFinished = new CountDownLatch(1)
  // Released once the first message has been processed.
  val eventProcessed = new CountDownLatch(1)
  // Per-message latch; re-created by awaitMessage after each received message.
  @volatile var gotMessage = new CountDownLatch(1)

  // Registers this task with the TestTask companion, runs the subclass init
  // hook, then releases initFinished.
  def init(context: Context) {
    TestTask.register(context.getTaskContext.getTaskModel.getTaskName, this)
    testInit(context)
    initFinished.countDown()
  }

  // Records the message, delegates to the subclass hook, then signals the sender.
  def process(envelope: IncomingMessageEnvelope, collector: MessageCollector, coordinator: TaskCoordinator) {
    val msg = envelope.getMessage.asInstanceOf[String]
    eventProcessed.countDown()
    System.err.println("TestTask.process(): %s" format msg)
    received += msg
    testProcess(envelope, collector, coordinator)
    // Notify sender that we got a message.
    gotMessage.countDown
  }

  // Blocks (up to 60s) until process() has handled a message, then re-arms the
  // latch for the next message.
  def awaitMessage {
    assertTrue("Timed out of waiting for message rather than received one.", gotMessage.await(60, TimeUnit.SECONDS))
    assertEquals(0, gotMessage.getCount)
    gotMessage = new CountDownLatch(1)
  }

  // Subclass hook invoked from init().
  def testInit(context: Context)
  // Subclass hook invoked from process() after the message has been recorded.
  def testProcess(envelope: IncomingMessageEnvelope, collector: MessageCollector, coordinator: TaskCoordinator)
}
| Swrrt/Samza | samza-test/src/test/scala/org/apache/samza/test/integration/StreamTaskTestUtil.scala | Scala | apache-2.0 | 13,695 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.tools.data
import com.beust.jcommander.{Parameter, Parameters}
import org.apache.hadoop.util.ToolRunner
import org.geotools.filter.text.ecql.ECQL
import org.locationtech.geomesa.accumulo.data.AccumuloDataStore
import org.locationtech.geomesa.accumulo.index.AccumuloFeatureIndex
import org.locationtech.geomesa.accumulo.tools.data.AddIndexCommand.AddIndexParameters
import org.locationtech.geomesa.accumulo.tools.{AccumuloDataStoreCommand, AccumuloDataStoreParams}
import org.locationtech.geomesa.jobs.accumulo.AccumuloJobUtils
import org.locationtech.geomesa.jobs.accumulo.index.{WriteIndexArgs, WriteIndexJob}
import org.locationtech.geomesa.tools._
import org.locationtech.geomesa.tools.utils.Prompt
import org.locationtech.geomesa.utils.index.IndexMode
import scala.util.control.NonFatal
/**
*
* 1. Add the new index in write-only mode
* 2. Pause and indicate that the user should bounce live ingestion to pick up the changes -
* after this it will be writing to both the new and old index
* 3. Migrate data through a m/r job, with an optional CQL filter for what gets migrated
* 4. Turn old index off, put new index in read/write mode
* 5. Pause and indicate that the user should bounce live ingestion again
*/
class AddIndexCommand extends AccumuloDataStoreCommand {

  override val name = "add-index"
  override val params = new AddIndexParameters

  override def execute(): Unit = {
    // Instantiate the executor only when the command actually runs, so unused
    // commands don't pull their classpath dependencies into every invocation.
    val executor = new AddIndexCommandExecutor(params)
    executor.run()
  }
}
object AddIndexCommand {

  /** JCommander parameter definitions for the add-index command. */
  @Parameters(commandDescription = "Add or update indices for an existing GeoMesa feature type")
  class AddIndexParameters extends AccumuloDataStoreParams with RequiredTypeNameParam with OptionalCqlFilterParam {
    // One or more index names; may be comma-separated or passed via repeated flags.
    @Parameter(names = Array("--index"), description = "Name of index(es) to add - comma-separate or use multiple flags", required = true)
    var indexNames: java.util.List[String] = null

    // When set, skip the map/reduce back-fill of existing data into the new index.
    @Parameter(names = Array("--no-back-fill"), description = "Do not copy any existing data into the new index")
    var noBackFill: java.lang.Boolean = null
  }
}
class AddIndexCommandExecutor(override val params: AddIndexParameters) extends Runnable with AccumuloDataStoreCommand {

  import org.locationtech.geomesa.index.metadata.GeoMesaMetadata.ATTRIBUTES_KEY
  import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType

  // This executor is driven through run(), not through the Command interface.
  override val name = ""
  override def execute(): Unit = {}
  override def run(): Unit = withDataStore(addIndex)

  /**
   * Interactive, multi-step index migration for an existing schema:
   * validate the requested indices, add them in write-only mode, back-fill
   * existing data via a map/reduce job (unless --no-back-fill), then flip the
   * new indices to read-write and disable the ones they replace. Rolls back
   * the metadata from a backup entry if the back-fill fails and the user asks.
   */
  def addIndex(ds: AccumuloDataStore): Unit = {
    import scala.collection.JavaConversions._

    val sft = ds.getSchema(params.featureName)
    require(sft != null, s"Schema '${params.featureName}' does not exist in the specified data store")

    // Resolve each requested name against the current index implementations.
    val indices = params.indexNames.map { name =>
      AccumuloFeatureIndex.CurrentIndices.find(_.name == name).getOrElse {
        throw new IllegalArgumentException(s"Invalid index '$name'. Valid values are " +
            s"${AccumuloFeatureIndex.CurrentIndices.map(_.name).mkString(", ")}")
      }
    }
    val existing = AccumuloFeatureIndex.indices(sft)
    require(indices.forall(i => !existing.contains(i)),
      s"Requested indices already exist: ${existing.map(_.identifier).mkString("[", "][", "]")}")
    require(indices.forall(_.supports(sft)), "Requested indices are not compatible with the simple feature type")

    // Pairs of (new index, older version it replaces) that will be disabled at the end.
    val toDisable = indices.flatMap(i => AccumuloFeatureIndex.replaces(i, existing).map(r => (i, r)))

    if (toDisable.nonEmpty) {
      if (!Prompt.confirm("The following index versions will be replaced: " +
          s"${toDisable.map { case (n, o) => s"[${o.identifier}] by [${n.identifier}]" }.mkString(", ")} " +
          "Continue? (y/n): ")) {
        return
      }
    }

    if (!Prompt.confirm("If you are ingesting streaming data, you will be required to restart " +
        "the streaming ingestion when prompted. Continue? (y/n): ")) {
      return
    }

    // write a backup meta-data entry in case the process fails part-way
    val backupKey = s"$ATTRIBUTES_KEY.bak"
    ds.metadata.insert(sft.getTypeName, backupKey, ds.metadata.readRequired(sft.getTypeName, ATTRIBUTES_KEY))

    // Index entries that survive the migration (everything not being replaced).
    val toKeep = sft.getIndices.filter { case (n, v, _) =>
      !toDisable.map(_._2).contains(AccumuloFeatureIndex.lookup(n, v))
    }

    if (params.noBackFill != null && params.noBackFill) {
      // Fast path: no data migration, go straight to read-write.
      Command.user.info("Adding new indices and disabling old ones")
      sft.setIndices(indices.map(i => (i.name, i.version, IndexMode.ReadWrite)) ++ toKeep)
      ds.updateSchema(sft.getTypeName, sft)
    } else {
      Command.user.info("Adding new indices in write-only mode")
      // add new index in write-only mode
      sft.setIndices(indices.map(i => (i.name, i.version, IndexMode.Write)) ++ sft.getIndices)
      ds.updateSchema(sft.getTypeName, sft)

      // wait for the user to bounce ingestion
      Prompt.acknowledge("Indices have been added in write-only mode. To pick up the changes, " +
          "please bounce any streaming ingestion. Once ingestion has resumed, press 'enter' to continue.")

      // run migration job
      Command.user.info("Running index back-fill job")
      val args = new WriteIndexArgs(Array.empty)
      args.inZookeepers = params.zookeepers
      args.inInstanceId = params.instance
      args.inUser       = params.user
      args.inPassword   = params.password
      args.inTableName  = params.catalog
      args.inFeature    = params.featureName
      args.inCql        = Option(params.cqlFilter).map(ECQL.toCQL).orNull
      args.indexNames.addAll(indices.map(_.identifier))

      val libjars = Some(AccumuloJobUtils.defaultLibJars, AccumuloJobUtils.defaultSearchPath)
      // ToolRunner returns 0 on success; any exception is mapped to -1.
      val result = try { ToolRunner.run(new WriteIndexJob(libjars), args.unparse()) } catch {
        case NonFatal(e) => Command.user.error("Error running back-fill job:", e); -1
      }

      // Flips the new indices to read-write and drops the replaced index entries.
      def setReadWrite(): Unit = {
        Command.user.info("Setting index to read-write mode and disabling old indices")
        // set new indices to read-write and turn off disabled indices
        sft.setIndices(indices.map(i => (i.name, i.version, IndexMode.ReadWrite)) ++ toKeep)
        Command.user.info(sft.getIndices.toString)
        ds.updateSchema(sft.getTypeName, sft)
      }

      if (result == 0) {
        setReadWrite()
      } else {
        // Back-fill failed: let the operator either proceed without the data
        // (option 1) or restore the pre-migration metadata backup (option 2).
        var response: String = null
        do {
          response = Prompt.read("Index back-fill job failed. You may:\\n" +
              "  1. Switch the indices to read-write mode without existing data (you may manually back-fill later)\\n" +
              "  2. Roll-back index creation\\n" +
              "Select an option: ")
        } while (response != "1" && response != "2")
        response match {
          case "1" => setReadWrite()
          case "2" =>
            val bak = ds.metadata.readRequired(sft.getTypeName, backupKey)
            ds.metadata.insert(sft.getTypeName, ATTRIBUTES_KEY, bak)
        }
      }
    }

    // final bounce
    Command.user.info("Operation complete. Please bounce any streaming ingestion to pick up the changes.")
  }
}
| jahhulbert-ccri/geomesa | geomesa-accumulo/geomesa-accumulo-tools/src/main/scala/org/locationtech/geomesa/accumulo/tools/data/AddIndexCommand.scala | Scala | apache-2.0 | 7,694 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.model
import cc.factorie.util.SingletonIndexedSeq
import cc.factorie.variable._
import scala.collection.immutable.ListSet
import scala.collection.mutable.{ArrayBuffer, HashMap, LinkedHashSet, Set}
import scala.reflect.ClassTag
/** In FACTORIE a Model is a source of factors.
In particular, it can return the collection of factors that touch a collection of variables.
Variables do not know directly about the factors that touch them.
This allows us to consider multiple different Models applied to the same data.
@author Andrew McCallum
*/
trait Model {
  // TODO Consider adding "type FactorType <: Factor" here so that Template2.factors can return the right type. -akm

  /** Return all Factors in this Model that touch any of the given "variables". The result will not have any duplicate Factors. */
  def factors(variables:Iterable[Var]): Iterable[Factor]
  /** Return all Factors in this Model that touch the given "variable". The result will not have any duplicate Factors. */
  def factors(variable:Var): Iterable[Factor] = factors(new SingletonIndexedSeq(variable))
  /** Return all Factors in this Model that are affected by the given Diff. The result will not have any duplicate Factors.
      By default returns just the factors that neighbor Diff.variable, but this method may be overridden for special handling of the Diff */
  def factors(d:Diff): Iterable[Factor] = if (d.variable eq null) Nil else factors(d.variable)
  /** Return all Factors in this Model that are affected by the given DiffList. The result will not have any duplicate Factors.
      By default returns just the factors that neighbor the DiffList.variables, but this method may be overridden for special handling of the DiffList */
  def factors(dl:DiffList): Iterable[Factor] = if (dl.size == 0) Nil else factors(dl.foldLeft(List[Var]())((vs,d) => if (d.variable ne null) d.variable :: vs else vs))

  // TODO Make these addFactors protected? Perhaps not because they could be reasonably useful to outside users. -akm
  /** Append to "result" all Factors in this Model that touch any of the given "variables". This method must not append duplicates. */
  def addFactors(variables:Iterable[Var], result:Set[Factor]): Unit = result ++= factors(variables)
  /** Append to "result" all Factors in this Model that touch the given "variable". This method must not append duplicates. */
  def addFactors(variable:Var, result:Set[Factor]): Unit = addFactors(new SingletonIndexedSeq(variable), result)
  /** Append to "result" all Factors in this Model that are affected by the given Diff. This method must not append duplicates. */
  def addFactors(d:Diff, result:Set[Factor]): Unit = if (d.variable ne null) addFactors(d.variable, result)
  /** Append to "result" all Factors in this Model that are affected by the given DiffList. This method must not append duplicates. */
  def addFactors(dl:DiffList, result:Set[Factor]): Unit = if (dl.size > 0) addFactors(dl.foldLeft(List[Var]())((vs,d) => if (d.variable ne null) d.variable :: vs else vs), result)
  /** The "factors" methods need a new collection to return; this method is used by them to construct this collection. */
  def newFactorsCollection: Set[Factor] = new collection.mutable.LinkedHashSet[Factor]

  /** Filter "factors" down to those whose runtime class is assignable to "fclass". */
  def filterByFactorClass[F<:Factor](factors:Iterable[Factor], fclass:Class[F]): Iterable[F] = factors.filter(f => fclass.isAssignableFrom(f.getClass)).asInstanceOf[Iterable[F]]
  /** Factors neighboring "variable" whose class is assignable to "fclass". */
  def factorsOfClass[F<:Factor](variable:Var, fclass:Class[F]): Iterable[F] = filterByFactorClass(factors(variable), fclass)
  /** Factors neighboring any of "variables" whose class is assignable to "fclass". */
  def factorsOfClass[F<:Factor](variables:Iterable[Var], fclass:Class[F]): Iterable[F] = filterByFactorClass(factors(variables), fclass)
  def factorsOfClass[F<:Factor](variable:Var)(implicit fm:ClassTag[F]): Iterable[F] = factorsOfClass(variable, fm.runtimeClass.asInstanceOf[Class[F]])
  def factorsOfClass[F<:Factor](variables:Iterable[Var])(implicit fm:ClassTag[F]): Iterable[F] = factorsOfClass(variables, fm.runtimeClass.asInstanceOf[Class[F]])
  def factorsOfClass[F<:Factor](d:DiffList, fclass:Class[F]): Iterable[F] = filterByFactorClass(factors(d), fclass)
  def factorsOfClass[F<:Factor](d:DiffList)(implicit fm:ClassTag[F]): Iterable[F] = factorsOfClass[F](d, fm.runtimeClass.asInstanceOf[Class[F]])

  // TODO: why can't this just take the ClassTag for F as an implicit and avoid this explicit passing of the "Class" object? -luke
  // TODO maybe these methods should be moved to a model companion object since they do not require anything from the model -luke, akm
  /** Filter "factors" down to those created by a Family whose class is assignable to "fclass". */
  def filterByFamilyClass[F<:Family](factors:Iterable[Factor], fclass:Class[F]): Iterable[F#Factor] =
    factors.filter(f => f match {
      case f:Family#Factor => fclass.isAssignableFrom(f.family.getClass)
      case _ => false
    }).asInstanceOf[Iterable[F#Factor]]
  /** The complement of filterByFamilyClass: factors NOT created by a Family of class "fclass". */
  def filterByNotFamilyClass[F<:Family](factors:Iterable[Factor], fclass:Class[F]): Iterable[Factor] =
    factors.filterNot({
      case f:Family#Factor => fclass.isAssignableFrom(f.family.getClass)
      case _ => false
    })
  def factorsOfFamilyClass[F<:Family](variable:Var, fclass:Class[F]): Iterable[F#Factor] = filterByFamilyClass[F](factors(variable), fclass)
  def factorsOfFamilyClass[F<:Family](variables:Iterable[Var], fclass:Class[F]): Iterable[F#Factor] = filterByFamilyClass[F](factors(variables), fclass)
  def factorsOfFamilyClass[F<:Family](variable:Var)(implicit fm:ClassTag[F]): Iterable[F#Factor] = factorsOfFamilyClass[F](variable, fm.runtimeClass.asInstanceOf[Class[F]])
  def factorsOfFamilyClass[F<:Family](variables:Iterable[Var])(implicit fm:ClassTag[F]): Iterable[F#Factor] = factorsOfFamilyClass[F](variables, fm.runtimeClass.asInstanceOf[Class[F]])
  def factorsOfFamilyClass[F<:Family](d:DiffList, fclass:Class[F]): Iterable[F#Factor] = filterByFamilyClass(factors(d), fclass)
  def factorsOfFamilyClass[F<:Family](d:DiffList)(implicit fm:ClassTag[F]): Iterable[F#Factor] = filterByFamilyClass[F](factors(d), fm.runtimeClass.asInstanceOf[Class[F]])
  /** Filter "factors" down to those created by exactly the given "family" instance (equality, not class). */
  def filterByFamily[F<:Family](factors:Iterable[Factor], family:F): Iterable[F#Factor] =
    factors.filter(f => f match {
      case f:Family#Factor => f.family.equals(family)
      case _ => false
    }).asInstanceOf[Iterable[F#Factor]]
  /** Filter "factors" down to those created by any of the given "families" instances. */
  def filterByFamilies[F<:Family](factors:Iterable[Factor], families:Seq[F]): Iterable[F#Factor] =
    factors.filter(f => f match {
      case f:Family#Factor => families.contains(f.family)
      case _ => false
    }).asInstanceOf[Iterable[F#Factor]]
  def factorsOfFamily[F<:Family](variable:Var, family:F): Iterable[F#Factor] = filterByFamily(factors(variable), family)
  def factorsOfFamily[F<:Family](variables:Iterable[Var], family:F): Iterable[F#Factor] = filterByFamily(factors(variables), family)
  def factorsOfFamily[F<:Family](d:DiffList, family:F): Iterable[F#Factor] = filterByFamily(factors(d), family)
  def factorsOfFamilies[F<:Family](variable:Var, families:Seq[F]): Iterable[F#Factor] = filterByFamilies(factors(variable), families)
  def factorsOfFamilies[F<:Family](variables:Iterable[Var], families:Seq[F]): Iterable[F#Factor] = filterByFamilies(factors(variables), families)
  def factorsOfFamilies[F<:Family](d:DiffList, families:Seq[F]): Iterable[F#Factor] = filterByFamilies(factors(d), families)

  // Getting sums of scores from all neighboring factors
  /** Sum of currentScore over all factors touching the given context. */
  def currentScore(variable:Var): Double = { var sum = 0.0; for (f <- factors(variable)) sum += f.currentScore; sum }
  def currentScore(vars:Iterable[Var]): Double = { var sum = 0.0; for (f <- factors(vars)) sum += f.currentScore; sum }
  def currentScore(d:Diff): Double = { var sum = 0.0; for (f <- factors(d)) sum += f.currentScore; sum }
  def currentScore(dl:DiffList): Double = { var sum = 0.0; for (f <- factors(dl)) sum += f.currentScore; sum }
  // ...using not current values, but the values in an Assignment
  /** Sum of assignmentScore (scored under "assignment" rather than current values) over all touching factors. */
  def assignmentScore(variable:Var, assignment:Assignment): Double = { var sum = 0.0; for (f <- factors(variable)) sum += f.assignmentScore(assignment); sum }
  def assignmentScore(vars:Iterable[Var], assignment:Assignment): Double = { var sum = 0.0; for (f <- factors(vars)) sum += f.assignmentScore(assignment); sum }
  def assignmentScore(d:Diff, assignment:Assignment): Double = { var sum = 0.0; for (f <- factors(d)) sum += f.assignmentScore(assignment); sum }
  def assignmentScore(dl:DiffList, assignment:Assignment): Double = { var sum = 0.0; for (f <- factors(dl)) sum += f.assignmentScore(assignment); sum }

  // Return a fully unrolled model for a given context
  /** Materialize the factors for the given context into a standalone ItemizedModel. */
  def itemizedModel(variable:Var): ItemizedModel = new ItemizedModel(factors(variable))
  def itemizedModel(variables:Iterable[Var]): ItemizedModel = new ItemizedModel(factors(variables))
  def itemizedModel(d:Diff): ItemizedModel = new ItemizedModel(factors(d))
  def itemizedModel(dl:DiffList): ItemizedModel = new ItemizedModel(factors(dl))
}
/** A Model that explicitly stores all factors, with an efficient map from variables to their neighboring factors.
A DirectedModel is a subclass of this.
@author Andrew McCallum
*/
class ItemizedModel(initialFactors:Factor*) extends Model {
  def this(initialFactors:Iterable[Factor]) = { this(initialFactors.toSeq:_*) }
  // Map from each variable to the set of factors that touch it.
  // Small neighbor sets are stored as immutable ListSet; once a variable has
  // more than 3 factors the set is promoted to a mutable LinkedHashSet (see +=).
  private val _factors = new HashMap[Var,scala.collection.Set[Factor]] {
    override def default(v:Var) = ListSet.empty[Factor]
  }
  this ++= initialFactors

  override def addFactors(variable:Var, result:Set[Factor]): Unit = result ++= _factors(variable) // This is new primitive
  override def addFactors(variables:Iterable[Var], result:Set[Factor]): Unit = variables.foreach(addFactors(_, result))
  override def factors(variable:Var): Iterable[Factor] = _factors(variable)
  def factors(variables:Iterable[Var]): Iterable[Factor] = { val set = newFactorsCollection; variables.foreach(v => addFactors(v, set)); set }
  /** All factors currently stored in this model, with duplicates removed. */
  def factors: Iterable[Factor] = _factors.values.flatten.toSeq.distinct

  // Register "f" under each of its neighboring variables.
  // NOTE(review): the matches on ListSet[Factor]/LinkedHashSet[Factor] are
  // unchecked due to type erasure; they distinguish the two representations only.
  def +=(f:Factor): Unit = f.variables.foreach(v => _factors(v) match {
    case h:ListSet[Factor] =>
      // Promote to a mutable set once the neighbor set grows past 4 elements;
      // below that, the immutable ListSet copy is cheap.
      if (h.size > 3) _factors(v) = { val nh = new LinkedHashSet[Factor] ++= h; nh += f; nh }
      else _factors(v) = h + f
    case h:LinkedHashSet[Factor] => h += f
  })
  // Remove "f" from each of its neighboring variables' sets (a promoted
  // LinkedHashSet is never demoted back to ListSet).
  def -=(f:Factor): Unit = f.variables.foreach(v => _factors(v) match {
    case h:ListSet[Factor] => _factors(v) = h - f
    case h:LinkedHashSet[Factor] => h -= f
  })
  def ++=(fs:Iterable[Factor]): Unit = fs.foreach(f => this.+=(f))
  def --=(fs:Iterable[Factor]): Unit = fs.foreach(f => this.-=(f))
}
/** A Model that concatenates the factors of multiple contained models.
@author Andrew McCallum
*/
class CombinedModel(theSubModels:Model*) extends Model {
  /** The models whose factors are concatenated; may be extended after construction. */
  val subModels = new ArrayBuffer[Model] ++= theSubModels
  def +=(model:Model): Unit = subModels.append(model)
  def ++=(models:Iterable[Model]): Unit = models.foreach(subModels.append(_))
  /** Collect, without duplicates, the factors from every sub-model touching any of "variables". */
  def factors(variables:Iterable[Var]): Iterable[Factor] = {
    val collected = newFactorsCollection
    for (model <- subModels) model.addFactors(variables, collected)
    collected
  }
  override def addFactors(variables:Iterable[Var], result:Set[Factor]): Unit =
    for (model <- subModels) model.addFactors(variables, result)
}
/** A Model whose Factors come from Templates.
If you want your TemplateModel to have learnable parameters, then also extend Parameters.
@author Andrew McCallum */
class TemplateModel(theTemplates:Template*) extends Model {
  /** The templates that generate this model's factors; may grow after construction. */
  val templates = new ArrayBuffer[Template] ++= theTemplates
  def +=[T<:Template](template:T): T = { templates.append(template); template }
  def ++=[T<:Template](templates:Iterable[T]): Iterable[T] = { templates.foreach(this.templates.append(_)); templates }
  /** Alias for += that reads better inside TemplateModel subclass bodies. */
  def addTemplate[T<:Template](template:T): T = { templates.append(template); template }
  /** Alias for ++= that reads better inside TemplateModel subclass bodies. */
  def addTemplates[T<:Template](templates:T*): Iterable[T] = { templates.foreach(this.templates.append(_)); templates }
  // Single-variable addFactors is the primitive here: each template contributes its factors.
  override def addFactors(variable:Var, result:Set[Factor]): Unit =
    for (template <- templates) template.addFactors(variable, result)
  def factors(variables:Iterable[Var]): Iterable[Factor] = {
    val collected = newFactorsCollection
    addFactors(variables, collected)
    collected
  }
  override def factors(variable:Var): Iterable[Factor] = {
    val collected = newFactorsCollection
    addFactors(variable, collected)
    collected
  }
  override def addFactors(variables:Iterable[Var], result:Set[Factor]): Unit =
    for (v <- variables) addFactors(v, result)
  def families: Seq[Template] = templates
  def familiesOfClass[F<:Template](fclass:Class[F]): Iterable[F] =
    families.filter(t => fclass.isAssignableFrom(t.getClass)).asInstanceOf[Iterable[F]]
  def limitDiscreteValuesAsIn(vars:Iterable[DiscreteVar]): Unit =
    for (template <- templates) template.limitDiscreteValuesAsIn(vars)
}
| strubell/factorie | src/main/scala/cc/factorie/model/Model.scala | Scala | apache-2.0 | 13,728 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.