code: string (lengths 5 - 1M)
repo_name: string (lengths 5 - 109)
path: string (lengths 6 - 208)
language: string (1 class)
license: string (15 classes)
size: int64 (5 - 1M)
package usbinstall.os

import java.nio.file.Paths
import scala.util.matching.Regex
import suiryc.scala.io.PathFinder
import suiryc.scala.io.NameFilter._
import suiryc.scala.io.PathFinder._
import suiryc.scala.io.RichFile._
import suiryc.scala.sys.Command
import suiryc.scala.util.matching.RegexReplacer
import suiryc.scala.util.RichEither._
import usbinstall.InstallUI


class UbuntuInstall(
  override val settings: OSSettings,
  override val ui: InstallUI,
  override val checkCancelled: () => Unit
) extends OSInstall(settings, ui, checkCancelled)
{

  override def requirements(): Set[String] = {
    val extra = if (settings.persistent.get) {
      Set("dd")
      //Set("cpio", "lzma")
    } else {
      Set.empty
    }
    super.requirements() ++ extra
  }

  override def setup(partMount: PartitionMount): Unit = {
    val targetRoot = partMount.to.toAbsolutePath
    val persistent = settings.persistent.get

    // Without 'casper', we need to patch 'initrd'. See comments below.
    if (!targetRoot.resolve("casper").isDirectory) {
      throw new Exception("Ubuntu LiveCD without 'casper' directory are not handled")
    }

    // Not always necessary, but without 'fallback.efi' OS may not boot
    val grubx64EFI = PathFinder(targetRoot) / "(?i)EFI".r / "(?i)BOOT".r / "(?i)grubx64.efi".r
    grubx64EFI.get().headOption.map(_.toPath).foreach { grubx64EFI =>
      val fallbackEFI = grubx64EFI.getParent.resolve("fallback.efi")
      if (!fallbackEFI.exists) ui.action("Prepare EFI") {
        duplicate(grubx64EFI, targetRoot, fallbackEFI, None)
      }
    }

    renameSyslinux(targetRoot)

    ui.action("Prepare syslinux") {
      val uuid = settings.partition.optPart.get.uuid.fold(throw _, v => v)

      // Update 'casper'
      targetRoot.resolve(Paths.get(".disk", "casper-uuid-override")).toFile.write(s"$uuid\n")

      val confs = PathFinder(targetRoot) / (("boot" / "grub") ++ "syslinux") * (".*\\.cfg".r | ".*\\.conf".r)
      val regexUUID = new Regex("""(?i)([ \t]+(?:linux|append)[ \t]+[^\r\n]*boot=casper)""", "pre")
      val regexUUIDReplacer = RegexReplacer(regexUUID,
        (m: Regex.Match) => s"${m.group("pre")} uuid=$uuid"
      )
      val rrs = if (persistent) {
        val regexPers = new Regex("""(?i)([ \t]+(?:linux|append)[ \t]+[^\r\n]*(?:boot=casper|initrd=[^\s]*))""", "pre")
        val regexPersReplacer = RegexReplacer(regexPers,
          (m: Regex.Match) => s"${m.group("pre")} persistent"
        )
        regexUUIDReplacer :: regexPersReplacer :: Nil
      } else {
        regexUUIDReplacer :: Nil
      }

      for (conf <- confs.get()) {
        regexReplace(targetRoot, conf, rrs:_*)
      }
    }

    if (persistent) {
      // Generate the persistence file
      ui.action("Generate persistency file") {
        val persistenceFile = targetRoot.resolve("casper-rw")
        // Note: leave 1 MiB for bootloader etc
        val sizeMB = targetRoot.toFile.getUsableSpace / (1024L * 1024L) - 1
        ui.activity(s"There is ${ if (sizeMB < 1024) "only " else "" }${sizeMB}MiB available for persistency")
        Command.execute(Seq("dd", "bs=1M", s"count=$sizeMB", "if=/dev/zero", s"of=$persistenceFile")).toEither("Failed to create persistency file") &&
          Command.execute(Seq("mkfs.ext4", "-F", persistenceFile.toString)).toEither("Failed to format persistency file")
      }
    }

    ()
  }

}

// Without 'casper' - which indicates in a file the partition it belongs to by
// its UUID - the LiveCD needs its 'initrd' to be patched to be able to boot on
// a given partition. In this case, adding a file with our own UUID and patching
// syslinux config becomes unnecessary.
// Original (bash) code is kept here for reference:
//
// local parttype=$(blkid -s TYPE -o value "${partpath}")
// local partuuid=$(blkid -o value -s UUID "${partpath}")
//
// local casper=0
// if [ -d "${dirPartMount}"/casper ]
// then
//     casper=1
// fi
//
// if [ ${casper} -eq 0 ]
// then
//     updateStatus dialogStatus " * Update initrd"
//
//     local initrdPath=
//     for initrdPath in "${dirPartMount}"/casper/initrd.lz "${dirPartMount}"/casper/initrd.gz "${dirPartMount}"/install/initrd.gz
//     do
//         if [ -e "${initrdPath}" ]
//         then
//             break
//         fi
//         initrdPath=
//     done
//
//     if [ -z "${initrdPath}" ]
//     then
//         exitProgram "Could not find initrd" 2
//     fi
//
//     local initrdCompress=( 'gzip' '-9' '--stdout' )
//     local initrdDecompress=( 'gzip' '--decompress' '--stdout' )
//     if [ "${initrdPath##*.}" == "lz" ]
//     then
//         initrdCompress=( 'lzma' '-7' '--stdout' )
//         initrdDecompress=( 'lzma' '--decompress' '--stdout' '--suffix=.lz' )
//     fi
//
//     (
//         if [ -e "${dirTmp}"/initrd ]
//         then
//             rm -rf "${dirTmp}"/initrd
//         fi
//
//         mkdir "${dirTmp}"/initrd \
//             && cd "${dirTmp}"/initrd \
//             && "${initrdDecompress[@]}" "${initrdPath}" | cpio --extract --make-directories --no-absolute-filenames
//     )
//     checkReturnCode "Failed to extract initrd content" 2
//
//     # Note: BusyBox's mount does not handle the '-U UUID' option, but blkid or
//     # findfs shall be present
//     echo "#"'!'"/bin/sh
//
//partpath=\$(blkid -U \"${partuuid}\" 2> /dev/null)
//if [ -z \"\${partpath}\" ]
//then
//    partpath=\$(findfs \"UUID=${partuuid}\" 2> /dev/null)
//fi
//if [ -n \"\${partpath}\" ]
//then
//    if [ ! -e /cdrom ]
//    then
//        mkdir /cdrom
//    fi
//    mount -t ${parttype} -o ro \"\${partpath}\" /cdrom
//fi
//
//exit 0
//" > "${dirTmp}"/initrd/init.extra
//     chmod +x "${dirTmp}"/initrd/init.extra
//
//     perl -pi -e 's/^(exec .*init.*)$/\/init.extra\n\1/' "${dirTmp}"/initrd/init
//     grep -cEi "^/init\.extra$" "${dirTmp}"/initrd/init > /dev/null
//     checkReturnCode "Failed to update initrd" 2
//
//     (
//         cd "${dirTmp}"/initrd \
//             && find ./ | cpio --create --format=newc | "${initrdCompress[@]}" > "${initrdPath}"
//     )
//     checkReturnCode "Failed to update initrd" 2
//
//     rm -rf "${dirTmp}"/initrd
// fi
suiryc/usbinstall
src/main/scala/usbinstall/os/UbuntuInstall.scala
Scala
gpl-3.0
6,318
package artisanal.pickle.maker

import models._
import parser._
import org.specs2._
import mutable._
import specification._
import scala.reflect.internal.pickling.ByteCodecs
import scala.tools.scalap.scalax.rules.scalasig._
import com.novus.salat.annotations.util._
import scala.reflect.ScalaSignature

class ListByteSpec extends mutable.Specification {

  "a ScalaSig for case class MyRecord_ListByte(q: List[Byte])" should {
    "have the correct string" in {
      val mySig = new artisanal.pickle.maker.ScalaSig(List("case class"), List("models", "MyRecord_ListByte"), List(("q", "List[Byte]")))
      val correctParsedSig = SigParserHelper.parseByteCodeFromAnnotation(classOf[MyRecord_ListByte]).map(ScalaSigAttributeParsers.parse(_)).get
      val myParsedSig = SigParserHelper.parseByteCodeFromMySig(mySig).map(ScalaSigAttributeParsers.parse(_)).get

      correctParsedSig.toString === myParsedSig.toString
    }
  }
}
julianpeeters/artisanal-pickle-maker
src/test/scala/singleValueMember/ListSpecs/List[Byte]Spec.scala
Scala
apache-2.0
930
package errors

import eveapi.errors.EveApiError

sealed trait Err extends Throwable

case class ApiError(error: EveApiError) extends Err {
  override def getMessage = error.getMessage
}
reactormonk/fleet-buddy
server/src/main/scala/utils/Errors.scala
Scala
agpl-3.0
187
package system.cell.sensormanagement.sensors

import org.scalatest.{FlatSpec, Matchers}
import system.ontologies.messages.{DoubleThresholdInfo, SensorInfoFromConfig, SingleThresholdInfo}
import system.ontologies.sensor.SensorCategories

/**
  * Created by Matteo Gabellini on 01/08/2017.
  */
class SensorsFactoryTest extends FlatSpec with Matchers {

    //Temperature sensor
    val tSensorMinValue = 0
    val tSensorMaxValue = 100
    val tSensorLowThreshold = 20
    val tSensorHighThreshold = 80
    val tSensConfig: SensorInfoFromConfig = new SensorInfoFromConfig(
        SensorCategories.Temperature.id,
        tSensorMinValue,
        tSensorMaxValue,
        new DoubleThresholdInfo(tSensorLowThreshold, tSensorHighThreshold))
    val tSensor = SensorsFactory.createASensorFromConfig(tSensConfig)

    "The sensor factory" should "create a temperature sensor from a temperature sensor config info" in {
        tSensor shouldBe a[TemperatureSensor]
    }

    "The sensor factory" should "create a temperature sensor with the same max value specified in the config" in {
        tSensor.asInstanceOf[TemperatureSensor].maxValue should be(tSensorMaxValue)
    }

    "The sensor factory" should "create a temperature sensor with the same min value specified in the config" in {
        tSensor.asInstanceOf[TemperatureSensor].minValue should be(tSensorMinValue)
    }

    "The sensor factory" should "create a temperature sensor with the same high threshold value specified in the config" in {
        tSensor.asInstanceOf[TemperatureSensor]
            .threshold
            .asInstanceOf[TemperatureThreshold]
            .high should be(tSensorHighThreshold)
    }

    "The sensor factory" should "create a temperature sensor with the same low threshold value specified in the config" in {
        tSensor.asInstanceOf[TemperatureSensor]
            .threshold
            .asInstanceOf[TemperatureThreshold]
            .low should be(tSensorLowThreshold)
    }

    "The sensor factory" should "create a Observable Version of the temperature sensor" in {
        SensorsFactory.createTheObservableVersion(tSensor) shouldBe a[ObservableTemperatureSensor]
    }

    //Humidity sensor
    val hSensorMinValue = 0
    val hSensorMaxValue = 100
    val hSensorLowThreshold = 20
    val hSensorHighThreshold = 80
    val hSensConfig: SensorInfoFromConfig = new SensorInfoFromConfig(
        SensorCategories.Humidity.id,
        hSensorMinValue,
        hSensorMaxValue,
        new DoubleThresholdInfo(hSensorLowThreshold, hSensorHighThreshold))
    val hSensor = SensorsFactory.createASensorFromConfig(hSensConfig)

    "The sensor factory" should "create a humidity sensor from a humidity sensor config info" in {
        hSensor shouldBe a[HumiditySensor]
    }

    "The sensor factory" should "create a humidity sensor with the same max value specified in the config" in {
        hSensor.asInstanceOf[HumiditySensor].maxValue should be(hSensorMaxValue)
    }

    "The sensor factory" should "create a humidity sensor with the same min value specified in the config" in {
        hSensor.asInstanceOf[HumiditySensor].minValue should be(hSensorMinValue)
    }

    "The sensor factory" should "create a humidity sensor with the same high threshold value specified in the config" in {
        hSensor.asInstanceOf[HumiditySensor]
            .threshold
            .asInstanceOf[HumidityThreshold]
            .high should be(hSensorHighThreshold)
    }

    "The sensor factory" should "create a humidity sensor with the same low threshold value specified in the config" in {
        hSensor.asInstanceOf[HumiditySensor]
            .threshold
            .asInstanceOf[HumidityThreshold]
            .low should be(hSensorLowThreshold)
    }

    "The sensor factory" should "create a Observable Version of the humidity sensor" in {
        SensorsFactory.createTheObservableVersion(hSensor) shouldBe a[ObservableHumiditySensor]
    }

    //Smoke Sensor
    val sSensorMinValue = 0
    val sSensorMaxValue = 100
    val sSensorThreshold = 70
    val sSensConfig: SensorInfoFromConfig = new SensorInfoFromConfig(
        SensorCategories.Smoke.id,
        sSensorMinValue,
        sSensorMaxValue,
        new SingleThresholdInfo(sSensorThreshold))
    val sSensor = SensorsFactory.createASensorFromConfig(sSensConfig)

    "The sensor factory" should "create a gas sensor from a smoke sensor config info" in {
        sSensor shouldBe a[GasSensor]
    }

    "The sensor factory" should "create a gas sensor from a smoke sensor config " +
        "that have the smoke category id" in {
        sSensor.category.id should be(SensorCategories.Smoke.id)
    }

    "The sensor factory" should "create a gas sensor from a smoke sensor config " +
        "that have the smoke threshold" in {
        sSensor.asInstanceOf[GasSensor].threshold shouldBe a[SmokeThreshold]
    }

    "The sensor factory" should "create a gas sensor from a smoke sensor config " +
        " with the same max value specified in the config" in {
        sSensor.asInstanceOf[GasSensor].maxValue should be(sSensorMaxValue)
    }

    "The sensor factory" should "create a gas sensor from a smoke sensor config " +
        "with the same min value specified in the config" in {
        sSensor.asInstanceOf[GasSensor].minValue should be(sSensorMinValue)
    }

    "The sensor factory" should "create a gas sensor from a smoke sensor config" +
        " with the same threshold value specified in the config" in {
        sSensor.asInstanceOf[GasSensor]
            .threshold
            .asInstanceOf[SmokeThreshold]
            .value should be(sSensorThreshold)
    }

    "The sensor factory" should "create a observable gas sensor from the smoke sensor" in {
        SensorsFactory.createTheObservableVersion(sSensor) shouldBe a[ObservableGasSensor]
    }

    //CO2 Sensor
    val cSensorMinValue = 0
    val cSensorMaxValue = 100
    val cSensorThreshold = 70
    val cSensConfig: SensorInfoFromConfig = new SensorInfoFromConfig(
        SensorCategories.CO2.id,
        cSensorMinValue,
        cSensorMaxValue,
        new SingleThresholdInfo(cSensorThreshold))
    val cSensor = SensorsFactory.createASensorFromConfig(cSensConfig)

    "The sensor factory" should "create a gas sensor from a co2 sensor config info" in {
        cSensor shouldBe a[GasSensor]
    }

    "The sensor factory" should "create a gas sensor from a co2 sensor config " +
        "that have the smoke category id" in {
        cSensor.category.id should be(SensorCategories.CO2.id)
    }

    "The sensor factory" should "create a gas sensor from a co2 sensor config " +
        "that have the co2 threshold" in {
        cSensor.asInstanceOf[GasSensor].threshold shouldBe a[CO2Threshold]
    }

    "The sensor factory" should "create a gas sensor from a co2 sensor config " +
        "with the same max value specified in the config" in {
        cSensor.asInstanceOf[GasSensor].maxValue should be(cSensorMaxValue)
    }

    "The sensor factory" should "create a gas sensor from a co2 sensor config " +
        "with the same min value specified in the config" in {
        cSensor.asInstanceOf[GasSensor].minValue should be(cSensorMinValue)
    }

    "The sensor factory" should "create a gas sensor from a co2 sensor config " +
        "with the same threshold value specified in the config" in {
        cSensor.asInstanceOf[GasSensor]
            .threshold
            .asInstanceOf[CO2Threshold]
            .value should be(cSensorThreshold)
    }

    "The sensor factory" should "create a observable gas sensor from a co2 sensor config" in {
        SensorsFactory.createTheObservableVersion(cSensor) shouldBe a[ObservableGasSensor]
    }

    //Oxygen sensor
    val oSensorMinValue = 0
    val oSensorMaxValue = 100
    val oSensorThreshold = 70
    val oSensConfig: SensorInfoFromConfig = new SensorInfoFromConfig(
        SensorCategories.Oxygen.id,
        oSensorMinValue,
        oSensorMaxValue,
        new SingleThresholdInfo(oSensorThreshold))
    val oSensor = SensorsFactory.createASensorFromConfig(oSensConfig)

    "The sensor factory" should "create a gas sensor from a oxygen sensor config info" in {
        oSensor shouldBe a[GasSensor]
    }

    "The sensor factory" should "create a gas sensor from a oxygen sensor config " +
        "that have the smoke category id" in {
        oSensor.category.id should be(SensorCategories.Oxygen.id)
    }

    "The sensor factory" should "create a gas sensor from a oxygen sensor config " +
        "that have the oxygen threshold" in {
        oSensor.asInstanceOf[GasSensor].threshold shouldBe a[OxygenThreshold]
    }

    "The sensor factory" should "create a gas sensor from a oxygen sensor config " +
        "with the same max value specified in the config" in {
        oSensor.asInstanceOf[GasSensor].maxValue should be(oSensorMaxValue)
    }

    "The sensor factory" should "create a gas sensor from a oxygen sensor config " +
        "with the same min value specified in the config" in {
        oSensor.asInstanceOf[GasSensor].minValue should be(oSensorMinValue)
    }

    "The sensor factory" should "create a gas sensor from a oxygen sensor config " +
        "with the same threshold value specified in the config" in {
        oSensor.asInstanceOf[GasSensor]
            .threshold
            .asInstanceOf[OxygenThreshold]
            .value should be(oSensorThreshold)
    }

    "The sensor factory" should "create a observable gas sensor from a oxygen sensor config" in {
        SensorsFactory.createTheObservableVersion(oSensor) shouldBe a[ObservableGasSensor]
    }
}
albertogiunta/arianna
src/test/scala/system/cell/sensormanagement/sensors/SensorsFactoryTest.scala
Scala
gpl-3.0
9,681
package com.arcusys.valamis.questionbank.model

import org.scalatest.{ FlatSpec, Matchers }

class QuestionCategoryTest extends FlatSpec with Matchers {

  "Question bank question category entity" can "be constructed" in {
    val category = new QuestionCategory(1, "t", "d", Some(12), Some(0))

    category.id should equal(1)
    category.title should equal("t")
    category.description should equal("d")
    category.parentId should equal(Some(12))
  }

  it can "be constructed with empty parent" in {
    val category = new QuestionCategory(1, "t", "d", None, Some(0))

    category.id should equal(1)
    category.title should equal("t")
    category.description should equal("d")
    category.parentId should equal(None)
  }
}
ViLPy/Valamis
valamis-questionbank/src/test/scala/com/arcusys/valamis/questionbank/model/QuestionCategoryTest.scala
Scala
lgpl-3.0
732
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.types

import scala.math.Ordering
import scala.reflect.runtime.universe.typeTag

import org.apache.spark.annotation.Stable
import org.apache.spark.unsafe.types.UTF8String

/**
 * The data type representing `String` values. Please use the singleton `DataTypes.StringType`.
 *
 * @since 1.3.0
 */
@Stable
class StringType private() extends AtomicType {
  // The companion object and this class is separated so the companion object also subclasses
  // this type. Otherwise, the companion object would be of type "StringType$" in byte code.
  // Defined with a private constructor so the companion object is the only possible instantiation.
  private[sql] type InternalType = UTF8String
  @transient private[sql] lazy val tag = typeTag[InternalType]
  private[sql] val ordering = implicitly[Ordering[InternalType]]

  /**
   * The default size of a value of the StringType is 20 bytes.
   */
  override def defaultSize: Int = 20

  private[spark] override def asNullable: StringType = this
}

/**
 * @since 1.3.0
 */
@Stable
case object StringType extends StringType
shaneknapp/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/types/StringType.scala
Scala
apache-2.0
1,896
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.server import kafka.network.SocketServer import kafka.utils._ import org.apache.kafka.common.protocol.types.Struct import org.apache.kafka.common.protocol.{ApiKeys, Errors, ProtoUtils} import org.apache.kafka.common.requests.{CreateTopicsRequest, CreateTopicsResponse, MetadataRequest, MetadataResponse} import org.junit.Assert._ import org.junit.Test import scala.collection.JavaConverters._ class CreateTopicsRequestTest extends BaseRequestTest { @Test def testValidCreateTopicsRequests() { val timeout = 10000 // Generated assignments validateValidCreateTopicsRequests(new CreateTopicsRequest(Map("topic1" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort)).asJava, timeout)) validateValidCreateTopicsRequests(new CreateTopicsRequest(Map("topic2" -> new CreateTopicsRequest.TopicDetails(1, 3.toShort)).asJava, timeout)) val config3 = Map("min.insync.replicas" -> "2").asJava validateValidCreateTopicsRequests(new CreateTopicsRequest(Map("topic3" -> new CreateTopicsRequest.TopicDetails(5, 2.toShort, config3)).asJava, timeout)) // Manual assignments val assignments4 = replicaAssignmentToJava(Map(0 -> List(0))) validateValidCreateTopicsRequests(new CreateTopicsRequest(Map("topic4" -> new CreateTopicsRequest.TopicDetails(assignments4)).asJava, timeout)) val assignments5 = replicaAssignmentToJava(Map(0 -> List(0, 1), 1 -> List(1, 0), 2 -> List(1, 2))) val config5 = Map("min.insync.replicas" -> "2").asJava validateValidCreateTopicsRequests(new CreateTopicsRequest(Map("topic5" -> new CreateTopicsRequest.TopicDetails(assignments5, config5)).asJava, timeout)) // Mixed val assignments8 = replicaAssignmentToJava(Map(0 -> List(0, 1), 1 -> List(1, 0), 2 -> List(1, 2))) validateValidCreateTopicsRequests(new CreateTopicsRequest(Map( "topic6" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort), "topic7" -> new CreateTopicsRequest.TopicDetails(5, 2.toShort), "topic8" -> new CreateTopicsRequest.TopicDetails(assignments8)).asJava, timeout) ) } private def validateValidCreateTopicsRequests(request: CreateTopicsRequest): Unit = { val response = sendCreateTopicRequest(request, 0) val error = response.errors.values.asScala.find(_ != Errors.NONE) assertTrue(s"There should be no errors, found ${response.errors.asScala}", error.isEmpty) request.topics.asScala.foreach { case (topic, details) => def verifyMetadata(socketServer: SocketServer) = { val metadata = sendMetadataRequest(new MetadataRequest(List(topic).asJava)).topicMetadata.asScala val metadataForTopic = metadata.filter(p => p.topic.equals(topic)).head val partitions = if (!details.replicasAssignments.isEmpty) details.replicasAssignments.size else details.numPartitions val replication = if (!details.replicasAssignments.isEmpty) details.replicasAssignments.asScala.head._2.size else 
details.replicationFactor assertNotNull("The topic should be created", metadataForTopic) assertEquals("The topic should have the correct number of partitions", partitions, metadataForTopic.partitionMetadata.size) assertEquals("The topic should have the correct replication factor", replication, metadataForTopic.partitionMetadata.asScala.head.replicas.size) } // Verify controller broker has the correct metadata verifyMetadata(controllerSocketServer) // Wait until metadata is propagated and validate non-controller broker has the correct metadata TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0) verifyMetadata(notControllerSocketServer) } } @Test def testErrorCreateTopicsRequests() { val timeout = 10000 val existingTopic = "existing-topic" TestUtils.createTopic(zkUtils, existingTopic, 1, 1, servers) // Basic validateErrorCreateTopicsRequests(new CreateTopicsRequest(Map(existingTopic -> new CreateTopicsRequest.TopicDetails(1, 1.toShort)).asJava, timeout), Map(existingTopic -> Errors.TOPIC_ALREADY_EXISTS)) validateErrorCreateTopicsRequests(new CreateTopicsRequest(Map("error-partitions" -> new CreateTopicsRequest.TopicDetails(-1, 1.toShort)).asJava, timeout), Map("error-partitions" -> Errors.INVALID_PARTITIONS)) validateErrorCreateTopicsRequests(new CreateTopicsRequest(Map("error-replication" -> new CreateTopicsRequest.TopicDetails(1, (numBrokers + 1).toShort)).asJava, timeout), Map("error-replication" -> Errors.INVALID_REPLICATION_FACTOR)) val invalidConfig = Map("not.a.property" -> "error").asJava validateErrorCreateTopicsRequests(new CreateTopicsRequest(Map("error-config" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort, invalidConfig)).asJava, timeout), Map("error-config" -> Errors.INVALID_CONFIG)) val invalidAssignments = replicaAssignmentToJava(Map(0 -> List(0, 1), 1 -> List(0))) validateErrorCreateTopicsRequests(new CreateTopicsRequest(Map("error-assignment" -> new CreateTopicsRequest.TopicDetails(invalidAssignments)).asJava, timeout), Map("error-assignment" -> Errors.INVALID_REPLICA_ASSIGNMENT)) // Partial validateErrorCreateTopicsRequests( new CreateTopicsRequest(Map( existingTopic -> new CreateTopicsRequest.TopicDetails(1, 1.toShort), "partial-partitions" -> new CreateTopicsRequest.TopicDetails(-1, 1.toShort), "partial-replication" -> new CreateTopicsRequest.TopicDetails(1, (numBrokers + 1).toShort), "partial-assignment" -> new CreateTopicsRequest.TopicDetails(invalidAssignments), "partial-none" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort)).asJava, timeout), Map( existingTopic -> Errors.TOPIC_ALREADY_EXISTS, "partial-partitions" -> Errors.INVALID_PARTITIONS, "partial-replication" -> Errors.INVALID_REPLICATION_FACTOR, "partial-assignment" -> Errors.INVALID_REPLICA_ASSIGNMENT, "partial-none" -> Errors.NONE ) ) validateTopicExists("partial-none") // Timeout // We don't expect a request to ever complete within 1ms. A timeout of 1 ms allows us to test the purgatory timeout logic. 
validateErrorCreateTopicsRequests(new CreateTopicsRequest(Map("error-timeout" -> new CreateTopicsRequest.TopicDetails(10, 3.toShort)).asJava, 1), Map("error-timeout" -> Errors.REQUEST_TIMED_OUT)) validateErrorCreateTopicsRequests(new CreateTopicsRequest(Map("error-timeout-zero" -> new CreateTopicsRequest.TopicDetails(10, 3.toShort)).asJava, 0), Map("error-timeout-zero" -> Errors.REQUEST_TIMED_OUT)) // Negative timeouts are treated the same as 0 validateErrorCreateTopicsRequests(new CreateTopicsRequest(Map("error-timeout-negative" -> new CreateTopicsRequest.TopicDetails(10, 3.toShort)).asJava, -1), Map("error-timeout-negative" -> Errors.REQUEST_TIMED_OUT)) // The topics should still get created eventually TestUtils.waitUntilMetadataIsPropagated(servers, "error-timeout", 0) TestUtils.waitUntilMetadataIsPropagated(servers, "error-timeout-zero", 0) TestUtils.waitUntilMetadataIsPropagated(servers, "error-timeout-negative", 0) validateTopicExists("error-timeout") validateTopicExists("error-timeout-zero") validateTopicExists("error-timeout-negative") } @Test def testInvalidCreateTopicsRequests() { // Duplicate val singleRequest = new CreateTopicsRequest(Map("duplicate-topic" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort)).asJava, 1000) val duplicateRequest = duplicateFirstTopic(singleRequest) assertFalse("Request doesn't have duplicate topics", duplicateRequest.duplicateTopics().isEmpty) validateErrorCreateTopicsRequests(duplicateRequest, Map("duplicate-topic" -> Errors.INVALID_REQUEST)) // Duplicate Partial val doubleRequest = new CreateTopicsRequest(Map( "duplicate-topic" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort), "other-topic" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort)).asJava, 1000) val duplicateDoubleRequest = duplicateFirstTopic(doubleRequest) assertFalse("Request doesn't have duplicate topics", duplicateDoubleRequest.duplicateTopics().isEmpty) validateErrorCreateTopicsRequests(duplicateDoubleRequest, Map( "duplicate-topic" -> Errors.INVALID_REQUEST, "other-topic" -> Errors.NONE)) // Partitions/ReplicationFactor and ReplicaAssignment val assignments = replicaAssignmentToJava(Map(0 -> List(0))) val assignmentRequest = new CreateTopicsRequest(Map("bad-args-topic" -> new CreateTopicsRequest.TopicDetails(assignments)).asJava, 1000) val badArgumentsRequest = addPartitionsAndReplicationFactorToFirstTopic(assignmentRequest) validateErrorCreateTopicsRequests(badArgumentsRequest, Map("bad-args-topic" -> Errors.INVALID_REQUEST)) } private def duplicateFirstTopic(request: CreateTopicsRequest) = { val struct = request.toStruct val topics = struct.getArray("create_topic_requests") val firstTopic = topics(0).asInstanceOf[Struct] val newTopics = firstTopic :: topics.toList struct.set("create_topic_requests", newTopics.toArray) new CreateTopicsRequest(struct) } private def addPartitionsAndReplicationFactorToFirstTopic(request: CreateTopicsRequest) = { val struct = request.toStruct val topics = struct.getArray("create_topic_requests") val firstTopic = topics(0).asInstanceOf[Struct] firstTopic.set("num_partitions", 1) firstTopic.set("replication_factor", 1.toShort) new CreateTopicsRequest(struct) } private def validateErrorCreateTopicsRequests(request: CreateTopicsRequest, expectedResponse: Map[String, Errors]): Unit = { val response = sendCreateTopicRequest(request, 0) val errors = response.errors.asScala assertEquals("The response size should match", expectedResponse.size, response.errors.size) expectedResponse.foreach { case (topic, expectedError) => assertEquals("The 
response error should match", expectedResponse(topic), errors(topic)) // If no error validate topic exists if (expectedError == Errors.NONE) { validateTopicExists(topic) } } } @Test def testNotController() { val request = new CreateTopicsRequest(Map("topic1" -> new CreateTopicsRequest.TopicDetails(1, 1.toShort)).asJava, 1000) val response = sendCreateTopicRequest(request, 0, notControllerSocketServer) val error = response.errors.asScala.head._2 assertEquals("Expected controller error when routed incorrectly", Errors.NOT_CONTROLLER, error) } private def validateTopicExists(topic: String): Unit = { TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0) val metadata = sendMetadataRequest(new MetadataRequest(List(topic).asJava)).topicMetadata.asScala assertTrue("The topic should be created", metadata.exists(p => p.topic.equals(topic) && p.error() == Errors.NONE)) } private def replicaAssignmentToJava(assignments: Map[Int, List[Int]]) = { assignments.map { case (k, v) => (k:Integer, v.map { i => i:Integer }.asJava) }.asJava } private def sendCreateTopicRequest(request: CreateTopicsRequest, version: Short, socketServer: SocketServer = controllerSocketServer): CreateTopicsResponse = { val response = send(request, ApiKeys.CREATE_TOPICS, Some(version), socketServer) CreateTopicsResponse.parse(response, version) } private def sendMetadataRequest(request: MetadataRequest, destination: SocketServer = anySocketServer): MetadataResponse = { val version = ProtoUtils.latestVersion(ApiKeys.METADATA.id) val response = send(request, ApiKeys.METADATA, destination = destination) MetadataResponse.parse(response, version) } }
eribeiro/kafka
core/src/test/scala/unit/kafka/server/CreateTopicsRequestTest.scala
Scala
apache-2.0
12,561
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.avro.scala

import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite

import org.apache.avro.scala.Text.implicitCamelCaseableFromString

@RunWith(classOf[JUnitRunner])
class TestText extends FunSuite {
  test("toCamelCase") {
    assert("fooBar".toCamelCase === "fooBar")
    assert("FooBar".toCamelCase === "FooBar")
    assert("foo_bar".toCamelCase === "fooBar")
    assert("foo_bar".toUpperCamelCase === "FooBar")
    assert("fooBar".toUpperCamelCase === "FooBar")
    assert("Foo_bar".toLowerCamelCase === "fooBar")
    assert("FooBar".toLowerCamelCase === "fooBar")
    assert("foo_".toCamelCase === "foo_")
    assert("_foo_".toCamelCase === "Foo_")
  }
}
julianpeeters/avro
lang/scala/src/test/java/org/apache/avro/scala/TestText.scala
Scala
apache-2.0
1,535
/*
 * Copyright 2016 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.ct.ct600a.v2

import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalBoolean, Linked}

case class A1(value: Option[Boolean])
  extends CtBoxIdentifier(name = "A1 Loans made during the period released or written off before the end of the period")
  with CtOptionalBoolean

object A1 extends Linked[LPQ09, A1] {

  override def apply(source: LPQ09): A1 = A1(source.value)
}
ahudspith-equalexperts/ct-calculations
src/main/scala/uk/gov/hmrc/ct/ct600a/v2/A1.scala
Scala
apache-2.0
992
package views.acquire

import composition.TestHarness
import org.scalatest.selenium.WebBrowser.go
import org.scalatest.selenium.WebBrowser.pageSource
import pages.acquire.VersionPage
import scala.io.Source.fromInputStream
import uk.gov.dvla.vehicles.presentation.common.testhelpers.UiSpec

class VersionIntegrationSpec extends UiSpec with TestHarness {

  "Version endpoint" should {
    "be declared and should include the build-details.txt from classpath" in new WebBrowserForSelenium {
      go to VersionPage
      val t = fromInputStream(getClass.getResourceAsStream("/build-details.txt")).getLines().toSet.toList
      pageSource.lines.toSet.toList should contain allOf(t.head, t.tail.head, t.tail.tail.toSeq:_*)
    }
  }
}
dvla/vehicles-acquire-online
test/views/acquire/VersionIntegrationSpec.scala
Scala
mit
731
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io._ import java.nio.file.Files import scala.io.Source import scala.util.Properties import scala.collection.JavaConverters._ import scala.collection.mutable.Stack import sbt._ import sbt.Classpaths.publishTask import sbt.Keys._ import sbtunidoc.Plugin.UnidocKeys.unidocGenjavadocVersion import com.simplytyped.Antlr4Plugin._ import com.typesafe.sbt.pom.{PomBuild, SbtPomKeys} import com.typesafe.tools.mima.plugin.MimaKeys import org.scalastyle.sbt.ScalastylePlugin._ import org.scalastyle.sbt.Tasks import spray.revolver.RevolverPlugin._ object BuildCommons { private val buildLocation = file(".").getAbsoluteFile.getParentFile val sqlProjects@Seq(catalyst, sql, hive, hiveThriftServer, sqlKafka010) = Seq( "catalyst", "sql", "hive", "hive-thriftserver", "sql-kafka-0-10" ).map(ProjectRef(buildLocation, _)) val streamingProjects@Seq( streaming, streamingFlumeSink, streamingFlume, streamingKafka, streamingKafka010 ) = Seq( "streaming", "streaming-flume-sink", "streaming-flume", "streaming-kafka-0-8", "streaming-kafka-0-10" ).map(ProjectRef(buildLocation, _)) val allProjects@Seq( core, graphx, mllib, mllibLocal, repl, networkCommon, networkShuffle, launcher, unsafe, tags, sketch, _* ) = Seq( "core", "graphx", "mllib", "mllib-local", "repl", "network-common", "network-shuffle", "launcher", "unsafe", "tags", "sketch" ).map(ProjectRef(buildLocation, _)) ++ sqlProjects ++ streamingProjects val optionallyEnabledProjects@Seq(mesos, yarn, java8Tests, sparkGangliaLgpl, streamingKinesisAsl, dockerIntegrationTests, kubernetes, _*) = Seq("mesos", "yarn", "java8-tests", "ganglia-lgpl", "streaming-kinesis-asl", "docker-integration-tests", "kubernetes", "kubernetes-integration-tests", "kubernetes-integration-tests-spark-jobs", "kubernetes-integration-tests-spark-jobs-helpers", "kubernetes-docker-minimal-bundle" ).map(ProjectRef(buildLocation, _)) val assemblyProjects@Seq(networkYarn, streamingFlumeAssembly, streamingKafkaAssembly, streamingKafka010Assembly, streamingKinesisAslAssembly) = Seq("network-yarn", "streaming-flume-assembly", "streaming-kafka-0-8-assembly", "streaming-kafka-0-10-assembly", "streaming-kinesis-asl-assembly") .map(ProjectRef(buildLocation, _)) val copyJarsProjects@Seq(assembly, examples) = Seq("assembly", "examples") .map(ProjectRef(buildLocation, _)) val tools = ProjectRef(buildLocation, "tools") // Root project. 
val spark = ProjectRef(buildLocation, "spark") val sparkHome = buildLocation val testTempDir = s"$sparkHome/target/tmp" val javacJVMVersion = settingKey[String]("source and target JVM version for javac") val scalacJVMVersion = settingKey[String]("source and target JVM version for scalac") } object SparkBuild extends PomBuild { import BuildCommons._ import scala.collection.mutable.Map val projectsMap: Map[String, Seq[Setting[_]]] = Map.empty override val profiles = { val profiles = Properties.envOrNone("SBT_MAVEN_PROFILES") match { case None => Seq("sbt") case Some(v) => v.split("(\\\\s+|,)").filterNot(_.isEmpty).map(_.trim.replaceAll("-P", "")).toSeq } if (System.getProperty("scala-2.10") == "") { // To activate scala-2.10 profile, replace empty property value to non-empty value // in the same way as Maven which handles -Dname as -Dname=true before executes build process. // see: https://github.com/apache/maven/blob/maven-3.0.4/maven-embedder/src/main/java/org/apache/maven/cli/MavenCli.java#L1082 System.setProperty("scala-2.10", "true") } profiles } Properties.envOrNone("SBT_MAVEN_PROPERTIES") match { case Some(v) => v.split("(\\\\s+|,)").filterNot(_.isEmpty).map(_.split("=")).foreach(x => System.setProperty(x(0), x(1))) case _ => } override val userPropertiesMap = System.getProperties.asScala.toMap lazy val MavenCompile = config("m2r") extend(Compile) lazy val publishLocalBoth = TaskKey[Unit]("publish-local", "publish local for m2 and ivy") lazy val sparkGenjavadocSettings: Seq[sbt.Def.Setting[_]] = Seq( libraryDependencies += compilerPlugin( "com.typesafe.genjavadoc" %% "genjavadoc-plugin" % unidocGenjavadocVersion.value cross CrossVersion.full), scalacOptions ++= Seq( "-P:genjavadoc:out=" + (target.value / "java"), "-P:genjavadoc:strictVisibility=true" // hide package private types ) ) lazy val scalaStyleRules = Project("scalaStyleRules", file("scalastyle")) .settings( libraryDependencies += "org.scalastyle" %% "scalastyle" % "0.8.0" ) lazy val scalaStyleOnCompile = taskKey[Unit]("scalaStyleOnCompile") lazy val scalaStyleOnTest = taskKey[Unit]("scalaStyleOnTest") // We special case the 'println' lint rule to only be a warning on compile, because adding // printlns for debugging is a common use case and is easy to remember to remove. 
val scalaStyleOnCompileConfig: String = { val in = "scalastyle-config.xml" val out = "scalastyle-on-compile.generated.xml" val replacements = Map( """customId="println" level="error"""" -> """customId="println" level="warn"""" ) var contents = Source.fromFile(in).getLines.mkString("\\n") for ((k, v) <- replacements) { require(contents.contains(k), s"Could not rewrite '$k' in original scalastyle config.") contents = contents.replace(k, v) } new PrintWriter(out) { write(contents) close() } out } // Return a cached scalastyle task for a given configuration (usually Compile or Test) private def cachedScalaStyle(config: Configuration) = Def.task { val logger = streams.value.log // We need a different cache dir per Configuration, otherwise they collide val cacheDir = target.value / s"scalastyle-cache-${config.name}" val cachedFun = FileFunction.cached(cacheDir, FilesInfo.lastModified, FilesInfo.exists) { (inFiles: Set[File]) => { val args: Seq[String] = Seq.empty val scalaSourceV = Seq(file(scalaSource.in(config).value.getAbsolutePath)) val configV = (baseDirectory in ThisBuild).value / scalaStyleOnCompileConfig val configUrlV = scalastyleConfigUrl.in(config).value val streamsV = streams.in(config).value val failOnErrorV = true val scalastyleTargetV = scalastyleTarget.in(config).value val configRefreshHoursV = scalastyleConfigRefreshHours.in(config).value val targetV = target.in(config).value val configCacheFileV = scalastyleConfigUrlCacheFile.in(config).value logger.info(s"Running scalastyle on ${name.value} in ${config.name}") Tasks.doScalastyle(args, configV, configUrlV, failOnErrorV, scalaSourceV, scalastyleTargetV, streamsV, configRefreshHoursV, targetV, configCacheFileV) Set.empty } } cachedFun(findFiles(scalaSource.in(config).value)) } private def findFiles(file: File): Set[File] = if (file.isDirectory) { file.listFiles().toSet.flatMap(findFiles) + file } else { Set(file) } def enableScalaStyle: Seq[sbt.Def.Setting[_]] = Seq( scalaStyleOnCompile := cachedScalaStyle(Compile).value, scalaStyleOnTest := cachedScalaStyle(Test).value, logLevel in scalaStyleOnCompile := Level.Warn, logLevel in scalaStyleOnTest := Level.Warn, (compile in Compile) := { scalaStyleOnCompile.value (compile in Compile).value }, (compile in Test) := { scalaStyleOnTest.value (compile in Test).value } ) lazy val sharedSettings = sparkGenjavadocSettings ++ (if (sys.env.contains("NOLINT_ON_COMPILE")) Nil else enableScalaStyle) ++ Seq( exportJars in Compile := true, exportJars in Test := false, javaHome := sys.env.get("JAVA_HOME") .orElse(sys.props.get("java.home").map { p => new File(p).getParentFile().getAbsolutePath() }) .map(file), incOptions := incOptions.value.withNameHashing(true), publishMavenStyle := true, unidocGenjavadocVersion := "0.10", // Override SBT's default resolvers: resolvers := Seq( DefaultMavenRepository, Resolver.mavenLocal, Resolver.file("local", file(Path.userHome.absolutePath + "/.ivy2/local"))(Resolver.ivyStylePatterns) ), externalResolvers := resolvers.value, otherResolvers := SbtPomKeys.mvnLocalRepository(dotM2 => Seq(Resolver.file("dotM2", dotM2))).value, publishLocalConfiguration in MavenCompile := new PublishConfiguration(None, "dotM2", packagedArtifacts.value, Seq(), ivyLoggingLevel.value), publishMavenStyle in MavenCompile := true, publishLocal in MavenCompile := publishTask(publishLocalConfiguration in MavenCompile, deliverLocal).value, publishLocalBoth := Seq(publishLocal in MavenCompile, publishLocal).dependOn.value, javacOptions in (Compile, doc) ++= { val versionParts = 
System.getProperty("java.version").split("[+.\\\\-]+", 3) var major = versionParts(0).toInt if (major == 1) major = versionParts(1).toInt if (major >= 8) Seq("-Xdoclint:all", "-Xdoclint:-missing") else Seq.empty }, javacJVMVersion := "1.8", // SBT Scala 2.10 build still doesn't support Java 8, because scalac 2.10 doesn't, but, // it also doesn't touch Java 8 code and it's OK to emit Java 7 bytecode in this case scalacJVMVersion := (if (System.getProperty("scala-2.10") == "true") "1.7" else "1.8"), javacOptions in Compile ++= Seq( "-encoding", "UTF-8", "-source", javacJVMVersion.value ), // This -target option cannot be set in the Compile configuration scope since `javadoc` doesn't // play nicely with it; see https://github.com/sbt/sbt/issues/355#issuecomment-3817629 for // additional discussion and explanation. javacOptions in (Compile, compile) ++= Seq( "-target", javacJVMVersion.value ), scalacOptions in Compile ++= Seq( s"-target:jvm-${scalacJVMVersion.value}", "-sourcepath", (baseDirectory in ThisBuild).value.getAbsolutePath // Required for relative source links in scaladoc ), // Implements -Xfatal-warnings, ignoring deprecation warnings. // Code snippet taken from https://issues.scala-lang.org/browse/SI-8410. compile in Compile := { val analysis = (compile in Compile).value val out = streams.value def logProblem(l: (=> String) => Unit, f: File, p: xsbti.Problem) = { l(f.toString + ":" + p.position.line.fold("")(_ + ":") + " " + p.message) l(p.position.lineContent) l("") } var failed = 0 analysis.infos.allInfos.foreach { case (k, i) => i.reportedProblems foreach { p => val deprecation = p.message.contains("is deprecated") if (!deprecation) { failed = failed + 1 } val printer: (=> String) => Unit = s => if (deprecation) { out.log.warn(s) } else { out.log.error("[warn] " + s) } logProblem(printer, k, p) } } if (failed > 0) { sys.error(s"$failed fatal warnings") } analysis } ) def enable(settings: Seq[Setting[_]])(projectRef: ProjectRef) = { val existingSettings = projectsMap.getOrElse(projectRef.project, Seq[Setting[_]]()) projectsMap += (projectRef.project -> (existingSettings ++ settings)) } // Note ordering of these settings matter. /* Enable shared settings on all projects */ (allProjects ++ optionallyEnabledProjects ++ assemblyProjects ++ copyJarsProjects ++ Seq(spark, tools)) .foreach(enable(sharedSettings ++ DependencyOverrides.settings ++ ExcludedDependencies.settings)) /* Enable tests settings for all projects except examples, assembly and tools */ (allProjects ++ optionallyEnabledProjects).foreach(enable(TestSettings.settings)) val mimaProjects = allProjects.filterNot { x => Seq( spark, hive, hiveThriftServer, catalyst, repl, networkCommon, networkShuffle, networkYarn, unsafe, tags, sqlKafka010 ).contains(x) } mimaProjects.foreach { x => enable(MimaBuild.mimaSettings(sparkHome, x))(x) } /* Generate and pick the spark build info from extra-resources */ enable(Core.settings)(core) /* Unsafe settings */ enable(Unsafe.settings)(unsafe) /* * Set up tasks to copy dependencies during packaging. This step can be disabled in the command * line, so that dev/mima can run without trying to copy these files again and potentially * causing issues. */ if (!"false".equals(System.getProperty("copyDependencies"))) { copyJarsProjects.foreach(enable(CopyDependencies.settings)) } /* Enable Assembly for all assembly projects */ assemblyProjects.foreach(enable(Assembly.settings)) /* Package pyspark artifacts in a separate zip file for YARN. 
*/ enable(PySparkAssembly.settings)(assembly) /* Enable unidoc only for the root spark project */ enable(Unidoc.settings)(spark) /* Catalyst ANTLR generation settings */ enable(Catalyst.settings)(catalyst) /* Spark SQL Core console settings */ enable(SQL.settings)(sql) /* Hive console settings */ enable(Hive.settings)(hive) enable(Flume.settings)(streamingFlumeSink) // SPARK-14738 - Remove docker tests from main Spark build // enable(DockerIntegrationTests.settings)(dockerIntegrationTests) /** * Adds the ability to run the spark shell directly from SBT without building an assembly * jar. * * Usage: `build/sbt sparkShell` */ val sparkShell = taskKey[Unit]("start a spark-shell.") val sparkPackage = inputKey[Unit]( s""" |Download and run a spark package. |Usage `builds/sbt "sparkPackage <group:artifact:version> <MainClass> [args] """.stripMargin) val sparkSql = taskKey[Unit]("starts the spark sql CLI.") enable(Seq( connectInput in run := true, fork := true, outputStrategy in run := Some (StdoutOutput), javaOptions += "-Xmx2g", sparkShell := { (runMain in Compile).toTask(" org.apache.spark.repl.Main -usejavacp").value }, sparkPackage := { import complete.DefaultParsers._ val packages :: className :: otherArgs = spaceDelimited("<group:artifact:version> <MainClass> [args]").parsed.toList val scalaRun = (runner in run).value val classpath = (fullClasspath in Runtime).value val args = Seq("--packages", packages, "--class", className, (Keys.`package` in Compile in LocalProject("core")) .value.getCanonicalPath) ++ otherArgs println(args) scalaRun.run("org.apache.spark.deploy.SparkSubmit", classpath.map(_.data), args, streams.value.log) }, javaOptions in Compile += "-Dspark.master=local", sparkSql := { (runMain in Compile).toTask(" org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver").value } ))(assembly) enable(Seq(sparkShell := sparkShell in LocalProject("assembly")))(spark) // TODO: move this to its upstream project. override def projectDefinitions(baseDirectory: File): Seq[Project] = { super.projectDefinitions(baseDirectory).map { x => if (projectsMap.exists(_._1 == x.id)) x.settings(projectsMap(x.id): _*) else x.settings(Seq[Setting[_]](): _*) } ++ Seq[Project](OldDeps.project) } } object Core { lazy val settings = Seq( resourceGenerators in Compile += Def.task { val buildScript = baseDirectory.value + "/../build/spark-build-info" val targetDir = baseDirectory.value + "/target/extra-resources/" val command = Seq("bash", buildScript, targetDir, version.value) Process(command).!! val propsFile = baseDirectory.value / "target" / "extra-resources" / "spark-version-info.properties" Seq(propsFile) }.taskValue ) } object Unsafe { lazy val settings = Seq( // This option is needed to suppress warnings from sun.misc.Unsafe usage javacOptions in Compile += "-XDignore.symbol.file" ) } object Flume { lazy val settings = sbtavro.SbtAvro.avroSettings } object DockerIntegrationTests { // This serves to override the override specified in DependencyOverrides: lazy val settings = Seq( dependencyOverrides += "com.google.guava" % "guava" % "18.0", resolvers += "DB2" at "https://app.camunda.com/nexus/content/repositories/public/", libraryDependencies += "com.oracle" % "ojdbc6" % "11.2.0.1.0" from "https://app.camunda.com/nexus/content/repositories/public/com/oracle/ojdbc6/11.2.0.1.0/ojdbc6-11.2.0.1.0.jar" // scalastyle:ignore ) } /** * Overrides to work around sbt's dependency resolution being different from Maven's. 
*/ object DependencyOverrides { lazy val settings = Seq( dependencyOverrides += "com.google.guava" % "guava" % "14.0.1") } /** * This excludes library dependencies in sbt, which are specified in maven but are * not needed by sbt build. */ object ExcludedDependencies { lazy val settings = Seq( libraryDependencies ~= { libs => libs.filterNot(_.name == "groovy-all") } ) } /** * Project to pull previous artifacts of Spark for generating Mima excludes. */ object OldDeps { lazy val project = Project("oldDeps", file("dev"), settings = oldDepsSettings) lazy val allPreviousArtifactKeys = Def.settingDyn[Seq[Set[ModuleID]]] { SparkBuild.mimaProjects .map { project => MimaKeys.mimaPreviousArtifacts in project } .map(k => Def.setting(k.value)) .join } def oldDepsSettings() = Defaults.coreDefaultSettings ++ Seq( name := "old-deps", scalaVersion := "2.10.5", libraryDependencies := allPreviousArtifactKeys.value.flatten ) } object Catalyst { lazy val settings = antlr4Settings ++ Seq( antlr4PackageName in Antlr4 := Some("org.apache.spark.sql.catalyst.parser"), antlr4GenListener in Antlr4 := true, antlr4GenVisitor in Antlr4 := true ) } object SQL { lazy val settings = Seq( initialCommands in console := """ |import org.apache.spark.SparkContext |import org.apache.spark.sql.SQLContext |import org.apache.spark.sql.catalyst.analysis._ |import org.apache.spark.sql.catalyst.dsl._ |import org.apache.spark.sql.catalyst.errors._ |import org.apache.spark.sql.catalyst.expressions._ |import org.apache.spark.sql.catalyst.plans.logical._ |import org.apache.spark.sql.catalyst.rules._ |import org.apache.spark.sql.catalyst.util._ |import org.apache.spark.sql.execution |import org.apache.spark.sql.functions._ |import org.apache.spark.sql.types._ | |val sc = new SparkContext("local[*]", "dev-shell") |val sqlContext = new SQLContext(sc) |import sqlContext.implicits._ |import sqlContext._ """.stripMargin, cleanupCommands in console := "sc.stop()" ) } object Hive { lazy val settings = Seq( // Specially disable assertions since some Hive tests fail them javaOptions in Test := (javaOptions in Test).value.filterNot(_ == "-ea"), // Supporting all SerDes requires us to depend on deprecated APIs, so we turn off the warnings // only for this subproject. scalacOptions := (scalacOptions map { currentOpts: Seq[String] => currentOpts.filterNot(_ == "-deprecation") }).value, initialCommands in console := """ |import org.apache.spark.SparkContext |import org.apache.spark.sql.catalyst.analysis._ |import org.apache.spark.sql.catalyst.dsl._ |import org.apache.spark.sql.catalyst.errors._ |import org.apache.spark.sql.catalyst.expressions._ |import org.apache.spark.sql.catalyst.plans.logical._ |import org.apache.spark.sql.catalyst.rules._ |import org.apache.spark.sql.catalyst.util._ |import org.apache.spark.sql.execution |import org.apache.spark.sql.functions._ |import org.apache.spark.sql.hive._ |import org.apache.spark.sql.hive.test.TestHive._ |import org.apache.spark.sql.hive.test.TestHive.implicits._ |import org.apache.spark.sql.types._""".stripMargin, cleanupCommands in console := "sparkContext.stop()", // Some of our log4j jars make it impossible to submit jobs from this JVM to Hive Map/Reduce // in order to generate golden files. This is only required for developers who are adding new // new query tests. 
fullClasspath in Test := (fullClasspath in Test).value.filterNot { f => f.toString.contains("jcl-over") } ) } object Assembly { import sbtassembly.AssemblyUtils._ import sbtassembly.Plugin._ import AssemblyKeys._ val hadoopVersion = taskKey[String]("The version of hadoop that spark is compiled against.") lazy val settings = assemblySettings ++ Seq( test in assembly := {}, hadoopVersion := { sys.props.get("hadoop.version") .getOrElse(SbtPomKeys.effectivePom.value.getProperties.get("hadoop.version").asInstanceOf[String]) }, jarName in assembly := { if (moduleName.value.contains("streaming-flume-assembly") || moduleName.value.contains("streaming-kafka-0-8-assembly") || moduleName.value.contains("streaming-kafka-0-10-assembly") || moduleName.value.contains("streaming-kinesis-asl-assembly")) { // This must match the same name used in maven (see external/kafka-0-8-assembly/pom.xml) s"${moduleName.value}-${version.value}.jar" } else { s"${moduleName.value}-${version.value}-hadoop${hadoopVersion.value}.jar" } }, jarName in (Test, assembly) := s"${moduleName.value}-test-${version.value}.jar", mergeStrategy in assembly := { case m if m.toLowerCase.endsWith("manifest.mf") => MergeStrategy.discard case m if m.toLowerCase.matches("meta-inf.*\\\\.sf$") => MergeStrategy.discard case "log4j.properties" => MergeStrategy.discard case m if m.toLowerCase.startsWith("meta-inf/services/") => MergeStrategy.filterDistinctLines case "reference.conf" => MergeStrategy.concat case _ => MergeStrategy.first } ) } object PySparkAssembly { import sbtassembly.Plugin._ import AssemblyKeys._ import java.util.zip.{ZipOutputStream, ZipEntry} lazy val settings = Seq( // Use a resource generator to copy all .py files from python/pyspark into a managed directory // to be included in the assembly. We can't just add "python/" to the assembly's resource dir // list since that will copy unneeded / unwanted files. 
resourceGenerators in Compile += Def.macroValueI(resourceManaged in Compile map { outDir: File => val src = new File(BuildCommons.sparkHome, "python/pyspark") val zipFile = new File(BuildCommons.sparkHome , "python/lib/pyspark.zip") zipFile.delete() zipRecursive(src, zipFile) Seq[File]() }).value ) private def zipRecursive(source: File, destZipFile: File) = { val destOutput = new ZipOutputStream(new FileOutputStream(destZipFile)) addFilesToZipStream("", source, destOutput) destOutput.flush() destOutput.close() } private def addFilesToZipStream(parent: String, source: File, output: ZipOutputStream): Unit = { if (source.isDirectory()) { output.putNextEntry(new ZipEntry(parent + source.getName())) for (file <- source.listFiles()) { addFilesToZipStream(parent + source.getName() + File.separator, file, output) } } else { val in = new FileInputStream(source) output.putNextEntry(new ZipEntry(parent + source.getName())) val buf = new Array[Byte](8192) var n = 0 while (n != -1) { n = in.read(buf) if (n != -1) { output.write(buf, 0, n) } } output.closeEntry() in.close() } } } object Unidoc { import BuildCommons._ import sbtunidoc.Plugin._ import UnidocKeys._ private def ignoreUndocumentedPackages(packages: Seq[Seq[File]]): Seq[Seq[File]] = { packages .map(_.filterNot(_.getName.contains("$"))) .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/deploy"))) .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/examples"))) .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/memory"))) .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/network"))) .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/shuffle"))) .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/executor"))) .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/unsafe"))) .map(_.filterNot(_.getCanonicalPath.contains("python"))) .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/util/collection"))) .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/catalyst"))) .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/execution"))) .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/internal"))) .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/hive/test"))) } private def ignoreClasspaths(classpaths: Seq[Classpath]): Seq[Classpath] = { classpaths .map(_.filterNot(_.data.getCanonicalPath.matches(""".*kafka-clients-0\\.10.*"""))) .map(_.filterNot(_.data.getCanonicalPath.matches(""".*kafka_2\\..*-0\\.10.*"""))) } val unidocSourceBase = settingKey[String]("Base URL of source links in Scaladoc.") lazy val settings = scalaJavaUnidocSettings ++ Seq ( publish := {}, unidocProjectFilter in(ScalaUnidoc, unidoc) := inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, streamingFlumeSink, yarn, tags, streamingKafka010, sqlKafka010), unidocProjectFilter in(JavaUnidoc, unidoc) := inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, streamingFlumeSink, yarn, tags, streamingKafka010, sqlKafka010), unidocAllClasspaths in (ScalaUnidoc, unidoc) := { ignoreClasspaths((unidocAllClasspaths in (ScalaUnidoc, unidoc)).value) }, unidocAllClasspaths in (JavaUnidoc, unidoc) := { ignoreClasspaths((unidocAllClasspaths in (JavaUnidoc, unidoc)).value) }, // Skip actual catalyst, but include the subproject. // Catalyst is not public API and contains quasiquotes which break scaladoc. 
unidocAllSources in (ScalaUnidoc, unidoc) := { ignoreUndocumentedPackages((unidocAllSources in (ScalaUnidoc, unidoc)).value) }, // Skip class names containing $ and some internal packages in Javadocs unidocAllSources in (JavaUnidoc, unidoc) := { ignoreUndocumentedPackages((unidocAllSources in (JavaUnidoc, unidoc)).value) .map(_.filterNot(_.getCanonicalPath.contains("org/apache/hadoop"))) }, javacOptions in (JavaUnidoc, unidoc) := Seq( "-windowtitle", "Spark " + version.value.replaceAll("-SNAPSHOT", "") + " JavaDoc", "-public", "-noqualifier", "java.lang", "-tag", """example:a:Example\\:""", "-tag", """note:a:Note\\:""", "-tag", "group:X", "-tag", "tparam:X", "-tag", "constructor:X", "-tag", "todo:X", "-tag", "groupname:X" ), // Use GitHub repository for Scaladoc source links unidocSourceBase := s"https://github.com/apache/spark/tree/v${version.value}", scalacOptions in (ScalaUnidoc, unidoc) ++= Seq( "-groups", // Group similar methods together based on the @group annotation. "-skip-packages", "org.apache.hadoop" ) ++ ( // Add links to sources when generating Scaladoc for a non-snapshot release if (!isSnapshot.value) { Opts.doc.sourceUrl(unidocSourceBase.value + "€{FILE_PATH}.scala") } else { Seq() } ) ) } object CopyDependencies { val copyDeps = TaskKey[Unit]("copyDeps", "Copies needed dependencies to the build directory.") val destPath = (crossTarget in Compile) { _ / "jars"} lazy val settings = Seq( copyDeps := { val dest = destPath.value if (!dest.isDirectory() && !dest.mkdirs()) { throw new IOException("Failed to create jars directory.") } (dependencyClasspath in Compile).value.map(_.data) .filter { jar => jar.isFile() } .foreach { jar => val destJar = new File(dest, jar.getName()) if (destJar.isFile()) { destJar.delete() } Files.copy(jar.toPath(), destJar.toPath()) } }, crossTarget in (Compile, packageBin) := destPath.value, packageBin in Compile := (packageBin in Compile).dependsOn(copyDeps).value ) } object TestSettings { import BuildCommons._ private val scalaBinaryVersion = if (System.getProperty("scala-2.10") == "true") { "2.10" } else { "2.11" } lazy val settings = Seq ( // Fork new JVMs for tests and set Java options for those fork := true, // Setting SPARK_DIST_CLASSPATH is a simple way to make sure any child processes // launched by the tests have access to the correct test-time classpath. 
envVars in Test ++= Map( "SPARK_DIST_CLASSPATH" -> (fullClasspath in Test).value.files.map(_.getAbsolutePath) .mkString(File.pathSeparator).stripSuffix(File.pathSeparator), "SPARK_PREPEND_CLASSES" -> "1", "SPARK_SCALA_VERSION" -> scalaBinaryVersion, "SPARK_TESTING" -> "1", "JAVA_HOME" -> sys.env.get("JAVA_HOME").getOrElse(sys.props("java.home"))), javaOptions in Test += s"-Djava.io.tmpdir=$testTempDir", javaOptions in Test += "-Dspark.test.home=" + sparkHome, javaOptions in Test += "-Dspark.testing=1", javaOptions in Test += "-Dspark.port.maxRetries=100", javaOptions in Test += "-Dspark.master.rest.enabled=false", javaOptions in Test += "-Dspark.memory.debugFill=true", javaOptions in Test += "-Dspark.ui.enabled=false", javaOptions in Test += "-Dspark.ui.showConsoleProgress=false", javaOptions in Test += "-Dspark.unsafe.exceptionOnMemoryLeak=true", javaOptions in Test += "-Dsun.io.serialization.extendedDebugInfo=false", javaOptions in Test += "-Dderby.system.durability=test", javaOptions in Test ++= System.getProperties.asScala.filter(_._1.startsWith("spark")) .map { case (k,v) => s"-D$k=$v" }.toSeq, javaOptions in Test += "-ea", javaOptions in Test ++= "-Xmx3g -Xss4096k" .split(" ").toSeq, javaOptions += "-Xmx3g", // Exclude tags defined in a system property testOptions in Test += Tests.Argument(TestFrameworks.ScalaTest, sys.props.get("test.exclude.tags").map { tags => tags.split(",").flatMap { tag => Seq("-l", tag) }.toSeq }.getOrElse(Nil): _*), testOptions in Test += Tests.Argument(TestFrameworks.JUnit, sys.props.get("test.exclude.tags").map { tags => Seq("--exclude-categories=" + tags) }.getOrElse(Nil): _*), // Show full stack trace and duration in test cases. testOptions in Test += Tests.Argument("-oDF"), testOptions in Test += Tests.Argument(TestFrameworks.JUnit, "-v", "-a"), // Enable Junit testing. libraryDependencies += "com.novocode" % "junit-interface" % "0.11" % "test", // Only allow one test at a time, even across projects, since they run in the same JVM parallelExecution in Test := false, // Make sure the test temp directory exists. resourceGenerators in Test += Def.macroValueI(resourceManaged in Test map { outDir: File => var dir = new File(testTempDir) if (!dir.isDirectory()) { // Because File.mkdirs() can fail if multiple callers are trying to create the same // parent directory, this code tries to create parents one at a time, and avoids // failures when the directories have been created by somebody else. val stack = new Stack[File]() while (!dir.isDirectory()) { stack.push(dir) dir = dir.getParentFile() } while (stack.nonEmpty) { val d = stack.pop() require(d.mkdir() || d.isDirectory(), s"Failed to create directory $d") } } Seq[File]() }).value, concurrentRestrictions in Global += Tags.limit(Tags.Test, 1), // Remove certain packages from Scaladoc scalacOptions in (Compile, doc) := Seq( "-groups", "-skip-packages", Seq( "org.apache.spark.api.python", "org.apache.spark.network", "org.apache.spark.deploy", "org.apache.spark.util.collection" ).mkString(":"), "-doc-title", "Spark " + version.value.replaceAll("-SNAPSHOT", "") + " ScalaDoc" ) ) }
publicRoman/spark
project/SparkBuild.scala
Scala
apache-2.0
33,420
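The TestSettings block in the SparkBuild.scala record above derives ScalaTest exclusion flags from the test.exclude.tags system property. A minimal standalone sketch of that mapping follows; the object and helper names are hypothetical and only illustrate the flag construction, they are not part of SparkBuild.scala.

// Illustration only: mirrors how -Dtest.exclude.tags=tag1,tag2 becomes ScalaTest "-l" arguments.
object ExcludeTagsSketch {
  def buildScalaTestArgs(excludeTags: Option[String]): Seq[String] =
    excludeTags
      .map(_.split(",").flatMap(tag => Seq("-l", tag)).toSeq)
      .getOrElse(Nil)

  def main(args: Array[String]): Unit = {
    // e.g. sbt -Dtest.exclude.tags=org.apache.spark.tags.ExtendedYarnTest test
    println(buildScalaTestArgs(sys.props.get("test.exclude.tags")))
  }
}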
package cortex.app

import java.io.File

import scala.language.dynamics

/**
 * Created by jasonflax on 2/14/16.
 */
class Config(file: Option[File]) extends Dynamic {

  if (file.isEmpty) {
    throw new Exception("Config file has not been set.")
  }

  /**
   * Declare partial function type to be added
   * to our map (allows for all generic methods).
   */
  type GenFn = PartialFunction[Seq[Any], Array[Byte]]

  /** Dynamic method storage */
  // create a dynamic method for each key value pair so that the
  // consumer can have easy access, e.g. config.username()
  protected lazy val fields: Map[String, String] = {
    scala.io.Source.fromFile(file.get).getLines().map { unsplitKvp =>
      val splitKvp = unsplitKvp.split("=")
      updateDynamic(splitKvp(0))(splitKvp(1))
    }.toMap.withDefault { key =>
      throw new NoSuchFieldError(key)
    }
  }

  def selectDynamic(key: String) = fields(key)

  def updateDynamic(key: String)(value: String) = key -> value

  def applyDynamic(key: String)(args: (Any, Any)*): String = fields(key)

  def applyDynamicNamed(name: String)(args: (String, Any)*) = fields(name)
}
jsflax/cortex
src/main/scala/cortex/app/Config.scala
Scala
mit
1,125
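A minimal usage sketch for the dynamic Config class above, assuming a properties-style file of key=value lines; the file name and keys are illustrative only.

import java.io.File

object ConfigUsageSketch {
  def main(args: Array[String]): Unit = {
    // app.conf is a hypothetical file containing lines such as:
    //   username=admin
    //   port=8080
    val config = new cortex.app.Config(Some(new File("app.conf")))

    // selectDynamic turns member access into a map lookup, so config.username
    // reads the "username" entry (and throws NoSuchFieldError if it is absent).
    val user: String = config.username
    val port: String = config.port
    println(s"$user @ $port")
  }
}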
package at.forsyte.apalache.tla.bmcmt import at.forsyte.apalache.tla.bmcmt.types.{AnnotationParser, FinSetT, IntT, SeqT} import at.forsyte.apalache.tla.lir.{TlaEx, ValEx} import at.forsyte.apalache.tla.lir.convenience.tla import at.forsyte.apalache.tla.lir.values.{TlaIntSet, TlaNatSet} import org.junit.runner.RunWith import org.scalatest.junit.JUnitRunner @RunWith(classOf[JUnitRunner]) class TestSymbStateDecoder extends RewriterBase { test("decode bool") { val originalEx = tla.bool(true) val state = new SymbState(originalEx, arena, Binding()) val rewriter = create() val nextState = rewriter.rewriteUntilDone(state) assert(solverContext.sat()) val cell = nextState.asCell val decoder = new SymbStateDecoder(solverContext, rewriter) val decodedEx = decoder.decodeCellToTlaEx(nextState.arena, cell) assert(originalEx == decodedEx) // hard core comparison assertTlaExAndRestore(rewriter, nextState.setRex(tla.eql(decodedEx, originalEx))) } test("decode int") { val originalEx = tla.int(3) val state = new SymbState(originalEx, arena, Binding()) val rewriter = create() val nextState = rewriter.rewriteUntilDone(state) assert(solverContext.sat()) val cell = nextState.asCell val decoder = new SymbStateDecoder(solverContext, rewriter) val decodedEx = decoder.decodeCellToTlaEx(nextState.arena, cell) assert(originalEx == decodedEx) // hard core comparison assertTlaExAndRestore(rewriter, nextState.setRex(tla.eql(decodedEx, originalEx))) } test("decode str") { val originalEx = tla.str("hello") val state = new SymbState(originalEx, arena, Binding()) val rewriter = create() val nextState = rewriter.rewriteUntilDone(state) assert(solverContext.sat()) val cell = nextState.asCell val decoder = new SymbStateDecoder(solverContext, rewriter) val decodedEx = decoder.decodeCellToTlaEx(nextState.arena, cell) assert(originalEx == decodedEx) // hard core comparison assertTlaExAndRestore(rewriter, nextState.setRex(tla.eql(decodedEx, originalEx))) } test("decode Int set") { val originalEx = ValEx(TlaIntSet) val state = new SymbState(originalEx, arena, Binding()) val rewriter = create() val nextState = rewriter.rewriteUntilDone(state) assert(solverContext.sat()) val cell = nextState.asCell val decoder = new SymbStateDecoder(solverContext, rewriter) val decodedEx = decoder.decodeCellToTlaEx(nextState.arena, cell) assert(originalEx == decodedEx) } test("decode Nat set") { val originalEx = ValEx(TlaNatSet) val state = new SymbState(originalEx, arena, Binding()) val rewriter = create() val nextState = rewriter.rewriteUntilDone(state) assert(solverContext.sat()) val cell = nextState.asCell val decoder = new SymbStateDecoder(solverContext, rewriter) val decodedEx = decoder.decodeCellToTlaEx(nextState.arena, cell) assert(originalEx == decodedEx) } test("decode set") { val originalEx = tla.enumSet(tla.int(2), tla.int(1), tla.int(2)) val simpleOriginalEx = tla.enumSet(tla.int(1), tla.int(2)) val state = new SymbState(originalEx, arena, Binding()) val rewriter = create() val nextState = rewriter.rewriteUntilDone(state) assert(solverContext.sat()) val cell = nextState.asCell val decoder = new SymbStateDecoder(solverContext, rewriter) val decodedEx = decoder.decodeCellToTlaEx(nextState.arena, cell) assert(simpleOriginalEx == decodedEx) // hard core comparison assertTlaExAndRestore(rewriter, nextState.setRex(tla.eql(decodedEx, simpleOriginalEx))) } test("decode fun set") { val domEx = tla.enumSet(tla.int(1), tla.int(2)) val cdmEx = tla.enumSet(tla.int(3), tla.int(4)) val originalEx = tla.funSet(domEx, cdmEx) val state = new 
SymbState(originalEx, arena, Binding()) val rewriter = create() val nextState = rewriter.rewriteUntilDone(state) assert(solverContext.sat()) val cell = nextState.asCell val decoder = new SymbStateDecoder(solverContext, rewriter) val decodedEx = decoder.decodeCellToTlaEx(nextState.arena, cell) assert(originalEx == decodedEx) // hard core comparison assertTlaExAndRestore(rewriter, nextState.setRex(tla.eql(decodedEx, originalEx))) } test("decode SUBSET S") { val set = tla.enumSet(tla.int(1), tla.int(2)) val powset = tla.powSet(set) val state = new SymbState(powset, arena, Binding()) val rewriter = create() val nextState = rewriter.rewriteUntilDone(state) assert(solverContext.sat()) val cell = nextState.asCell val decoder = new SymbStateDecoder(solverContext, rewriter) val decodedEx = decoder.decodeCellToTlaEx(nextState.arena, cell) assert(powset == decodedEx) } test("decode fun") { val domEx = tla.enumSet(tla.int(1), tla.int(2)) val funEx = tla.funDef(tla.plus(tla.name("x"), tla.int(1)), tla.name("x"), domEx) val state = new SymbState(funEx, arena, Binding()) val rewriter = create() val nextState = rewriter.rewriteUntilDone(state) assert(solverContext.sat()) val cell = nextState.asCell val decoder = new SymbStateDecoder(solverContext, rewriter) val decodedEx = decoder.decodeCellToTlaEx(nextState.arena, cell) val expectedOutcome = tla.atat(tla.int(1), tla.int(2), tla.int(2), tla.int(3)) assert(expectedOutcome == decodedEx) // we cannot directly compare the outcome, as it comes in the same form as a record // assertTlaExAndRestore(rewriter, nextState.setRex(tla.eql(decodedEx, funEx))) } test("decode statically empty fun") { val domEx = tla.withType(tla.enumSet(), AnnotationParser.toTla(FinSetT(IntT()))) val funEx = tla.funDef(tla.plus(tla.name("x"), tla.int(1)), tla.name("x"), domEx) val state = new SymbState(funEx, arena, Binding()) val rewriter = create() val nextState = rewriter.rewriteUntilDone(state) assert(solverContext.sat()) val cell = nextState.asCell val decoder = new SymbStateDecoder(solverContext, rewriter) val decodedEx = decoder.decodeCellToTlaEx(nextState.arena, cell) // this is the standard outcome for an empty-domain function: {x \\in {} |-> {}} val expectedOutcome = tla.atat() assert(expectedOutcome == decodedEx) // we cannot directly compare the outcome, as it comes in the same form as a record // assertTlaExAndRestore(rewriter, nextState.setRex(tla.eql(decodedEx, funEx))) } test("decode dynamically empty fun") { // this domain is not empty at the arena level, but it is in every SMT model def dynEmpty(left: TlaEx): TlaEx = { tla.filter(tla.name("t"), left, tla.bool(false)) } val domEx = dynEmpty(tla.enumSet(tla.int(1))) val funEx = tla.funDef(tla.plus(tla.name("x"), tla.int(1)), tla.name("x"), domEx) val state = new SymbState(funEx, arena, Binding()) val rewriter = create() val nextState = rewriter.rewriteUntilDone(state) assert(solverContext.sat()) val cell = nextState.asCell val decoder = new SymbStateDecoder(solverContext, rewriter) val decodedEx = decoder.decodeCellToTlaEx(nextState.arena, cell) // this is the standard outcome for an empty-domain function: {x \\in {} |-> {}} val expectedOutcome = tla.atat() assert(expectedOutcome == decodedEx) // we cannot directly compare the outcome, as it comes in the same form as a record // assertTlaExAndRestore(rewriter, nextState.setRex(tla.eql(decodedEx, funEx))) } test("decode sequence") { val seqEx = tla.withType(tla.tuple(tla.int(1), tla.int(2), tla.int(3), tla.int(4)), AnnotationParser.toTla(SeqT(IntT()))) val subseqEx = 
tla.subseq(seqEx, tla.int(2), tla.int(3)) val state = new SymbState(subseqEx, arena, Binding()) val rewriter = create() val nextState = rewriter.rewriteUntilDone(state) assert(solverContext.sat()) val cell = nextState.asCell val decoder = new SymbStateDecoder(solverContext, rewriter) val decodedEx = decoder.decodeCellToTlaEx(nextState.arena, cell) assert(tla.tuple(tla.int(2), tla.int(3)) == decodedEx) } test("decode tuple") { val tupleEx = tla.tuple(tla.int(1), tla.int(2), tla.int(3)) val state = new SymbState(tupleEx, arena, Binding()) val rewriter = create() val nextState = rewriter.rewriteUntilDone(state) assert(solverContext.sat()) val cell = nextState.asCell val decoder = new SymbStateDecoder(solverContext, rewriter) val decodedEx = decoder.decodeCellToTlaEx(nextState.arena, cell) assert(tupleEx == decodedEx) } test("decode record") { val recEx = tla.enumFun(tla.str("a"), tla.int(1), tla.str("b"), tla.bool(true)) val state = new SymbState(recEx, arena, Binding()) val rewriter = create() val nextState = rewriter.rewriteUntilDone(state) assert(solverContext.sat()) val cell = nextState.asCell val decoder = new SymbStateDecoder(solverContext, rewriter) val decodedEx = decoder.decodeCellToTlaEx(nextState.arena, cell) assert(recEx == decodedEx) } }
konnov/apalache
tla-bmcmt/src/test/scala/at/forsyte/apalache/tla/bmcmt/TestSymbStateDecoder.scala
Scala
apache-2.0
9,105
/* * product-collections is distributed under the 2-Clause BSD license. See the * LICENSE file in the root of the repository. * * Copyright (c) 2013 - 2014 Mark Lister */ import utest._ import com.github.marklister.collections.immutable._ object CollSeqSuite extends TestSuite{ val testData = CollSeq(("Jan", 10, 22.44, 11.7), ("Feb", 33, 55.77, 23.4), ("Mar", 23, 56.77, 23.4)) val tests = TestSuite{ 'Test1 { 'Size3 { assert(testData.size == 3) } 'Col1 { assert(testData._1 == Seq("Jan", "Feb", "Mar")) } 'Col3 { assert(testData._3 == Seq(22.44, 55.77, 56.77)) } 'Map2 { assert(testData.map(_._2) == Seq(10, 33, 23)) } 'Sum { assert(testData.map(_._2).sum == 66) } 'SumCol { assert(testData._2.sum == 66) } } 'InstanceOf { val res = testData match { case _: CollSeq4[String, Int, Double, Double] => true // case _:Any => false //this line should be unreachable and the compiler should know that } assert(res) } 'MapToCollSeq { val res = testData.map(i => (i._1, i._2, i._3, i._4)) match { case _: CollSeq4[String, Int, Double, Double] => true // case _:Any => false //this line should be unreachable and the compiler should know that } assert(res) } 'ComplexTypeMap { val res = testData.map(i => (i._1 + "Stuff", i._2 + 1, i._3 - 1, i._4 * 2)) match { case _: CollSeq4[String, Int, Double, Double] => true // case _:Any => false //this line should be unreachable and the compiler should know that } assert(res) } 'Intermediate { val res = testData.map(i => (i._1, i._2, i._3.toFloat, i._4.toFloat)) match { case _: CollSeq4[String, Int, Float, Float] => true // case _:Any => false //this line should be unreachable and the compiler should know that } assert(res) } 'TypeChange { val res = testData.map(i => (i._4, i._3, i._2, i._1)) match { case _: CollSeq4[Double, Double, Int, String] => true // case _:Any => false //this line should be unreachable and the compiler should know that } assert(res) } 'FlatZip { val res = testData.flatZip(testData._1) match { case _: CollSeq5[String, Int, Double, Double, String] => true // case _:Any => false //this line should be unreachable and the compiler should know that } assert(res) } 'SimpleTypeConvertsToProduct { assert(CollSeq(1,2,3) == CollSeq1(Seq(Tuple1(1),Tuple1(2),Tuple1(3)))) } 'ProductWrappedInProduct1 { case class T(i:Int=1) val s= Seq(T,T,T) assert(CollSeq(T,T,T) == CollSeq1(Seq(Tuple1(T),Tuple1(T),Tuple1(T)))) } 'MixedProductNsDontCCompile{ compileError("CollSeq((1,2,3),(1,2))") } 'CanAddHeaders{ assert (CollSeq((1,2)).withHeaders("a","b").headers==Seq("a","b")) } 'CanRetrieveMap{ assert (CollSeq((1,2)).withHeaders("a","b").collMap("b")==Seq(2)) } /*This test is for a future version 'FlatZipOfCaseClass { case class T(i:Int=1) assert((CollSeq(T,T,T) flatZip CollSeq(T,T,T))== CollSeq2(Seq((T,T),(T,T),(T,T)))) }*/ } }
marklister/product-collections
shared/src/test/scala/CollSeqSpec.scala
Scala
bsd-2-clause
3,333
/*
 * Copyright 2017 Guy Van den Broeck <guyvdb@cs.ucla.edu>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.ucla.cs.starai.util

import java.util.concurrent.Callable

import scala.language.implicitConversions

object Conversions {

  implicit def runnable(f: () => Unit): Runnable =
    new Runnable() { def run() = f() }

  implicit def callable[T](f: () => T): Callable[T] =
    new Callable[T]() { def call() = f() }

}
UCLA-StarAI/ScalaDD
src/main/scala/edu/ucla/cs/starai/util/Conversions.scala
Scala
apache-2.0
945
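A short sketch of how the implicit views above let plain Scala functions stand in for Runnable and Callable, for example with a Java ExecutorService; the object name is hypothetical and the snippet assumes Scala 2.11, where no SAM conversion would otherwise apply.

import java.util.concurrent.{Callable, Executors}

import edu.ucla.cs.starai.util.Conversions._

object ConversionsUsageSketch {
  def main(args: Array[String]): Unit = {
    val pool = Executors.newSingleThreadExecutor()

    // callable[T] lifts () => Int into java.util.concurrent.Callable[Int]
    val task: Callable[Int] = () => 21 * 2
    println(pool.submit(task).get()) // prints 42

    // runnable lifts () => Unit into java.lang.Runnable
    val side: Runnable = () => println("side effect")
    pool.execute(side)

    pool.shutdown()
  }
}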
package controllers

import play.api.mvc.Controller
import play.api.libs.json.Json
import play.api.mvc.Action

class Access extends Controller {

  def index = Action { implicit request =>
    Ok(Json.obj(
      "links" -> Json.obj(
        "organisations" -> Json.obj(
          "description" -> "for returning a list of organisations based on organisational files",
          "url" -> routes.Organisations.index.url
        ),
        "activities" -> Json.obj(
          "description" -> "for returning a list of activities",
          "url" -> routes.Activities.index.url
        ),
        "transactions" -> Json.obj(
          "description" -> "for returning a list of transactions",
          "url" -> routes.Transactions.index.url
        )
      )
    ))
  }
}
DFID/aid-platform-beta
src/platform/app/controllers/Access.scala
Scala
mit
770
// Copyright 2017 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package com.google.devtools.build.bfg.scala

class ScalaSourceFileParser() {
  def returnOne(): Int = 1
}
bazelbuild/BUILD_file_generator
lang/scala/src/main/scala/com/google/devtools/build/bfg/scala/ScalaSourceFileParser.scala
Scala
apache-2.0
722
/*
 * Copyright 2001-2008 Artima, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.scalatest;

class AssertionsSpec extends Spec {

  describe("The === method") {
    it("should be usable when the left expression results in null") {
      val npe = new NullPointerException
      assert(npe.getMessage === null)
    }
  }
}
kevinwright/scalatest
src/test/scala/org/scalatest/AssertionsSpec.scala
Scala
apache-2.0
854
/*
 Copyright 2012 Anton Kraievoy akraievoy@gmail.com
 This file is part of Holonet.

 Holonet is free software: you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation, either version 3 of the License, or
 (at your option) any later version.

 Holonet is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty MERCHANTABILITY
 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 for more details.

 You should have received a copy of the GNU General Public License
 along with Holonet. If not, see <http://www.gnu.org/licenses/>.
 */
package org.akraievoy.holonet.exp

import data.{DhtSim, OverlayGA, OverlayEnum, DlaGenImages}

trait RegistryData {
  lazy val experiments: Seq[Experiment] = Seq(
    DlaGenImages.experiment,
    OverlayEnum.experiment,
    OverlayGA.experiment1physDataset,
    OverlayGA.experiment2overlayDataset,
    OverlayGA.experiment3genetics,
    DhtSim.experiment1seeds,
    DhtSim.experiment2paramSpace,
    DhtSim.experiment3attack,
    DhtSim.experiment3attackChained,
    DhtSim.experiment3attackDestab,
    DhtSim.experiment3attackDestabChained,
    DhtSim.experiment3destab,
    DhtSim.experiment3destabChained,
    DhtSim.experiment3static,
    DhtSim.experiment3staticChained
  )
}
akraievoy/holonet
src/main/scala/org/akraievoy/holonet/exp/RegistryData.scala
Scala
gpl-3.0
1,376
/* * Copyright (c) 2011-2017 Interfaculty Department of Geoinformatics, University of * Salzburg (Z_GIS) & Institute of Geological and Nuclear Sciences Limited (GNS Science) * in the SMART Aquifer Characterisation (SAC) programme funded by the New Zealand * Ministry of Business, Innovation and Employment (MBIE) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package models.users import java.sql.Connection import java.time.ZonedDateTime import anorm.SqlParser._ import anorm._ import uk.gov.hmrc.emailaddress.EmailAddress import utils.ClassnameLogger /** * UserDAO requires implicit connections from calling entity */ object UserDAO extends ClassnameLogger { /** * Parse a User from a ResultSet * */ val userParser = { get[String]("email") ~ get[String]("accountsubject") ~ get[String]("firstname") ~ get[String]("lastname") ~ get[String]("password") ~ get[String]("laststatustoken") ~ get[ZonedDateTime]("laststatuschange") map { case email ~ accountsubject ~ firstname ~ lastname ~ password ~ laststatustoken ~ laststatuschange => User(EmailAddress(email), accountsubject, firstname, lastname, password, laststatustoken, laststatuschange) } } /** * Create a User with a NamedParameterSet (supposedly more typesafe?) * * new java.time. API only partially implemented in Anorm Type mapping * * @param user */ def createUser(user: User)(implicit connection: Connection): Option[User] = { val nps = Seq[NamedParameter](// Tuples as NamedParameter "email" -> user.email.value, "accountsubject" -> user.accountSubject, "firstname" -> user.firstname, "lastname" -> user.lastname, "laststatustoken" -> user.laststatustoken, "password" -> user.password, "laststatuschange" -> user.laststatuschange) val rowCount = SQL( s""" insert into $table_users values ( {email}, {accountsubject}, {firstname}, {lastname}, {password}, {laststatustoken}, {laststatuschange} ) """).on(nps: _*).executeUpdate() rowCount match { case 1 => Some(user) case _ => None } } /** * Update single parts of user without touch password * * @param user * @return */ def updateNoPass(user: User)(implicit connection: Connection): Option[User] = { val rowCount = SQL( s""" update $table_users set accountsubject = {accountsubject}, firstname = {firstname}, lastname = {lastname}, laststatustoken = {laststatustoken}, laststatuschange = {laststatuschange} where email = {email} """).on( 'accountsubject -> user.accountSubject, 'firstname -> user.firstname, 'lastname -> user.lastname, 'laststatustoken -> user.laststatustoken, 'laststatuschange -> user.laststatuschange, 'email -> user.email.value ).executeUpdate() rowCount match { case 1 => Some(user) case _ => None } } /** * Update password parts of user without other parts * * @param user * @return */ def updatePassword(user: User)(implicit connection: Connection): Option[User] = { val rowCount = SQL( s""" update $table_users set password = {password}, laststatustoken = {laststatustoken}, laststatuschange = {laststatuschange} where email = {email} """).on( 'password -> user.password, 'laststatustoken -> user.laststatustoken, 
'laststatuschange -> user.laststatuschange, 'email -> user.email.value ).executeUpdate() rowCount match { case 1 => Some(user) case _ => None } } /** * delete a User * * @param email * @return */ def deleteUser(email: String)(implicit connection: Connection): Boolean = { val u = findUserByEmailAsString(email) if (u.isDefined) { deleteUser(u.get) } else { logger.error(s"user with email: $email wasn't found") false } } /** * delete a User * * @param user * @return */ def deleteUser(user: User)(implicit connection: Connection): Boolean = { val rowCount = SQL(s"delete from $table_users where accountsubject = {accountsubject}").on( 'accountsubject -> user.accountSubject ).executeUpdate() rowCount match { case 1 => true case _ => false } } /** * find User By Email * * @param emailString * @return */ def findUserByEmailAsString(emailString: String)(implicit connection: Connection): Option[User] = { if (EmailAddress.isValid(emailString)) { findUserByEmailAddress(EmailAddress(emailString)) } else { logger.error("not a valid email address") None } } /** * find User By Email * * @param emailAddress * @return */ def findUserByEmailAddress(emailAddress: EmailAddress)(implicit connection: Connection): Option[User] = { SQL(s"select * from $table_users where email = {email}").on( 'email -> emailAddress.value ).as(userParser.singleOpt) } /** * Retrieve a User via accountSubject * * @param accountSubject */ def findByAccountSubject(accountSubject: String)(implicit connection: Connection): Option[User] = { SQL(s"select * from $table_users where accountsubject = {accountsubject}").on( 'accountsubject -> accountSubject ).as(userParser.singleOpt) } /** * Retrieve all */ def getAllUsers(implicit connection: Connection): Seq[User] = { SQL(s"select * from $table_users").as(userParser *) } // more utility functions /** * find Users By their status token * * @param token * @return */ def findUsersByToken(token: StatusToken, statusInfo: String)(implicit connection: Connection): Seq[User] = { SQL(s"""select * from $table_users where laststatustoken like '$token$statusInfo'""").as(userParser *) } /** * find Users By their status token "REGISTERED" and their unique registration confirmation link id * * @param regLink * @return */ def findRegisteredUsersWithRegLink(regLink: String)(implicit connection: Connection): Seq[User] = { findUsersByToken(StatusToken.REGISTERED, s":$regLink") } /** * find Users By their status token "EMAILVALIDATION" and their unique registration confirmation link id * * @param regLink * @return */ def findEmailValidationRequiredUsersWithRegLink(regLink: String)(implicit connection: Connection): Seq[User] = { findUsersByToken(StatusToken.EMAILVALIDATION, s":$regLink") } /** * find Users By their status token "PASSWORDRESET" and their uniqu reset link id * * @param resetLink * @return */ def findUsersByPassResetLink(resetLink: String)(implicit connection: Connection): Seq[User] = { findUsersByToken(StatusToken.PASSWORDRESET, s":$resetLink") } /** * find Users By their status token that are only registered but have not yet activated their accounts * * @return */ def findRegisteredOnlyUsers(implicit connection: Connection): Seq[User] = { findUsersByToken(StatusToken.REGISTERED, "%") } /** * find active Users By their status token * * @return */ def findActiveUsers(implicit connection: Connection): Seq[User] = { StatusToken.activatedTokens.flatMap(t => findUsersByToken(StatusToken(t), "%")) } }
ZGIS/smart-portal-backend
app/models/users/UserDAO.scala
Scala
apache-2.0
8,045
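UserDAO's methods all expect an implicit java.sql.Connection; in a Play application a call site would typically borrow one from the injected Database, roughly as sketched below. The wrapping service class is an assumption for illustration and is not part of the file above.

import javax.inject.Inject

import models.users.UserDAO
import play.api.db.Database

// Hypothetical service showing how the DAO's implicit Connection is supplied.
class UserService @Inject()(db: Database) {

  def emailExists(email: String): Boolean =
    db.withConnection { implicit connection =>
      UserDAO.findUserByEmailAsString(email).isDefined
    }
}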
package sprawler.actors import akka.testkit.{ TestProbe, ImplicitSender, TestKit } import akka.actor.{ ActorRef, PoisonPill, ActorSystem, Props } import akka.routing.{ SmallestMailboxRouter, DefaultResizer, Broadcast } import sprawler.DummyTestServer import sprawler.crawler.actor.{ LinkScraper, LinkQueueMaster, LinkScraperWorker } import sprawler.crawler.actor.WorkPullingPattern._ import sprawler.crawler.url.{ CrawlerUrl, AbsoluteUrl } import sprawler.SpecHelper import org.scalatest.{ WordSpecLike, ShouldMatchers, BeforeAndAfter, BeforeAndAfterAll } import scala.collection.mutable import scala.concurrent.{ Future, Await, ExecutionContext } import scala.concurrent.duration._ import scala.util.{ Failure, Success, Try } import scala.concurrent.ExecutionContext.Implicits.global import spray.http.{ HttpResponse, Uri } import spray.can.Http import sprawler.actors.LinkActorsSpec.UncrawlableLinkScraper class LinkActorsSpec(_system: ActorSystem) extends TestKit(_system) with ImplicitSender with WordSpecLike with BeforeAndAfter with BeforeAndAfterAll with ShouldMatchers { override def beforeAll() { DummyTestServer.startTestServer() } override def afterAll() { DummyTestServer.shutdownTestServer(system) } def this() = this(ActorSystem("CrawlerSystem")) "LinkQueueMaster" should { val crawlerUrl = AbsoluteUrl( uri = Uri(SpecHelper.testDomain+"/redirectOnce") ) "schedule initial link to be crawled to workers" in { LinkActorsSpec.setupMaster(self, crawlerUrl) { master => expectMsg(WorkAvailable) } } "send work to worker on GimmeWork msg" in { LinkActorsSpec.setupMaster(self, crawlerUrl) { master => expectMsg(WorkAvailable) master ! GimmeWork expectMsg(Work(crawlerUrl)) } } "should enqueue urls for more crawling" in { LinkActorsSpec.setupMaster(self, crawlerUrl) { master => expectMsg(WorkAvailable) master ! GimmeWork expectMsg(Work(crawlerUrl)) // Note: we're sending the same URL twice because URL duplication // isn't checked in the master actor, it's done in the worker. // This is making sure URLS that are sent to the master outside // of constructor / initialization get queued properly. master ! Work(crawlerUrl) expectMsg(WorkAvailable) master ! GimmeWork expectMsg(Work(crawlerUrl)) } } "should shutdown self and workers when no work is left" in { LinkActorsSpec.setupMaster(self, crawlerUrl) { master => expectMsg(WorkAvailable) master ! GimmeWork expectMsg(Work(crawlerUrl)) master ! WorkItemDone expectMsg(Broadcast(PoisonPill)) } } } "LinkScraperWorker" should { // Make these available in scope for the tests. // Before block initializes them to empty buffers. var urlsCrawled: mutable.ArrayBuffer[String] = null var urlsFailed: mutable.ArrayBuffer[String] = null "handle single redirect" in { val crawlerUrl = AbsoluteUrl( uri = Uri(SpecHelper.testDomain+"/redirectOnce") ) LinkActorsSpec.setupWorker(propArgs = Seq(self, crawlerUrl)) { workerRouter => expectMsg(GimmeWork) workerRouter ! Work(crawlerUrl) expectMsg(Work(crawlerUrl.nextUrl(SpecHelper.testDomain+"/relativeUrl", redirects = Some(4)))) expectMsg(WorkItemDone) expectMsg(GimmeWork) } } "not schedule already crawled urls" in { val crawlerUrl = AbsoluteUrl( uri = Uri(SpecHelper.testDomain+"/redirectOnce") ) LinkActorsSpec.setupWorker(propArgs = Seq(self, crawlerUrl)) { workerRouter => expectMsg(GimmeWork) workerRouter ! Work(crawlerUrl) expectMsg(Work(crawlerUrl.nextUrl(SpecHelper.testDomain+"/relativeUrl", redirects = Some(4)))) expectMsg(WorkItemDone) expectMsg(GimmeWork) workerRouter ! 
Work(crawlerUrl) expectMsg(WorkItemDone) expectMsg(GimmeWork) } } "handle infinite redirect (redirect limit reached)" in { val redirectCrawlerUrl = AbsoluteUrl( uri = Uri(SpecHelper.testDomain+"/redirectForever/1234"), redirectsLeft = Some(0) ) var uncrawlableLinks = mutable.ArrayBuffer[CrawlerUrl]() LinkActorsSpec.setupWorker( workerClass = Some(classOf[UncrawlableLinkScraper]), propArgs = Seq(self, redirectCrawlerUrl, uncrawlableLinks) ) { workerRouter => expectMsg(GimmeWork) workerRouter ! Work(redirectCrawlerUrl) // expectMsg checks messages in the order they are received, dequeueing // them one at a time each time expectMsg is called. // This will throw an exception if Work(...) is received instead of // WorkItemDone. expectMsg(WorkItemDone) expectMsg(GimmeWork) uncrawlableLinks shouldBe mutable.ArrayBuffer[CrawlerUrl](redirectCrawlerUrl) } } "send links to be crawled to master" in { val crawlerUrl = AbsoluteUrl( uri = Uri(SpecHelper.testDomain+"/"), depth = 10 ) LinkActorsSpec.setupWorker(propArgs = Seq(self, crawlerUrl)) { workerRouter => expectMsg(GimmeWork) workerRouter ! Work(crawlerUrl) expectMsg(Work(crawlerUrl.nextUrl(SpecHelper.testDomain+"/relativeUrl"))) expectMsg(Work(crawlerUrl.nextUrl(SpecHelper.testDomain+"/relativeUrlMissingSlash"))) expectMsg(Work(crawlerUrl.nextUrl(SpecHelper.testDomain+"/"))) expectMsg(Work(crawlerUrl.nextUrl(SpecHelper.testDomain+"/relativeUrl"))) expectMsg(Work(crawlerUrl.nextUrl(SpecHelper.testDomain+"/redirectForever"))) expectMsg(Work(crawlerUrl.nextUrl(SpecHelper.testDomain+"/redirectOnce"))) expectMsg(Work(crawlerUrl.nextUrl(SpecHelper.testDomain+"/fullUri"))) expectMsg(Work(crawlerUrl.nextUrl(SpecHelper.testDomain+"/nestedOnce"))) expectMsg(WorkItemDone) expectMsg(GimmeWork) } } } } object LinkActorsSpec { class UncrawlableLinkScraper( masterRef: ActorRef, url: CrawlerUrl, uncrawlableLinks: mutable.ArrayBuffer[CrawlerUrl] ) extends LinkScraperWorker(masterRef, url) { override def onUrlNotCrawlable(url: CrawlerUrl, error: Throwable) = Future { uncrawlableLinks += url } } val resizer = DefaultResizer(lowerBound = 1, upperBound = 10) val router = SmallestMailboxRouter(nrOfInstances = 1, resizer = Some(resizer)) def setupWorker[T, U <: LinkScraperWorker]( workerClass: Option[Class[U]] = None, propArgs: Seq[Any] )(f: ActorRef => T)(implicit system: ActorSystem): T = { val clazz = workerClass.getOrElse { classOf[LinkScraperWorker] } val props = Props(clazz, propArgs: _*) val workerRouter = system.actorOf(props.withRouter(router)) f(workerRouter) } def setupMaster[T](workerRef: ActorRef, url: CrawlerUrl)(f: ActorRef => T)(implicit system: ActorSystem): T = { val masterProps = Props(classOf[LinkQueueMaster], List(url)) val master = system.actorOf(masterProps) master ! RegisterWorkerRouter(workerRef) f(master) } }
daniel-trinh/sprawler
src/test/scala/sprawler/actors/LinkActorsSpec.scala
Scala
mit
7,284
import akka.actor.{Props, ActorSystem}
import akka.testkit.{TestKit, ImplicitSender}
import com.example.actors.{WorkStatusMessages, WorkCommandMessages, WorkDriver}
import org.scalatest.{WordSpecLike, Matchers, BeforeAndAfterAll, FlatSpec}
import org.scalatest.matchers.ShouldMatchers

import scala.concurrent.duration._

class WorkDriverTest(_system: ActorSystem) extends TestKit(_system) with ImplicitSender
  with WordSpecLike with Matchers with BeforeAndAfterAll {

  def this() = this(ActorSystem("WorkDriverTest"))

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  val worker = system.actorOf(Props[WorkDriver], "worker")

  import _system.dispatcher

  "An WorkDriver actor" must {

    "complete its simulated work in roughly the specified time" in {
      worker ! WorkCommandMessages.WorkSpec(1000)
      system.scheduler.scheduleOnce(500 millis, worker, WorkCommandMessages.ReportProgress())
      worker ! WorkCommandMessages.ReportProgress()
      val t0 = System.currentTimeMillis()
      fishForMessage(1400 milliseconds, "Complete work in the specified time") {
        case WorkStatusMessages.WorkDone(elapsed) => {
          info(s"completed work in a reported $elapsed milliseconds")
          val t1 = System.currentTimeMillis()
          info(s"measured as ${t1-t0} milliseconds")
          true
        }
        case WorkStatusMessages.WorkProgress(progress) => {
          val t1 = System.currentTimeMillis()
          info(s"status update : $progress % at a measured ${t1-t0} milliseconds")
          false
        }
        case _ => false
      }
    }

    "reset and work again when complete" in {
      val t0 = System.currentTimeMillis()
      worker ! WorkCommandMessages.WorkSpec(500)
      system.scheduler.scheduleOnce(100 millis, worker, WorkCommandMessages.ReportProgress())
      system.scheduler.scheduleOnce(200 millis, worker, WorkCommandMessages.ReportProgress())
      system.scheduler.scheduleOnce(300 millis, worker, WorkCommandMessages.ReportProgress())
      system.scheduler.scheduleOnce(400 millis, worker, WorkCommandMessages.ReportProgress())
      fishForMessage(1000 milliseconds, "Complete work in the specified time") {
        case WorkStatusMessages.WorkDone(elapsed) => {
          info(s"completed work in a reported $elapsed milliseconds")
          val t1 = System.currentTimeMillis()
          info(s"measured as ${t1-t0} milliseconds")
          true
        }
        case WorkStatusMessages.WorkProgress(progress) => {
          val t1 = System.currentTimeMillis()
          info(s"status update : $progress % at a measured ${t1-t0} milliseconds")
          false
        }
        case _ => false
      }
    }
  }
}
johanprinsloo/akka-message-pattern-explore
src/test/scala/WorkDriverTest.scala
Scala
mit
2,717
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.nn import com.intel.analytics.bigdl.nn.abstractnn.TensorModule import com.intel.analytics.bigdl.tensor.Tensor import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric import scala.reflect.ClassTag /** * Each feature map of a given input is padded with specified number of zeros. * If padding values are negative, then input is cropped. * @param padLeft pad left position * @param padRight pad right position * @param padTop pad top position * @param padBottom pad bottom position */ @SerialVersionUID(- 5144173515559923276L) class SpatialZeroPadding[T: ClassTag]( padLeft: Int, padRight: Int, padTop: Int, padBottom: Int)( implicit ev: TensorNumeric[T]) extends TensorModule[T] { def this(padLeft: Int)(implicit ev: TensorNumeric[T]) = this(padLeft, padLeft, padLeft, padLeft) override def updateOutput(input: Tensor[T]): Tensor[T] = { if (input.dim() == 3) { // sizes val h = input.size(2) + this.padTop + this.padBottom val w = input.size(3) + this.padLeft + this.padRight if (w < 1 || h < 1) { throw new IllegalArgumentException("input is too small") } this.output.resize(Array(input.size(1), h, w)) this.output.zero() // crop input if necessary var cInput = input if (this.padTop < 0) cInput = cInput.narrow(2, 1 - this.padTop, cInput.size(2) + this.padTop) if (this.padBottom < 0) cInput = cInput.narrow(2, 1, cInput.size(2) + this.padBottom) if (this.padLeft < 0) cInput = cInput.narrow(3, 1 - this.padLeft, cInput.size(3) + this.padLeft) if (this.padRight < 0) cInput = cInput.narrow(3, 1, cInput.size(3) + this.padRight) // crop output if necessary var cOutput = output if (this.padTop > 0) cOutput = cOutput.narrow(2, 1 + this.padTop, cOutput.size(2) - this.padTop) if (this.padBottom > 0) cOutput = cOutput.narrow(2, 1, cOutput.size(2) - this.padBottom) if (this.padLeft > 0) cOutput = cOutput.narrow(3, 1 + this.padLeft, cOutput.size(3) - this.padLeft) if (this.padRight > 0) cOutput = cOutput.narrow(3, 1, cOutput.size(3) - this.padRight) cOutput.copy(cInput) } else if (input.dim() == 4) { // sizes val h = input.size(3) + this.padTop + this.padBottom val w = input.size(4) + this.padLeft + this.padRight if (w < 1 || h < 1) { throw new IllegalArgumentException("input is too small") } this.output.resize(Array(input.size(1), input.size(2), h, w)) this.output.zero() // crop input if necessary var cInput = input if (this.padTop < 0) cInput = cInput.narrow(3, 1 - this.padTop, cInput.size(3) + this.padTop) if (this.padBottom < 0) cInput = cInput.narrow(3, 1, cInput.size(3) + this.padBottom) if (this.padLeft < 0) cInput = cInput.narrow(4, 1 - this.padLeft, cInput.size(4) + this.padLeft) if (this.padRight < 0) cInput = cInput.narrow(4, 1, cInput.size(4) + this.padRight) // crop output if necessary var cOutput = output if (this.padTop > 0) cOutput = cOutput.narrow(3, 1 + this.padTop, cOutput.size(3) - this.padTop) if (this.padBottom > 0) cOutput = cOutput.narrow(3, 1, cOutput.size(3) - this.padBottom) 
if (this.padLeft > 0) cOutput = cOutput.narrow(4, 1 + this.padLeft, cOutput.size(4) - this.padLeft) if (this.padRight > 0) cOutput = cOutput.narrow(4, 1, cOutput.size(4) - this.padRight) cOutput.copy(cInput) } else { throw new IllegalArgumentException("input must be 3 or 4-dimensional") } this.output } override def updateGradInput(input: Tensor[T], gradOutput: Tensor[T]): Tensor[T] = { if (input.dim() == 3) { this.gradInput.resizeAs(input).zero() // crop gradInput if necessary var cgInput = gradInput if (this.padTop < 0) cgInput = cgInput.narrow(2, 1 - this.padTop, cgInput.size(2) + this.padTop) if (this.padBottom < 0) cgInput = cgInput.narrow(2, 1, cgInput.size(2) + this.padBottom) if (this.padLeft < 0) cgInput = cgInput.narrow(3, 1 - this.padLeft, cgInput.size(3) + this.padLeft) if (this.padRight < 0) cgInput = cgInput.narrow(3, 1, cgInput.size(3) + this.padRight) // crop output if necessary var cgOutput = gradOutput if (this.padTop > 0) cgOutput = cgOutput.narrow(2, 1 + this.padTop, cgOutput.size(2) - this.padTop) if (this.padBottom > 0) cgOutput = cgOutput.narrow(2, 1, cgOutput.size(2) - this.padBottom) if (this.padLeft > 0) cgOutput = cgOutput.narrow(3, 1 + this.padLeft, cgOutput.size(3) - this.padLeft) if (this.padRight > 0) cgOutput = cgOutput.narrow(3, 1, cgOutput.size(3) - this.padRight) cgInput.copy(cgOutput) } else if (input.dim() == 4) { this.gradInput.resizeAs(input).zero() // crop gradInput if necessary var cgInput = gradInput if (this.padTop < 0) cgInput = cgInput.narrow(3, 1 - this.padTop, cgInput.size(3) + this.padTop) if (this.padBottom < 0) cgInput = cgInput.narrow(3, 1, cgInput.size(3) + this.padBottom) if (this.padLeft < 0) cgInput = cgInput.narrow(4, 1 - this.padLeft, cgInput.size(4) + this.padLeft) if (this.padRight < 0) cgInput = cgInput.narrow(4, 1, cgInput.size(4) + this.padRight) // crop output if necessary var cgOutput = gradOutput if (this.padTop > 0) cgOutput = cgOutput.narrow(3, 1 + this.padTop, cgOutput.size(3) - this.padTop) if (this.padBottom > 0) cgOutput = cgOutput.narrow(3, 1, cgOutput.size(3) - this.padBottom) if (this.padLeft > 0) cgOutput = cgOutput.narrow(4, 1 + this.padLeft, cgOutput.size(4) - this.padLeft) if (this.padRight > 0) cgOutput = cgOutput.narrow(4, 1, cgOutput.size(4) - this.padRight) cgInput.copy(cgOutput) } else { throw new IllegalArgumentException("input must be 3 or 4-dimensional") } this.gradInput } override def toString(): String = { s"${getPrintName}(l=$padLeft, r=$padRight, t=$padTop, b=$padBottom)" } } object SpatialZeroPadding { def apply[@specialized(Float, Double) T: ClassTag]( padLeft: Int, padRight: Int, padTop: Int, padBottom: Int)(implicit ev: TensorNumeric[T]) : SpatialZeroPadding[T] = { new SpatialZeroPadding[T](padLeft, padRight, padTop, padBottom) } }
yiheng/BigDL
spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/SpatialZeroPadding.scala
Scala
apache-2.0
7,153
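A small forward/backward sketch for the padding layer above, assuming the usual BigDL Tensor API and the NumericFloat implicit; shapes follow the 3D (channel, height, width) branch of updateOutput, and the object name is illustrative only.

import com.intel.analytics.bigdl.nn.SpatialZeroPadding
import com.intel.analytics.bigdl.tensor.Tensor
// supplies the implicit TensorNumeric[Float] required by the layer constructor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric.NumericFloat

object SpatialZeroPaddingSketch {
  def main(args: Array[String]): Unit = {
    // pad one zero on each side of every 4x4 feature map
    val layer = SpatialZeroPadding[Float](1, 1, 1, 1)

    val input = Tensor[Float](3, 4, 4).rand()   // (channels, height, width)
    val output = layer.forward(input)           // padded to (3, 6, 6)
    println(output.size().mkString("x"))        // 3x6x6

    // the gradient w.r.t. the input crops the padded border back off
    val gradInput = layer.backward(input, Tensor[Float](3, 6, 6).rand())
    println(gradInput.size().mkString("x"))     // 3x4x4
  }
}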
package debop4s.data.orm.jpa.stateless import java.sql.Connection import javax.persistence.EntityManagerFactory import debop4s.core.Logging import org.aopalliance.intercept.MethodInvocation import org.hibernate.engine.transaction.spi.TransactionContext import org.hibernate.internal.SessionImpl import org.hibernate.jpa.HibernateEntityManagerFactory import org.hibernate.{Session, SessionFactory, StatelessSession} import org.springframework.aop.framework.ProxyFactory import org.springframework.beans.factory.FactoryBean import org.springframework.beans.factory.annotation.Autowired import org.springframework.orm.jpa.EntityManagerFactoryUtils import org.springframework.transaction.support.{TransactionSynchronizationAdapter, TransactionSynchronizationManager} import org.springframework.util.ReflectionUtils /** * Hibernate 의 `StatelessSession`을 JPA에서 사용할 수 있도록, `StatelessSession` 을 생성해주는 Factory Bean입니다. * * 참고 : https://gist.github.com/jelies/5181262 * * @author sunghyouk.bae@gmail.com 2014. 9. 7. */ class StatelessSessionFactoryBean @Autowired()(val emf: HibernateEntityManagerFactory) extends FactoryBean[StatelessSession] with Logging { var _sf: SessionFactory = emf.getSessionFactory def sessionFactory = _sf /** * `EntityManagerFactory`로부터 얻은 `SessionFactory`를 override 합니다. * NOTE: 단 connection 은 `EntityManager` 로 부터 얻는다. * * @param sf SessionFactory instance */ def sessionFactory_=(sf: SessionFactory) { _sf = sf } override def getObject: StatelessSession = { val statelessInterceptor = new StatelessSessionInterceptor(emf, _sf) ProxyFactory.getProxy(classOf[StatelessSession], statelessInterceptor) } override def getObjectType: Class[_] = classOf[StatelessSession] override def isSingleton: Boolean = true /** * Stateless Session 의 Proxy 에 대한 Interceptor 입니다. */ private class StatelessSessionInterceptor(val emf: EntityManagerFactory, val sf: SessionFactory) extends org.aopalliance.intercept.MethodInterceptor { override def invoke(invocation: MethodInvocation) = { val stateless = getCurrentStateless ReflectionUtils.invokeMethod(invocation.getMethod, stateless, invocation.getArguments: _*) } private def getCurrentStateless: StatelessSession = { if (!TransactionSynchronizationManager.isActualTransactionActive) { throw new IllegalStateException("현 스레드에 활성화된 트랜잭션이 없습니다.") } var stateless = TransactionSynchronizationManager.getResource(sf).asInstanceOf[StatelessSession] if (stateless == null) { trace("현 스레드에 새로운 Stateless Session을 생성합니다.") stateless = newStatelessSession bindWithTransaction(stateless) } stateless } private def newStatelessSession: StatelessSession = { val conn = obtainPhysicalConnection sf.openStatelessSession(conn) } /** * 실제 Connection 을 얻는 게 중요하다. 이렇게 안하면 Proxy 를 이중으로 수행하여, 실제 Connection이 닫히지 않을 수 있다. */ def obtainPhysicalConnection: Connection = { debug("(Proxy가 아닌) Real Connection을 얻습니다...") val em = EntityManagerFactoryUtils.getTransactionalEntityManager(emf) val session = em.unwrap(classOf[Session]).asInstanceOf[SessionImpl] session .getTransactionCoordinator .getJdbcCoordinator .getLogicalConnection .getConnection } def bindWithTransaction(stateless: StatelessSession): Unit = { trace("bind with transaction.") TransactionSynchronizationManager .registerSynchronization(new StatelessSessionSynchronization(sf, stateless)) TransactionSynchronizationManager.bindResource(sf, stateless) } } /** * Stateless Session을 Transaction 에 동기화합니다. 
*/ class StatelessSessionSynchronization(val sf: SessionFactory, val stateless: StatelessSession) extends TransactionSynchronizationAdapter { override def getOrder: Int = EntityManagerFactoryUtils.ENTITY_MANAGER_SYNCHRONIZATION_ORDER - 100 override def beforeCommit(readOnly: Boolean): Unit = { if (!readOnly) { stateless.asInstanceOf[TransactionContext].managedFlush() } } override def beforeCompletion(): Unit = { TransactionSynchronizationManager.unbindResource(sf) stateless.close() } } }
debop/debop4s
debop4s-data-orm/src/main/scala/debop4s/data/orm/jpa/stateless/StatelessSessionFactoryBean.scala
Scala
apache-2.0
4,615
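A hedged wiring sketch for the factory bean above in Spring Java-config style; the configuration class and bean method are assumptions for illustration, not taken from the repository. Because the factory implements FactoryBean[StatelessSession], Spring should expose a transaction-bound StatelessSession proxy wherever one is injected.

import org.hibernate.jpa.HibernateEntityManagerFactory
import org.springframework.context.annotation.{Bean, Configuration}

import debop4s.data.orm.jpa.stateless.StatelessSessionFactoryBean

// Hypothetical Spring configuration class.
@Configuration
class StatelessSessionConfig {

  @Bean
  def statelessSessionFactoryBean(emf: HibernateEntityManagerFactory): StatelessSessionFactoryBean =
    new StatelessSessionFactoryBean(emf)
}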
package org.jetbrains.plugins.scala.codeInspection.functionExpressions import com.intellij.codeInspection.LocalInspectionTool import org.jetbrains.plugins.scala.codeInspection.ScalaLightInspectionFixtureTestAdapter class UnnecessaryPartialFunctionInspectionTest extends ScalaLightInspectionFixtureTestAdapter { override protected val classOfInspection: Class[_ <: LocalInspectionTool] = classOf[UnnecessaryPartialFunctionInspection] override protected val annotation: String = UnnecessaryPartialFunctionInspection.inspectionName val hint = UnnecessaryPartialFunctionQuickFix.hint def testInspectionCapturesSimpleExpression(): Unit = { val text = s"val f: Int => String = {${START}case$END x => x.toString}" val fixed = "val f: Int => String = (x => x.toString)" checkTextHasError(text) testFix(text, fixed, hint) } def testInspectionCapturesSimpleExpressionWithBlankLines(): Unit = { val text = s"""val f: Int => String = { | ${START}case$END x => x.toString |}""".stripMargin val fixed = "val f: Int => String = (x => x.toString)" checkTextHasError(text) testFix(text, fixed, hint) } def testInspectionCapturesMultiLineExpression(): Unit = { val text = s"""val f: Int => String = { | ${START}case$END x => | val value = x.toString | s"value of x is $$value" |}""".stripMargin val fixed = s"""val f: Int => String = { | x => | val value = x.toString | s"value of x is $$value" |}""".stripMargin checkTextHasError(text) testFix(text, fixed, hint) } def testInspectionCapturesSingleCaseWithMultiLineBlock(): Unit = { val text = s"""val f: Int => String = { | ${START}case$END x => { | val value = x.toString | s"value of x is $$value" | } |}""".stripMargin val fixed = s"""val f: Int => String = { | x => { | val value = x.toString | s"value of x is $$value" | } |}""".stripMargin checkTextHasError(text) testFix(text, fixed, hint) } def testInspectionDoesNotCaptureSimpleExpressionIfItsTypeIsPartialFunction(): Unit = { val text = s"val f: PartialFunction[Int, String] = {case x => x.toString}" checkTextHasNoErrors(text) } def testInspectionDoesNotCaptureSimpleExpressionIfItsTypeIsPartialFunctionAlias(): Unit = { val text = s"""type Baz = PartialFunction[Int, String] |val f: Baz = {case x => x.toString}""".stripMargin checkTextHasNoErrors(text) } def testInspectionCapturesSimpleExpressionWithTypeConstraint(): Unit = { val text = s"def f: Int => String = {${START}case$END x: Int => x.toString}" val fixed = "def f: Int => String = { x: Int => x.toString }" checkTextHasError(text) testFix(text, fixed, hint) } def testInspectionDoesNotCaptureCaseWithTypeConstraintMoreRestrictiveThanExpectedInputType(): Unit = { val text = s"def f: Any => String = {case x: Int => x.toString}" checkTextHasNoErrors(text) } def testInspectionCapturesCaseWithTypeConstraintLessRestrictiveThanExpectedInputType(): Unit = { val text = s"def f: Int => String = {${START}case$END x: Any => x.toString}" val fixed = "def f: Int => String = { x: Any => x.toString }" checkTextHasError(text) testFix(text, fixed, hint) } def testInspectionCapturesCaseWithTypeConstraintWithTypeParameters(): Unit = { val text = s"def f[T]: Option[T] => String = {${START}case$END x: Option[_] => x.toString}" val fixed = "def f[T]: Option[T] => String = { x: Option[_] => x.toString }" checkTextHasError(text) testFix(text, fixed, hint) } def testInspectionCapturesSimpleExpressionWithWildcardCase(): Unit = { val text = s"""var f: Int => String = {${START}case$END _ => "foo"}""" val fixed = """var f: Int => String = (_ => "foo")""" checkTextHasError(text) testFix(text, fixed, hint) } 
def testInspectionDoesNotCaptureConstantMatchingCase(): Unit = { val text = s"""def f: Int => String = {case 1 => "one"}""" checkTextHasNoErrors(text) } def testInspectionDoesNotCaptureCaseWithGuard(): Unit = { val text = s"""def f: Int => String = {case x if x % 2 == 0 => "one"}""" checkTextHasNoErrors(text) } def testInspectionDoesNotCapturePatternMatchingCase(): Unit = { val text = s"def f: Option[Int] => String = {case Some(x) => x.toString}" checkTextHasNoErrors(text) } def testInspectionDoesNotCaptureMultiCaseFunction(): Unit = { val text = """def f: Int => String = { | case 1 => "one" | case _ => "tilt" |}""".stripMargin checkTextHasNoErrors(text) } def testInspectionDoesNotCaptureCaseInMatchStatement(): Unit = { val text = """def f: Int => String = (x: Int) => x match { | case a => "one" |}""".stripMargin checkTextHasNoErrors(text) } def testInspectionCapturesMethodArgument(): Unit = { val text = s"""def foo(bar: Int => String) = bar(42) |foo{${START}case$END x => x.toString}""".stripMargin val fixed = """def foo(bar: Int => String) = bar(42) |foo(x => x.toString)""".stripMargin checkTextHasError(text) testFix(text, fixed, hint) } def testInspectionCapturesMethodArgumentWithTypeConstraint(): Unit = { val text = s"""def foo(bar: Int => String) = bar(42) |foo{${START}case$END x: Any => x.toString}""".stripMargin val fixed = """def foo(bar: Int => String) = bar(42) |foo { x: Any => x.toString }""".stripMargin checkTextHasError(text) testFix(text, fixed, hint) } def testInspectionCapturesArgumentInMethodWithMultipleAruments(): Unit = { val text = s"""def foo(input: Int, bar: Int => String, prefix: String) = prefix + bar(input) |foo(42, {${START}case$END x => x.toString}, "value: ")""".stripMargin val fixed = """def foo(input: Int, bar: Int => String, prefix: String) = prefix + bar(input) |foo(42, (x => x.toString), "value: ")""".stripMargin checkTextHasError(text) testFix(text, fixed, hint) } def testInspectionCapturesArgumentWithTypeConstraintInMethodWithMultipleAruments(): Unit = { val text = s"""def foo(input: Int, bar: Int => String, prefix: String) = prefix + bar(input) |foo(42, {${START}case$END x: Any => x.toString}, "value: ")""".stripMargin val fixed = """def foo(input: Int, bar: Int => String, prefix: String) = prefix + bar(input) |foo(42, { x: Any => x.toString }, "value: ")""".stripMargin checkTextHasError(text) testFix(text, fixed, hint) } def testInspectionDoesNotCaptureMethodArgumentIfItsTypeIsPartialFunction(): Unit = { val text = s"""def foo(bar: PartialFunction[Int, String]) = bar(42) |foo{case x => x.toString}""".stripMargin checkTextHasNoErrors(text) } def testInspectionDoesNotCaptureMethodArgumentIfItsTypeIsPartialFunctionAlias(): Unit = { val text = s"""type Baz = PartialFunction[Int, String] |def foo(bar: Baz) = bar(42) |foo{case x => x.toString}""".stripMargin checkTextHasNoErrors(text) } }
whorbowicz/intellij-scala
test/org/jetbrains/plugins/scala/codeInspection/functionExpressions/UnnecessaryPartialFunctionInspectionTest.scala
Scala
apache-2.0
7,308
package com.weez.mercury.common import com.typesafe.config.Config /** * 管理客户端(peer)和会话(session)。 * == Overview == * peer: 客户端,通过peerId(String)识别一个客户端。 * HttpServer使用cookie做peerId,所以一个浏览器窗口是一个客户端。 * AkkaServer使用host:port做peerId,所以一个socket地址是一个客户端。 * session: 会话,用于跟踪一系列连续的业务操作。 * Web浏览器端采用单页面技术,通常一个页面只使用一个会话。 * * @param app see [[com.weez.mercury.common.ServiceManager]] * @param config 从config中读取选项 */ class SessionManager(app: ServiceManager, config: Config) { import java.util.concurrent.TimeUnit import scala.concurrent.duration._ private val timeout = config.getDuration("session-timeout", TimeUnit.NANOSECONDS) private val peers = new TTLMap[String, Peer](timeout) private val sessions = new TTLMap[String, Session](timeout) private val sidGen = new Util.SecureIdGenerator(12) private val ttlHandle = app.addTTLCleanEvent(_ => clean()) def clean(): Unit = { sessions.synchronized { val arr = sessions.clean() arr foreach (_.dispose()) peers.unlockAll(arr map (_.peer)) peers.clean() } } def ensurePeer(peerHint: Option[String] = None) = { sessions.synchronized { peerHint flatMap peers.values.get match { case Some(x) => x.activeTimestamp = System.nanoTime() x.id case None => val peer = new Peer(peerHint.getOrElse(sidGen.newId)) peers.values.put(peer.id, peer) if (app.devmode) { val session = new Session(peer.id, peer.id) session.login(0L, "dev", "dev") peer.sessions = session :: peer.sessions } peer.id } } } def createSession(peerId: String) = { sessions.synchronized { val peer = peers.values.getOrElse(peerId, ErrorCode.InvalidPeerID.raise) val sid = sidGen.newId val session = new Session(sid, peerId) sessions.values.put(sid, session) peer.sessions = session :: peer.sessions session } } @inline final def getAndLockSession(sid: String) = sessions.lock(sid) @inline final def returnAndUnlockSession(session: Session) = sessions.unlock(session) @inline final def unlockSession(sid: String) = sessions.unlock(sid) def getSessionsByPeer(peerId: String): List[Session] = { sessions.synchronized { peers.values.get(peerId) match { case Some(x) => x.sessions case None => Nil } } } def close() = { sessions.synchronized { ttlHandle.close() sessions.values.clear() peers.values.clear() } } class Peer(val id: String) extends TTLBased[String] { var sessions: List[Session] = Nil } } class Session(val id: String, val peer: String) extends TTLBased[String] { import scala.collection.mutable private var _loginState: Option[LoginState] = None private val map = mutable.Map[String, Any]() private val hooks = mutable.Set[Session => Unit]() @volatile private var disposed = false private lazy val tempUploads = { val set = mutable.Set[FileIO.Path]() addCleanUpHook { session => import scala.util.control.NonFatal this.synchronized { set foreach { p => try p.deleteIfExists() catch { case NonFatal(_) => } } } } set } @inline private def check() = require(!disposed) def loginState = { check() _loginState } def login(userId: Long, username: String, name: String): Unit = { check() _loginState = Some(LoginState(userId, username, name)) } def logout(): Unit = { check() _loginState = None } def status = { check() map } def tempUploadFiles = { check() tempUploads } def dispose() = { if (!disposed) { hooks.synchronized { hooks foreach (_(this)) hooks.clear() } map.synchronized { map.clear() } disposed = true } } def addCleanUpHook(f: Session => Unit) = { check() hooks.add(f) } def removeCleanUpHook(f: Session => Unit) = { check() hooks.remove(f) } } case class LoginState(userId: Long, username: String, name: String)
weeztech/weez-mercury
main/src/main/scala/com/weez/mercury/common/SessionManager.scala
Scala
apache-2.0
4,376
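SessionManager above leans on the project-internal TTLMap/TTLBased types to expire idle peers and sessions. As an illustration of that expiry pattern only, here is a minimal self-contained sketch; TtlRegistry, TtlRegistryDemo and everything inside them are hypothetical names, not part of weez-mercury, and the timeout handling is deliberately simplified.

import java.util.UUID
import scala.collection.mutable

// Hypothetical stand-in for a TTL-keyed map: entries expire once they have
// been idle longer than `timeoutNanos`.
class TtlRegistry[V](timeoutNanos: Long) {
  private case class Entry(value: V, var lastActive: Long)
  private val entries = mutable.Map[String, Entry]()

  def put(id: String, value: V): Unit = synchronized {
    entries(id) = Entry(value, System.nanoTime())
  }

  // Refresh the activity timestamp and return the value, if present.
  def touch(id: String): Option[V] = synchronized {
    entries.get(id).map { e => e.lastActive = System.nanoTime(); e.value }
  }

  // Drop entries idle longer than the timeout and return the expired values.
  def clean(): Seq[V] = synchronized {
    val now = System.nanoTime()
    val expired = entries.filter { case (_, e) => now - e.lastActive > timeoutNanos }
    entries --= expired.keys
    expired.values.map(_.value).toSeq
  }
}

object TtlRegistryDemo extends App {
  val peers = new TtlRegistry[String](timeoutNanos = 1000000L) // 1 ms
  val peerId = UUID.randomUUID().toString
  peers.put(peerId, "browser-window-1")
  println(peers.touch(peerId)) // Some(browser-window-1)
  Thread.sleep(5)              // let the entry go idle past the timeout
  println(peers.clean())       // the expired value(s)
}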
package com.twitter.finatra.http.response

import com.twitter.finagle.httpx.{Response, Status, Version}

object SimpleResponse {

  def apply(status: Status, body: String = ""): Response = {
    val response = Response(Version.Http11, status)
    response.setContentString(body)
    response
  }
}
deanh/finatra
http/src/main/scala/com/twitter/finatra/http/response/SimpleResponse.scala
Scala
apache-2.0
297
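A hedged usage sketch of the helper above, assuming the matching finatra and finagle-httpx artifacts are on the classpath; SimpleResponseDemo is a hypothetical name and the printed values are only indicative.

import com.twitter.finagle.httpx.Status
import com.twitter.finatra.http.response.SimpleResponse

object SimpleResponseDemo extends App {
  // Build a 404 response with a plain-text body; the helper sets the content string.
  val notFound = SimpleResponse(Status.NotFound, "no such resource")
  println(notFound.status)        // the 404 status we passed in
  println(notFound.contentString) // "no such resource"
}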
/* * Copyright (C) Lightbend Inc. <https://www.lightbend.com> */ package play.filters.csp import com.typesafe.config.ConfigFactory import javax.inject.Inject import play.api.inject.guice.GuiceApplicationBuilder import play.api.libs.json.Json import play.api.mvc.AbstractController import play.api.mvc.ControllerComponents import play.api.test.FakeRequest import play.api.test.PlaySpecification import play.api.Application import play.api.Configuration import scala.reflect.ClassTag class ScalaCSPReportSpec extends PlaySpecification { sequential def toConfiguration(rawConfig: String) = { val typesafeConfig = ConfigFactory.parseString(rawConfig) Configuration(typesafeConfig) } private def inject[T: ClassTag](implicit app: Application) = app.injector.instanceOf[T] private def myAction(implicit app: Application) = inject[ScalaCSPReportSpec.MyAction] def withApplication[T]()(block: Application => T): T = { val app = new GuiceApplicationBuilder() .appRoutes(implicit app => { case _ => myAction.cspReport }) .build() running(app)(block(app)) } "Scala CSP report" should { "work with a chrome style csp-report" in withApplication() { implicit app => val chromeJson = Json.parse( """{ | "csp-report": { | "document-uri": "http://45.55.25.245:8123/csp?os=OS%20X&device=&browser_version=43.0&browser=chrome&os_version=Lion", | "referrer": "", | "violated-directive": "child-src https://45.55.25.245:8123/", | "effective-directive": "frame-src", | "original-policy": "default-src https://45.55.25.245:8123/; child-src https://45.55.25.245:8123/; connect-src https://45.55.25.245:8123/; font-src https://45.55.25.245:8123/; img-src https://45.55.25.245:8123/; media-src https://45.55.25.245:8123/; object-src https://45.55.25.245:8123/; script-src https://45.55.25.245:8123/; style-src https://45.55.25.245:8123/; form-action https://45.55.25.245:8123/; frame-ancestors 'none'; plugin-types 'none'; report-uri http://45.55.25.245:8123/csp-report?os=OS%20X&device=&browser_version=43.0&browser=chrome&os_version=Lion", | "blocked-uri": "http://google.com", | "status-code": 200 | } |} """.stripMargin ) val request = FakeRequest("POST", "/report-to").withJsonBody(chromeJson) val Some(result) = route(app, request) contentAsJson(result) must be_==(Json.obj("violation" -> "child-src https://45.55.25.245:8123/")) } "work with a firefox style csp-report" in withApplication() { implicit app => val firefoxJson = Json.parse( """{ |"csp-report": { | "blocked-uri": "data:image/gif;base64,R0lGODlhEAAQAMQAAORHHOVSKudfOulrSOp3WOyDZu6QdvCchPGolfO0o/XBs/fNwfjZ0frl3/zy7////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAkAABAALAAAAAAQABAAAAVVICSOZGlCQAosJ6mu7fiyZeKqNKToQGDsM8hBADgUXoGAiqhSvp5QAnQKGIgUhwFUYLCVDFCrKUE1lBavAViFIDlTImbKC5Gm2hB0SlBCBMQiB0UjIQA7", | "document-uri": "http://45.55.25.245:8123/csp?os=OS%20X&device=&browser_version=37.0&browser=firefox&os_version=Yosemite", | "original-policy": "default-src https://45.55.25.245:8123/; connect-src https://45.55.25.245:8123/; font-src https://45.55.25.245:8123/; img-src https://45.55.25.245:8123/; media-src https://45.55.25.245:8123/; object-src https://45.55.25.245:8123/; script-src https://45.55.25.245:8123/; style-src https://45.55.25.245:8123/; form-action https://45.55.25.245:8123/; frame-ancestors 'none'; report-uri http://45.55.25.245:8123/csp-report?os=OS%20X&device=&browser_version=37.0&browser=firefox&os_version=Yosemite", | "referrer": "", | "violated-directive": "img-src https://45.55.25.245:8123/" | } |} """.stripMargin ) val request = FakeRequest("POST", 
"/report-to").withJsonBody(firefoxJson) val Some(result) = route(app, request) contentAsJson(result) must be_==(Json.obj("violation" -> "img-src https://45.55.25.245:8123/")) } "work with a webkit style csp-report" in withApplication() { implicit app => val webkitJson = Json.parse( """{ |"csp-report": { | "document-uri": "http://45.55.25.245:8123/csp?os=OS%20X&device=&browser_version=23.0&browser=chrome&os_version=Lion", | "violated-directive": "default-src https://45.55.25.245:8123/", | "original-policy": "default-src https://45.55.25.245:8123/; child-src https://45.55.25.245:8123/; connect-src https://45.55.25.245:8123/; font-src https://45.55.25.245:8123/; img-src https://45.55.25.245:8123/; media-src https://45.55.25.245:8123/; object-src https://45.55.25.245:8123/; script-src https://45.55.25.245:8123/; style-src https://45.55.25.245:8123/; form-action https://45.55.25.245:8123/; frame-ancestors 'none'; plugin-types 'none'; report-uri http://45.55.25.245:8123/csp-report?os=OS%20X&device=&browser_version=23.0&browser=chrome&os_version=Lion", | "blocked-uri": "http://google.com" | } |} """.stripMargin ) val request = FakeRequest("POST", "/report-to").withJsonBody(webkitJson) val Some(result) = route(app, request) contentAsJson(result) must be_==(Json.obj("violation" -> "default-src https://45.55.25.245:8123/")) } "work with a old webkit style csp-report" in withApplication() { implicit app => val request = FakeRequest("POST", "/report-to").withFormUrlEncodedBody( "document-url" -> "http://45.55.25.245:8123/csp?os=OS%2520X&device=&browser_version=3.6&browser=firefox&os_version=Yosemite", "violated-directive" -> "object-src https://45.55.25.245:8123/" ) val Some(result) = route(app, request) contentAsJson(result) must be_==(Json.obj("violation" -> "object-src https://45.55.25.245:8123/")) } } } object ScalaCSPReportSpec { class MyAction @Inject() (cspReportAction: CSPReportActionBuilder, cc: ControllerComponents) extends AbstractController(cc) { def cspReport = cspReportAction { implicit request => val json = Json.toJson(Map("violation" -> request.body.violatedDirective)) Ok(json) } } }
benmccann/playframework
web/play-filters-helpers/src/test/scala/play/filters/csp/ScalaCSPReportSpec.scala
Scala
apache-2.0
6,327
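The spec above checks that the controller echoes the report's violated-directive back as JSON. Below is a small self-contained sketch of that extraction using play-json, as a simplified stand-in for what CSPReportActionBuilder parses out of the request body; CspReportDemo and the sample report are hypothetical.

import play.api.libs.json.Json

object CspReportDemo extends App {
  // A trimmed-down chrome-style report containing only the field the controller echoes back.
  val report = Json.parse(
    """{ "csp-report": { "violated-directive": "img-src https://example.com/" } }""")

  val violated = (report \ "csp-report" \ "violated-directive").as[String]
  println(Json.obj("violation" -> violated)) // compact JSON with the echoed directive
}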
import edu.uta.diql._
import org.apache.spark._
import org.apache.spark.rdd._
import org.apache.spark.sql._

case class Cclass ( K: Long, A: Double )

object Test {
  def main ( args: Array[String] ) {
    val conf = new SparkConf()
      .setAppName("Test")
      .setMaster("local[2]")
    val sc = new SparkContext(conf)
    val spark = SparkSession
      .builder()
      .config(conf)
      .getOrCreate()
    import spark.implicits._

    explain(true)

    val V = sc.textFile(args(0)).zipWithIndex.map {
      case (line, i) =>
        val a = line.split(",")
        (i.toLong, Cclass(a(0).toLong, a(1).toDouble))
    }.toDS()
    V.createOrReplaceTempView("V")

    s(sc, """
      var C: vector[Double] = vector();
      for i = 0, 10 do {
          C[V[i].K] += V[i].A;
      };
      println(C);
      """)
  }
}
fegaras/DIQL
tests/sqlgen/spark/GroupBy.scala
Scala
apache-2.0
818
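For readers unfamiliar with DIQL, the query above sums A per key K over the first indices of V. A rough plain-Spark equivalent is sketched below; GroupByPlainSpark is a hypothetical name, and the loop bound of `for i = 0, 10` is assumed here to be inclusive.

import org.apache.spark.{SparkConf, SparkContext}

object GroupByPlainSpark {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("GroupByPlainSpark").setMaster("local[2]"))
    // (index, (K, A)) pairs, mirroring how the DIQL test builds V from a CSV file.
    val v = sc.textFile(args(0)).zipWithIndex.map { case (line, i) =>
      val a = line.split(",")
      (i, (a(0).toLong, a(1).toDouble))
    }
    val c = v.filter(_._1 <= 10)             // assumed inclusive bound of the DIQL loop
      .map { case (_, (k, a)) => (k, a) }
      .reduceByKey(_ + _)                    // C[K] += A
    c.collect().foreach(println)
    sc.stop()
  }
}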
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.streaming.scheduler.rate

import org.apache.spark.SparkConf
import org.apache.spark.streaming.Duration

/**
 * A component that estimates the rate at which an InputDStream should ingest
 * elements, based on updates at every batch completion.
 */
private[streaming] trait RateEstimator extends Serializable {

  /**
   * Computes the number of elements the stream attached to this `RateEstimator`
   * should ingest per second, given an update on the size and completion
   * times of the latest batch.
   *
   * @param time The timestamp of the current batch interval that just finished
   * @param elements The number of elements that were processed in this batch
   * @param processingDelay The time in ms that it took for the job to complete
   * @param schedulingDelay The time in ms that the job spent in the scheduling queue
   */
  def compute(
      time: Long,
      elements: Long,
      processingDelay: Long,
      schedulingDelay: Long): Option[Double]
}

object RateEstimator {

  /**
   * Return a new `RateEstimator` based on the value of
   * `spark.streaming.backpressure.rateEstimator`.
   *
   * The only known estimator right now is `pid`.
   *
   * @return An instance of RateEstimator
   * @throws IllegalArgumentException if the configured RateEstimator doesn't match any
   *         known estimator.
   */
  def create(conf: SparkConf, batchInterval: Duration): RateEstimator =
    conf.get("spark.streaming.backpressure.rateEstimator", "pid") match {
      case "pid" =>
        val proportional = conf.getDouble("spark.streaming.backpressure.pid.proportional", 1.0)
        val integral = conf.getDouble("spark.streaming.backpressure.pid.integral", 0.2)
        val derived = conf.getDouble("spark.streaming.backpressure.pid.derived", 0.0)
        val minRate = conf.getDouble("spark.streaming.backpressure.pid.minRate", 100)
        new PIDRateEstimator(batchInterval.milliseconds, proportional, integral, derived, minRate)

      case estimator =>
        throw new IllegalArgumentException(s"Unknown rate estimator: $estimator")
    }
}
pronix/spark
streaming/src/main/scala/org/apache/spark/streaming/scheduler/rate/RateEstimator.scala
Scala
apache-2.0
2,883
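To make the `pid` branch above more concrete, here is a hypothetical, heavily simplified PID-style estimator. It is not Spark's PIDRateEstimator, which additionally handles the first batch, a derivative term, and clamping with more care; SimplePidEstimator and its members are illustrative names only.

class SimplePidEstimator(
    batchIntervalMs: Long,
    proportional: Double,
    integral: Double,
    minRate: Double) {

  private var latestRate = -1.0

  def compute(elements: Long, processingDelayMs: Long, schedulingDelayMs: Long): Option[Double] = {
    if (elements <= 0 || processingDelayMs <= 0) return None
    // Measured throughput of the last batch, in elements per second.
    val processingRate = elements.toDouble / processingDelayMs * 1000
    if (latestRate < 0) {
      latestRate = processingRate
      return Some(latestRate)
    }
    val error = latestRate - processingRate
    // Backlog accumulated while the batch sat in the scheduling queue, expressed as a rate.
    val historicalError = schedulingDelayMs.toDouble * processingRate / batchIntervalMs
    val newRate = (latestRate - proportional * error - integral * historicalError).max(minRate)
    latestRate = newRate
    Some(newRate)
  }
}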
package ru.org.codingteam.horta.plugins.pet.commands

import akka.actor.ActorRef
import ru.org.codingteam.horta.localization.Localization
import ru.org.codingteam.horta.plugins.pet.{PtcUtils, PetData}
import ru.org.codingteam.horta.security.Credential

class CoinsCommand extends AbstractCommand {
  override def apply(pet: PetData, coins: ActorRef, credential: Credential,
                     args: Array[String]): (PetData, String) = {
    val username = credential.name
    val ptc = PtcUtils.queryPTC(coins, username)
    (pet, Localization.localize("You have %dPTC.")(credential).format(ptc))
  }
}
codingteam/horta-hell
src/main/scala/ru/org/codingteam/horta/plugins/pet/commands/CoinsCommand.scala
Scala
mit
586
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.expressions import java.text.ParseException import java.time.{DateTimeException, LocalDate, LocalDateTime, ZoneId, ZoneOffset} import java.time.format.DateTimeParseException import java.util.Locale import org.apache.commons.text.StringEscapeUtils import org.apache.spark.SparkDateTimeException import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.analysis.{ExpressionBuilder, FunctionRegistry} import org.apache.spark.sql.catalyst.expressions.codegen._ import org.apache.spark.sql.catalyst.expressions.codegen.Block._ import org.apache.spark.sql.catalyst.trees.TreePattern._ import org.apache.spark.sql.catalyst.util.{DateTimeUtils, LegacyDateFormats, TimestampFormatter} import org.apache.spark.sql.catalyst.util.DateTimeConstants._ import org.apache.spark.sql.catalyst.util.DateTimeUtils._ import org.apache.spark.sql.catalyst.util.LegacyDateFormats.SIMPLE_DATE_FORMAT import org.apache.spark.sql.errors.{QueryCompilationErrors, QueryExecutionErrors} import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types._ import org.apache.spark.sql.types.DayTimeIntervalType.DAY import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String} /** * Common base class for time zone aware expressions. */ trait TimeZoneAwareExpression extends Expression { /** The expression is only resolved when the time zone has been set. */ override lazy val resolved: Boolean = childrenResolved && checkInputDataTypes().isSuccess && timeZoneId.isDefined final override val nodePatterns: Seq[TreePattern] = Seq(TIME_ZONE_AWARE_EXPRESSION) ++ nodePatternsInternal // Subclasses can override this function to provide more TreePatterns. def nodePatternsInternal(): Seq[TreePattern] = Seq() /** the timezone ID to be used to evaluate value. */ def timeZoneId: Option[String] /** Returns a copy of this expression with the specified timeZoneId. */ def withTimeZone(timeZoneId: String): TimeZoneAwareExpression @transient lazy val zoneId: ZoneId = DateTimeUtils.getZoneId(timeZoneId.get) def zoneIdForType(dataType: DataType): ZoneId = dataType match { case _: TimestampNTZType => java.time.ZoneOffset.UTC case _ => zoneId } } trait TimestampFormatterHelper extends TimeZoneAwareExpression { protected def formatString: Expression protected def isParsing: Boolean // Whether the timestamp formatter is for TimestampNTZType. // If yes, the formatter is always `Iso8601TimestampFormatter`. 
protected def forTimestampNTZ: Boolean = false @transient final protected lazy val formatterOption: Option[TimestampFormatter] = if (formatString.foldable) { Option(formatString.eval()).map(fmt => getFormatter(fmt.toString)) } else None final protected def getFormatter(fmt: String): TimestampFormatter = { TimestampFormatter( format = fmt, zoneId = zoneId, legacyFormat = SIMPLE_DATE_FORMAT, isParsing = isParsing, forTimestampNTZ = forTimestampNTZ) } } @ExpressionDescription( usage = "_FUNC_() - Returns the current session local timezone.", examples = """ Examples: > SELECT _FUNC_(); Asia/Shanghai """, group = "datetime_funcs", since = "3.1.0") case class CurrentTimeZone() extends LeafExpression with Unevaluable { override def nullable: Boolean = false override def dataType: DataType = StringType override def prettyName: String = "current_timezone" final override val nodePatterns: Seq[TreePattern] = Seq(CURRENT_LIKE) } /** * Returns the current date at the start of query evaluation. * There is no code generation since this expression should get constant folded by the optimizer. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = """ _FUNC_() - Returns the current date at the start of query evaluation. All calls of current_date within the same query return the same value. _FUNC_ - Returns the current date at the start of query evaluation. """, examples = """ Examples: > SELECT _FUNC_(); 2020-04-25 > SELECT _FUNC_; 2020-04-25 """, note = """ The syntax without braces has been supported since 2.0.1. """, group = "datetime_funcs", since = "1.5.0") // scalastyle:on line.size.limit case class CurrentDate(timeZoneId: Option[String] = None) extends LeafExpression with TimeZoneAwareExpression with CodegenFallback { def this() = this(None) override def foldable: Boolean = true override def nullable: Boolean = false override def dataType: DataType = DateType final override def nodePatternsInternal(): Seq[TreePattern] = Seq(CURRENT_LIKE) override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) override def eval(input: InternalRow): Any = currentDate(zoneId) override def prettyName: String = "current_date" } abstract class CurrentTimestampLike() extends LeafExpression with CodegenFallback { override def foldable: Boolean = true override def nullable: Boolean = false override def dataType: DataType = TimestampType override def eval(input: InternalRow): Any = currentTimestamp() final override val nodePatterns: Seq[TreePattern] = Seq(CURRENT_LIKE) } /** * Returns the current timestamp at the start of query evaluation. * There is no code generation since this expression should get constant folded by the optimizer. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = """ _FUNC_() - Returns the current timestamp at the start of query evaluation. All calls of current_timestamp within the same query return the same value. _FUNC_ - Returns the current timestamp at the start of query evaluation. """, examples = """ Examples: > SELECT _FUNC_(); 2020-04-25 15:49:11.914 > SELECT _FUNC_; 2020-04-25 15:49:11.914 """, note = """ The syntax without braces has been supported since 2.0.1. 
""", group = "datetime_funcs", since = "1.5.0") // scalastyle:on line.size.limit case class CurrentTimestamp() extends CurrentTimestampLike { override def prettyName: String = "current_timestamp" } @ExpressionDescription( usage = "_FUNC_() - Returns the current timestamp at the start of query evaluation.", examples = """ Examples: > SELECT _FUNC_(); 2020-04-25 15:49:11.914 """, group = "datetime_funcs", since = "1.6.0") case class Now() extends CurrentTimestampLike { override def prettyName: String = "now" } /** * Returns the current timestamp without time zone at the start of query evaluation. * There is no code generation since this expression should get constant folded by the optimizer. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = """ _FUNC_() - Returns the current timestamp without time zone at the start of query evaluation. All calls of localtimestamp within the same query return the same value. _FUNC_ - Returns the current local date-time at the session time zone at the start of query evaluation. """, examples = """ Examples: > SELECT _FUNC_(); 2020-04-25 15:49:11.914 """, group = "datetime_funcs", since = "3.3.0") case class LocalTimestamp(timeZoneId: Option[String] = None) extends LeafExpression with TimeZoneAwareExpression with CodegenFallback { def this() = this(None) override def foldable: Boolean = true override def nullable: Boolean = false override def dataType: DataType = TimestampNTZType final override def nodePatternsInternal(): Seq[TreePattern] = Seq(CURRENT_LIKE) override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) override def eval(input: InternalRow): Any = localDateTimeToMicros(LocalDateTime.now(zoneId)) override def prettyName: String = "localtimestamp" } /** * Expression representing the current batch time, which is used by StreamExecution to * 1. prevent optimizer from pushing this expression below a stateful operator * 2. allow IncrementalExecution to substitute this expression with a Literal(timestamp) * * There is no code generation since this expression should be replaced with a literal. */ case class CurrentBatchTimestamp( timestampMs: Long, dataType: DataType, timeZoneId: Option[String] = None) extends LeafExpression with TimeZoneAwareExpression with Nondeterministic with CodegenFallback { def this(timestampMs: Long, dataType: DataType) = this(timestampMs, dataType, None) override def nullable: Boolean = false override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) final override def nodePatternsInternal(): Seq[TreePattern] = Seq(CURRENT_LIKE) override def prettyName: String = "current_batch_timestamp" override protected def initializeInternal(partitionIndex: Int): Unit = {} /** * Need to return literal value in order to support compile time expression evaluation * e.g., select(current_date()) */ override protected def evalInternal(input: InternalRow): Any = toLiteral.value def toLiteral: Literal = { val timestampUs = millisToMicros(timestampMs) dataType match { case _: TimestampType => Literal(timestampUs, TimestampType) case _: TimestampNTZType => Literal(convertTz(timestampUs, ZoneOffset.UTC, zoneId), TimestampNTZType) case _: DateType => Literal(microsToDays(timestampUs, zoneId), DateType) } } } /** * Adds a number of days to startdate. 
*/ @ExpressionDescription( usage = "_FUNC_(start_date, num_days) - Returns the date that is `num_days` after `start_date`.", examples = """ Examples: > SELECT _FUNC_('2016-07-30', 1); 2016-07-31 """, group = "datetime_funcs", since = "1.5.0") case class DateAdd(startDate: Expression, days: Expression) extends BinaryExpression with ExpectsInputTypes with NullIntolerant { override def left: Expression = startDate override def right: Expression = days override def inputTypes: Seq[AbstractDataType] = Seq(DateType, TypeCollection(IntegerType, ShortType, ByteType)) override def dataType: DataType = DateType override def nullSafeEval(start: Any, d: Any): Any = { start.asInstanceOf[Int] + d.asInstanceOf[Number].intValue() } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { nullSafeCodeGen(ctx, ev, (sd, d) => { s"""${ev.value} = $sd + $d;""" }) } override def prettyName: String = "date_add" override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): DateAdd = copy(startDate = newLeft, days = newRight) } /** * Subtracts a number of days to startdate. */ @ExpressionDescription( usage = "_FUNC_(start_date, num_days) - Returns the date that is `num_days` before `start_date`.", examples = """ Examples: > SELECT _FUNC_('2016-07-30', 1); 2016-07-29 """, group = "datetime_funcs", since = "1.5.0") case class DateSub(startDate: Expression, days: Expression) extends BinaryExpression with ExpectsInputTypes with NullIntolerant { override def left: Expression = startDate override def right: Expression = days override def inputTypes: Seq[AbstractDataType] = Seq(DateType, TypeCollection(IntegerType, ShortType, ByteType)) override def dataType: DataType = DateType override def nullSafeEval(start: Any, d: Any): Any = { start.asInstanceOf[Int] - d.asInstanceOf[Number].intValue() } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { nullSafeCodeGen(ctx, ev, (sd, d) => { s"""${ev.value} = $sd - $d;""" }) } override def prettyName: String = "date_sub" override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): DateSub = copy(startDate = newLeft, days = newRight) } trait GetTimeField extends UnaryExpression with TimeZoneAwareExpression with ImplicitCastInputTypes with NullIntolerant { val func: (Long, ZoneId) => Any val funcName: String @transient protected lazy val zoneIdInEval: ZoneId = zoneIdForType(child.dataType) override def inputTypes: Seq[AbstractDataType] = Seq(AnyTimestampType) override def dataType: DataType = IntegerType override protected def nullSafeEval(timestamp: Any): Any = { func(timestamp.asInstanceOf[Long], zoneIdInEval) } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val zid = ctx.addReferenceObj("zoneId", zoneIdInEval, classOf[ZoneId].getName) val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") defineCodeGen(ctx, ev, c => s"$dtu.$funcName($c, $zid)") } } @ExpressionDescription( usage = "_FUNC_(timestamp) - Returns the hour component of the string/timestamp.", examples = """ Examples: > SELECT _FUNC_('2009-07-30 12:58:59'); 12 """, group = "datetime_funcs", since = "1.5.0") case class Hour(child: Expression, timeZoneId: Option[String] = None) extends GetTimeField { def this(child: Expression) = this(child, None) override def withTimeZone(timeZoneId: String): Hour = copy(timeZoneId = Option(timeZoneId)) override val func = DateTimeUtils.getHours override val funcName = "getHours" override protected def withNewChildInternal(newChild: Expression): Hour = copy(child = 
newChild) } @ExpressionDescription( usage = "_FUNC_(timestamp) - Returns the minute component of the string/timestamp.", examples = """ Examples: > SELECT _FUNC_('2009-07-30 12:58:59'); 58 """, group = "datetime_funcs", since = "1.5.0") case class Minute(child: Expression, timeZoneId: Option[String] = None) extends GetTimeField { def this(child: Expression) = this(child, None) override def withTimeZone(timeZoneId: String): Minute = copy(timeZoneId = Option(timeZoneId)) override val func = DateTimeUtils.getMinutes override val funcName = "getMinutes" override protected def withNewChildInternal(newChild: Expression): Minute = copy(child = newChild) } @ExpressionDescription( usage = "_FUNC_(timestamp) - Returns the second component of the string/timestamp.", examples = """ Examples: > SELECT _FUNC_('2009-07-30 12:58:59'); 59 """, group = "datetime_funcs", since = "1.5.0") case class Second(child: Expression, timeZoneId: Option[String] = None) extends GetTimeField { def this(child: Expression) = this(child, None) override def withTimeZone(timeZoneId: String): Second = copy(timeZoneId = Option(timeZoneId)) override val func = DateTimeUtils.getSeconds override val funcName = "getSeconds" override protected def withNewChildInternal(newChild: Expression): Second = copy(child = newChild) } case class SecondWithFraction(child: Expression, timeZoneId: Option[String] = None) extends GetTimeField { def this(child: Expression) = this(child, None) // 2 digits for seconds, and 6 digits for the fractional part with microsecond precision. override def dataType: DataType = DecimalType(8, 6) override def withTimeZone(timeZoneId: String): SecondWithFraction = copy(timeZoneId = Option(timeZoneId)) override val func = DateTimeUtils.getSecondsWithFraction override val funcName = "getSecondsWithFraction" override protected def withNewChildInternal(newChild: Expression): SecondWithFraction = copy(child = newChild) } trait GetDateField extends UnaryExpression with ImplicitCastInputTypes with NullIntolerant { val func: Int => Any val funcName: String override def inputTypes: Seq[AbstractDataType] = Seq(DateType) override def dataType: DataType = IntegerType override protected def nullSafeEval(date: Any): Any = { func(date.asInstanceOf[Int]) } override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") defineCodeGen(ctx, ev, c => s"$dtu.$funcName($c)") } } @ExpressionDescription( usage = "_FUNC_(date) - Returns the day of year of the date/timestamp.", examples = """ Examples: > SELECT _FUNC_('2016-04-09'); 100 """, group = "datetime_funcs", since = "1.5.0") case class DayOfYear(child: Expression) extends GetDateField { override val func = DateTimeUtils.getDayInYear override val funcName = "getDayInYear" override protected def withNewChildInternal(newChild: Expression): DayOfYear = copy(child = newChild) } @ExpressionDescription( usage = "_FUNC_(days) - Create date from the number of days since 1970-01-01.", examples = """ Examples: > SELECT _FUNC_(1); 1970-01-02 """, group = "datetime_funcs", since = "3.1.0") case class DateFromUnixDate(child: Expression) extends UnaryExpression with ImplicitCastInputTypes with NullIntolerant { override def inputTypes: Seq[AbstractDataType] = Seq(IntegerType) override def dataType: DataType = DateType override def nullSafeEval(input: Any): Any = input.asInstanceOf[Int] override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = defineCodeGen(ctx, ev, c => c) override def prettyName: 
String = "date_from_unix_date" override protected def withNewChildInternal(newChild: Expression): DateFromUnixDate = copy(child = newChild) } @ExpressionDescription( usage = "_FUNC_(date) - Returns the number of days since 1970-01-01.", examples = """ Examples: > SELECT _FUNC_(DATE("1970-01-02")); 1 """, group = "datetime_funcs", since = "3.1.0") case class UnixDate(child: Expression) extends UnaryExpression with ExpectsInputTypes with NullIntolerant { override def inputTypes: Seq[AbstractDataType] = Seq(DateType) override def dataType: DataType = IntegerType override def nullSafeEval(input: Any): Any = input.asInstanceOf[Int] override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = defineCodeGen(ctx, ev, c => c) override def prettyName: String = "unix_date" override protected def withNewChildInternal(newChild: Expression): UnixDate = copy(child = newChild) } abstract class IntegralToTimestampBase extends UnaryExpression with ExpectsInputTypes with NullIntolerant { protected def upScaleFactor: Long override def inputTypes: Seq[AbstractDataType] = Seq(IntegralType) override def dataType: DataType = TimestampType override def nullSafeEval(input: Any): Any = { Math.multiplyExact(input.asInstanceOf[Number].longValue(), upScaleFactor) } override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { if (upScaleFactor == 1) { defineCodeGen(ctx, ev, c => c) } else { defineCodeGen(ctx, ev, c => s"java.lang.Math.multiplyExact($c, ${upScaleFactor}L)") } } } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(seconds) - Creates timestamp from the number of seconds (can be fractional) since UTC epoch.", examples = """ Examples: > SELECT _FUNC_(1230219000); 2008-12-25 07:30:00 > SELECT _FUNC_(1230219000.123); 2008-12-25 07:30:00.123 """, group = "datetime_funcs", since = "3.1.0") // scalastyle:on line.size.limit case class SecondsToTimestamp(child: Expression) extends UnaryExpression with ExpectsInputTypes with NullIntolerant { override def inputTypes: Seq[AbstractDataType] = Seq(NumericType) override def dataType: DataType = TimestampType override def nullable: Boolean = child.dataType match { case _: FloatType | _: DoubleType => true case _ => child.nullable } @transient private lazy val evalFunc: Any => Any = child.dataType match { case _: IntegralType => input => Math.multiplyExact(input.asInstanceOf[Number].longValue(), MICROS_PER_SECOND) case _: DecimalType => input => val operand = new java.math.BigDecimal(MICROS_PER_SECOND) input.asInstanceOf[Decimal].toJavaBigDecimal.multiply(operand).longValueExact() case _: FloatType => input => val f = input.asInstanceOf[Float] if (f.isNaN || f.isInfinite) null else (f.toDouble * MICROS_PER_SECOND).toLong case _: DoubleType => input => val d = input.asInstanceOf[Double] if (d.isNaN || d.isInfinite) null else (d * MICROS_PER_SECOND).toLong } override def nullSafeEval(input: Any): Any = evalFunc(input) override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = child.dataType match { case _: IntegralType => defineCodeGen(ctx, ev, c => s"java.lang.Math.multiplyExact($c, ${MICROS_PER_SECOND}L)") case _: DecimalType => val operand = s"new java.math.BigDecimal($MICROS_PER_SECOND)" defineCodeGen(ctx, ev, c => s"$c.toJavaBigDecimal().multiply($operand).longValueExact()") case other => val castToDouble = if (other.isInstanceOf[FloatType]) "(double)" else "" nullSafeCodeGen(ctx, ev, c => { val typeStr = CodeGenerator.boxedType(other) s""" |if ($typeStr.isNaN($c) || $typeStr.isInfinite($c)) { | 
${ev.isNull} = true; |} else { | ${ev.value} = (long)($castToDouble$c * $MICROS_PER_SECOND); |} |""".stripMargin }) } override def prettyName: String = "timestamp_seconds" override protected def withNewChildInternal(newChild: Expression): SecondsToTimestamp = copy(child = newChild) } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(milliseconds) - Creates timestamp from the number of milliseconds since UTC epoch.", examples = """ Examples: > SELECT _FUNC_(1230219000123); 2008-12-25 07:30:00.123 """, group = "datetime_funcs", since = "3.1.0") // scalastyle:on line.size.limit case class MillisToTimestamp(child: Expression) extends IntegralToTimestampBase { override def upScaleFactor: Long = MICROS_PER_MILLIS override def prettyName: String = "timestamp_millis" override protected def withNewChildInternal(newChild: Expression): MillisToTimestamp = copy(child = newChild) } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(microseconds) - Creates timestamp from the number of microseconds since UTC epoch.", examples = """ Examples: > SELECT _FUNC_(1230219000123123); 2008-12-25 07:30:00.123123 """, group = "datetime_funcs", since = "3.1.0") // scalastyle:on line.size.limit case class MicrosToTimestamp(child: Expression) extends IntegralToTimestampBase { override def upScaleFactor: Long = 1L override def prettyName: String = "timestamp_micros" override protected def withNewChildInternal(newChild: Expression): MicrosToTimestamp = copy(child = newChild) } abstract class TimestampToLongBase extends UnaryExpression with ExpectsInputTypes with NullIntolerant { protected def scaleFactor: Long override def inputTypes: Seq[AbstractDataType] = Seq(TimestampType) override def dataType: DataType = LongType override def nullSafeEval(input: Any): Any = { Math.floorDiv(input.asInstanceOf[Number].longValue(), scaleFactor) } override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { if (scaleFactor == 1) { defineCodeGen(ctx, ev, c => c) } else { defineCodeGen(ctx, ev, c => s"java.lang.Math.floorDiv($c, ${scaleFactor}L)") } } } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(timestamp) - Returns the number of seconds since 1970-01-01 00:00:00 UTC. Truncates higher levels of precision.", examples = """ Examples: > SELECT _FUNC_(TIMESTAMP('1970-01-01 00:00:01Z')); 1 """, group = "datetime_funcs", since = "3.1.0") // scalastyle:on line.size.limit case class UnixSeconds(child: Expression) extends TimestampToLongBase { override def scaleFactor: Long = MICROS_PER_SECOND override def prettyName: String = "unix_seconds" override protected def withNewChildInternal(newChild: Expression): UnixSeconds = copy(child = newChild) } // Internal expression used to get the raw UTC timestamp in pandas API on Spark. // This is to work around casting timestamp_ntz to long disallowed by ANSI. case class CastTimestampNTZToLong(child: Expression) extends TimestampToLongBase { override def inputTypes: Seq[AbstractDataType] = Seq(TimestampNTZType) override def scaleFactor: Long = MICROS_PER_SECOND override def prettyName: String = "cast_timestamp_ntz_to_long" override protected def withNewChildInternal(newChild: Expression): CastTimestampNTZToLong = copy(child = newChild) } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(timestamp) - Returns the number of milliseconds since 1970-01-01 00:00:00 UTC. 
Truncates higher levels of precision.", examples = """ Examples: > SELECT _FUNC_(TIMESTAMP('1970-01-01 00:00:01Z')); 1000 """, group = "datetime_funcs", since = "3.1.0") // scalastyle:on line.size.limit case class UnixMillis(child: Expression) extends TimestampToLongBase { override def scaleFactor: Long = MICROS_PER_MILLIS override def prettyName: String = "unix_millis" override protected def withNewChildInternal(newChild: Expression): UnixMillis = copy(child = newChild) } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(timestamp) - Returns the number of microseconds since 1970-01-01 00:00:00 UTC.", examples = """ Examples: > SELECT _FUNC_(TIMESTAMP('1970-01-01 00:00:01Z')); 1000000 """, group = "datetime_funcs", since = "3.1.0") // scalastyle:on line.size.limit case class UnixMicros(child: Expression) extends TimestampToLongBase { override def scaleFactor: Long = 1L override def prettyName: String = "unix_micros" override protected def withNewChildInternal(newChild: Expression): UnixMicros = copy(child = newChild) } @ExpressionDescription( usage = "_FUNC_(date) - Returns the year component of the date/timestamp.", examples = """ Examples: > SELECT _FUNC_('2016-07-30'); 2016 """, group = "datetime_funcs", since = "1.5.0") case class Year(child: Expression) extends GetDateField { override val func = DateTimeUtils.getYear override val funcName = "getYear" override protected def withNewChildInternal(newChild: Expression): Year = copy(child = newChild) } case class YearOfWeek(child: Expression) extends GetDateField { override val func = DateTimeUtils.getWeekBasedYear override val funcName = "getWeekBasedYear" override protected def withNewChildInternal(newChild: Expression): YearOfWeek = copy(child = newChild) } @ExpressionDescription( usage = "_FUNC_(date) - Returns the quarter of the year for date, in the range 1 to 4.", examples = """ Examples: > SELECT _FUNC_('2016-08-31'); 3 """, group = "datetime_funcs", since = "1.5.0") case class Quarter(child: Expression) extends GetDateField { override val func = DateTimeUtils.getQuarter override val funcName = "getQuarter" override protected def withNewChildInternal(newChild: Expression): Quarter = copy(child = newChild) } @ExpressionDescription( usage = "_FUNC_(date) - Returns the month component of the date/timestamp.", examples = """ Examples: > SELECT _FUNC_('2016-07-30'); 7 """, group = "datetime_funcs", since = "1.5.0") case class Month(child: Expression) extends GetDateField { override val func = DateTimeUtils.getMonth override val funcName = "getMonth" override protected def withNewChildInternal(newChild: Expression): Month = copy(child = newChild) } @ExpressionDescription( usage = "_FUNC_(date) - Returns the day of month of the date/timestamp.", examples = """ Examples: > SELECT _FUNC_('2009-07-30'); 30 """, group = "datetime_funcs", since = "1.5.0") case class DayOfMonth(child: Expression) extends GetDateField { override val func = DateTimeUtils.getDayOfMonth override val funcName = "getDayOfMonth" override protected def withNewChildInternal(newChild: Expression): DayOfMonth = copy(child = newChild) } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(date) - Returns the day of the week for date/timestamp (1 = Sunday, 2 = Monday, ..., 7 = Saturday).", examples = """ Examples: > SELECT _FUNC_('2009-07-30'); 5 """, group = "datetime_funcs", since = "2.3.0") // scalastyle:on line.size.limit case class DayOfWeek(child: Expression) extends GetDateField { override val func = 
DateTimeUtils.getDayOfWeek override val funcName = "getDayOfWeek" override protected def withNewChildInternal(newChild: Expression): DayOfWeek = copy(child = newChild) } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(date) - Returns the day of the week for date/timestamp (0 = Monday, 1 = Tuesday, ..., 6 = Sunday).", examples = """ Examples: > SELECT _FUNC_('2009-07-30'); 3 """, group = "datetime_funcs", since = "2.4.0") // scalastyle:on line.size.limit case class WeekDay(child: Expression) extends GetDateField { override val func = DateTimeUtils.getWeekDay override val funcName = "getWeekDay" override protected def withNewChildInternal(newChild: Expression): WeekDay = copy(child = newChild) } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(date) - Returns the week of the year of the given date. A week is considered to start on a Monday and week 1 is the first week with >3 days.", examples = """ Examples: > SELECT _FUNC_('2008-02-20'); 8 """, group = "datetime_funcs", since = "1.5.0") // scalastyle:on line.size.limit case class WeekOfYear(child: Expression) extends GetDateField { override val func = DateTimeUtils.getWeekOfYear override val funcName = "getWeekOfYear" override protected def withNewChildInternal(newChild: Expression): WeekOfYear = copy(child = newChild) } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(timestamp, fmt) - Converts `timestamp` to a value of string in the format specified by the date format `fmt`.", arguments = """ Arguments: * timestamp - A date/timestamp or string to be converted to the given format. * fmt - Date/time format pattern to follow. See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">Datetime Patterns</a> for valid date and time format patterns. 
""", examples = """ Examples: > SELECT _FUNC_('2016-04-08', 'y'); 2016 """, group = "datetime_funcs", since = "1.5.0") // scalastyle:on line.size.limit case class DateFormatClass(left: Expression, right: Expression, timeZoneId: Option[String] = None) extends BinaryExpression with TimestampFormatterHelper with ImplicitCastInputTypes with NullIntolerant { def this(left: Expression, right: Expression) = this(left, right, None) override def dataType: DataType = StringType override def inputTypes: Seq[AbstractDataType] = Seq(TimestampType, StringType) override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) override protected def nullSafeEval(timestamp: Any, format: Any): Any = { val formatter = formatterOption.getOrElse(getFormatter(format.toString)) UTF8String.fromString(formatter.format(timestamp.asInstanceOf[Long])) } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { formatterOption.map { tf => val timestampFormatter = ctx.addReferenceObj("timestampFormatter", tf) defineCodeGen(ctx, ev, (timestamp, _) => { s"""UTF8String.fromString($timestampFormatter.format($timestamp))""" }) }.getOrElse { val tf = TimestampFormatter.getClass.getName.stripSuffix("$") val ldf = LegacyDateFormats.getClass.getName.stripSuffix("$") val zid = ctx.addReferenceObj("zoneId", zoneId, classOf[ZoneId].getName) defineCodeGen(ctx, ev, (timestamp, format) => { s"""|UTF8String.fromString($tf$$.MODULE$$.apply( | $format.toString(), | $zid, | $ldf$$.MODULE$$.SIMPLE_DATE_FORMAT(), | false) |.format($timestamp))""".stripMargin }) } } override def prettyName: String = "date_format" override protected def formatString: Expression = right override protected def isParsing: Boolean = false override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): DateFormatClass = copy(left = newLeft, right = newRight) } /** * Converts time string with given pattern. * Deterministic version of [[UnixTimestamp]], must have at least one parameter. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(timeExp[, fmt]) - Returns the UNIX timestamp of the given time.", arguments = """ Arguments: * timeExp - A date/timestamp or string which is returned as a UNIX timestamp. * fmt - Date/time format pattern to follow. Ignored if `timeExp` is not a string. Default value is "yyyy-MM-dd HH:mm:ss". See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">Datetime Patterns</a> for valid date and time format patterns. 
""", examples = """ Examples: > SELECT _FUNC_('2016-04-08', 'yyyy-MM-dd'); 1460098800 """, group = "datetime_funcs", since = "1.6.0") // scalastyle:on line.size.limit case class ToUnixTimestamp( timeExp: Expression, format: Expression, timeZoneId: Option[String] = None, failOnError: Boolean = SQLConf.get.ansiEnabled) extends UnixTime { def this(timeExp: Expression, format: Expression) = this(timeExp, format, None, SQLConf.get.ansiEnabled) override def left: Expression = timeExp override def right: Expression = format override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) def this(time: Expression) = { this(time, Literal(TimestampFormatter.defaultPattern)) } override def prettyName: String = "to_unix_timestamp" override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): ToUnixTimestamp = copy(timeExp = newLeft, format = newRight) } // scalastyle:off line.size.limit /** * Converts time string with given pattern to Unix time stamp (in seconds), returns null if fail. * See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">Datetime Patterns</a>. * Note that hive Language Manual says it returns 0 if fail, but in fact it returns null. * If the second parameter is missing, use "yyyy-MM-dd HH:mm:ss". * If no parameters provided, the first parameter will be current_timestamp. * If the first parameter is a Date or Timestamp instead of String, we will ignore the * second parameter. */ @ExpressionDescription( usage = "_FUNC_([timeExp[, fmt]]) - Returns the UNIX timestamp of current or specified time.", arguments = """ Arguments: * timeExp - A date/timestamp or string. If not provided, this defaults to current time. * fmt - Date/time format pattern to follow. Ignored if `timeExp` is not a string. Default value is "yyyy-MM-dd HH:mm:ss". See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html"> Datetime Patterns</a> for valid date and time format patterns. """, examples = """ Examples: > SELECT _FUNC_(); 1476884637 > SELECT _FUNC_('2016-04-08', 'yyyy-MM-dd'); 1460041200 """, group = "datetime_funcs", since = "1.5.0") // scalastyle:on line.size.limit case class UnixTimestamp( timeExp: Expression, format: Expression, timeZoneId: Option[String] = None, failOnError: Boolean = SQLConf.get.ansiEnabled) extends UnixTime { def this(timeExp: Expression, format: Expression) = this(timeExp, format, None, SQLConf.get.ansiEnabled) override def left: Expression = timeExp override def right: Expression = format override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) def this(time: Expression) = { this(time, Literal(TimestampFormatter.defaultPattern)) } def this() = { this(CurrentTimestamp()) } override def prettyName: String = "unix_timestamp" override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): UnixTimestamp = copy(timeExp = newLeft, format = newRight) } /** * Gets a timestamp from a string or a date. 
*/ case class GetTimestamp( left: Expression, right: Expression, override val dataType: DataType, timeZoneId: Option[String] = None, failOnError: Boolean = SQLConf.get.ansiEnabled) extends ToTimestamp { override val forTimestampNTZ: Boolean = dataType == TimestampNTZType override protected def downScaleFactor: Long = 1 override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Some(timeZoneId)) override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): Expression = copy(left = newLeft, right = newRight) } /** * Parses a column to a timestamp without time zone based on the supplied format. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = """ _FUNC_(timestamp_str[, fmt]) - Parses the `timestamp_str` expression with the `fmt` expression to a timestamp without time zone. Returns null with invalid input. By default, it follows casting rules to a timestamp if the `fmt` is omitted. """, arguments = """ Arguments: * timestamp_str - A string to be parsed to timestamp without time zone. * fmt - Timestamp format pattern to follow. See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">Datetime Patterns</a> for valid date and time format patterns. """, examples = """ Examples: > SELECT _FUNC_('2016-12-31 00:12:00'); 2016-12-31 00:12:00 > SELECT _FUNC_('2016-12-31', 'yyyy-MM-dd'); 2016-12-31 00:00:00 """, group = "datetime_funcs", since = "3.3.0") // scalastyle:on line.size.limit object ParseToTimestampNTZExpressionBuilder extends ExpressionBuilder { override def build(funcName: String, expressions: Seq[Expression]): Expression = { val numArgs = expressions.length if (numArgs == 1 || numArgs == 2) { ParseToTimestamp(expressions(0), expressions.drop(1).lastOption, TimestampNTZType) } else { throw QueryCompilationErrors.invalidFunctionArgumentNumberError(Seq(1, 2), funcName, numArgs) } } } /** * Parses a column to a timestamp with local time zone based on the supplied format. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = """ _FUNC_(timestamp_str[, fmt]) - Parses the `timestamp_str` expression with the `fmt` expression to a timestamp with local time zone. Returns null with invalid input. By default, it follows casting rules to a timestamp if the `fmt` is omitted. """, arguments = """ Arguments: * timestamp_str - A string to be parsed to timestamp with local time zone. * fmt - Timestamp format pattern to follow. See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">Datetime Patterns</a> for valid date and time format patterns. """, examples = """ Examples: > SELECT _FUNC_('2016-12-31 00:12:00'); 2016-12-31 00:12:00 > SELECT _FUNC_('2016-12-31', 'yyyy-MM-dd'); 2016-12-31 00:00:00 """, group = "datetime_funcs", since = "3.3.0") // scalastyle:on line.size.limit object ParseToTimestampLTZExpressionBuilder extends ExpressionBuilder { override def build(funcName: String, expressions: Seq[Expression]): Expression = { val numArgs = expressions.length if (numArgs == 1 || numArgs == 2) { ParseToTimestamp(expressions(0), expressions.drop(1).lastOption, TimestampType) } else { throw QueryCompilationErrors.invalidFunctionArgumentNumberError(Seq(1, 2), funcName, numArgs) } } } abstract class ToTimestamp extends BinaryExpression with TimestampFormatterHelper with ExpectsInputTypes { def failOnError: Boolean // The result of the conversion to timestamp is microseconds divided by this factor. 
// For example if the factor is 1000000, the result of the expression is in seconds. protected def downScaleFactor: Long override protected def formatString: Expression = right override protected def isParsing = true override def forTimestampNTZ: Boolean = left.dataType == TimestampNTZType override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(StringType, DateType, TimestampType, TimestampNTZType), StringType) override def dataType: DataType = LongType override def nullable: Boolean = if (failOnError) children.exists(_.nullable) else true private def isParseError(e: Throwable): Boolean = e match { case _: DateTimeParseException | _: DateTimeException | _: ParseException => true case _ => false } override def eval(input: InternalRow): Any = { val t = left.eval(input) if (t == null) { null } else { left.dataType match { case DateType => daysToMicros(t.asInstanceOf[Int], zoneId) / downScaleFactor case TimestampType | TimestampNTZType => t.asInstanceOf[Long] / downScaleFactor case StringType => val fmt = right.eval(input) if (fmt == null) { null } else { val formatter = formatterOption.getOrElse(getFormatter(fmt.toString)) try { if (forTimestampNTZ) { formatter.parseWithoutTimeZone(t.asInstanceOf[UTF8String].toString) } else { formatter.parse(t.asInstanceOf[UTF8String].toString) / downScaleFactor } } catch { case e: DateTimeParseException if failOnError => throw QueryExecutionErrors.ansiDateTimeParseError(e) case e: DateTimeException if failOnError => throw QueryExecutionErrors.ansiDateTimeError(e) case e: ParseException if failOnError => throw QueryExecutionErrors.ansiParseError(e) case e if isParseError(e) => null } } } } } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val javaType = CodeGenerator.javaType(dataType) def parseErrorBranch(method: String): String = if (failOnError) { s"throw QueryExecutionErrors.$method(e);" } else { s"${ev.isNull} = true;" } val parseMethod = if (forTimestampNTZ) { "parseWithoutTimeZone" } else { "parse" } val downScaleCode = if (forTimestampNTZ) { "" } else { s"/ $downScaleFactor" } left.dataType match { case StringType => formatterOption.map { fmt => val df = classOf[TimestampFormatter].getName val formatterName = ctx.addReferenceObj("formatter", fmt, df) nullSafeCodeGen(ctx, ev, (datetimeStr, _) => s""" |try { | ${ev.value} = $formatterName.$parseMethod($datetimeStr.toString()) $downScaleCode; |} catch (java.time.format.DateTimeParseException e) { | ${parseErrorBranch("ansiDateTimeParseError")} |} catch (java.time.DateTimeException e) { | ${parseErrorBranch("ansiDateTimeError")} |} catch (java.text.ParseException e) { | ${parseErrorBranch("ansiParseError")} |} |""".stripMargin) }.getOrElse { val zid = ctx.addReferenceObj("zoneId", zoneId, classOf[ZoneId].getName) val tf = TimestampFormatter.getClass.getName.stripSuffix("$") val ldf = LegacyDateFormats.getClass.getName.stripSuffix("$") val timestampFormatter = ctx.freshName("timestampFormatter") nullSafeCodeGen(ctx, ev, (string, format) => s""" |$tf $timestampFormatter = $tf$$.MODULE$$.apply( | $format.toString(), | $zid, | $ldf$$.MODULE$$.SIMPLE_DATE_FORMAT(), | true); |try { | ${ev.value} = $timestampFormatter.$parseMethod($string.toString()) $downScaleCode; |} catch (java.time.format.DateTimeParseException e) { | ${parseErrorBranch("ansiDateTimeParseError")} |} catch (java.time.DateTimeException e) { | ${parseErrorBranch("ansiDateTimeError")} |} catch (java.text.ParseException e) { | ${parseErrorBranch("ansiParseError")} |} |""".stripMargin) } case TimestampType | 
TimestampNTZType => val eval1 = left.genCode(ctx) ev.copy(code = code""" ${eval1.code} boolean ${ev.isNull} = ${eval1.isNull}; $javaType ${ev.value} = ${CodeGenerator.defaultValue(dataType)}; if (!${ev.isNull}) { ${ev.value} = ${eval1.value} / $downScaleFactor; }""") case DateType => val zid = ctx.addReferenceObj("zoneId", zoneId, classOf[ZoneId].getName) val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") val eval1 = left.genCode(ctx) ev.copy(code = code""" ${eval1.code} boolean ${ev.isNull} = ${eval1.isNull}; $javaType ${ev.value} = ${CodeGenerator.defaultValue(dataType)}; if (!${ev.isNull}) { ${ev.value} = $dtu.daysToMicros(${eval1.value}, $zid) / $downScaleFactor; }""") } } } abstract class UnixTime extends ToTimestamp { override val downScaleFactor: Long = MICROS_PER_SECOND } /** * Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string * representing the timestamp of that moment in the current system time zone in the given * format. If the format is missing, using format like "1970-01-01 00:00:00". * Note that Hive Language Manual says it returns 0 if fail, but in fact it returns null. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(unix_time[, fmt]) - Returns `unix_time` in the specified `fmt`.", arguments = """ Arguments: * unix_time - UNIX Timestamp to be converted to the provided format. * fmt - Date/time format pattern to follow. See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">Datetime Patterns</a> for valid date and time format patterns. The 'yyyy-MM-dd HH:mm:ss' pattern is used if omitted. """, examples = """ Examples: > SELECT _FUNC_(0, 'yyyy-MM-dd HH:mm:ss'); 1969-12-31 16:00:00 > SELECT _FUNC_(0); 1969-12-31 16:00:00 """, group = "datetime_funcs", since = "1.5.0") // scalastyle:on line.size.limit case class FromUnixTime(sec: Expression, format: Expression, timeZoneId: Option[String] = None) extends BinaryExpression with TimestampFormatterHelper with ImplicitCastInputTypes with NullIntolerant { def this(sec: Expression, format: Expression) = this(sec, format, None) override def left: Expression = sec override def right: Expression = format override def prettyName: String = "from_unixtime" def this(unix: Expression) = { this(unix, Literal(TimestampFormatter.defaultPattern)) } override def dataType: DataType = StringType override def nullable: Boolean = true override def inputTypes: Seq[AbstractDataType] = Seq(LongType, StringType) override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) override def nullSafeEval(seconds: Any, format: Any): Any = { val fmt = formatterOption.getOrElse(getFormatter(format.toString)) UTF8String.fromString(fmt.format(seconds.asInstanceOf[Long] * MICROS_PER_SECOND)) } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { formatterOption.map { f => val formatterName = ctx.addReferenceObj("formatter", f) defineCodeGen(ctx, ev, (seconds, _) => s"UTF8String.fromString($formatterName.format($seconds * 1000000L))") }.getOrElse { val tf = TimestampFormatter.getClass.getName.stripSuffix("$") val ldf = LegacyDateFormats.getClass.getName.stripSuffix("$") val zid = ctx.addReferenceObj("zoneId", zoneId, classOf[ZoneId].getName) defineCodeGen(ctx, ev, (seconds, format) => s""" |UTF8String.fromString( | $tf$$.MODULE$$.apply($format.toString(), | $zid, | $ldf$$.MODULE$$.SIMPLE_DATE_FORMAT(), | false).format($seconds * 1000000L)) |""".stripMargin) } } override protected def formatString: Expression 
= format override protected def isParsing: Boolean = false override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): FromUnixTime = copy(sec = newLeft, format = newRight) } /** * Returns the last day of the month which the date belongs to. */ @ExpressionDescription( usage = "_FUNC_(date) - Returns the last day of the month which the date belongs to.", examples = """ Examples: > SELECT _FUNC_('2009-01-12'); 2009-01-31 """, group = "datetime_funcs", since = "1.5.0") case class LastDay(startDate: Expression) extends UnaryExpression with ImplicitCastInputTypes with NullIntolerant { override def child: Expression = startDate override def inputTypes: Seq[AbstractDataType] = Seq(DateType) override def dataType: DataType = DateType override def nullSafeEval(date: Any): Any = { DateTimeUtils.getLastDayOfMonth(date.asInstanceOf[Int]) } override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") defineCodeGen(ctx, ev, sd => s"$dtu.getLastDayOfMonth($sd)") } override def prettyName: String = "last_day" override protected def withNewChildInternal(newChild: Expression): LastDay = copy(startDate = newChild) } /** * Returns the first date which is later than startDate and named as dayOfWeek. * For example, NextDay(2015-07-27, Sunday) would return 2015-08-02, which is the first * Sunday later than 2015-07-27. * * Allowed "dayOfWeek" is defined in [[DateTimeUtils.getDayOfWeekFromString]]. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = """_FUNC_(start_date, day_of_week) - Returns the first date which is later than `start_date` and named as indicated. The function returns NULL if at least one of the input parameters is NULL. When both of the input parameters are not NULL and day_of_week is an invalid input, the function throws IllegalArgumentException if `spark.sql.ansi.enabled` is set to true, otherwise NULL. 
""", examples = """ Examples: > SELECT _FUNC_('2015-01-14', 'TU'); 2015-01-20 """, group = "datetime_funcs", since = "1.5.0") // scalastyle:on line.size.limit case class NextDay( startDate: Expression, dayOfWeek: Expression, failOnError: Boolean = SQLConf.get.ansiEnabled) extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant { override def left: Expression = startDate override def right: Expression = dayOfWeek def this(left: Expression, right: Expression) = this(left, right, SQLConf.get.ansiEnabled) override def inputTypes: Seq[AbstractDataType] = Seq(DateType, StringType) override def dataType: DataType = DateType override def nullable: Boolean = true override def nullSafeEval(start: Any, dayOfW: Any): Any = { try { val dow = DateTimeUtils.getDayOfWeekFromString(dayOfW.asInstanceOf[UTF8String]) val sd = start.asInstanceOf[Int] DateTimeUtils.getNextDateForDayOfWeek(sd, dow) } catch { case e: IllegalArgumentException => if (failOnError) { throw QueryExecutionErrors.ansiIllegalArgumentError(e) } else { null } } } private def dateTimeUtilClass: String = DateTimeUtils.getClass.getName.stripSuffix("$") private def nextDayGenCode( ev: ExprCode, dayOfWeekTerm: String, sd: String, dowS: String): String = { val failOnErrorBranch = if (failOnError) { "throw QueryExecutionErrors.ansiIllegalArgumentError(e);" } else { s"${ev.isNull} = true;" } s""" |try { | int $dayOfWeekTerm = $dateTimeUtilClass.getDayOfWeekFromString($dowS); | ${ev.value} = $dateTimeUtilClass.getNextDateForDayOfWeek($sd, $dayOfWeekTerm); |} catch (IllegalArgumentException e) { | $failOnErrorBranch |} |""".stripMargin } override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { nullSafeCodeGen(ctx, ev, (sd, dowS) => { val dayOfWeekTerm = ctx.freshName("dayOfWeek") if (dayOfWeek.foldable) { val input = dayOfWeek.eval().asInstanceOf[UTF8String] if (input eq null) { s"""${ev.isNull} = true;""" } else { try { val dayOfWeekValue = DateTimeUtils.getDayOfWeekFromString(input) s"${ev.value} = $dateTimeUtilClass.getNextDateForDayOfWeek($sd, $dayOfWeekValue);" } catch { case _: IllegalArgumentException => nextDayGenCode(ev, dayOfWeekTerm, sd, dowS) } } } else { nextDayGenCode(ev, dayOfWeekTerm, sd, dowS) } }) } override def prettyName: String = "next_day" override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): NextDay = copy(startDate = newLeft, dayOfWeek = newRight) } /** * Adds an interval to timestamp. 
*/ case class TimeAdd(start: Expression, interval: Expression, timeZoneId: Option[String] = None) extends BinaryExpression with TimeZoneAwareExpression with ExpectsInputTypes with NullIntolerant { def this(start: Expression, interval: Expression) = this(start, interval, None) override def left: Expression = start override def right: Expression = interval override def toString: String = s"$left + $right" override def sql: String = s"${left.sql} + ${right.sql}" override def inputTypes: Seq[AbstractDataType] = Seq(AnyTimestampType, TypeCollection(CalendarIntervalType, DayTimeIntervalType)) override def dataType: DataType = start.dataType override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) @transient private lazy val zoneIdInEval: ZoneId = zoneIdForType(left.dataType) override def nullSafeEval(start: Any, interval: Any): Any = right.dataType match { case _: DayTimeIntervalType => timestampAddDayTime(start.asInstanceOf[Long], interval.asInstanceOf[Long], zoneIdInEval) case CalendarIntervalType => val i = interval.asInstanceOf[CalendarInterval] timestampAddInterval(start.asInstanceOf[Long], i.months, i.days, i.microseconds, zoneIdInEval) } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val zid = ctx.addReferenceObj("zoneId", zoneIdInEval, classOf[ZoneId].getName) val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") interval.dataType match { case _: DayTimeIntervalType => defineCodeGen(ctx, ev, (sd, dt) => s"""$dtu.timestampAddDayTime($sd, $dt, $zid)""") case CalendarIntervalType => defineCodeGen(ctx, ev, (sd, i) => { s"""$dtu.timestampAddInterval($sd, $i.months, $i.days, $i.microseconds, $zid)""" }) } } override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): TimeAdd = copy(start = newLeft, interval = newRight) } /** * Subtracts an interval from a timestamp or date. It is only used to give a pretty SQL string * for `datetime - interval` operations. */ case class DatetimeSub( start: Expression, interval: Expression, replacement: Expression) extends RuntimeReplaceable with InheritAnalysisRules { override def parameters: Seq[Expression] = Seq(start, interval) override def makeSQLString(childrenSQL: Seq[String]): String = { childrenSQL.mkString(" - ") } override def toString: String = s"$start - $interval" override protected def withNewChildInternal(newChild: Expression): Expression = { copy(replacement = newChild) } } /** * Adds an interval to a date. * * When ANSI mode is on, the microseconds part of the interval needs to be 0, otherwise a runtime * [[IllegalArgumentException]] will be raised. * When ANSI mode is off, if the microseconds part of the interval is 0, we perform date + interval * for better performance. If the microseconds part is not 0, then the date will be converted to a * timestamp to which the whole interval is added.
*/ case class DateAddInterval( start: Expression, interval: Expression, timeZoneId: Option[String] = None, ansiEnabled: Boolean = SQLConf.get.ansiEnabled) extends BinaryExpression with ExpectsInputTypes with TimeZoneAwareExpression with NullIntolerant { override def left: Expression = start override def right: Expression = interval override def toString: String = s"$left + $right" override def sql: String = s"${left.sql} + ${right.sql}" override def inputTypes: Seq[AbstractDataType] = Seq(DateType, CalendarIntervalType) override def dataType: DataType = DateType override def nullSafeEval(start: Any, interval: Any): Any = { val itvl = interval.asInstanceOf[CalendarInterval] if (ansiEnabled || itvl.microseconds == 0) { DateTimeUtils.dateAddInterval(start.asInstanceOf[Int], itvl) } else { val startTs = DateTimeUtils.daysToMicros(start.asInstanceOf[Int], zoneId) val resultTs = DateTimeUtils.timestampAddInterval( startTs, itvl.months, itvl.days, itvl.microseconds, zoneId) DateTimeUtils.microsToDays(resultTs, zoneId) } } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") nullSafeCodeGen(ctx, ev, (sd, i) => if (ansiEnabled) { s"""${ev.value} = $dtu.dateAddInterval($sd, $i);""" } else { val zid = ctx.addReferenceObj("zoneId", zoneId, classOf[ZoneId].getName) val startTs = ctx.freshName("startTs") val resultTs = ctx.freshName("resultTs") s""" |if ($i.microseconds == 0) { | ${ev.value} = $dtu.dateAddInterval($sd, $i); |} else { | long $startTs = $dtu.daysToMicros($sd, $zid); | long $resultTs = | $dtu.timestampAddInterval($startTs, $i.months, $i.days, $i.microseconds, $zid); | ${ev.value} = $dtu.microsToDays($resultTs, $zid); |} |""".stripMargin }) } override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): DateAddInterval = copy(start = newLeft, interval = newRight) } sealed trait UTCTimestamp extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant { val func: (Long, String) => Long val funcName: String override def inputTypes: Seq[AbstractDataType] = Seq(TimestampType, StringType) override def dataType: DataType = TimestampType override def nullSafeEval(time: Any, timezone: Any): Any = { func(time.asInstanceOf[Long], timezone.asInstanceOf[UTF8String].toString) } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") if (right.foldable) { val tz = right.eval().asInstanceOf[UTF8String] if (tz == null) { ev.copy(code = code""" |boolean ${ev.isNull} = true; |long ${ev.value} = 0; """.stripMargin) } else { val tzClass = classOf[ZoneId].getName val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") val escapedTz = StringEscapeUtils.escapeJava(tz.toString) val tzTerm = ctx.addMutableState(tzClass, "tz", v => s"""$v = $dtu.getZoneId("$escapedTz");""") val utcTerm = "java.time.ZoneOffset.UTC" val (fromTz, toTz) = this match { case _: FromUTCTimestamp => (utcTerm, tzTerm) case _: ToUTCTimestamp => (tzTerm, utcTerm) } val eval = left.genCode(ctx) ev.copy(code = code""" |${eval.code} |boolean ${ev.isNull} = ${eval.isNull}; |long ${ev.value} = 0; |if (!${ev.isNull}) { | ${ev.value} = $dtu.convertTz(${eval.value}, $fromTz, $toTz); |} """.stripMargin) } } else { defineCodeGen(ctx, ev, (timestamp, format) => { s"""$dtu.$funcName($timestamp, $format.toString())""" }) } } } /** * This is a common 
function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function * takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and * renders that timestamp as a timestamp in the given time zone. * * However, a timestamp in Spark represents the number of microseconds from the Unix epoch, which is not * timezone-agnostic. So in Spark this function just shifts the timestamp value from the UTC timezone to * the given timezone. * * This function may return a confusing result if the input is a string with a timezone, e.g. * '2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp * according to the timezone in the string, and finally displays the result by converting the * timestamp to a string according to the session local timezone. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(timestamp, timezone) - Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in UTC, and renders that time as a timestamp in the given time zone. For example, 'GMT+1' would yield '2017-07-14 03:40:00.0'.", examples = """ Examples: > SELECT _FUNC_('2016-08-31', 'Asia/Seoul'); 2016-08-31 09:00:00 """, group = "datetime_funcs", since = "1.5.0") // scalastyle:on line.size.limit case class FromUTCTimestamp(left: Expression, right: Expression) extends UTCTimestamp { override val func = DateTimeUtils.fromUTCTime override val funcName: String = "fromUTCTime" override val prettyName: String = "from_utc_timestamp" override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): FromUTCTimestamp = copy(left = newLeft, right = newRight) } /** * This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function * takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in the given * timezone, and renders that timestamp as a timestamp in UTC. * * However, a timestamp in Spark represents the number of microseconds from the Unix epoch, which is not * timezone-agnostic. So in Spark this function just shifts the timestamp value from the given * timezone to the UTC timezone. * * This function may return a confusing result if the input is a string with a timezone, e.g. * '2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp * according to the timezone in the string, and finally displays the result by converting the * timestamp to a string according to the session local timezone. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(timestamp, timezone) - Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in the given time zone, and renders that time as a timestamp in UTC.
For example, 'GMT+1' would yield '2017-07-14 01:40:00.0'.", examples = """ Examples: > SELECT _FUNC_('2016-08-31', 'Asia/Seoul'); 2016-08-30 15:00:00 """, group = "datetime_funcs", since = "1.5.0") // scalastyle:on line.size.limit case class ToUTCTimestamp(left: Expression, right: Expression) extends UTCTimestamp { override val func = DateTimeUtils.toUTCTime override val funcName: String = "toUTCTime" override val prettyName: String = "to_utc_timestamp" override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): ToUTCTimestamp = copy(left = newLeft, right = newRight) } abstract class AddMonthsBase extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant { override def dataType: DataType = DateType override def nullSafeEval(start: Any, months: Any): Any = { DateTimeUtils.dateAddMonths(start.asInstanceOf[Int], months.asInstanceOf[Int]) } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") defineCodeGen(ctx, ev, (sd, m) => { s"""$dtu.dateAddMonths($sd, $m)""" }) } } /** * Returns the date that is num_months after start_date. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(start_date, num_months) - Returns the date that is `num_months` after `start_date`.", examples = """ Examples: > SELECT _FUNC_('2016-08-31', 1); 2016-09-30 """, group = "datetime_funcs", since = "1.5.0") // scalastyle:on line.size.limit case class AddMonths(startDate: Expression, numMonths: Expression) extends AddMonthsBase { override def left: Expression = startDate override def right: Expression = numMonths override def inputTypes: Seq[AbstractDataType] = Seq(DateType, IntegerType) override def prettyName: String = "add_months" override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): AddMonths = copy(startDate = newLeft, numMonths = newRight) } // Adds the year-month interval to the date case class DateAddYMInterval(date: Expression, interval: Expression) extends AddMonthsBase { override def left: Expression = date override def right: Expression = interval override def inputTypes: Seq[AbstractDataType] = Seq(DateType, YearMonthIntervalType) override def toString: String = s"$left + $right" override def sql: String = s"${left.sql} + ${right.sql}" override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): DateAddYMInterval = copy(date = newLeft, interval = newRight) } // Adds the year-month interval to the timestamp case class TimestampAddYMInterval( timestamp: Expression, interval: Expression, timeZoneId: Option[String] = None) extends BinaryExpression with TimeZoneAwareExpression with ExpectsInputTypes with NullIntolerant { def this(timestamp: Expression, interval: Expression) = this(timestamp, interval, None) override def left: Expression = timestamp override def right: Expression = interval override def toString: String = s"$left + $right" override def sql: String = s"${left.sql} + ${right.sql}" override def inputTypes: Seq[AbstractDataType] = Seq(AnyTimestampType, YearMonthIntervalType) override def dataType: DataType = timestamp.dataType override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) @transient private lazy val zoneIdInEval: ZoneId = zoneIdForType(left.dataType) override def nullSafeEval(micros: Any, months: Any): Any = { timestampAddMonths(micros.asInstanceOf[Long], months.asInstanceOf[Int], zoneIdInEval) } override def doGenCode(ctx: 
CodegenContext, ev: ExprCode): ExprCode = { val zid = ctx.addReferenceObj("zoneId", zoneIdInEval, classOf[ZoneId].getName) val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") defineCodeGen(ctx, ev, (micros, months) => { s"""$dtu.timestampAddMonths($micros, $months, $zid)""" }) } override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): TimestampAddYMInterval = copy(timestamp = newLeft, interval = newRight) } /** * Returns number of months between times `timestamp1` and `timestamp2`. * If `timestamp1` is later than `timestamp2`, then the result is positive. * If `timestamp1` and `timestamp2` are on the same day of month, or both * are the last day of month, time of day will be ignored. Otherwise, the * difference is calculated based on 31 days per month, and rounded to * 8 digits unless roundOff=false. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = """ _FUNC_(timestamp1, timestamp2[, roundOff]) - If `timestamp1` is later than `timestamp2`, then the result is positive. If `timestamp1` and `timestamp2` are on the same day of month, or both are the last day of month, time of day will be ignored. Otherwise, the difference is calculated based on 31 days per month, and rounded to 8 digits unless roundOff=false. """, examples = """ Examples: > SELECT _FUNC_('1997-02-28 10:30:00', '1996-10-30'); 3.94959677 > SELECT _FUNC_('1997-02-28 10:30:00', '1996-10-30', false); 3.9495967741935485 """, group = "datetime_funcs", since = "1.5.0") // scalastyle:on line.size.limit case class MonthsBetween( date1: Expression, date2: Expression, roundOff: Expression, timeZoneId: Option[String] = None) extends TernaryExpression with TimeZoneAwareExpression with ImplicitCastInputTypes with NullIntolerant { def this(date1: Expression, date2: Expression) = this(date1, date2, Literal.TrueLiteral, None) def this(date1: Expression, date2: Expression, roundOff: Expression) = this(date1, date2, roundOff, None) override def first: Expression = date1 override def second: Expression = date2 override def third: Expression = roundOff override def inputTypes: Seq[AbstractDataType] = Seq(TimestampType, TimestampType, BooleanType) override def dataType: DataType = DoubleType override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) override def nullSafeEval(t1: Any, t2: Any, roundOff: Any): Any = { DateTimeUtils.monthsBetween( t1.asInstanceOf[Long], t2.asInstanceOf[Long], roundOff.asInstanceOf[Boolean], zoneId) } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val zid = ctx.addReferenceObj("zoneId", zoneId, classOf[ZoneId].getName) val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") defineCodeGen(ctx, ev, (d1, d2, roundOff) => { s"""$dtu.monthsBetween($d1, $d2, $roundOff, $zid)""" }) } override def prettyName: String = "months_between" override protected def withNewChildrenInternal( newFirst: Expression, newSecond: Expression, newThird: Expression): MonthsBetween = copy(date1 = newFirst, date2 = newSecond, roundOff = newThird) } /** * Parses a column to a date based on the given format. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = """ _FUNC_(date_str[, fmt]) - Parses the `date_str` expression with the `fmt` expression to a date. Returns null with invalid input. By default, it follows casting rules to a date if the `fmt` is omitted. """, arguments = """ Arguments: * date_str - A string to be parsed to date. * fmt - Date format pattern to follow. 
See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">Datetime Patterns</a> for valid date and time format patterns. """, examples = """ Examples: > SELECT _FUNC_('2009-07-30 04:17:52'); 2009-07-30 > SELECT _FUNC_('2016-12-31', 'yyyy-MM-dd'); 2016-12-31 """, group = "datetime_funcs", since = "1.5.0") // scalastyle:on line.size.limit case class ParseToDate( left: Expression, format: Option[Expression], timeZoneId: Option[String] = None) extends RuntimeReplaceable with ImplicitCastInputTypes with TimeZoneAwareExpression { override lazy val replacement: Expression = format.map { f => Cast(GetTimestamp(left, f, TimestampType, timeZoneId), DateType, timeZoneId) }.getOrElse(Cast(left, DateType, timeZoneId)) // backwards compatibility def this(left: Expression, format: Expression) = { this(left, Option(format)) } def this(left: Expression) = { this(left, None) } override def prettyName: String = "to_date" override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Some(timeZoneId)) override def nodePatternsInternal(): Seq[TreePattern] = Seq(RUNTIME_REPLACEABLE) override def children: Seq[Expression] = left +: format.toSeq override def inputTypes: Seq[AbstractDataType] = { // Note: ideally this function should only take string input, but we allow more types here to // be backward compatible. TypeCollection(StringType, DateType, TimestampType, TimestampNTZType) +: format.map(_ => StringType).toSeq } override protected def withNewChildrenInternal( newChildren: IndexedSeq[Expression]): Expression = { if (format.isDefined) { copy(left = newChildren.head, format = Some(newChildren.last)) } else { copy(left = newChildren.head) } } } /** * Parses a column to a timestamp based on the supplied format. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = """ _FUNC_(timestamp_str[, fmt]) - Parses the `timestamp_str` expression with the `fmt` expression to a timestamp. Returns null with invalid input. By default, it follows casting rules to a timestamp if the `fmt` is omitted. The result data type is consistent with the value of configuration `spark.sql.timestampType`. """, arguments = """ Arguments: * timestamp_str - A string to be parsed to timestamp. * fmt - Timestamp format pattern to follow. See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">Datetime Patterns</a> for valid date and time format patterns. 
""", examples = """ Examples: > SELECT _FUNC_('2016-12-31 00:12:00'); 2016-12-31 00:12:00 > SELECT _FUNC_('2016-12-31', 'yyyy-MM-dd'); 2016-12-31 00:00:00 """, group = "datetime_funcs", since = "2.2.0") // scalastyle:on line.size.limit case class ParseToTimestamp( left: Expression, format: Option[Expression], override val dataType: DataType, timeZoneId: Option[String] = None) extends RuntimeReplaceable with ImplicitCastInputTypes with TimeZoneAwareExpression { override lazy val replacement: Expression = format.map { f => GetTimestamp(left, f, dataType, timeZoneId) }.getOrElse(Cast(left, dataType, timeZoneId)) def this(left: Expression, format: Expression) = { this(left, Option(format), SQLConf.get.timestampType) } def this(left: Expression) = this(left, None, SQLConf.get.timestampType) override def nodeName: String = "to_timestamp" override def nodePatternsInternal(): Seq[TreePattern] = Seq(RUNTIME_REPLACEABLE) override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Some(timeZoneId)) override def children: Seq[Expression] = left +: format.toSeq override def inputTypes: Seq[AbstractDataType] = { // Note: ideally this function should only take string input, but we allow more types here to // be backward compatible. TypeCollection(StringType, DateType, TimestampType, TimestampNTZType) +: format.map(_ => StringType).toSeq } override protected def withNewChildrenInternal( newChildren: IndexedSeq[Expression]): Expression = { if (format.isDefined) { copy(left = newChildren.head, format = Some(newChildren.last)) } else { copy(left = newChildren.head) } } } trait TruncInstant extends BinaryExpression with ImplicitCastInputTypes { val instant: Expression val format: Expression override def nullable: Boolean = true private lazy val truncLevel: Int = DateTimeUtils.parseTruncLevel(format.eval().asInstanceOf[UTF8String]) /** * @param input internalRow (time) * @param minLevel Minimum level that can be used for truncation (e.g WEEK for Date input) * @param truncFunc function: (time, level) => time */ protected def evalHelper(input: InternalRow, minLevel: Int)( truncFunc: (Any, Int) => Any): Any = { val level = if (format.foldable) { truncLevel } else { DateTimeUtils.parseTruncLevel(format.eval().asInstanceOf[UTF8String]) } if (level < minLevel) { // unknown format or too small level null } else { val t = instant.eval(input) if (t == null) { null } else { truncFunc(t, level) } } } protected def codeGenHelper( ctx: CodegenContext, ev: ExprCode, minLevel: Int, orderReversed: Boolean = false)( truncFunc: (String, String) => String) : ExprCode = { val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") val javaType = CodeGenerator.javaType(dataType) if (format.foldable) { if (truncLevel < minLevel) { ev.copy(code = code""" boolean ${ev.isNull} = true; $javaType ${ev.value} = ${CodeGenerator.defaultValue(dataType)};""") } else { val t = instant.genCode(ctx) val truncFuncStr = truncFunc(t.value, truncLevel.toString) ev.copy(code = code""" ${t.code} boolean ${ev.isNull} = ${t.isNull}; $javaType ${ev.value} = ${CodeGenerator.defaultValue(dataType)}; if (!${ev.isNull}) { ${ev.value} = $dtu.$truncFuncStr; }""") } } else { nullSafeCodeGen(ctx, ev, (left, right) => { val form = ctx.freshName("form") val (dateVal, fmt) = if (orderReversed) { (right, left) } else { (left, right) } val truncFuncStr = truncFunc(dateVal, form) s""" int $form = $dtu.parseTruncLevel($fmt); if ($form < $minLevel) { ${ev.isNull} = true; } else { ${ev.value} = $dtu.$truncFuncStr } """ }) } } } /** * Returns 
date truncated to the unit specified by the format. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = """ _FUNC_(date, fmt) - Returns `date` with the time portion of the day truncated to the unit specified by the format model `fmt`. """, arguments = """ Arguments: * date - date value or valid date string * fmt - the format representing the unit to be truncated to - "YEAR", "YYYY", "YY" - truncate to the first date of the year that the `date` falls in - "QUARTER" - truncate to the first date of the quarter that the `date` falls in - "MONTH", "MM", "MON" - truncate to the first date of the month that the `date` falls in - "WEEK" - truncate to the Monday of the week that the `date` falls in """, examples = """ Examples: > SELECT _FUNC_('2019-08-04', 'week'); 2019-07-29 > SELECT _FUNC_('2019-08-04', 'quarter'); 2019-07-01 > SELECT _FUNC_('2009-02-12', 'MM'); 2009-02-01 > SELECT _FUNC_('2015-10-27', 'YEAR'); 2015-01-01 """, group = "datetime_funcs", since = "1.5.0") // scalastyle:on line.size.limit case class TruncDate(date: Expression, format: Expression) extends TruncInstant { override def left: Expression = date override def right: Expression = format override def inputTypes: Seq[AbstractDataType] = Seq(DateType, StringType) override def dataType: DataType = DateType override def prettyName: String = "trunc" override val instant = date override def eval(input: InternalRow): Any = { evalHelper(input, minLevel = MIN_LEVEL_OF_DATE_TRUNC) { (d: Any, level: Int) => DateTimeUtils.truncDate(d.asInstanceOf[Int], level) } } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { codeGenHelper(ctx, ev, minLevel = MIN_LEVEL_OF_DATE_TRUNC) { (date: String, fmt: String) => s"truncDate($date, $fmt);" } } override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): TruncDate = copy(date = newLeft, format = newRight) } /** * Returns timestamp truncated to the unit specified by the format. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = """ _FUNC_(fmt, ts) - Returns timestamp `ts` truncated to the unit specified by the format model `fmt`. 
""", arguments = """ Arguments: * fmt - the format representing the unit to be truncated to - "YEAR", "YYYY", "YY" - truncate to the first date of the year that the `ts` falls in, the time part will be zero out - "QUARTER" - truncate to the first date of the quarter that the `ts` falls in, the time part will be zero out - "MONTH", "MM", "MON" - truncate to the first date of the month that the `ts` falls in, the time part will be zero out - "WEEK" - truncate to the Monday of the week that the `ts` falls in, the time part will be zero out - "DAY", "DD" - zero out the time part - "HOUR" - zero out the minute and second with fraction part - "MINUTE"- zero out the second with fraction part - "SECOND" - zero out the second fraction part - "MILLISECOND" - zero out the microseconds - "MICROSECOND" - everything remains * ts - datetime value or valid timestamp string """, examples = """ Examples: > SELECT _FUNC_('YEAR', '2015-03-05T09:32:05.359'); 2015-01-01 00:00:00 > SELECT _FUNC_('MM', '2015-03-05T09:32:05.359'); 2015-03-01 00:00:00 > SELECT _FUNC_('DD', '2015-03-05T09:32:05.359'); 2015-03-05 00:00:00 > SELECT _FUNC_('HOUR', '2015-03-05T09:32:05.359'); 2015-03-05 09:00:00 > SELECT _FUNC_('MILLISECOND', '2015-03-05T09:32:05.123456'); 2015-03-05 09:32:05.123 """, group = "datetime_funcs", since = "2.3.0") // scalastyle:on line.size.limit case class TruncTimestamp( format: Expression, timestamp: Expression, timeZoneId: Option[String] = None) extends TruncInstant with TimeZoneAwareExpression { override def left: Expression = format override def right: Expression = timestamp override def inputTypes: Seq[AbstractDataType] = Seq(StringType, TimestampType) override def dataType: TimestampType = TimestampType override def prettyName: String = "date_trunc" override val instant = timestamp override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) def this(format: Expression, timestamp: Expression) = this(format, timestamp, None) override def eval(input: InternalRow): Any = { evalHelper(input, minLevel = MIN_LEVEL_OF_TIMESTAMP_TRUNC) { (t: Any, level: Int) => DateTimeUtils.truncTimestamp(t.asInstanceOf[Long], level, zoneId) } } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val zid = ctx.addReferenceObj("zoneId", zoneId, classOf[ZoneId].getName) codeGenHelper(ctx, ev, minLevel = MIN_LEVEL_OF_TIMESTAMP_TRUNC, true) { (date: String, fmt: String) => s"truncTimestamp($date, $fmt, $zid);" } } override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): TruncTimestamp = copy(format = newLeft, timestamp = newRight) } /** * Returns the number of days from startDate to endDate. 
*/ @ExpressionDescription( usage = "_FUNC_(endDate, startDate) - Returns the number of days from `startDate` to `endDate`.", examples = """ Examples: > SELECT _FUNC_('2009-07-31', '2009-07-30'); 1 > SELECT _FUNC_('2009-07-30', '2009-07-31'); -1 """, group = "datetime_funcs", since = "1.5.0") case class DateDiff(endDate: Expression, startDate: Expression) extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant { override def left: Expression = endDate override def right: Expression = startDate override def inputTypes: Seq[AbstractDataType] = Seq(DateType, DateType) override def dataType: DataType = IntegerType override def nullSafeEval(end: Any, start: Any): Any = { end.asInstanceOf[Int] - start.asInstanceOf[Int] } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { defineCodeGen(ctx, ev, (end, start) => s"$end - $start") } override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): DateDiff = copy(endDate = newLeft, startDate = newRight) } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(year, month, day) - Create date from year, month and day fields. If the configuration `spark.sql.ansi.enabled` is false, the function returns NULL on invalid inputs. Otherwise, it will throw an error instead.", arguments = """ Arguments: * year - the year to represent, from 1 to 9999 * month - the month-of-year to represent, from 1 (January) to 12 (December) * day - the day-of-month to represent, from 1 to 31 """, examples = """ Examples: > SELECT _FUNC_(2013, 7, 15); 2013-07-15 > SELECT _FUNC_(2019, 7, NULL); NULL """, group = "datetime_funcs", since = "3.0.0") // scalastyle:on line.size.limit case class MakeDate( year: Expression, month: Expression, day: Expression, failOnError: Boolean = SQLConf.get.ansiEnabled) extends TernaryExpression with ImplicitCastInputTypes with NullIntolerant { def this(year: Expression, month: Expression, day: Expression) = this(year, month, day, SQLConf.get.ansiEnabled) override def first: Expression = year override def second: Expression = month override def third: Expression = day override def inputTypes: Seq[AbstractDataType] = Seq(IntegerType, IntegerType, IntegerType) override def dataType: DataType = DateType override def nullable: Boolean = if (failOnError) children.exists(_.nullable) else true override def nullSafeEval(year: Any, month: Any, day: Any): Any = { try { val ld = LocalDate.of(year.asInstanceOf[Int], month.asInstanceOf[Int], day.asInstanceOf[Int]) localDateToDays(ld) } catch { case e: java.time.DateTimeException => if (failOnError) throw QueryExecutionErrors.ansiDateTimeError(e) else null } } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") val failOnErrorBranch = if (failOnError) { "throw QueryExecutionErrors.ansiDateTimeError(e);" } else { s"${ev.isNull} = true;" } nullSafeCodeGen(ctx, ev, (year, month, day) => { s""" try { ${ev.value} = $dtu.localDateToDays(java.time.LocalDate.of($year, $month, $day)); } catch (java.time.DateTimeException e) { $failOnErrorBranch }""" }) } override def prettyName: String = "make_date" override protected def withNewChildrenInternal( newFirst: Expression, newSecond: Expression, newThird: Expression): MakeDate = copy(year = newFirst, month = newSecond, day = newThird) } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(year, month, day, hour, min, sec) - Create local date-time from year, month, day, hour, min, sec fields. 
If the configuration `spark.sql.ansi.enabled` is false, the function returns NULL on invalid inputs. Otherwise, it will throw an error instead.", arguments = """ Arguments: * year - the year to represent, from 1 to 9999 * month - the month-of-year to represent, from 1 (January) to 12 (December) * day - the day-of-month to represent, from 1 to 31 * hour - the hour-of-day to represent, from 0 to 23 * min - the minute-of-hour to represent, from 0 to 59 * sec - the second-of-minute and its micro-fraction to represent, from 0 to 60. If the sec argument equals to 60, the seconds field is set to 0 and 1 minute is added to the final timestamp. """, examples = """ Examples: > SELECT _FUNC_(2014, 12, 28, 6, 30, 45.887); 2014-12-28 06:30:45.887 > SELECT _FUNC_(2019, 6, 30, 23, 59, 60); 2019-07-01 00:00:00 > SELECT _FUNC_(null, 7, 22, 15, 30, 0); NULL """, group = "datetime_funcs", since = "3.3.0") // scalastyle:on line.size.limit object MakeTimestampNTZExpressionBuilder extends ExpressionBuilder { override def build(funcName: String, expressions: Seq[Expression]): Expression = { val numArgs = expressions.length if (numArgs == 6) { MakeTimestamp( expressions(0), expressions(1), expressions(2), expressions(3), expressions(4), expressions(5), dataType = TimestampNTZType) } else { throw QueryCompilationErrors.invalidFunctionArgumentNumberError(Seq(6), funcName, numArgs) } } } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(year, month, day, hour, min, sec[, timezone]) - Create the current timestamp with local time zone from year, month, day, hour, min, sec and timezone fields. If the configuration `spark.sql.ansi.enabled` is false, the function returns NULL on invalid inputs. Otherwise, it will throw an error instead.", arguments = """ Arguments: * year - the year to represent, from 1 to 9999 * month - the month-of-year to represent, from 1 (January) to 12 (December) * day - the day-of-month to represent, from 1 to 31 * hour - the hour-of-day to represent, from 0 to 23 * min - the minute-of-hour to represent, from 0 to 59 * sec - the second-of-minute and its micro-fraction to represent, from 0 to 60. If the sec argument equals to 60, the seconds field is set to 0 and 1 minute is added to the final timestamp. * timezone - the time zone identifier. For example, CET, UTC and etc. """, examples = """ Examples: > SELECT _FUNC_(2014, 12, 28, 6, 30, 45.887); 2014-12-28 06:30:45.887 > SELECT _FUNC_(2014, 12, 28, 6, 30, 45.887, 'CET'); 2014-12-27 21:30:45.887 > SELECT _FUNC_(2019, 6, 30, 23, 59, 60); 2019-07-01 00:00:00 > SELECT _FUNC_(null, 7, 22, 15, 30, 0); NULL """, group = "datetime_funcs", since = "3.3.0") // scalastyle:on line.size.limit object MakeTimestampLTZExpressionBuilder extends ExpressionBuilder { override def build(funcName: String, expressions: Seq[Expression]): Expression = { val numArgs = expressions.length if (numArgs == 6 || numArgs == 7) { MakeTimestamp( expressions(0), expressions(1), expressions(2), expressions(3), expressions(4), expressions(5), expressions.drop(6).lastOption, dataType = TimestampType) } else { throw QueryCompilationErrors.invalidFunctionArgumentNumberError(Seq(6), funcName, numArgs) } } } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(year, month, day, hour, min, sec[, timezone]) - Create timestamp from year, month, day, hour, min, sec and timezone fields. The result data type is consistent with the value of configuration `spark.sql.timestampType`. 
If the configuration `spark.sql.ansi.enabled` is false, the function returns NULL on invalid inputs. Otherwise, it will throw an error instead.", arguments = """ Arguments: * year - the year to represent, from 1 to 9999 * month - the month-of-year to represent, from 1 (January) to 12 (December) * day - the day-of-month to represent, from 1 to 31 * hour - the hour-of-day to represent, from 0 to 23 * min - the minute-of-hour to represent, from 0 to 59 * sec - the second-of-minute and its micro-fraction to represent, from 0 to 60. The value can be either an integer like 13, or a fraction like 13.123. If the sec argument equals to 60, the seconds field is set to 0 and 1 minute is added to the final timestamp. * timezone - the time zone identifier. For example, CET, UTC and etc. """, examples = """ Examples: > SELECT _FUNC_(2014, 12, 28, 6, 30, 45.887); 2014-12-28 06:30:45.887 > SELECT _FUNC_(2014, 12, 28, 6, 30, 45.887, 'CET'); 2014-12-27 21:30:45.887 > SELECT _FUNC_(2019, 6, 30, 23, 59, 60); 2019-07-01 00:00:00 > SELECT _FUNC_(2019, 6, 30, 23, 59, 1); 2019-06-30 23:59:01 > SELECT _FUNC_(null, 7, 22, 15, 30, 0); NULL """, group = "datetime_funcs", since = "3.0.0") // scalastyle:on line.size.limit case class MakeTimestamp( year: Expression, month: Expression, day: Expression, hour: Expression, min: Expression, sec: Expression, timezone: Option[Expression] = None, timeZoneId: Option[String] = None, failOnError: Boolean = SQLConf.get.ansiEnabled, override val dataType: DataType = SQLConf.get.timestampType) extends SeptenaryExpression with TimeZoneAwareExpression with ImplicitCastInputTypes with NullIntolerant { def this( year: Expression, month: Expression, day: Expression, hour: Expression, min: Expression, sec: Expression) = { this(year, month, day, hour, min, sec, None, None, SQLConf.get.ansiEnabled, SQLConf.get.timestampType) } def this( year: Expression, month: Expression, day: Expression, hour: Expression, min: Expression, sec: Expression, timezone: Expression) = { this(year, month, day, hour, min, sec, Some(timezone), None, SQLConf.get.ansiEnabled, SQLConf.get.timestampType) } override def children: Seq[Expression] = Seq(year, month, day, hour, min, sec) ++ timezone // Accept `sec` as DecimalType to avoid losing precision of microseconds while converting // them to the fractional part of `sec`. Because `sec` also accepts IntegerType, and an integer can be // cast into a decimal safely, we use DecimalType(16, 6) which is wider than DecimalType(10, 0).
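// For example, a `sec` value of 13.123456 arrives here as a Decimal with scale 6: its integral
// part (13) becomes the seconds of the result and its fractional part (123456) becomes the
// microseconds, as done in `toMicros` below.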
override def inputTypes: Seq[AbstractDataType] = Seq(IntegerType, IntegerType, IntegerType, IntegerType, IntegerType, DecimalType(16, 6)) ++ timezone.map(_ => StringType) override def nullable: Boolean = if (failOnError) children.exists(_.nullable) else true override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) private def toMicros( year: Int, month: Int, day: Int, hour: Int, min: Int, secAndMicros: Decimal, zoneId: ZoneId): Any = { try { assert(secAndMicros.scale == 6, s"Seconds fraction must have 6 digits for microseconds but got ${secAndMicros.scale}") val unscaledSecFrac = secAndMicros.toUnscaledLong val totalMicros = unscaledSecFrac.toInt // 8 digits cannot overflow Int val seconds = Math.floorDiv(totalMicros, MICROS_PER_SECOND.toInt) val nanos = Math.floorMod(totalMicros, MICROS_PER_SECOND.toInt) * NANOS_PER_MICROS.toInt val ldt = if (seconds == 60) { if (nanos == 0) { // This case of sec = 60 and nanos = 0 is supported for compatibility with PostgreSQL LocalDateTime.of(year, month, day, hour, min, 0, 0).plusMinutes(1) } else { throw QueryExecutionErrors.invalidFractionOfSecondError() } } else { LocalDateTime.of(year, month, day, hour, min, seconds, nanos) } if (dataType == TimestampType) { instantToMicros(ldt.atZone(zoneId).toInstant) } else { localDateTimeToMicros(ldt) } } catch { case e: SparkDateTimeException if failOnError => throw e case e: DateTimeException if failOnError => throw QueryExecutionErrors.ansiDateTimeError(e) case _: DateTimeException => null } } override def nullSafeEval( year: Any, month: Any, day: Any, hour: Any, min: Any, sec: Any, timezone: Option[Any]): Any = { val zid = timezone .map(tz => DateTimeUtils.getZoneId(tz.asInstanceOf[UTF8String].toString)) .getOrElse(zoneId) toMicros( year.asInstanceOf[Int], month.asInstanceOf[Int], day.asInstanceOf[Int], hour.asInstanceOf[Int], min.asInstanceOf[Int], sec.asInstanceOf[Decimal], zid) } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") val zid = ctx.addReferenceObj("zoneId", zoneId, classOf[ZoneId].getName) val d = Decimal.getClass.getName.stripSuffix("$") val failOnErrorBranch = if (failOnError) { "throw QueryExecutionErrors.ansiDateTimeError(e);" } else { s"${ev.isNull} = true;" } val failOnSparkErrorBranch = if (failOnError) "throw e;" else s"${ev.isNull} = true;" nullSafeCodeGen(ctx, ev, (year, month, day, hour, min, secAndNanos, timezone) => { val zoneId = timezone.map(tz => s"$dtu.getZoneId(${tz}.toString())").getOrElse(zid) val toMicrosCode = if (dataType == TimestampType) { s""" |java.time.Instant instant = ldt.atZone($zoneId).toInstant(); |${ev.value} = $dtu.instantToMicros(instant); |""".stripMargin } else { s"${ev.value} = $dtu.localDateTimeToMicros(ldt);" } s""" try { org.apache.spark.sql.types.Decimal secFloor = $secAndNanos.floor(); org.apache.spark.sql.types.Decimal nanosPerSec = $d$$.MODULE$$.apply(1000000000L, 10, 0); int nanos = (($secAndNanos.$$minus(secFloor)).$$times(nanosPerSec)).toInt(); int seconds = secFloor.toInt(); java.time.LocalDateTime ldt; if (seconds == 60) { if (nanos == 0) { ldt = java.time.LocalDateTime.of( $year, $month, $day, $hour, $min, 0, 0).plusMinutes(1); } else { throw QueryExecutionErrors.invalidFractionOfSecondError(); } } else { ldt = java.time.LocalDateTime.of($year, $month, $day, $hour, $min, seconds, nanos); } $toMicrosCode } catch (org.apache.spark.SparkDateTimeException e) { $failOnSparkErrorBranch } catch 
(java.time.DateTimeException e) { $failOnErrorBranch }""" }) } override def nodeName: String = "make_timestamp" // override def children: Seq[Expression] = Seq(year, month, day, hour, min, sec) ++ timezone override protected def withNewChildrenInternal( newChildren: IndexedSeq[Expression]): MakeTimestamp = { val timezoneOpt = if (timezone.isDefined) Some(newChildren(6)) else None copy( year = newChildren(0), month = newChildren(1), day = newChildren(2), hour = newChildren(3), min = newChildren(4), sec = newChildren(5), timezone = timezoneOpt) } } object DatePart { def parseExtractField( extractField: String, source: Expression): Expression = extractField.toUpperCase(Locale.ROOT) match { case "YEAR" | "Y" | "YEARS" | "YR" | "YRS" => Year(source) case "YEAROFWEEK" => YearOfWeek(source) case "QUARTER" | "QTR" => Quarter(source) case "MONTH" | "MON" | "MONS" | "MONTHS" => Month(source) case "WEEK" | "W" | "WEEKS" => WeekOfYear(source) case "DAY" | "D" | "DAYS" => DayOfMonth(source) case "DAYOFWEEK" | "DOW" => DayOfWeek(source) case "DAYOFWEEK_ISO" | "DOW_ISO" => Add(WeekDay(source), Literal(1)) case "DOY" => DayOfYear(source) case "HOUR" | "H" | "HOURS" | "HR" | "HRS" => Hour(source) case "MINUTE" | "M" | "MIN" | "MINS" | "MINUTES" => Minute(source) case "SECOND" | "S" | "SEC" | "SECONDS" | "SECS" => SecondWithFraction(source) case _ => throw QueryCompilationErrors.literalTypeUnsupportedForSourceTypeError(extractField, source) } } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(field, source) - Extracts a part of the date/timestamp or interval source.", arguments = """ Arguments: * field - selects which part of the source should be extracted, and supported string values are as same as the fields of the equivalent function `EXTRACT`. * source - a date/timestamp or interval column from where `field` should be extracted """, examples = """ Examples: > SELECT _FUNC_('YEAR', TIMESTAMP '2019-08-12 01:00:00.123456'); 2019 > SELECT _FUNC_('week', timestamp'2019-08-12 01:00:00.123456'); 33 > SELECT _FUNC_('doy', DATE'2019-08-12'); 224 > SELECT _FUNC_('SECONDS', timestamp'2019-10-01 00:00:01.000001'); 1.000001 > SELECT _FUNC_('days', interval 5 days 3 hours 7 minutes); 5 > SELECT _FUNC_('seconds', interval 5 hours 30 seconds 1 milliseconds 1 microseconds); 30.001001 > SELECT _FUNC_('MONTH', INTERVAL '2021-11' YEAR TO MONTH); 11 > SELECT _FUNC_('MINUTE', INTERVAL '123 23:55:59.002001' DAY TO SECOND); 55 """, note = """ The _FUNC_ function is equivalent to the SQL-standard function `EXTRACT(field FROM source)` """, group = "datetime_funcs", since = "3.0.0") // scalastyle:on line.size.limit object DatePartExpressionBuilder extends ExpressionBuilder { override def build(funcName: String, expressions: Seq[Expression]): Expression = { val numArgs = expressions.length if (numArgs == 2) { val field = expressions(0) val source = expressions(1) Extract(field, source, Extract.createExpr(funcName, field, source)) } else { throw QueryCompilationErrors.invalidFunctionArgumentNumberError(Seq(2), funcName, numArgs) } } } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(field FROM source) - Extracts a part of the date/timestamp or interval source.", arguments = """ Arguments: * field - selects which part of the source should be extracted - Supported string values of `field` for dates and timestamps are(case insensitive): - "YEAR", ("Y", "YEARS", "YR", "YRS") - the year field - "YEAROFWEEK" - the ISO 8601 week-numbering year that the datetime falls in. 
For example, 2005-01-02 is part of the 53rd week of year 2004, so the result is 2004 - "QUARTER", ("QTR") - the quarter (1 - 4) of the year that the datetime falls in - "MONTH", ("MON", "MONS", "MONTHS") - the month field (1 - 12) - "WEEK", ("W", "WEEKS") - the number of the ISO 8601 week-of-week-based-year. A week is considered to start on a Monday and week 1 is the first week with >3 days. In the ISO week-numbering system, it is possible for early-January dates to be part of the 52nd or 53rd week of the previous year, and for late-December dates to be part of the first week of the next year. For example, 2005-01-02 is part of the 53rd week of year 2004, while 2012-12-31 is part of the first week of 2013 - "DAY", ("D", "DAYS") - the day of the month field (1 - 31) - "DAYOFWEEK",("DOW") - the day of the week for datetime as Sunday(1) to Saturday(7) - "DAYOFWEEK_ISO",("DOW_ISO") - ISO 8601 based day of the week for datetime as Monday(1) to Sunday(7) - "DOY" - the day of the year (1 - 365/366) - "HOUR", ("H", "HOURS", "HR", "HRS") - The hour field (0 - 23) - "MINUTE", ("M", "MIN", "MINS", "MINUTES") - the minutes field (0 - 59) - "SECOND", ("S", "SEC", "SECONDS", "SECS") - the seconds field, including fractional parts - Supported string values of `field` for interval(which consists of `months`, `days`, `microseconds`) are(case insensitive): - "YEAR", ("Y", "YEARS", "YR", "YRS") - the total `months` / 12 - "MONTH", ("MON", "MONS", "MONTHS") - the total `months` % 12 - "DAY", ("D", "DAYS") - the `days` part of interval - "HOUR", ("H", "HOURS", "HR", "HRS") - how many hours the `microseconds` contains - "MINUTE", ("M", "MIN", "MINS", "MINUTES") - how many minutes left after taking hours from `microseconds` - "SECOND", ("S", "SEC", "SECONDS", "SECS") - how many second with fractions left after taking hours and minutes from `microseconds` * source - a date/timestamp or interval column from where `field` should be extracted """, examples = """ Examples: > SELECT _FUNC_(YEAR FROM TIMESTAMP '2019-08-12 01:00:00.123456'); 2019 > SELECT _FUNC_(week FROM timestamp'2019-08-12 01:00:00.123456'); 33 > SELECT _FUNC_(doy FROM DATE'2019-08-12'); 224 > SELECT _FUNC_(SECONDS FROM timestamp'2019-10-01 00:00:01.000001'); 1.000001 > SELECT _FUNC_(days FROM interval 5 days 3 hours 7 minutes); 5 > SELECT _FUNC_(seconds FROM interval 5 hours 30 seconds 1 milliseconds 1 microseconds); 30.001001 > SELECT _FUNC_(MONTH FROM INTERVAL '2021-11' YEAR TO MONTH); 11 > SELECT _FUNC_(MINUTE FROM INTERVAL '123 23:55:59.002001' DAY TO SECOND); 55 """, note = """ The _FUNC_ function is equivalent to `date_part(field, source)`. """, group = "datetime_funcs", since = "3.0.0") // scalastyle:on line.size.limit case class Extract(field: Expression, source: Expression, replacement: Expression) extends RuntimeReplaceable with InheritAnalysisRules { def this(field: Expression, source: Expression) = this(field, source, Extract.createExpr("extract", field, source)) override def parameters: Seq[Expression] = Seq(field, source) override def makeSQLString(childrenSQL: Seq[String]): String = { getTagValue(FunctionRegistry.FUNC_ALIAS) match { case Some("date_part") => s"$prettyName(${childrenSQL.mkString(", ")})" case _ => s"$prettyName(${childrenSQL.mkString(" FROM ")})" } } override protected def withNewChildInternal(newChild: Expression): Expression = { copy(replacement = newChild) } } object Extract { def createExpr(funcName: String, field: Expression, source: Expression): Expression = { // both string and null literals are allowed. 
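// For example, `extract(YEAR FROM ts)` resolves to `Year(ts)` through DatePart.parseExtractField,
// while an interval source such as `extract(MONTH FROM INTERVAL '2021-11' YEAR TO MONTH)` is
// dispatched to ExtractIntervalPart.parseExtractField instead.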
if ((field.dataType == StringType || field.dataType == NullType) && field.foldable) { val fieldStr = field.eval().asInstanceOf[UTF8String] if (fieldStr == null) { Literal(null, DoubleType) } else { source.dataType match { case _: AnsiIntervalType | CalendarIntervalType => ExtractIntervalPart.parseExtractField(fieldStr.toString, source) case _ => DatePart.parseExtractField(fieldStr.toString, source) } } } else { throw QueryCompilationErrors.requireLiteralParameter(funcName, "field", "string") } } } /** * Returns the interval from `right` to `left` timestamps. * - When the SQL config `spark.sql.legacy.interval.enabled` is `true`, * it returns `CalendarIntervalType` in which the `months` and `days` fields are set to 0 and * the `microseconds` field is initialized to the microsecond difference between * the given timestamps. * - Otherwise the expression returns `DayTimeIntervalType` with the difference in microseconds * between the given timestamps. */ case class SubtractTimestamps( left: Expression, right: Expression, legacyInterval: Boolean, timeZoneId: Option[String] = None) extends BinaryExpression with TimeZoneAwareExpression with ExpectsInputTypes with NullIntolerant { def this(endTimestamp: Expression, startTimestamp: Expression) = this(endTimestamp, startTimestamp, SQLConf.get.legacyIntervalEnabled) override def inputTypes: Seq[AbstractDataType] = Seq(AnyTimestampType, AnyTimestampType) override def dataType: DataType = if (legacyInterval) CalendarIntervalType else DayTimeIntervalType() override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) @transient private lazy val zoneIdInEval: ZoneId = zoneIdForType(left.dataType) @transient private lazy val evalFunc: (Long, Long) => Any = legacyInterval match { case false => (leftMicros, rightMicros) => subtractTimestamps(leftMicros, rightMicros, zoneIdInEval) case true => (leftMicros, rightMicros) => new CalendarInterval(0, 0, leftMicros - rightMicros) } override def nullSafeEval(leftMicros: Any, rightMicros: Any): Any = { evalFunc(leftMicros.asInstanceOf[Long], rightMicros.asInstanceOf[Long]) } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = legacyInterval match { case false => val zid = ctx.addReferenceObj("zoneId", zoneIdInEval, classOf[ZoneId].getName) val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") defineCodeGen(ctx, ev, (l, r) => s"""$dtu.subtractTimestamps($l, $r, $zid)""") case true => defineCodeGen(ctx, ev, (end, start) => s"new org.apache.spark.unsafe.types.CalendarInterval(0, 0, $end - $start)") } override def toString: String = s"($left - $right)" override def sql: String = s"(${left.sql} - ${right.sql})" override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): SubtractTimestamps = copy(left = newLeft, right = newRight) } object SubtractTimestamps { def apply(left: Expression, right: Expression): SubtractTimestamps = { new SubtractTimestamps(left, right) } } /** * Returns the interval from the `left` date (inclusive) to the `right` date (exclusive). * - When the SQL config `spark.sql.legacy.interval.enabled` is `true`, * it returns `CalendarIntervalType` in which the `microseconds` field is set to 0 and * the `months` and `days` fields are initialized to the difference between the given dates. * - Otherwise the expression returns `DayTimeIntervalType` with the difference in days * between the given dates.
*/ case class SubtractDates( left: Expression, right: Expression, legacyInterval: Boolean) extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant { def this(left: Expression, right: Expression) = this(left, right, SQLConf.get.legacyIntervalEnabled) override def inputTypes: Seq[AbstractDataType] = Seq(DateType, DateType) override def dataType: DataType = { if (legacyInterval) CalendarIntervalType else DayTimeIntervalType(DAY) } @transient private lazy val evalFunc: (Int, Int) => Any = legacyInterval match { case false => (leftDays: Int, rightDays: Int) => Math.multiplyExact(Math.subtractExact(leftDays, rightDays), MICROS_PER_DAY) case true => (leftDays: Int, rightDays: Int) => subtractDates(leftDays, rightDays) } override def nullSafeEval(leftDays: Any, rightDays: Any): Any = { evalFunc(leftDays.asInstanceOf[Int], rightDays.asInstanceOf[Int]) } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = legacyInterval match { case false => val m = classOf[Math].getName defineCodeGen(ctx, ev, (leftDays, rightDays) => s"$m.multiplyExact($m.subtractExact($leftDays, $rightDays), ${MICROS_PER_DAY}L)") case true => defineCodeGen(ctx, ev, (leftDays, rightDays) => { val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") s"$dtu.subtractDates($leftDays, $rightDays)" }) } override def toString: String = s"($left - $right)" override def sql: String = s"(${left.sql} - ${right.sql})" override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): SubtractDates = copy(left = newLeft, right = newRight) } object SubtractDates { def apply(left: Expression, right: Expression): SubtractDates = new SubtractDates(left, right) } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(sourceTz, targetTz, sourceTs) - Converts the timestamp without time zone `sourceTs` from the `sourceTz` time zone to `targetTz`. 
", arguments = """ Arguments: * sourceTz - the time zone for the input timestamp * targetTz - the time zone to which the input timestamp should be converted * sourceTs - a timestamp without time zone """, examples = """ Examples: > SELECT _FUNC_('Europe/Amsterdam', 'America/Los_Angeles', timestamp_ntz'2021-12-06 00:00:00'); 2021-12-05 15:00:00 """, group = "datetime_funcs", since = "3.3.0") // scalastyle:on line.size.limit case class ConvertTimezone( sourceTz: Expression, targetTz: Expression, sourceTs: Expression) extends TernaryExpression with ImplicitCastInputTypes with NullIntolerant { override def first: Expression = sourceTz override def second: Expression = targetTz override def third: Expression = sourceTs override def inputTypes: Seq[AbstractDataType] = Seq(StringType, StringType, TimestampNTZType) override def dataType: DataType = TimestampNTZType override def nullSafeEval(srcTz: Any, tgtTz: Any, micros: Any): Any = { DateTimeUtils.convertTimestampNtzToAnotherTz( srcTz.asInstanceOf[UTF8String].toString, tgtTz.asInstanceOf[UTF8String].toString, micros.asInstanceOf[Long]) } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") defineCodeGen(ctx, ev, (srcTz, tgtTz, micros) => s"""$dtu.convertTimestampNtzToAnotherTz($srcTz.toString(), $tgtTz.toString(), $micros)""") } override def prettyName: String = "convert_timezone" override protected def withNewChildrenInternal( newFirst: Expression, newSecond: Expression, newThird: Expression): ConvertTimezone = { copy(sourceTz = newFirst, targetTz = newSecond, sourceTs = newThird) } } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(unit, quantity, timestamp) - Adds the specified number of units to the given timestamp.", arguments = """ Arguments: * unit - this indicates the units of datetime that you want to add. Supported string values of `unit` are (case insensitive): - "YEAR" - "QUARTER" - 3 months - "MONTH" - "WEEK" - 7 days - "DAY", "DAYOFYEAR" - "HOUR" - "MINUTE" - "SECOND" - "MILLISECOND" - "MICROSECOND" * quantity - this is the number of units of time that you want to add. * timestamp - this is a timestamp (w/ or w/o timezone) to which you want to add. 
""", examples = """ Examples: > SELECT _FUNC_('HOUR', 8, timestamp_ntz'2022-02-11 20:30:00'); 2022-02-12 04:30:00 > SELECT _FUNC_('MONTH', 1, timestamp_ltz'2022-01-31 00:00:00'); 2022-02-28 00:00:00 > SELECT _FUNC_(SECOND, -10, date'2022-01-01'); 2021-12-31 23:59:50 > SELECT _FUNC_(YEAR, 10, timestamp'2000-01-01 01:02:03.123456'); 2010-01-01 01:02:03.123456 """, group = "datetime_funcs", since = "3.3.0") // scalastyle:on line.size.limit case class TimestampAdd( unit: Expression, quantity: Expression, timestamp: Expression, timeZoneId: Option[String] = None) extends TernaryExpression with ImplicitCastInputTypes with NullIntolerant with TimeZoneAwareExpression { def this(unit: Expression, quantity: Expression, timestamp: Expression) = this(unit, quantity, timestamp, None) override def first: Expression = unit override def second: Expression = quantity override def third: Expression = timestamp override def inputTypes: Seq[AbstractDataType] = Seq(StringType, IntegerType, AnyTimestampType) override def dataType: DataType = timestamp.dataType override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) @transient private lazy val zoneIdInEval: ZoneId = zoneIdForType(timestamp.dataType) override def nullSafeEval(u: Any, q: Any, micros: Any): Any = { DateTimeUtils.timestampAdd( u.asInstanceOf[UTF8String].toString, q.asInstanceOf[Int], micros.asInstanceOf[Long], zoneIdInEval) } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") val zid = ctx.addReferenceObj("zoneId", zoneIdInEval, classOf[ZoneId].getName) defineCodeGen(ctx, ev, (u, q, micros) => s"""$dtu.timestampAdd($u.toString(), $q, $micros, $zid)""") } override def prettyName: String = "timestampadd" override protected def withNewChildrenInternal( newFirst: Expression, newSecond: Expression, newThird: Expression): TimestampAdd = { copy(unit = newFirst, quantity = newSecond, timestamp = newThird) } } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(unit, startTimestamp, endTimestamp) - Gets the difference between the timestamps `endTimestamp` and `startTimestamp` in the specified units by truncating the fraction part.", arguments = """ Arguments: * unit - this indicates the units of the difference between the given timestamps. Supported string values of `unit` are (case insensitive): - "YEAR" - "QUARTER" - 3 months - "MONTH" - "WEEK" - 7 days - "DAY" - "HOUR" - "MINUTE" - "SECOND" - "MILLISECOND" - "MICROSECOND" * startTimestamp - A timestamp which the expression subtracts from `endTimestamp`. * endTimestamp - A timestamp from which the expression subtracts `startTimestamp`. 
""", examples = """ Examples: > SELECT _FUNC_('HOUR', timestamp_ntz'2022-02-11 20:30:00', timestamp_ntz'2022-02-12 04:30:00'); 8 > SELECT _FUNC_('MONTH', timestamp_ltz'2022-01-01 00:00:00', timestamp_ltz'2022-02-28 00:00:00'); 1 > SELECT _FUNC_(SECOND, date'2022-01-01', timestamp'2021-12-31 23:59:50'); -10 > SELECT _FUNC_(YEAR, timestamp'2000-01-01 01:02:03.123456', timestamp'2010-01-01 01:02:03.123456'); 10 """, group = "datetime_funcs", since = "3.3.0") // scalastyle:on line.size.limit case class TimestampDiff( unit: Expression, startTimestamp: Expression, endTimestamp: Expression, timeZoneId: Option[String] = None) extends TernaryExpression with ImplicitCastInputTypes with NullIntolerant with TimeZoneAwareExpression { def this(unit: Expression, quantity: Expression, timestamp: Expression) = this(unit, quantity, timestamp, None) override def first: Expression = unit override def second: Expression = startTimestamp override def third: Expression = endTimestamp override def inputTypes: Seq[AbstractDataType] = Seq(StringType, TimestampType, TimestampType) override def dataType: DataType = LongType override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression = copy(timeZoneId = Option(timeZoneId)) @transient private lazy val zoneIdInEval: ZoneId = zoneIdForType(endTimestamp.dataType) override def nullSafeEval(u: Any, startMicros: Any, endMicros: Any): Any = { DateTimeUtils.timestampDiff( u.asInstanceOf[UTF8String].toString, startMicros.asInstanceOf[Long], endMicros.asInstanceOf[Long], zoneIdInEval) } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val dtu = DateTimeUtils.getClass.getName.stripSuffix("$") val zid = ctx.addReferenceObj("zoneId", zoneIdInEval, classOf[ZoneId].getName) defineCodeGen(ctx, ev, (u, s, e) => s"""$dtu.timestampDiff($u.toString(), $s, $e, $zid)""") } override def prettyName: String = "timestampdiff" override protected def withNewChildrenInternal( newFirst: Expression, newSecond: Expression, newThird: Expression): TimestampDiff = { copy(unit = newFirst, startTimestamp = newSecond, endTimestamp = newThird) } }
srowen/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/datetimeExpressions.scala
Scala
apache-2.0
120,834
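The Catalyst expressions above back the SQL functions convert_timezone, timestampadd and timestampdiff. Below is a minimal usage sketch, assuming Spark 3.3+ on the classpath; the local master, app name and literal values are arbitrary illustration choices, not taken from the source.

import org.apache.spark.sql.SparkSession

object DatetimeFunctionsDemo {
  def main(args: Array[String]): Unit = {
    // Local session only for illustration; configuration values are arbitrary.
    val spark = SparkSession.builder().master("local[*]").appName("datetime-demo").getOrCreate()

    // convert_timezone maps onto the ConvertTimezone expression shown above.
    spark.sql(
      "SELECT convert_timezone('Europe/Amsterdam', 'America/Los_Angeles', timestamp_ntz'2021-12-06 00:00:00')"
    ).show(false)

    // timestampadd / timestampdiff map onto TimestampAdd and TimestampDiff.
    spark.sql("SELECT timestampadd(HOUR, 8, timestamp_ntz'2022-02-11 20:30:00')").show(false)
    spark.sql(
      "SELECT timestampdiff(HOUR, timestamp_ntz'2022-02-11 20:30:00', timestamp_ntz'2022-02-12 04:30:00')"
    ).show(false)

    spark.stop()
  }
}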
/* * Copyright (C) 2012 Romain Reuillon * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package org.openmole.plugin.task import org.openmole.core.dsl._ import org.openmole.core.expansion.{ FromContext, ToFromContext } import org.openmole.core.tools.service.OS package external { import java.io._ import org.openmole.core.context.Val import org.openmole.core.workflow.builder.InputOutputBuilder trait ExternalPackage { lazy val inputFiles = new { /** * Copy a file or directory from the dataflow to the task workspace */ def +=[T: ExternalBuilder: InputOutputBuilder](p: Val[File], name: FromContext[String], link: Boolean = false): T ⇒ T = implicitly[ExternalBuilder[T]].inputFiles add External.InputFile(p, name, link) andThen (inputs += p) } lazy val inputFileArrays = new { /** * Copy an array of files or directory from the dataflow to the task workspace. The files * in the array are named prefix$nSuffix where $n i the index of the file in the array. */ def +=[T: ExternalBuilder: InputOutputBuilder](p: Val[Array[File]], prefix: FromContext[String], suffix: FromContext[String] = "", link: Boolean = false): T ⇒ T = (implicitly[ExternalBuilder[T]].inputFileArrays add External.InputFileArray(p, prefix, suffix, link)) andThen (inputs += p) } lazy val outputFiles = new { /** * Get a file generate by the task and inject it in the dataflow * */ def +=[T: ExternalBuilder: InputOutputBuilder](name: FromContext[String], p: Val[File]): T ⇒ T = (implicitly[ExternalBuilder[T]].outputFiles add External.OutputFile(name, p)) andThen (outputs += p) } lazy val resources = new { /** * Copy a file from your computer in the workspace of the task */ def +=[T: ExternalBuilder](file: File, name: OptionalArgument[FromContext[String]] = None, link: Boolean = false, os: OS = OS()): T ⇒ T = implicitly[ExternalBuilder[T]].resources add External.Resource(file, name.getOrElse(file.getName), link = link, os = os) } } } package object external extends ExternalPackage { object EnvironmentVariable { implicit def fromTuple[N, V](tuple: (N, V))(implicit toFromContextN: ToFromContext[N, String], toFromContextV: ToFromContext[V, String]): EnvironmentVariable = EnvironmentVariable(toFromContextN(tuple._1), toFromContextV(tuple._2)) } case class EnvironmentVariable(name: FromContext[String], value: FromContext[String]) trait EnvironmentVariables[T] { def environmentVariables: monocle.Lens[T, Vector[EnvironmentVariable]] } import org.openmole.tool.file._ def directoryContentInformation(directory: File, margin: String = " ") = { def fileInformation(file: File) = { def permissions = { val w = if (file.canWrite) "w" else "" val r = if (file.canRead) "r" else "" val x = if (file.canExecute) "x" else "" s"$r$w$x" } def fileType = if (file.isDirectory) "directory" else if (file.isSymbolicLink) "link" else if (file.isFile) "file" else "unknown" s"""${directory.toPath.relativize(file.toPath)} (type=$fileType, permissions=$permissions)""" } directory.listRecursive(_ ⇒ 
true).filter(_ != directory).map(fileInformation).map(i ⇒ s"$margin$i").mkString("\\n") } }
openmole/openmole
openmole/plugins/org.openmole.plugin.task.external/src/main/scala/org/openmole/plugin/task/external/package.scala
Scala
agpl-3.0
4,034
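A minimal sketch of calling the directoryContentInformation helper defined in the package object above; the directory path is hypothetical and the commented output is only indicative of the format built by fileInformation.

import java.io.File
import org.openmole.plugin.task.external._

object DirectoryInfoDemo extends App {
  // Hypothetical directory; replace with any existing path.
  val dir = new File("/tmp/demo")
  // Prints one line per entry, e.g. "input.csv (type=file, permissions=rw)".
  println(directoryContentInformation(dir))
}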
package com.github.traviscrawford.spark.dynamodb import java.net.URI import org.apache.hadoop.fs.FileSystem import org.apache.hadoop.fs.Path /** Backup a DynamoDB table as JSON. * * The full table is scanned and the results are stored in the given output path. */ object DynamoBackupJob extends Job { private val region = flag[String]("region", "Region of the DynamoDB table to scan.") private val table = flag[String]("table", "DynamoDB table to scan.") private val totalSegments = flag("totalSegments", 1, "Number of DynamoDB parallel scan segments.") private val pageSize = flag("pageSize", 1000, "Page size of each DynamoDB request.") private val output = flag[String]("output", "Path to write the DynamoDB table backup.") private val overwrite = flag("overwrite", false, "Set to true to overwrite output path.") private val credentials = flag[String]("credentials", "Optional AWS credentials provider class name.") private val rateLimit = flag[Int]("rateLimit", "Max number of read capacity units per second each scan segment will consume.") def run(): Unit = { val maybeCredentials = if (credentials.isDefined) Some(credentials()) else None val maybeRateLimit = if (rateLimit.isDefined) Some(rateLimit()) else None val maybeRegion = if (region.isDefined) Some(region()) else None val awsAccessKey = None val awsSecretKey = None if (overwrite()) deleteOutputPath(output()) DynamoScanner(sc, table(), totalSegments(), pageSize(), maybeCredentials, awsAccessKey, awsSecretKey, maybeRateLimit, maybeRegion).saveAsTextFile(output()) } private def deleteOutputPath(output: String): Unit = { log.info(s"Deleting existing output path $output") FileSystem.get(new URI(output), sc.hadoopConfiguration) .delete(new Path(output), true) } }
traviscrawford/spark-dynamodb
src/main/scala/com/github/traviscrawford/spark/dynamodb/DynamoBackupJob.scala
Scala
apache-2.0
1,833
package com.gu.adapters.http import org.scalatra.ScalatraServlet import org.scalatra.swagger.{ ApiInfo, NativeSwaggerBase, Swagger } class ResourcesApp(implicit val swagger: Swagger) extends ScalatraServlet with NativeSwaggerBase object AvatarApiInfo extends ApiInfo( title = "Guardian Avatar API", description = "Docs for the Avatar API", termsOfServiceUrl = "https://github.com/guardian/avatar", contact = "discussiondev@theguardian.com", license = "To be determined", licenseUrl = "To be added" ) class AvatarSwagger extends Swagger(Swagger.SpecVersion, "1.0.0", AvatarApiInfo)
guardian/discussion-avatar
api/src/main/scala/com/gu/adapters/http/AvatarSwagger.scala
Scala
apache-2.0
595
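Typical Scalatra wiring for the classes above, sketched for illustration; this bootstrap is not taken from the repository and the mount path is an assumption.

import javax.servlet.ServletContext
import org.scalatra._
import com.gu.adapters.http.{ AvatarSwagger, ResourcesApp }

class ScalatraBootstrap extends LifeCycle {
  // AvatarSwagger is picked up implicitly by ResourcesApp's constructor.
  implicit val swagger: AvatarSwagger = new AvatarSwagger

  override def init(context: ServletContext): Unit = {
    // Serves the generated Swagger resource listing; "/api-docs" is an assumed path.
    context.mount(new ResourcesApp, "/api-docs")
  }
}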
package fr.laas.fape.constraints.stnu.parser import fr.laas.fape.anml.model.abs.time.TimepointTypeEnum._ import fr.laas.fape.anml.model.concrete.{ContingentConstraint, MinDelayConstraint, TPRef, TemporalConstraint} import fr.laas.fape.anml.pending.IntExpression import scala.util.parsing.combinator.JavaTokenParsers import scala.collection.mutable class STNUParser extends JavaTokenParsers { private val timepointsRecord = mutable.Map[Int,TPRef]() private var optStart: Option[TPRef] = None private var optEnd: Option[TPRef] = None def inWord: Parser[String] = """[a-zA-Z0-9][a-zA-Z0-9_\\-]*""".r //ident def word: Parser[String] = inWord ^^ (w => w.toLowerCase) def id: Parser[Int] = decimalNumber ^^ (x => x.toInt) def delay: Parser[Int] = wholeNumber ^^ (x => x.toInt) def problem: Parser[(List[TPRef],List[TemporalConstraint],Option[TPRef],Option[TPRef])] = "(define"~>timepoints~constraints<~")" ^^ { case tps~edges => (tps, edges, optStart, optEnd) } def timepoints: Parser[List[TPRef]] = "(:timepoints"~>rep(timepoint)<~")" ^^ { case tps => tps.foreach(tp => timepointsRecord.put(tp.id, tp)) tps } def timepoint: Parser[TPRef] = "("~>word~id<~")" ^^ { case typ~id => getTPRef(typ, id) } private def getTPRef(typ: String, id: Int): TPRef = { val tp = new TPRef(id) typ match { case "start" => tp.genre.setType(DISPATCHABLE); optStart = Some(tp) case "end" => tp.genre.setType(DISPATCHABLE); optEnd = Some(tp) case "dispatchable" => tp.genre.setType(DISPATCHABLE) case "structural" => tp.genre.setType(STRUCTURAL) case "contingent" => tp.genre.setType(CONTINGENT) case x => sys.error("Unrecognized timepoint type: "+x) } tp } def constraints: Parser[List[TemporalConstraint]] = "(:constraints"~>rep(constraint)<~")" def constraint: Parser[TemporalConstraint] = "(min-delay"~>id~id~delay<~")" ^^ { case from~to~d => new MinDelayConstraint(timepointsRecord(from), timepointsRecord(to), IntExpression.lit(d)) } | "("~"contingent"~>id~id~delay~delay<~")" ^^ { case from~to~min~max => new ContingentConstraint(timepointsRecord(from),timepointsRecord(to), IntExpression.lit(min), IntExpression.lit(max))} }
athy/fape
constraints/src/main/scala/fr/laas/fape/constraints/stnu/parser/STNUParser.scala
Scala
bsd-2-clause
2,242
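A minimal usage sketch of the parser above on a hand-written problem string; the timepoint and constraint values are invented for illustration.

object STNUParserDemo extends App {
  val parser = new fr.laas.fape.constraints.stnu.parser.STNUParser

  // Invented example: three timepoints and two constraints.
  val input =
    """(define
      |  (:timepoints (start 0) (dispatchable 1) (contingent 2))
      |  (:constraints
      |    (min-delay 0 1 10)
      |    (contingent 1 2 5 15)))""".stripMargin

  parser.parseAll(parser.problem, input) match {
    case parser.Success((timepoints, constraints, start, end), _) =>
      println(s"parsed ${timepoints.size} timepoints and ${constraints.size} constraints; start=$start end=$end")
    case failure =>
      println(s"parse failure: $failure")
  }
}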
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.types import scala.math.Numeric._ import scala.math.Ordering import org.apache.spark.sql.catalyst.util.SQLOrderingUtil import org.apache.spark.sql.errors.QueryExecutionErrors import org.apache.spark.sql.types.Decimal.DecimalIsConflicted private[sql] object ByteExactNumeric extends ByteIsIntegral with Ordering.ByteOrdering { private def checkOverflow(res: Int, x: Byte, y: Byte, op: String): Unit = { if (res > Byte.MaxValue || res < Byte.MinValue) { throw QueryExecutionErrors.binaryArithmeticCauseOverflowError(x, op, y) } } override def plus(x: Byte, y: Byte): Byte = { val tmp = x + y checkOverflow(tmp, x, y, "+") tmp.toByte } override def minus(x: Byte, y: Byte): Byte = { val tmp = x - y checkOverflow(tmp, x, y, "-") tmp.toByte } override def times(x: Byte, y: Byte): Byte = { val tmp = x * y checkOverflow(tmp, x, y, "*") tmp.toByte } override def negate(x: Byte): Byte = { if (x == Byte.MinValue) { // if and only if x is Byte.MinValue, overflow can happen throw QueryExecutionErrors.unaryMinusCauseOverflowError(x) } (-x).toByte } } private[sql] object ShortExactNumeric extends ShortIsIntegral with Ordering.ShortOrdering { private def checkOverflow(res: Int, x: Short, y: Short, op: String): Unit = { if (res > Short.MaxValue || res < Short.MinValue) { throw QueryExecutionErrors.binaryArithmeticCauseOverflowError(x, op, y) } } override def plus(x: Short, y: Short): Short = { val tmp = x + y checkOverflow(tmp, x, y, "+") tmp.toShort } override def minus(x: Short, y: Short): Short = { val tmp = x - y checkOverflow(tmp, x, y, "-") tmp.toShort } override def times(x: Short, y: Short): Short = { val tmp = x * y checkOverflow(tmp, x, y, "*") tmp.toShort } override def negate(x: Short): Short = { if (x == Short.MinValue) { // if and only if x is Byte.MinValue, overflow can happen throw QueryExecutionErrors.unaryMinusCauseOverflowError(x) } (-x).toShort } } private[sql] object IntegerExactNumeric extends IntIsIntegral with Ordering.IntOrdering { override def plus(x: Int, y: Int): Int = Math.addExact(x, y) override def minus(x: Int, y: Int): Int = Math.subtractExact(x, y) override def times(x: Int, y: Int): Int = Math.multiplyExact(x, y) override def negate(x: Int): Int = Math.negateExact(x) } private[sql] object LongExactNumeric extends LongIsIntegral with Ordering.LongOrdering { override def plus(x: Long, y: Long): Long = Math.addExact(x, y) override def minus(x: Long, y: Long): Long = Math.subtractExact(x, y) override def times(x: Long, y: Long): Long = Math.multiplyExact(x, y) override def negate(x: Long): Long = Math.negateExact(x) override def toInt(x: Long): Int = if (x == x.toInt) { x.toInt } else { throw QueryExecutionErrors.castingCauseOverflowError(x, "int") } } private[sql] object FloatExactNumeric 
extends FloatIsFractional { private val intUpperBound = Int.MaxValue private val intLowerBound = Int.MinValue private val longUpperBound = Long.MaxValue private val longLowerBound = Long.MinValue override def toInt(x: Float): Int = { // When casting floating values to integral types, Spark uses the method `Numeric.toInt` // Or `Numeric.toLong` directly. For positive floating values, it is equivalent to `Math.floor`; // for negative floating values, it is equivalent to `Math.ceil`. // So, we can use the condition `Math.floor(x) <= upperBound && Math.ceil(x) >= lowerBound` // to check if the floating value x is in the range of an integral type after rounding. // This condition applies to converting Float/Double value to any integral types. if (Math.floor(x) <= intUpperBound && Math.ceil(x) >= intLowerBound) { x.toInt } else { throw QueryExecutionErrors.castingCauseOverflowError(x, "int") } } override def toLong(x: Float): Long = { if (Math.floor(x) <= longUpperBound && Math.ceil(x) >= longLowerBound) { x.toLong } else { throw QueryExecutionErrors.castingCauseOverflowError(x, "int") } } override def compare(x: Float, y: Float): Int = SQLOrderingUtil.compareFloats(x, y) } private[sql] object DoubleExactNumeric extends DoubleIsFractional { private val intUpperBound = Int.MaxValue private val intLowerBound = Int.MinValue private val longUpperBound = Long.MaxValue private val longLowerBound = Long.MinValue override def toInt(x: Double): Int = { if (Math.floor(x) <= intUpperBound && Math.ceil(x) >= intLowerBound) { x.toInt } else { throw QueryExecutionErrors.castingCauseOverflowError(x, "int") } } override def toLong(x: Double): Long = { if (Math.floor(x) <= longUpperBound && Math.ceil(x) >= longLowerBound) { x.toLong } else { throw QueryExecutionErrors.castingCauseOverflowError(x, "long") } } override def compare(x: Double, y: Double): Int = SQLOrderingUtil.compareDoubles(x, y) } private[sql] object DecimalExactNumeric extends DecimalIsConflicted { override def toInt(x: Decimal): Int = x.roundToInt() override def toLong(x: Decimal): Long = x.roundToLong() }
wangmiao1981/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/types/numerics.scala
Scala
apache-2.0
6,074
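The *ExactNumeric objects above are private[sql], so the sketch below illustrates the underlying behaviour with java.lang.Math directly: exact arithmetic raises on overflow where plain integer arithmetic silently wraps.

object ExactArithmeticDemo extends App {
  // Plain arithmetic wraps around on overflow (two's complement).
  val wrapped: Int = Int.MaxValue + 1            // -2147483648, no error raised

  // Math.addExact, used by IntegerExactNumeric/LongExactNumeric above, throws instead.
  val exact: Either[String, Int] =
    try Right(Math.addExact(Int.MaxValue, 1))
    catch { case e: ArithmeticException => Left(e.getMessage) } // "integer overflow"

  println(s"wrapped=$wrapped exact=$exact")
}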
/* * Copyright 2022 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package connectors import play.api.libs.json.{JsObject, Json} import testHelpers.VatRegSpec import uk.gov.hmrc.http.HttpResponse import scala.concurrent.Future class ICLConnectorSpec extends VatRegSpec { class Setup { val jsResponse = Json.obj("link" -> "exampleaddress.co.uk") val successfulResponse = HttpResponse(200, jsResponse.toString) val testConnector: ICLConnector = new ICLConnector( mockHttpClient, mockServicesConfig ) { override val IClInitialiseUrl: String = "example.url" override val IClFEinternal: String = "example.url2" } } "ICLSetup" should { "return a JSObject" in new Setup { mockHttpPOST[JsObject, HttpResponse]("", successfulResponse) val res = await(testConnector.iclSetup(Json.parse("{}").as[JsObject])) res mustBe jsResponse } "return an exception" in new Setup { val exception = new Exception mockHttpFailedPOST[JsObject, HttpResponse]("", exception) intercept[Exception](await(testConnector.iclSetup(Json.parse("{}").as[JsObject]))) } } "ICLGetResult" should { "return a JSObject" in new Setup { mockHttpGET[HttpResponse]("", Future.successful(HttpResponse(200, iclMultipleResults.toString))) val res = await(testConnector.iclGetResult("")) res mustBe iclMultipleResults } "return an Exception" in new Setup { mockHttpGET[HttpResponse]("", Future.failed(new Exception)) intercept[Exception](await(testConnector.iclGetResult(""))) } } }
hmrc/vat-registration-frontend
test/connectors/ICLConnectorSpec.scala
Scala
apache-2.0
2,130
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.ml.util import java.io.IOException import org.apache.hadoop.fs.Path import org.json4s._ import org.json4s.{DefaultFormats, JObject} import org.json4s.JsonDSL._ import org.json4s.jackson.JsonMethods._ import org.apache.spark.SparkContext import org.apache.spark.annotation.{DeveloperApi, Since} import org.apache.spark.internal.Logging import org.apache.spark.ml._ import org.apache.spark.ml.classification.{OneVsRest, OneVsRestModel} import org.apache.spark.ml.feature.RFormulaModel import org.apache.spark.ml.param.{ParamPair, Params} import org.apache.spark.ml.tuning.ValidatorParams import org.apache.spark.sql.{SparkSession, SQLContext} import org.apache.spark.util.Utils /** * Trait for `MLWriter` and `MLReader`. */ private[util] sealed trait BaseReadWrite { private var optionSparkSession: Option[SparkSession] = None /** * Sets the Spark SQLContext to use for saving/loading. */ @Since("1.6.0") @deprecated("Use session instead, This method will be removed in 2.2.0.", "2.0.0") def context(sqlContext: SQLContext): this.type = { optionSparkSession = Option(sqlContext.sparkSession) this } /** * Sets the Spark Session to use for saving/loading. */ @Since("2.0.0") def session(sparkSession: SparkSession): this.type = { optionSparkSession = Option(sparkSession) this } /** * Returns the user-specified Spark Session or the default. */ protected final def sparkSession: SparkSession = { if (optionSparkSession.isEmpty) { optionSparkSession = Some(SparkSession.builder().getOrCreate()) } optionSparkSession.get } /** * Returns the user-specified SQL context or the default. */ protected final def sqlContext: SQLContext = sparkSession.sqlContext /** Returns the underlying `SparkContext`. */ protected final def sc: SparkContext = sparkSession.sparkContext } /** * Abstract class for utility classes that can save ML instances. */ @Since("1.6.0") abstract class MLWriter extends BaseReadWrite with Logging { protected var shouldOverwrite: Boolean = false /** * Saves the ML instances to the input path. */ @Since("1.6.0") @throws[IOException]("If the input path already exists but overwrite is not enabled.") def save(path: String): Unit = { val hadoopConf = sc.hadoopConfiguration val outputPath = new Path(path) val fs = outputPath.getFileSystem(hadoopConf) val qualifiedOutputPath = outputPath.makeQualified(fs.getUri, fs.getWorkingDirectory) if (fs.exists(qualifiedOutputPath)) { if (shouldOverwrite) { logInfo(s"Path $path already exists. It will be overwritten.") // TODO: Revert back to the original content if save is not successful. fs.delete(qualifiedOutputPath, true) } else { throw new IOException(s"Path $path already exists. 
To overwrite it, " + s"please use write.overwrite().save(path) for Scala and use " + s"write().overwrite().save(path) for Java and Python.") } } saveImpl(path) } /** * `save()` handles overwriting and then calls this method. Subclasses should override this * method to implement the actual saving of the instance. */ @Since("1.6.0") protected def saveImpl(path: String): Unit /** * Overwrites if the output path already exists. */ @Since("1.6.0") def overwrite(): this.type = { shouldOverwrite = true this } // override for Java compatibility override def session(sparkSession: SparkSession): this.type = super.session(sparkSession) // override for Java compatibility override def context(sqlContext: SQLContext): this.type = super.session(sqlContext.sparkSession) } /** * Trait for classes that provide `MLWriter`. */ @Since("1.6.0") trait MLWritable { /** * Returns an `MLWriter` instance for this ML instance. */ @Since("1.6.0") def write: MLWriter /** * Saves this ML instance to the input path, a shortcut of `write.save(path)`. */ @Since("1.6.0") @throws[IOException]("If the input path already exists but overwrite is not enabled.") def save(path: String): Unit = write.save(path) } /** * :: DeveloperApi :: * * Helper trait for making simple `Params` types writable. If a `Params` class stores * all data as [[org.apache.spark.ml.param.Param]] values, then extending this trait will provide * a default implementation of writing saved instances of the class. * This only handles simple [[org.apache.spark.ml.param.Param]] types; e.g., it will not handle * [[org.apache.spark.sql.Dataset]]. * * @see `DefaultParamsReadable`, the counterpart to this trait */ @DeveloperApi trait DefaultParamsWritable extends MLWritable { self: Params => override def write: MLWriter = new DefaultParamsWriter(this) } /** * Abstract class for utility classes that can load ML instances. * * @tparam T ML instance type */ @Since("1.6.0") abstract class MLReader[T] extends BaseReadWrite { /** * Loads the ML component from the input path. */ @Since("1.6.0") def load(path: String): T // override for Java compatibility override def session(sparkSession: SparkSession): this.type = super.session(sparkSession) // override for Java compatibility override def context(sqlContext: SQLContext): this.type = super.session(sqlContext.sparkSession) } /** * Trait for objects that provide `MLReader`. * * @tparam T ML instance type */ @Since("1.6.0") trait MLReadable[T] { /** * Returns an `MLReader` instance for this class. */ @Since("1.6.0") def read: MLReader[T] /** * Reads an ML instance from the input path, a shortcut of `read.load(path)`. * * @note Implementing classes should override this to be Java-friendly. */ @Since("1.6.0") def load(path: String): T = read.load(path) } /** * :: DeveloperApi :: * * Helper trait for making simple `Params` types readable. If a `Params` class stores * all data as [[org.apache.spark.ml.param.Param]] values, then extending this trait will provide * a default implementation of reading saved instances of the class. * This only handles simple [[org.apache.spark.ml.param.Param]] types; e.g., it will not handle * [[org.apache.spark.sql.Dataset]]. * * @tparam T ML instance type * @see `DefaultParamsWritable`, the counterpart to this trait */ @DeveloperApi trait DefaultParamsReadable[T] extends MLReadable[T] { override def read: MLReader[T] = new DefaultParamsReader[T] } /** * Default `MLWriter` implementation for transformers and estimators that contain basic * (json4s-serializable) params and no data. 
This will not handle more complex params or types with * data (e.g., models with coefficients). * * @param instance object to save */ private[ml] class DefaultParamsWriter(instance: Params) extends MLWriter { override protected def saveImpl(path: String): Unit = { DefaultParamsWriter.saveMetadata(instance, path, sc) } } private[ml] object DefaultParamsWriter { /** * Saves metadata + Params to: path + "/metadata" * - class * - timestamp * - sparkVersion * - uid * - paramMap * - (optionally, extra metadata) * * @param extraMetadata Extra metadata to be saved at same level as uid, paramMap, etc. * @param paramMap If given, this is saved in the "paramMap" field. * Otherwise, all [[org.apache.spark.ml.param.Param]]s are encoded using * [[org.apache.spark.ml.param.Param.jsonEncode()]]. */ def saveMetadata( instance: Params, path: String, sc: SparkContext, extraMetadata: Option[JObject] = None, paramMap: Option[JValue] = None): Unit = { val metadataPath = new Path(path, "metadata").toString val metadataJson = getMetadataToSave(instance, sc, extraMetadata, paramMap) sc.parallelize(Seq(metadataJson), 1).saveAsTextFile(metadataPath) } /** * Helper for [[saveMetadata()]] which extracts the JSON to save. * This is useful for ensemble models which need to save metadata for many sub-models. * * @see [[saveMetadata()]] for details on what this includes. */ def getMetadataToSave( instance: Params, sc: SparkContext, extraMetadata: Option[JObject] = None, paramMap: Option[JValue] = None): String = { val uid = instance.uid val cls = instance.getClass.getName val params = instance.extractParamMap().toSeq.asInstanceOf[Seq[ParamPair[Any]]] val jsonParams = paramMap.getOrElse(render(params.map { case ParamPair(p, v) => p.name -> parse(p.jsonEncode(v)) }.toList)) val basicMetadata = ("class" -> cls) ~ ("timestamp" -> System.currentTimeMillis()) ~ ("sparkVersion" -> sc.version) ~ ("uid" -> uid) ~ ("paramMap" -> jsonParams) val metadata = extraMetadata match { case Some(jObject) => basicMetadata ~ jObject case None => basicMetadata } val metadataJson: String = compact(render(metadata)) metadataJson } } /** * Default `MLReader` implementation for transformers and estimators that contain basic * (json4s-serializable) params and no data. This will not handle more complex params or types with * data (e.g., models with coefficients). * * @tparam T ML instance type * TODO: Consider adding check for correct class name. */ private[ml] class DefaultParamsReader[T] extends MLReader[T] { override def load(path: String): T = { val metadata = DefaultParamsReader.loadMetadata(path, sc) val cls = Utils.classForName(metadata.className) val instance = cls.getConstructor(classOf[String]).newInstance(metadata.uid).asInstanceOf[Params] DefaultParamsReader.getAndSetParams(instance, metadata) instance.asInstanceOf[T] } } private[ml] object DefaultParamsReader { /** * All info from metadata file. * * @param params paramMap, as a `JValue` * @param metadata All metadata, including the other fields * @param metadataJson Full metadata file String (for debugging) */ case class Metadata( className: String, uid: String, timestamp: Long, sparkVersion: String, params: JValue, metadata: JValue, metadataJson: String) { /** * Get the JSON value of the [[org.apache.spark.ml.param.Param]] of the given name. * This can be useful for getting a Param value before an instance of `Params` * is available. 
*/ def getParamValue(paramName: String): JValue = { implicit val format = DefaultFormats params match { case JObject(pairs) => val values = pairs.filter { case (pName, jsonValue) => pName == paramName }.map(_._2) assert(values.length == 1, s"Expected one instance of Param '$paramName' but found" + s" ${values.length} in JSON Params: " + pairs.map(_.toString).mkString(", ")) values.head case _ => throw new IllegalArgumentException( s"Cannot recognize JSON metadata: $metadataJson.") } } } /** * Load metadata saved using [[DefaultParamsWriter.saveMetadata()]] * * @param expectedClassName If non empty, this is checked against the loaded metadata. * @throws IllegalArgumentException if expectedClassName is specified and does not match metadata */ def loadMetadata(path: String, sc: SparkContext, expectedClassName: String = ""): Metadata = { val metadataPath = new Path(path, "metadata").toString val metadataStr = sc.textFile(metadataPath, 1).first() parseMetadata(metadataStr, expectedClassName) } /** * Parse metadata JSON string produced by [[DefaultParamsWriter.getMetadataToSave()]]. * This is a helper function for [[loadMetadata()]]. * * @param metadataStr JSON string of metadata * @param expectedClassName If non empty, this is checked against the loaded metadata. * @throws IllegalArgumentException if expectedClassName is specified and does not match metadata */ def parseMetadata(metadataStr: String, expectedClassName: String = ""): Metadata = { val metadata = parse(metadataStr) implicit val format = DefaultFormats val className = (metadata \\ "class").extract[String] val uid = (metadata \\ "uid").extract[String] val timestamp = (metadata \\ "timestamp").extract[Long] val sparkVersion = (metadata \\ "sparkVersion").extract[String] val params = metadata \\ "paramMap" if (expectedClassName.nonEmpty) { require(className == expectedClassName, s"Error loading metadata: Expected class name" + s" $expectedClassName but found class name $className") } Metadata(className, uid, timestamp, sparkVersion, params, metadata, metadataStr) } /** * Extract Params from metadata, and set them in the instance. * This works if all Params implement [[org.apache.spark.ml.param.Param.jsonDecode()]]. * TODO: Move to [[Metadata]] method */ def getAndSetParams(instance: Params, metadata: Metadata): Unit = { implicit val format = DefaultFormats metadata.params match { case JObject(pairs) => pairs.foreach { case (paramName, jsonValue) => val param = instance.getParam(paramName) val value = param.jsonDecode(compact(render(jsonValue))) instance.set(param, value) } case _ => throw new IllegalArgumentException( s"Cannot recognize JSON metadata: ${metadata.metadataJson}.") } } /** * Load a `Params` instance from the given path, and return it. * This assumes the instance implements [[MLReadable]]. */ def loadParamsInstance[T](path: String, sc: SparkContext): T = { val metadata = DefaultParamsReader.loadMetadata(path, sc) val cls = Utils.classForName(metadata.className) cls.getMethod("read").invoke(null).asInstanceOf[MLReader[T]].load(path) } } /** * Default Meta-Algorithm read and write implementation. */ private[ml] object MetaAlgorithmReadWrite { /** * Examine the given estimator (which may be a compound estimator) and extract a mapping * from UIDs to corresponding `Params` instances. 
*/ def getUidMap(instance: Params): Map[String, Params] = { val uidList = getUidMapImpl(instance) val uidMap = uidList.toMap if (uidList.size != uidMap.size) { throw new RuntimeException(s"${instance.getClass.getName}.load found a compound estimator" + s" with stages with duplicate UIDs. List of UIDs: ${uidList.map(_._1).mkString(", ")}.") } uidMap } private def getUidMapImpl(instance: Params): List[(String, Params)] = { val subStages: Array[Params] = instance match { case p: Pipeline => p.getStages.asInstanceOf[Array[Params]] case pm: PipelineModel => pm.stages.asInstanceOf[Array[Params]] case v: ValidatorParams => Array(v.getEstimator, v.getEvaluator) case ovr: OneVsRest => Array(ovr.getClassifier) case ovrModel: OneVsRestModel => Array(ovrModel.getClassifier) ++ ovrModel.models case rformModel: RFormulaModel => Array(rformModel.pipelineModel) case _: Params => Array.empty[Params] } val subStageMaps = subStages.flatMap(getUidMapImpl) List((instance.uid, instance)) ++ subStageMaps } }
milliman/spark
mllib/src/main/scala/org/apache/spark/ml/util/ReadWrite.scala
Scala
apache-2.0
16,035
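A small sketch of the save/load round trip that MLWriter/MLReader implement, using a stock Spark ML stage that mixes in DefaultParamsWritable/DefaultParamsReadable; the output path and param values are arbitrary.

import org.apache.spark.ml.feature.Binarizer
import org.apache.spark.sql.SparkSession

object ReadWriteDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("readwrite-demo").getOrCreate()

    val binarizer = new Binarizer()
      .setInputCol("feature")
      .setOutputCol("binarized")
      .setThreshold(0.5)

    // write comes from DefaultParamsWritable; overwrite() flips shouldOverwrite as shown above.
    binarizer.write.overwrite().save("/tmp/binarizer-demo")

    // load goes through DefaultParamsReader: the metadata JSON is parsed and params are re-set.
    val restored = Binarizer.load("/tmp/binarizer-demo")
    println(restored.getThreshold) // 0.5

    spark.stop()
  }
}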
package fpinscala.laziness import Stream._ import scala.annotation.tailrec trait Stream[+A] { def foldRight[B](z: => B)(f: (A, => B) => B): B = // The arrow `=>` in front of the argument type `B` means that the function `f` takes its second argument by name and may choose not to evaluate it. this match { case Cons(h,t) => f(h(), t().foldRight(z)(f)) // If `f` doesn't evaluate its second argument, the recursion never occurs. case _ => z } def exists(p: A => Boolean): Boolean = foldRight(false)((a, b) => p(a) || b) // Here `b` is the unevaluated recursive step that folds the tail of the stream. If `p(a)` returns `true`, `b` will never be evaluated and the computation terminates early. @annotation.tailrec final def find(f: A => Boolean): Option[A] = this match { case Empty => None case Cons(h, t) => if (f(h())) Some(h()) else t().find(f) } def take(n: Int): Stream[A] = this match { case Empty => empty case Cons(h,t) => if (n <= 0) empty else cons(h(), t().take(n-1)) } def drop(n: Int): Stream[A] = this match { case Empty => empty case Cons(h,t) => if (n <= 0) this else t().drop(n-1) } def takeWhile(p: A => Boolean): Stream[A] = this match { case Cons(h,t) if p(h()) => cons(h(), t() takeWhile p) case _ => empty } // Note how we need to type annotation on the empty zero value, otherwise the h in cons(h, t) does not type check - why? def takeWhileUsingFoldRight(p: A => Boolean): Stream[A] = foldRight(empty[A])((h, t) => if (p(h)) cons(h, t) else empty) /* Since `&&` is non-strict in its second argument, this terminates the traversal as soon as a nonmatching element is found. Remember how this expands out: With a Stream(1, 2, 3) and predicate of (_ < 4) true && (true && (true && zero_value)) */ def forAll(p: A => Boolean): Boolean = foldRight(true)((a, b) => p(a) && b) def headOption: Option[A] = this match { case Cons(h,t) => Some(h()) case _ => None } def headOptionUsingFoldRight: Option[A] = foldRight(None: Option[A])((h,_) => Some(h)) // The natural recursive solution def toListRecursive: List[A] = this match { case Cons(h,t) => h() :: t().toListRecursive case _ => List() } /* The above solution will stack overflow for large streams, since it's not tail-recursive. Here is a tail-recursive implementation. At each step we cons onto the front of the `acc` list, which will result in the reverse of the stream. Then at the end we reverse the result to get the correct order again. */ def toList: List[A] = { @annotation.tailrec def go(s: Stream[A], acc: List[A]): List[A] = s match { case Cons(h, t) => go(t(), h() :: acc) case _ => acc } go(this, List()).reverse } /* In order to avoid the `reverse` at the end, we could write it using a mutable list buffer and an explicit loop instead. Note that the mutable list buffer never escapes our `toList` method, so this function is still _pure_. 
*/ def toListFast: List[A] = { val buf = new collection.mutable.ListBuffer[A] @annotation.tailrec def go(s: Stream[A]): List[A] = s match { case Cons(h,t) => buf += h() go(t()) case _ => buf.toList } go(this) } def map[B](f: A => B): Stream[B] = foldRight(empty[B])((h,t) => cons(f(h), t)) def filter(f: A => Boolean): Stream[A] = foldRight(empty[A])((h,t) => if (f(h)) cons(h, t) else t) def append[B>:A](s: => Stream[B]): Stream[B] = foldRight(s)((h,t) => cons(h, t)) def flatMap[B](f: A => Stream[B]): Stream[B] = foldRight(empty[B])((h,t) => f(h) append t) def mapViaUnfold[B](f: A => B): Stream[B] = unfold(this) { case Cons(h,t) => Some((f(h()), t())) case _ => None } def takeViaUnfold(n: Int): Stream[A] = unfold((this, n)){ case (Cons(h,t), n) if n == 1 => Some((h(), (empty, n-1))) case (Cons(h,t), n) if n > 0 => Some((h(), (t(), n-1))) case _ => None } def takeWhileViaUnfold(f: A => Boolean): Stream[A] = unfold(this) { case Cons(h,t) if f(h()) => Some((h(), t())) case _ => None } def zipWith[B,C](s2: Stream[B])(f: (A,B) => C): Stream[C] = unfold((this, s2)) { case (Cons(h1,t1), Cons(h2,t2)) => Some((f(h1(), h2()), (t1(), t2()))) case _ => None } // special case of `zip` def zip[B](s2: Stream[B]): Stream[(A,B)] = zipWith(s2)((_,_)) def zipAll[B](s2: Stream[B]): Stream[(Option[A],Option[B])] = zipWithAll(s2)((_,_)) def zipWithAll[B, C](s2: Stream[B])(f: (Option[A], Option[B]) => C): Stream[C] = Stream.unfold((this, s2)) { case (Empty, Empty) => None case (Cons(h, t), Empty) => Some(f(Some(h()), Option.empty[B]) -> (t(), empty[B])) case (Empty, Cons(h, t)) => Some(f(Option.empty[A], Some(h())) -> (empty[A] -> t())) case (Cons(h1, t1), Cons(h2, t2)) => Some(f(Some(h1()), Some(h2())) -> (t1() -> t2())) } def startsWith[A](s: Stream[A]): Boolean = zipAll(s).takeWhile(!_._2.isEmpty) forAll { case (h,h2) => h == h2 } def tails: Stream[Stream[A]] = unfold(this){ case Empty => None case s => Some((s, s drop 1)) } append (Stream(empty)) def hasSubsequence[A](s: Stream[A]): Boolean = tails exists (_ startsWith s) def scanRight[B](z: B)(f: (A, => B) => B): Stream[B] = foldRight((z, Stream(z)))((a,p) => { val b2 = f(a, p._1) (b2, cons(b2, p._2)) })._2 } case object Empty extends Stream[Nothing] case class Cons[+A](h: () => A, t: () => Stream[A]) extends Stream[A] object Stream { def cons[A](hd: => A, tl: => Stream[A]): Stream[A] = { lazy val head = hd lazy val tail = tl Cons(() => head, () => tail) } def empty[A]: Stream[A] = Empty def apply[A](as: A*): Stream[A] = if (as.isEmpty) empty else cons(as.head, apply(as.tail: _*)) val ones: Stream[Int] = Stream.cons(1, ones) def constant[A](a: A): Stream[A] = Stream.cons(a, constant(a)) def from(n: Int): Stream[Int] = Stream.cons(n, from(n+1)) /* * Build a stream of fibonacci number */ def fibs: Stream[Int] = { def loop(prev: Int, curr: Int): Stream[Int] = cons(prev, loop(curr, prev + curr)) loop(0, 1) } def fibsUsingUnfold = unfold((0, 1)) { case (f0, f1) => Some((f0, (f1, f0+f1))) } def fromUsingUnfold(n: Int) = unfold(n)(n => Some((n, n+1))) def constantUsingUnfold[A](a: A): Stream[A] = unfold(a)(_ => Some((a, a))) val onesUsingUnfold: Stream[Int] = unfold(1)(_ => Some((1, 1))) /* * A more general stream building function - the ones example above however * only consumes "constant" memory, this one does not. */ def unfold[A, S](z: S)(f: S => Option[(A, S)]): Stream[A] = f(z) match { case Some((h,s)) => cons(h, unfold(s)(f)) case None => empty } }
js1972/fpinscala
exercises/src/main/scala/fpinscala/laziness/Stream.scala
Scala
mit
6,976
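A brief usage sketch of the lazy Stream defined above; the values and expected results follow directly from the shown combinators.

import fpinscala.laziness.Stream

object StreamDemo extends App {
  val s = Stream(1, 2, 3, 4, 5)

  println(s.take(3).toList)                          // List(1, 2, 3)
  println(s.map(_ * 2).filter(_ > 4).toList)         // List(6, 8, 10)
  println(s.forAll(_ > 0))                           // true

  // Infinite streams stay usable because evaluation is demand-driven.
  println(Stream.from(10).takeWhile(_ < 14).toList)  // List(10, 11, 12, 13)
  println(Stream.fibs.take(7).toList)                // List(0, 1, 1, 2, 3, 5, 8)
}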
/* * Copyright 2012-2013 Eligotech BV. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.eligosource.eventsourced.journal.common.serialization import scala.language.existentials import akka.actor._ import akka.serialization.{Serializer, Serialization, SerializationExtension} import com.google.protobuf.ByteString import org.eligosource.eventsourced.core.{Confirmation, Message} import org.eligosource.eventsourced.core.JournalProtocol._ import org.eligosource.eventsourced.journal.common.serialization.Protocol._ /** * Extension for protobuf-based (de)serialization of confirmation messages. * * @see [[org.eligosource.eventsourced.core.Confirmation]] */ class ConfirmationSerialization(system: ExtendedActorSystem) extends Extension { def serializeConfirmation(confirmation: Confirmation): Array[Byte] = confirmationProtocolBuilder(confirmation).build().toByteArray def deserializeConfirmation(bytes: Array[Byte]): Confirmation = confirmation(ConfirmationProtocol.parseFrom(bytes)) protected def confirmationProtocolBuilder(confirmation: Confirmation) = ConfirmationProtocol.newBuilder .setProcessorId(confirmation.processorId) .setChannelId(confirmation.channelId) .setSequenceNr(confirmation.sequenceNr) .setPositive(confirmation.positive) protected def confirmation(confirmationProtocol: ConfirmationProtocol): Confirmation = Confirmation( confirmationProtocol.getProcessorId, confirmationProtocol.getChannelId, confirmationProtocol.getSequenceNr, confirmationProtocol.getPositive) } /** * Extension for protobuf-based (de)serialization of event messages. * Serializers for events contained in event messages are looked up * in the Akka [[akka.serialization.Serialization]] extension. * * @see [[org.eligosource.eventsourced.core.Message]] */ class MessageSerialization(system: ExtendedActorSystem) extends ConfirmationSerialization(system) { val extension = SerializationExtension(system) /** * Serializes an event [[org.eligosource.eventsourced.core.Message]]. * * @param message event message. * @return serialized event message. */ def serializeMessage(message: Message): Array[Byte] = messageProtocolBuilder(message).build().toByteArray /** * Deserializes an event [[org.eligosource.eventsourced.core.Message]]. * * @param bytes serialized event message. * @return event message. 
*/ def deserializeMessage(bytes: Array[Byte]): Message = message(MessageProtocol.parseFrom(bytes)) protected def messageProtocolBuilder(message: Message) = { val event = message.event.asInstanceOf[AnyRef] val serializer = extension.findSerializerFor(event) val builder = MessageProtocol.newBuilder .setEvent(ByteString.copyFrom(serializer.toBinary(event))) .setEventSerializerId(serializer.identifier) .setProcessorId(message.processorId) .setSequenceNr(message.sequenceNr) .setTimestamp(message.timestamp) if (message.confirmationTarget != null && message.confirmationPrototype != null) { builder.setConfirmationTarget(Serialization.serializedActorPath(message.confirmationTarget)) builder.setConfirmationPrototype(confirmationProtocolBuilder(message.confirmationPrototype)) } if (message.senderRef != null) { builder.setSenderRef(Serialization.serializedActorPath(message.senderRef)) } if (serializer.includeManifest) { builder.setEventManifest(ByteString.copyFromUtf8(event.getClass.getName)) } builder } protected def message(messageProtocol: MessageProtocol): Message = { val eventClass = if (messageProtocol.hasEventManifest) Some(system.dynamicAccess.getClassFor[AnyRef](messageProtocol.getEventManifest.toStringUtf8).get) else None val event = extension.deserialize( messageProtocol.getEvent.toByteArray, messageProtocol.getEventSerializerId, eventClass).get val confirmationTarget = if (messageProtocol.hasConfirmationTarget) system.provider.resolveActorRef(messageProtocol.getConfirmationTarget) else null val confirmationPrototype = if (messageProtocol.hasConfirmationPrototype) confirmation(messageProtocol.getConfirmationPrototype) else null val senderRef = if (messageProtocol.hasSenderRef) system.provider.resolveActorRef(messageProtocol.getSenderRef) else null Message( event = event, processorId = messageProtocol.getProcessorId, sequenceNr = messageProtocol.getSequenceNr, timestamp = messageProtocol.getTimestamp, confirmationTarget = confirmationTarget, confirmationPrototype = confirmationPrototype, senderRef = senderRef) } } /** * Extension for protobuf-based (de)serialization of journal commands. * Serializers for events contained in event messages are looked up * in the Akka [[akka.serialization.Serialization]] extension. */ class CommandSerialization(system: ExtendedActorSystem) extends MessageSerialization(system) { /** * Serializes journal commands. * * - [[org.eligosource.eventsourced.core.JournalProtocol.WriteInMsg]] * - [[org.eligosource.eventsourced.core.JournalProtocol.WriteOutMsg]] * - [[org.eligosource.eventsourced.core.JournalProtocol.WriteAck]] * * @param command journal command. * @return serialized journal command. */ def serializeCommand(command: AnyRef): Array[Byte] = { import CommandType._ val builder = command match { case cmd: WriteInMsg => CommandProtocol.newBuilder .setCommandType(WRITE_IN) .setProcessorId(cmd.processorId) .setMessage(messageProtocolBuilder(cmd.message)) case cmd: WriteOutMsg => CommandProtocol.newBuilder .setCommandType(WRITE_OUT) .setProcessorId(cmd.ackProcessorId) .setChannelId(cmd.channelId) .setSequenceNr(cmd.ackSequenceNr) .setMessage(messageProtocolBuilder(cmd.message)) case cmd: WriteAck => CommandProtocol.newBuilder .setCommandType(WRITE_ACK) .setProcessorId(cmd.processorId) .setChannelId(cmd.channelId) .setSequenceNr(cmd.ackSequenceNr) } builder.build().toByteArray() } /** * Deserializes journal commands. 
* * - [[org.eligosource.eventsourced.core.JournalProtocol.WriteInMsg]] * - [[org.eligosource.eventsourced.core.JournalProtocol.WriteOutMsg]] * - [[org.eligosource.eventsourced.core.JournalProtocol.WriteAck]] * * @param bytes serialized journal command. * @return journal command. */ def deserializeCommand(bytes: Array[Byte]): AnyRef = { import CommandType._ val commandProtocol = CommandProtocol.parseFrom(bytes) commandProtocol.getCommandType match { case WRITE_IN => WriteInMsg( processorId = commandProtocol.getProcessorId, message = message(commandProtocol.getMessage), target = null) case WRITE_OUT => WriteOutMsg( channelId = commandProtocol.getChannelId, message = message(commandProtocol.getMessage), ackProcessorId = commandProtocol.getProcessorId, ackSequenceNr = commandProtocol.getSequenceNr, target = null) case WRITE_ACK => WriteAck( processorId = commandProtocol.getProcessorId, channelId = commandProtocol.getChannelId, ackSequenceNr = commandProtocol.getSequenceNr) } } } object ConfirmationSerialization extends ExtensionId[ConfirmationSerialization] with ExtensionIdProvider { override def lookup = ConfirmationSerialization override def createExtension(system: ExtendedActorSystem) = new ConfirmationSerialization(system) } object MessageSerialization extends ExtensionId[MessageSerialization] with ExtensionIdProvider { override def lookup = MessageSerialization override def createExtension(system: ExtendedActorSystem) = new MessageSerialization(system) } object CommandSerialization extends ExtensionId[CommandSerialization] with ExtensionIdProvider { override def lookup = CommandSerialization override def createExtension(system: ExtendedActorSystem) = new CommandSerialization(system) } /** * Protobuf-based [[org.eligosource.eventsourced.core.Confirmation]] serialize. The * Eventsourced library configures this serializer as default serializer for confirmation * messages. */ class ConfirmationSerializer(system: ExtendedActorSystem) extends Serializer { lazy val serialization = ConfirmationSerialization(system) /** * Returns `43872`. */ def identifier = 43872 /** * Returns `false`. */ def includeManifest = false /** * Serializes a [[org.eligosource.eventsourced.core.Confirmation]] message. * * @param msg confirmation message. * @return serialized confirmation message. */ def toBinary(msg: AnyRef) = msg match { case c: Confirmation => serialization.serializeConfirmation(c) case _ => throw new IllegalArgumentException("Cannot serialize %s" format msg.getClass) } /** * Deserializes a [[org.eligosource.eventsourced.core.Confirmation]] message. * * @param bytes serialized confirmation message. * @return deserialized confirmation message. */ def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]) = serialization.deserializeConfirmation(bytes) } /** * Protobuf-based [[org.eligosource.eventsourced.core.Message]] serializer that uses * the Akka [[akka.serialization.Serialization]] extension to find a serializer for * an event contained in an event message. The Eventsourced library configures this * serializer as default serializer for event messages. */ class MessageSerializer(system: ExtendedActorSystem) extends Serializer { lazy val serialization = MessageSerialization(system) /** * Returns `43871`. */ def identifier = 43871 /** * Returns `true`. */ def includeManifest = true /** * Serializes an event [[org.eligosource.eventsourced.core.Message]]. * * @param msg event message. * @return serialized event message. 
*/ def toBinary(msg: AnyRef) = msg match { case m: Message => serialization.serializeMessage(m) case _ => throw new IllegalArgumentException("Cannot serialize %s" format msg.getClass) } /** * Deserializes an event [[org.eligosource.eventsourced.core.Message]]. * * @param bytes serialized event message. * @param manifest event message manifest. * @return deserialized event message. */ def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]) = manifest match { case Some(c) if (c == classOf[Message]) => serialization.deserializeMessage(bytes) case Some(c) => throw new IllegalArgumentException("Cannot deserialize %s" format c) case None => throw new IllegalArgumentException("Manifest not available") } }
CoderPaulK/eventsourced
es-journal/es-journal-common/src/main/scala/org/eligosource/eventsourced/journal/common/serialization/MessageSerialization.scala
Scala
apache-2.0
11,298
/* * Copyright 2015 Depop * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.depop.json import com.depop.json.model._ import org.specs2.mutable.Specification import spray.json._ import spray.json.DefaultJsonProtocol._ class SprayJsonADTFieldRenamingMacrosSpec extends Specification { "spray json macro" should { "write out an ADT in camel case" in { implicit val carJsonWriter = SprayJsonMacros.jsonWriter[Car](toSnakeCase) implicit val airplaneJsonWriter = SprayJsonMacros.jsonWriter[Airplane](toSnakeCase) val jsonWriter = SprayJsonMacros.jsonWriterFromSubTypes[Vehicle] jsonWriter.write(Car(2, 2.4)) ==== JsObject("number_of_doors" -> JsNumber(2), "engine_capacity" -> JsNumber(2.4)) jsonWriter.write(Airplane(4)) ==== JsObject("number_of_engines" -> JsNumber(4)) } } }
depop/json-macros
src/test/scala/com/depop/json/SprayJsonADTFieldRenamingMacrosSpec.scala
Scala
apache-2.0
1,343
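A minimal sketch reusing the API exactly as exercised in the spec above; it assumes it sits in the same com.depop.json package so that SprayJsonMacros and toSnakeCase resolve the same way, and the commented JSON is only indicative.

package com.depop.json

import com.depop.json.model._
import spray.json._
import spray.json.DefaultJsonProtocol._

object SnakeCaseWriterDemo extends App {
  // Same call shape as in the spec: derive a writer that renames fields to snake_case.
  implicit val carWriter = SprayJsonMacros.jsonWriter[Car](toSnakeCase)

  // Expected shape (field order may differ): {"number_of_doors":4,"engine_capacity":1.6}
  println(Car(4, 1.6).toJson.compactPrint)
}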
/* * Copyright 2014 JHC Systems Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sqlest.ast.syntax trait QuerySyntax { object select extends SelectSyntax object insert extends InsertSyntax object update extends UpdateSyntax object delete extends DeleteSyntax }
andrewjskatz/sqlest
src/main/scala/sqlest/ast/syntax/QuerySyntax.scala
Scala
apache-2.0
800
/** * © 2019 Refinitiv. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. */ package com.github.hochgi.sbt.cassandra import java.io._ import java.util.Properties import org.yaml.snakeyaml.Yaml import sbt.Keys._ import sbt._ import nl.gn0s1s.bump.SemVer import scala.sys.process._ import scala.concurrent._ import scala.concurrent.duration._ import scala.util.Try object CassandraPlugin extends AutoPlugin { //defaults: private[this] val defaultConfigDir = "NO_DIR_SUPPLIED" private[this] val defaultCliInit = "NO_CLI_COMMANDS_SUPPLIED" private[this] val defaultCqlInit = "NO_CQL_COMMANDS_SUPPLIED" object autoImport { val cassandraVersion = SettingKey[String]("cassandra-version") val cassandraConfigDir = SettingKey[String]("cassandra-config-dir") val cassandraCliInit = SettingKey[String]("cassandra-cli-init") val cassandraCqlInit = SettingKey[String]("cassandra-cql-init") val cassandraHost = SettingKey[String]("cassandra-host") val cassandraPort = SettingKey[String]("cassandra-port") val cassandraCqlPort = SettingKey[String]("cassandra-cql-port") val stopCassandraAfterTests = SettingKey[Boolean]("stop-cassandra-after-tests") val cleanCassandraAfterStop = SettingKey[Boolean]("stop-cassandra-after-tests") val configMappings = SettingKey[Seq[(String, java.lang.Object)]]("cassandra-conf", "used to override values in conf/cassandra.yaml. 
values are appropriate java objects") val cassandraJavaArgs = SettingKey[Seq[String]]("cassandra-java-args") val cassandraApplicationArgs = SettingKey[Seq[String]]("cassandra-application-args") val cassandraHome = TaskKey[File]("cassandra-home") val cassandraStartDeadline = TaskKey[Int]("cassandra-start-deadline") val deployCassandra = TaskKey[File]("deploy-cassandra") val startCassandra = TaskKey[String]("start-cassandra") val cassandraPid = TaskKey[String]("cassandra-pid") val stopCassandra = TaskKey[Unit]("stop-cassandra") } import autoImport._ override def projectSettings = Seq( cassandraHost := "localhost", cassandraPort := "9160", configMappings := Seq(), configMappings ++= { val port = cassandraPort.value val targetDir = target.value val data = targetDir / "data" def d(s: String): String = (data / s).getAbsolutePath Seq( "rpc_port" -> port, "data_file_directories" -> { val l = new java.util.LinkedList[String]() l.add(d("data")) l }, "commitlog_directory" -> d("commitlog"), "saved_caches_directory" -> d("saved_caches") ) }, cassandraJavaArgs := Nil, cassandraApplicationArgs := Nil, cassandraConfigDir := defaultConfigDir, cassandraCliInit := defaultCliInit, cassandraCqlInit := defaultCqlInit, stopCassandraAfterTests := true, cleanCassandraAfterStop := true, cassandraStartDeadline := 20, cassandraHome := { val ver = cassandraVersion.value val targetDir = target.value targetDir / s"apache-cassandra-${ver}" }, cassandraVersion := "2.1.2", cassandraCqlPort := { val oldPort = cassandraHost.value val ver = cassandraVersion.value if(SemVer(ver) > SemVer("2.1.0")) "9042" else oldPort }, classpathTypes ~= (_ + "tar.gz"), libraryDependencies += { "org.apache.cassandra" % "apache-cassandra" % cassandraVersion.value artifacts(Artifact("apache-cassandra", "tar.gz", "tar.gz","bin")) intransitive() }, deployCassandra := { val ver = cassandraVersion.value val targetDir = target.value val classpath = (dependencyClasspath in Runtime).value val logger = streams.value.log val cassandraTarGz = Attributed.data(classpath).find(_.getName == s"apache-cassandra-$ver-bin.tar.gz").get if (cassandraTarGz == null) sys.error("could not load: cassandra tar.gz file.") logger.info(s"cassandraTarGz: ${cassandraTarGz.getAbsolutePath}") Process(Seq("tar","-xzf",cassandraTarGz.getAbsolutePath),targetDir).! val cassHome = targetDir / s"apache-cassandra-${ver}" //old cassandra versions used log4j, newer versions use logback and are configurable through env vars val oldLogging = cassHome / "conf" / "log4j-server.properties" if(oldLogging.exists) { val in: FileInputStream = new FileInputStream(oldLogging) val props: Properties = new Properties props.load(in) in.close val out: FileOutputStream = new FileOutputStream(oldLogging) props.setProperty("log4j.appender.R.File", (targetDir / "data" / "logs").getAbsolutePath) props.store(out, null) out.close } cassHome }, startCassandra := { //if compilation of test classes fails, cassandra should not be invoked. (moreover, Test.Cleanup won't execute to stop it...) 
(compile in Test).value val javaArgs = cassandraJavaArgs.value val appArgs = cassandraApplicationArgs.value val targetDir = target.value val cassHome = deployCassandra.value val confDirAsString = cassandraConfigDir.value val cli = cassandraCliInit.value val cql = cassandraCqlInit.value val host = cassandraHost.value val port = cassandraPort.value val cqlPort = cassandraCqlPort.value val startDeadline = cassandraStartDeadline.value val confMappings = configMappings.value val logger = streams.value.log val pidFile = targetDir / "cass.pid" val jarClasspath = sbt.IO.listFiles(cassHome / "lib").collect { case f: File if f.getName.endsWith(".jar") => f.getAbsolutePath }.mkString(":") val conf: String = { if (confDirAsString == defaultConfigDir) { val configDir = cassHome / "conf" configDir.getAbsolutePath } else confDirAsString } val classpath = conf + ":" + jarClasspath val bin = cassHome / "bin" / "cassandra" val args = Seq(bin.getAbsolutePath, "-p", pidFile.getAbsolutePath) ++ appArgs overrideConfigs(conf, confMappings, logger) if (!isCassandraRunning(port)) { Process(args, cassHome, "CASSANDRA_CONF" -> conf, "CASSANDRA_HOME" -> cassHome.getAbsolutePath, "JVM_OPTS" -> javaArgs.mkString(" ")).run logger.info("going to wait for cassandra:") waitForCassandra(port, startDeadline, (s: String) => logger.info(s)) logger.info("going to initialize cassandra:") initCassandra(cli, cql, classpath, cassHome, host, port, cqlPort) } else { logger.warn("cassandra already running") } val pid = Try(sbt.IO.read(pidFile).filterNot(_.isWhitespace)).getOrElse("NO PID") cassandraPid := pid pid }, cassandraPid := { val cassPid = target.value / "cass.pid" if(cassPid.exists) sbt.IO.read(cassPid).filterNot(_.isWhitespace) else "NO PID" // did you run start-cassandra task? }, stopCassandra := { val pid = cassandraPid.value val clean = cleanCassandraAfterStop.value val targetDir = target.value stopCassandraMethod(clean, targetDir/ "data", pid) }, //make sure to Stop cassandra when tests are done. testOptions in Test += { val pid = cassandraPid.value val stop = stopCassandraAfterTests.value val clean = cleanCassandraAfterStop.value val targetDir = target.value Tests.Cleanup(() => { if(stop) stopCassandraMethod(clean, targetDir / "data", pid) }) } ) def stopCassandraMethod(clean: Boolean, dataDir: File, pid: String) = if(pid != "NO PID") { s"kill $pid" ! //give cassandra a chance to exit gracefully var counter = 40 val never = Promise().future while((s"jps" !!).split("\\n").exists(_ == s"$pid CassandraDaemon") && counter > 0) { try{ Await.ready(never, 250 millis) } catch { case _ : Throwable => counter = counter - 1 } } if(counter == 0) { //waited to long... s"kill -9 $pid" ! 
} if(clean) sbt.IO.delete(dataDir) } def waitCassandraShutdown(pid: String) = { var counter = 40 val never = Promise[Boolean].future while((s"jps" !!).split("\\n").exists(_ == s"$pid CassandraDaemon") && counter > 0) { try{ Await.ready(never, 250 millis) } catch { case _ : Throwable => counter = counter - 1 } } } def isCassandraRunning(port: String): Boolean = { import org.apache.thrift.transport.{TFramedTransport, TSocket} val rpcAddress = "localhost" val rpcPort = port.toInt val tr = new TFramedTransport(new TSocket(rpcAddress, rpcPort)) Try { tr.open }.isSuccess } def waitForCassandra(port: String, deadline: Int, infoPrintFunc: String => Unit): Unit = { import org.apache.thrift.transport.{TFramedTransport, TSocket, TTransport, TTransportException} import scala.concurrent.duration._ val rpcAddress = "localhost" val rpcPort = port.toInt var retry = true val deadlineTime = deadline.seconds.fromNow while (retry && deadlineTime.hasTimeLeft) { val tr: TTransport = new TFramedTransport(new TSocket(rpcAddress, rpcPort)) try { tr.open retry = false } catch { case e: TTransportException => { infoPrintFunc(s"waiting for cassandra to boot on port $rpcPort") Thread.sleep(500) } } if (tr.isOpen) { tr.close } } } def initCassandra(cli: String, cql: String, classpath: String, cassHome: File, host: String, port: String, cqlPort: String): Unit = { if(cli != defaultCliInit && cql != defaultCqlInit) { sys.error("use cli initiation commands, or cql initiation commands, but not both!") } else if(cli != defaultCliInit) { val bin = cassHome / "bin" / "cassandra-cli" val args = Seq(bin.getAbsolutePath, "-f", cli,"-h",host,"-p",port) Process(args,cassHome).! } else if(cql != defaultCqlInit) { val bin = cassHome / "bin" / "cqlsh" val cqlPath = new File(cql).getAbsolutePath val args = Seq(bin.getAbsolutePath, "-f", cqlPath,host,cqlPort) Process(args,cassHome).! } } def overrideConfigs(confDir: String, confMappings: Seq[(String,java.lang.Object)], logger: Logger): Unit = { val cassandraYamlPath = s"$confDir/cassandra.yaml" val yaml = new Yaml val cassandraYamlMap = yaml.load(new FileInputStream(new File(cassandraYamlPath))) .asInstanceOf[java.util.LinkedHashMap[String, java.lang.Object]] confMappings.foreach{ case (prop,value) => { logger.info(s"setting configuration [$prop] with [$value]") cassandraYamlMap.put(prop, value) } } val it = cassandraYamlMap.entrySet().iterator() val m = new java.util.LinkedHashMap[String, java.lang.Object]() while(it.hasNext) { val cur = it.next() if(cur.getValue != null) { m.put(cur.getKey, cur.getValue) } } val ymlContent = yaml.dump(m) logger.debug(ymlContent) sbt.IO.write(file(cassandraYamlPath), ymlContent, java.nio.charset.StandardCharsets.UTF_8, false) } }
e-orz/CM-Well
server/project/sbt-cassandra-plugin/src/main/scala/com/github/hochgi/sbt/cassandra/CassandraPlugin.scala
Scala
apache-2.0
11,209
package com.sksamuel.elastic4s.http.search.queries.span

import com.sksamuel.elastic4s.searches.queries.span.{SpanNearQuery, SpanOrQuery, SpanTermQuery}
import org.scalatest.FunSuite

import scala.util.parsing.json.JSON

class SpanOrQueryBodyFnTest extends FunSuite {

  test("SpanOrQueryBodyFn apply should return appropriate XContentBuilder") {
    val builder = SpanOrQueryBodyFn.apply(SpanOrQuery(
      Seq(
        SpanTermQuery("field1", "value1", Some("name1"), Some(4.0)),
        SpanTermQuery("field2", "value2", Some("name2"), Some(7.0))
      ),
      boost = Some(2.0),
      queryName = Some("rootName")
    ))

    val actual = JSON.parseRaw(builder.string())
    val expected = JSON.parseRaw(
      """
        |{
        |  "span_or":{
        |    "clauses":[
        |      {"span_term":{"field1":"value1","boost":4.0,"_name":"name1"}},
        |      {"span_term":{"field2":"value2","boost":7.0,"_name":"name2"}}
        |    ],
        |    "boost":2.0,
        |    "_name":"rootName"
        |  }
        |}""".stripMargin)

    assert(actual === expected)
  }
}
Tecsisa/elastic4s
elastic4s-http/src/test/scala/com/sksamuel/elastic4s/http/search/queries/span/SpanOrQueryBodyFnTest.scala
Scala
apache-2.0
1,098
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.ml.regression import com.github.fommil.netlib.BLAS.{getInstance => blas} import org.apache.spark.Logging import org.apache.spark.annotation.Experimental import org.apache.spark.ml.{PredictionModel, Predictor} import org.apache.spark.ml.param.{Param, ParamMap} import org.apache.spark.ml.tree.{DecisionTreeModel, GBTParams, TreeEnsembleModel, TreeRegressorParams} import org.apache.spark.ml.util.{Identifiable, MetadataUtils} import org.apache.spark.mllib.linalg.Vector import org.apache.spark.mllib.regression.LabeledPoint import org.apache.spark.mllib.tree.{GradientBoostedTrees => OldGBT} import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo} import org.apache.spark.mllib.tree.loss.{AbsoluteError => OldAbsoluteError, Loss => OldLoss, SquaredError => OldSquaredError} import org.apache.spark.mllib.tree.model.{GradientBoostedTreesModel => OldGBTModel} import org.apache.spark.rdd.RDD import org.apache.spark.sql.DataFrame /** * :: Experimental :: * [[http://en.wikipedia.org/wiki/Gradient_boosting Gradient-Boosted Trees (GBTs)]] * learning algorithm for regression. * It supports both continuous and categorical features. */ @Experimental final class GBTRegressor(override val uid: String) extends Predictor[Vector, GBTRegressor, GBTRegressionModel] with GBTParams with TreeRegressorParams with Logging { def this() = this(Identifiable.randomUID("gbtr")) // Override parameter setters from parent trait for Java API compatibility. // Parameters from TreeRegressorParams: override def setMaxDepth(value: Int): this.type = super.setMaxDepth(value) override def setMaxBins(value: Int): this.type = super.setMaxBins(value) override def setMinInstancesPerNode(value: Int): this.type = super.setMinInstancesPerNode(value) override def setMinInfoGain(value: Double): this.type = super.setMinInfoGain(value) override def setMaxMemoryInMB(value: Int): this.type = super.setMaxMemoryInMB(value) override def setCacheNodeIds(value: Boolean): this.type = super.setCacheNodeIds(value) override def setCheckpointInterval(value: Int): this.type = super.setCheckpointInterval(value) /** * The impurity setting is ignored for GBT models. * Individual trees are built using impurity "Variance." 
*/ override def setImpurity(value: String): this.type = { logWarning("GBTRegressor.setImpurity should NOT be used") this } // Parameters from TreeEnsembleParams: override def setSubsamplingRate(value: Double): this.type = super.setSubsamplingRate(value) override def setSeed(value: Long): this.type = { logWarning("The 'seed' parameter is currently ignored by Gradient Boosting.") super.setSeed(value) } // Parameters from GBTParams: override def setMaxIter(value: Int): this.type = super.setMaxIter(value) override def setStepSize(value: Double): this.type = super.setStepSize(value) // Parameters for GBTRegressor: /** * Loss function which GBT tries to minimize. (case-insensitive) * Supported: "squared" (L2) and "absolute" (L1) * (default = squared) * @group param */ val lossType: Param[String] = new Param[String](this, "lossType", "Loss function which GBT" + " tries to minimize (case-insensitive). Supported options:" + s" ${GBTRegressor.supportedLossTypes.mkString(", ")}", (value: String) => GBTRegressor.supportedLossTypes.contains(value.toLowerCase)) setDefault(lossType -> "squared") /** @group setParam */ def setLossType(value: String): this.type = set(lossType, value) /** @group getParam */ def getLossType: String = $(lossType).toLowerCase /** (private[ml]) Convert new loss to old loss. */ override private[ml] def getOldLossType: OldLoss = { getLossType match { case "squared" => OldSquaredError case "absolute" => OldAbsoluteError case _ => // Should never happen because of check in setter method. throw new RuntimeException(s"GBTRegressorParams was given bad loss type: $getLossType") } } override protected def train(dataset: DataFrame): GBTRegressionModel = { val categoricalFeatures: Map[Int, Int] = MetadataUtils.getCategoricalFeatures(dataset.schema($(featuresCol))) val oldDataset: RDD[LabeledPoint] = extractLabeledPoints(dataset) val boostingStrategy = super.getOldBoostingStrategy(categoricalFeatures, OldAlgo.Regression) val oldGBT = new OldGBT(boostingStrategy) val oldModel = oldGBT.run(oldDataset) GBTRegressionModel.fromOld(oldModel, this, categoricalFeatures) } override def copy(extra: ParamMap): GBTRegressor = defaultCopy(extra) } @Experimental object GBTRegressor { // The losses below should be lowercase. /** Accessor for supported loss settings: squared (L2), absolute (L1) */ final val supportedLossTypes: Array[String] = Array("squared", "absolute").map(_.toLowerCase) } /** * :: Experimental :: * * [[http://en.wikipedia.org/wiki/Gradient_boosting Gradient-Boosted Trees (GBTs)]] * model for regression. * It supports both continuous and categorical features. * @param _trees Decision trees in the ensemble. * @param _treeWeights Weights for the decision trees in the ensemble. */ @Experimental final class GBTRegressionModel( override val uid: String, private val _trees: Array[DecisionTreeRegressionModel], private val _treeWeights: Array[Double]) extends PredictionModel[Vector, GBTRegressionModel] with TreeEnsembleModel with Serializable { require(numTrees > 0, "GBTRegressionModel requires at least 1 tree.") require(_trees.length == _treeWeights.length, "GBTRegressionModel given trees, treeWeights of" + s" non-matching lengths (${_trees.length}, ${_treeWeights.length}, respectively).") override def trees: Array[DecisionTreeModel] = _trees.asInstanceOf[Array[DecisionTreeModel]] override def treeWeights: Array[Double] = _treeWeights override protected def predict(features: Vector): Double = { // TODO: Override transform() to broadcast model. 
SPARK-7127 // TODO: When we add a generic Boosting class, handle transform there? SPARK-7129 // Classifies by thresholding sum of weighted tree predictions val treePredictions = _trees.map(_.rootNode.predict(features)) blas.ddot(numTrees, treePredictions, 1, _treeWeights, 1) } override def copy(extra: ParamMap): GBTRegressionModel = { copyValues(new GBTRegressionModel(uid, _trees, _treeWeights), extra) } override def toString: String = { s"GBTRegressionModel with $numTrees trees" } /** (private[ml]) Convert to a model in the old API */ private[ml] def toOld: OldGBTModel = { new OldGBTModel(OldAlgo.Regression, _trees.map(_.toOld), _treeWeights) } } private[ml] object GBTRegressionModel { /** (private[ml]) Convert a model from the old API */ def fromOld( oldModel: OldGBTModel, parent: GBTRegressor, categoricalFeatures: Map[Int, Int]): GBTRegressionModel = { require(oldModel.algo == OldAlgo.Regression, "Cannot convert GradientBoostedTreesModel" + s" with algo=${oldModel.algo} (old API) to GBTRegressionModel (new API).") val newTrees = oldModel.trees.map { tree => // parent for each tree is null since there is no good way to set this. DecisionTreeRegressionModel.fromOld(tree, null, categoricalFeatures) } val uid = if (parent != null) parent.uid else Identifiable.randomUID("gbtr") new GBTRegressionModel(parent.uid, newTrees, oldModel.treeWeights) } }
andrewor14/iolap
mllib/src/main/scala/org/apache/spark/ml/regression/GBTRegressor.scala
Scala
apache-2.0
8,294
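A minimal usage sketch for the GBTRegressor entry above (Spark 1.x-era spark.ml API). The DataFrames `training` and `test` and their column names are assumptions for illustration, not part of the source file:

import org.apache.spark.ml.regression.GBTRegressor

// Fit a gradient-boosted-trees regressor and score a held-out DataFrame.
// `training` and `test` are assumed DataFrames with "label" and "features" columns.
val gbt = new GBTRegressor()
  .setLabelCol("label")
  .setFeaturesCol("features")
  .setMaxIter(20)          // number of boosting iterations
  .setMaxDepth(5)
  .setLossType("squared")  // "squared" (L2) or "absolute" (L1), per supportedLossTypes
val model = gbt.fit(training)
val predictions = model.transform(test)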
package forex

import scalaz._
import Scalaz._
import scalaz.Validation.FlatMap._

import forex.{OrderTotal => OT}          // a
import forex.{ExchangeRateLookup => ERL}

object OrderTotalConverter1 {

  def convert(rawCurrency: String, rawAmount: String): ValidationNel[String, OrderTotal] = {
    for {
      total <- OT.parse(rawCurrency, rawAmount)             // b
      rate  <- ERL.lookup(total.currency).toValidationNel   // c
      base   = OT(Currency.Eur, total.amount * rate)        // d
    } yield base
  }
}
alexanderdean/Unified-Log-Processing
ch07/7.4/forex/src/main/scala/forex/OrderTotalConverter1.scala
Scala
apache-2.0
575
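For illustration, a self-contained sketch of the same parse-then-lookup composition using the standard library's Either instead of scalaz's ValidationNel; all names here (parse, lookupRate, the EUR rebase) are hypothetical stand-ins for OT.parse and ERL.lookup:

object OrderTotalConverterSketch {
  final case class OrderTotal(currency: String, amount: BigDecimal)

  // Hypothetical stand-in for OT.parse: validate the raw inputs.
  def parse(rawCurrency: String, rawAmount: String): Either[String, OrderTotal] =
    try Right(OrderTotal(rawCurrency, BigDecimal(rawAmount)))
    catch { case _: NumberFormatException => Left(s"cannot parse amount: $rawAmount") }

  // Hypothetical stand-in for ERL.lookup: exchange rate into EUR.
  def lookupRate(currency: String): Either[String, BigDecimal] =
    if (currency == "USD") Right(BigDecimal("0.92")) else Left(s"unknown currency: $currency")

  // Mirrors OrderTotalConverter1.convert: parse, look up a rate, rebase to EUR.
  def convert(rawCurrency: String, rawAmount: String): Either[String, OrderTotal] =
    for {
      total <- parse(rawCurrency, rawAmount)
      rate  <- lookupRate(total.currency)
    } yield OrderTotal("EUR", total.amount * rate)
}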
/*
 * Copyright 2013 - 2017 Outworkers Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.outworkers.morpheus

import java.sql.{Date => SqlDate}
import java.util.Date

import org.joda.time.{DateTime, DateTimeZone}
import org.scalacheck.{Arbitrary, Gen}

import scala.math.BigDecimal.RoundingMode

trait CustomSamplers {

  val offset = 10000

  implicit val dateGen: Arbitrary[Date] =
    Arbitrary(Gen.delay(new Date(new DateTime(DateTimeZone.UTC).getMillis)))

  implicit val sqlDateGen: Arbitrary[SqlDate] =
    Arbitrary(Gen.delay(new SqlDate(new DateTime(DateTimeZone.UTC).getMillis)))

  implicit val floatGen: Arbitrary[Float] =
    Arbitrary(Arbitrary.arbFloat.arbitrary.map(fl => BigDecimal(fl).setScale(2, RoundingMode.HALF_UP).toFloat))

  implicit val jodaGen: Arbitrary[DateTime] = Arbitrary {
    for {
      offset <- Gen.choose(-offset, offset)
      now = DateTime.now(DateTimeZone.UTC)
    } yield now.plusMillis(offset)
  }

  implicit class JodaDateAug(val dt: DateTime) {
    def asSql: SqlDate = new SqlDate(dt.getMillis)
  }

  implicit class JavaDateAug(val dt: Date) {
    def asSql: SqlDate = new SqlDate(dt.getTime)
  }
}
websudos/morpheus
morpheus-dsl/src/test/scala/com/outworkers/morpheus/CustomSamplers.scala
Scala
bsd-2-clause
1,664
package lila.security

import lila.common.String._
import lila.hub.actorApi.message.LichessThread
import lila.user.User

import akka.actor.ActorSelection

final class Greeter(
    sender: String,
    messenger: ActorSelection) {

  def apply(user: User) {
    messenger ! LichessThread(
      from = sender,
      to = user.id,
      subject = s"""Hi ${user.username}, welcome to lichess.org!""",
      message = s"""
Thank you, ${user.username}, for joining lichess.org!

Here, all features are completely free and available without limitation for now and forever.
We are the world's community answer to commercial websites: yes, chess can be free!

Note that we don't ask for your email during registration. However you can still link it to your account as a way to recover your password: http://lichess.org/account/email.

Now play some games, enjoy the computer analysis, try out tournaments and maybe some variants!

We wish you fantastic games and loads of fun :)

Cheers,

Lichess team
""")
  }
}
danilovsergey/i-bur
modules/security/src/main/Greeter.scala
Scala
mit
1,002
package shark.api

import scala.reflect.classTag

object ClassTags {

  // List of primitive ClassTags.
  val jBoolean = classTag[java.lang.Boolean]
  val jByte = classTag[java.lang.Byte]
  val jShort = classTag[java.lang.Short]
  val jInt = classTag[java.lang.Integer]
  val jLong = classTag[java.lang.Long]
  val jFloat = classTag[java.lang.Float]
  val jDouble = classTag[java.lang.Double]
}
uclaabs/abs
src/main/scala/shark/api/ClassTags.scala
Scala
apache-2.0
394
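A small illustrative sketch (not from the repository): a ClassTag such as ClassTags.jLong is what lets generic code allocate arrays of the boxed Java types at runtime.

import scala.reflect.ClassTag

// Generic array factory; the ClassTag carries the runtime class needed by `new Array`.
def newArrayOf[T](n: Int)(implicit ct: ClassTag[T]): Array[T] = new Array[T](n)

// Passing one of the tags above explicitly (assumes shark.api.ClassTags is on the classpath).
val boxedLongs: Array[java.lang.Long] = newArrayOf[java.lang.Long](3)(shark.api.ClassTags.jLong)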
package com.stulsoft.ysps.pmaybe

/**
  * Created by Yuriy Stul on 9/18/2016.
  *
  * @see http://groz.github.io/scala/intro/monads/
  */
object MaybeWithMapAndFlatMap {

  abstract class Maybe[+T] {
    def map[U](f: T => U): Maybe[U]
    def flatMap[U](f: T => Maybe[U]): Maybe[U]
  }

  case class Just[T](get: T) extends Maybe[T] {
    def map[U](f: T => U) = Just(f(get))
    def flatMap[U](f: T => Maybe[U]): Maybe[U] = f(get)
  }

  case object Not extends Maybe[Nothing] {
    def map[U](f: Nothing => U) = Not // Always Not
    def flatMap[U](f: Nothing => Maybe[U]): Maybe[U] = Not
  }

  case class Person(
    name: String,
    nickname: Maybe[String],
    height: Maybe[Double],
    weight: Maybe[Double])

  def isTall(p: Person): Maybe[Boolean] =
    p.height.map(_ > 1.9)

  def nicknameLength(p: Person): Maybe[Int] =
    p.nickname.map { _.length }

  def calcBMI(p: Person): Maybe[Double] =
    p.weight.flatMap { w =>
      p.height.map { h =>
        w / (h * h)
      }
    }

  def main(args: Array[String]): Unit = {
    println("==>main")
    test1()
    test2()
    test3()
    test4()
    println("<==main")
  }

  def test1(): Unit = {
    println("==>test1")
    val p = new Person("Yurik", Just("den"), Just(1.75), Just(75.0))
    println(s"isTall(p) = ${isTall(p)}")
    println(s"nicknameLength(p) = ${nicknameLength(p)}")
    println(s"calcBMI(p) = ${calcBMI(p)}")
    println("<==test1")
  }

  def test2(): Unit = {
    println("==>test2")
    val p = new Person("Yurik", Just("den"), Just(2.1), Just(75.0))
    println(s"isTall(p) = ${isTall(p)}")
    println(s"nicknameLength(p) = ${nicknameLength(p)}")
    println(s"calcBMI(p) = ${calcBMI(p)}")
    println("<==test2")
  }

  def test3(): Unit = {
    println("==>test3")
    val p = new Person("Yurik", Just("den"), Not, Just(75.0))
    println(s"isTall(p) = ${isTall(p)}")
    println(s"nicknameLength(p) = ${nicknameLength(p)}")
    println(s"calcBMI(p) = ${calcBMI(p)}")
    println("<==test3")
  }

  def test4(): Unit = {
    println("==>test4")
    val p = new Person("Yurik", Not, Not, Not)
    println(s"isTall(p) = ${isTall(p)}")
    println(s"nicknameLength(p) = ${nicknameLength(p)}")
    println(s"calcBMI(p) = ${calcBMI(p)}")
    println("<==test4")
  }
}
ysden123/ysps
src/main/scala/com/stulsoft/ysps/pmaybe/MaybeWithMapAndFlatMap.scala
Scala
mit
2,318
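Because the Maybe type above defines both map and flatMap, it also works in for-comprehensions. A small sketch (assuming the definitions above are in scope) showing calcBMI rewritten that way:

import com.stulsoft.ysps.pmaybe.MaybeWithMapAndFlatMap._

// Desugars to weight.flatMap(w => height.map(h => ...)), i.e. exactly calcBMI.
def calcBMIFor(p: Person): Maybe[Double] =
  for {
    w <- p.weight
    h <- p.height
  } yield w / (h * h)

// Any Not in the chain short-circuits the whole computation to Not:
// calcBMIFor(Person("Yurik", Not, Not, Not)) == Not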
/* * Copyright 2011-2014 Chris de Vreeze * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package nl.ebpi.yaidom.integrationtest import java.{ io => jio } import java.io.File import java.io.FileInputStream import java.{ util => jutil } import org.junit.Test import org.junit.runner.RunWith import org.scalatest.BeforeAndAfterAll import org.scalatest.ConfigMap import org.scalatest.Suite import org.scalatest.junit.JUnitRunner import nl.ebpi.yaidom.convert.StaxConversions.asIterator import nl.ebpi.yaidom.convert.StaxConversions.convertToEventWithEndStateIterator import nl.ebpi.yaidom.convert.StaxConversions.takeElem import javax.xml.stream.XMLInputFactory import javax.xml.stream.events.XMLEvent import javax.xml.transform.stream.StreamSource /** * Large XML test case, using streaming, thus keeping the memory footprint low. * * Acknowledgments: The large XML files come from http://javakata6425.appspot.com/#!goToPageIIIarticleIIIOptimally%20parse%20humongous%20XML%20files%20with%20vanilla%20Java. * * @author Chris de Vreeze */ @RunWith(classOf[JUnitRunner]) class StreamingLargeXmlTest extends Suite with BeforeAndAfterAll { @volatile private var xmlBytes: Array[Byte] = _ override def beforeAll(configMap: ConfigMap): Unit = { val zipFileUrl = classOf[StreamingLargeXmlTest].getResource("bigFile.zip") val zipFile = new jutil.zip.ZipFile(new jio.File(zipFileUrl.toURI)) val zipEntries = zipFile.entries() require(zipEntries.hasMoreElements()) val zipEntry: jutil.zip.ZipEntry = zipEntries.nextElement() val is = new jio.BufferedInputStream(zipFile.getInputStream(zipEntry)) val bos = new jio.ByteArrayOutputStream var b: Int = -1 while ({ b = is.read(); b >= 0 }) { bos.write(b) } is.close() this.xmlBytes = bos.toByteArray } /** * Test showing how StAX can help process very large XML inputs in many situations. * It is neither elegant nor fast code, but chunks of the input XML are processed by yaidom. * * This test example is simple, and does not use any namespaces. 
*/ @Test def testProcessLargeXmlUsingStreaming(): Unit = { val inputFactory = XMLInputFactory.newInstance val streamSource = new StreamSource(new jio.ByteArrayInputStream(this.xmlBytes)) val xmlEventReader = inputFactory.createXMLEventReader(streamSource) var it = convertToEventWithEndStateIterator(asIterator(xmlEventReader)).buffered var contactCount = 0 var elemCount = 0 def isStartContact(xmlEvent: XMLEvent): Boolean = xmlEvent.isStartElement() && xmlEvent.asStartElement().getName.getLocalPart == "contact" it = it.dropWhile(es => !isStartContact(es.event)).buffered while (it.hasNext) { val contactResult = takeElem(it) val contactElem = contactResult.elem it = contactResult.remainder assert(contactElem.localName == "contact") contactCount += 1 elemCount += contactElem.findAllElemsOrSelf.size assertResult(true) { Set("firstName", "lastName").subsetOf(contactElem.findAllElems.map(_.localName).toSet) } it = it.dropWhile(es => !isStartContact(es.event)).buffered } assertResult(true) { contactCount >= 1000 } assertResult(true) { elemCount >= 10000 } } @Test def testProcessAnotherXmlUsingStreaming(): Unit = { val fileUri = classOf[StreamingLargeXmlTest].getResource("enterprise-info.xml").toURI val inputFactory = XMLInputFactory.newInstance val streamSource = new StreamSource(new FileInputStream(new File(fileUri))) val xmlEventReader = inputFactory.createXMLEventReader(streamSource) var it = convertToEventWithEndStateIterator(asIterator(xmlEventReader)).buffered var enterpriseCount = 0 def isEnterprise(xmlEvent: XMLEvent): Boolean = xmlEvent.isStartElement() && xmlEvent.asStartElement().getName.getLocalPart == "Enterprise" it = it.dropWhile(es => !isEnterprise(es.event)).buffered while (it.hasNext) { val enterpriseResult = takeElem(it) val enterpriseElem = enterpriseResult.elem it = enterpriseResult.remainder assert(enterpriseElem.localName == "Enterprise") enterpriseCount += 1 if (enterpriseCount % 100 == 0) { assertResult(true) { Set("Address", "LocalUnit").subsetOf(enterpriseElem.findAllChildElems.map(_.localName).toSet) } } it = it.dropWhile(es => !isEnterprise(es.event)).buffered } assertResult(2000) { enterpriseCount } } }
EBPI/yaidom
src/test/scala/nl/ebpi/yaidom/integrationtest/StreamingLargeXmlTest.scala
Scala
apache-2.0
5,050
package springnz.sparkplug.client

import akka.actor.TypedActor.PreStart
import akka.actor._
import springnz.sparkplug.core.SparkPlugException
import springnz.sparkplug.executor.MessageTypes._
import springnz.sparkplug.util.Logging

import scala.concurrent._

object SingleJobProcessor {
  def props(jobRequest: JobRequest, broker: ActorRef, requestor: Option[ActorRef], promise: Option[Promise[Any]], jobIndex: Int) =
    Props(new SingleJobProcessor(jobRequest, broker, requestor, promise, jobIndex))
}

class SingleJobProcessor(jobRequest: JobRequest, broker: ActorRef, requestor: Option[ActorRef], promise: Option[Promise[Any]], jobIndex: Int)
    extends Actor with PreStart with Logging {

  import Coordinator._

  override def preStart() = {
    log.info(s"SingleJobProcessor delegated to run request: $jobRequest")
    context.watch(broker)
    broker ! jobRequest
  }

  override def receive: Receive = {
    case jobSuccess @ JobSuccess(_, result) ⇒
      log.info(s"Received Result from sender: $sender")
      log.info(s"Result value: $result")
      // tell the requestor the job succeeded, but make it look like it came from the coordinator
      val coordinator = context.parent
      requestor.foreach(_.tell(jobSuccess, coordinator))
      promise.foreach(_.success(result))
      coordinator ! JobCompleteIndex(jobIndex)
      self ! PoisonPill

    case jobFailure @ JobFailure(_, reason) ⇒
      log.error(s"Received JobFailure from sender: $sender")
      log.error(s"Reason: ${reason.getMessage}")
      val coordinator = context.parent
      requestor.foreach(_.tell(jobFailure, coordinator))
      promise.foreach(_.failure(reason))
      coordinator ! JobCompleteIndex(jobIndex)
      self ! PoisonPill

    case Terminated(_) ⇒
      val msg = s"Lost contact with broker, or broker terminated: $sender"
      log.error(msg)
      self ! JobFailure(jobRequest, new SparkPlugException(msg))
  }
}
springnz/sparkplug
sparkplug-launcher/src/main/scala/springnz/sparkplug/client/SingleJobProcessor.scala
Scala
mit
1,929
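A hedged sketch of how a parent coordinator might spawn the processor above; the actor context, broker reference and job request are assumptions, not taken from the source file:

// Inside the coordinator actor (context, broker and request are assumed to exist):
import scala.concurrent.Promise

val resultPromise = Promise[Any]()
val processor = context.actorOf(
  SingleJobProcessor.props(request, broker, requestor = None, promise = Some(resultPromise), jobIndex = 0),
  name = "job-processor-0")
// The processor forwards the request to the broker on preStart and completes
// resultPromise (or fails it) once JobSuccess / JobFailure comes back.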
import scala.scalajs.js
import scala.scalajs.js.annotation.*

class A {
  @JSExport("toString") // error
  def a(): Int = 5
}

class B {
  @JSExport("toString") // ok
  def a(x: Int): Int = x + 1
}
dotty-staging/dotty
tests/neg-scalajs/jsexport-bad-tostring.scala
Scala
apache-2.0
198
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution import org.apache.spark.annotation.DeveloperApi import org.apache.spark.rdd.RDD import org.apache.spark.sql.catalyst.{InternalRow, CatalystTypeConverters} import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation import org.apache.spark.sql.catalyst.expressions.{Attribute, GenericMutableRow} import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Statistics} import org.apache.spark.sql.sources.BaseRelation import org.apache.spark.sql.types.DataType import org.apache.spark.sql.{Row, SQLContext} /** * :: DeveloperApi :: */ @DeveloperApi object RDDConversions { def productToRowRdd[A <: Product](data: RDD[A], outputTypes: Seq[DataType]): RDD[InternalRow] = { data.mapPartitions { iterator => val numColumns = outputTypes.length val mutableRow = new GenericMutableRow(numColumns) val converters = outputTypes.map(CatalystTypeConverters.createToCatalystConverter) iterator.map { r => var i = 0 while (i < numColumns) { mutableRow(i) = converters(i)(r.productElement(i)) i += 1 } mutableRow } } } /** * Convert the objects inside Row into the types Catalyst expected. * 将Row内的对象转换为预期的Catalyst类型 */ def rowToRowRdd(data: RDD[Row], outputTypes: Seq[DataType]): RDD[InternalRow] = { data.mapPartitions { iterator => val numColumns = outputTypes.length val mutableRow = new GenericMutableRow(numColumns) val converters = outputTypes.map(CatalystTypeConverters.createToCatalystConverter) iterator.map { r => var i = 0 while (i < numColumns) { mutableRow(i) = converters(i)(r(i)) i += 1 } mutableRow } } } } /** Logical plan node for scanning data from an RDD. * 用于扫描RDD数据的逻辑计划节点*/ private[sql] case class LogicalRDD( output: Seq[Attribute], rdd: RDD[InternalRow])(sqlContext: SQLContext) extends LogicalPlan with MultiInstanceRelation { //Nil是一个空的List,::向队列的头部追加数据,创造新的列表 override def children: Seq[LogicalPlan] = Nil override def newInstance(): LogicalRDD.this.type = LogicalRDD(output.map(_.newInstance()), rdd)(sqlContext).asInstanceOf[this.type] override def sameResult(plan: LogicalPlan): Boolean = plan match { case LogicalRDD(_, otherRDD) => rdd.id == otherRDD.id case _ => false } @transient override lazy val statistics: Statistics = Statistics( // TODO: Instead of returning a default value here, find a way to return a meaningful size // estimate for RDDs. See PR 1238 for more discussions. sizeInBytes = BigInt(sqlContext.conf.defaultSizeInBytes) ) } /** Physical plan node for scanning data from an RDD. 
* 用于扫描来自RDD的数据的物理计划节点 */ private[sql] case class PhysicalRDD( output: Seq[Attribute], rdd: RDD[InternalRow], extraInformation: String) extends LeafNode { protected override def doExecute(): RDD[InternalRow] = rdd override def simpleString: String = "Scan " + extraInformation + output.mkString("[", ",", "]") } private[sql] object PhysicalRDD { def createFromDataSource( output: Seq[Attribute], rdd: RDD[InternalRow], relation: BaseRelation): PhysicalRDD = { PhysicalRDD(output, rdd, relation.toString) } } /** Logical plan node for scanning data from a local collection. * 用于扫描本地集合中的数据的逻辑计划节点*/ private[sql] case class LogicalLocalTable(output: Seq[Attribute], rows: Seq[InternalRow])(sqlContext: SQLContext) extends LogicalPlan with MultiInstanceRelation { override def children: Seq[LogicalPlan] = Nil override def newInstance(): this.type = LogicalLocalTable(output.map(_.newInstance()), rows)(sqlContext).asInstanceOf[this.type] override def sameResult(plan: LogicalPlan): Boolean = plan match { case LogicalRDD(_, otherRDD) => rows == rows case _ => false } @transient override lazy val statistics: Statistics = Statistics( // TODO: Improve the statistics estimation. // This is made small enough so it can be broadcasted. sizeInBytes = sqlContext.conf.autoBroadcastJoinThreshold - 1 ) }
tophua/spark1.52
sql/core/src/main/scala/org/apache/spark/sql/execution/ExistingRDD.scala
Scala
apache-2.0
5,086
package blended.streams.jms import java.util.concurrent.atomic.AtomicBoolean import akka.Done import akka.stream._ import akka.stream.stage.{AsyncCallback, TimerGraphStageLogic} import blended.jms.utils.JmsSession import blended.util.logging.Logger import scala.concurrent.Future import scala.util.control.NonFatal // Common logic for the Source Stages with Auto Acknowledge and Client Acknowledge abstract class JmsStageLogic[S <: JmsSession, T <: JmsSettings]( settings: T, inheritedAttributes: Attributes, shape : Shape ) extends TimerGraphStageLogic(shape) with JmsConnector[S] { override protected def jmsSettings: T = settings // Is the Source currently stopping ? private[jms] val stopping = new AtomicBoolean(false) // Is the source stopped ? private[jms] var stopped = new AtomicBoolean(false) private[jms] def doMarkStopped = stopped.set(true) // Mark the source as stopped and try to finish handling all in flight messages private[jms] val markStopped = getAsyncCallback[Done.type] { _ => doMarkStopped } // Mark the source as failed and abort all message processing private[jms] val markAborted = getAsyncCallback[Throwable] { ex => stopped.set(true) failStage(ex) } // async callback, so that downstream flow elements can signal an error private[jms] val handleError : AsyncCallback[Throwable] // Start the configured sessions override def preStart(): Unit = { settings.log.info(s"Starting JMS Stage [$id] with [$jmsSettings]") materializer match { case am : ActorMaterializer => system = am.system ec = system.dispatchers.lookup("FixedPool") case _ => failStage(new Exception(s"Expected to run on top of an ActorSystem [$id]")) } initSessionAsync() } // Asynchronously close all sessions created on behalf of this Source stage // TODO: For the special case of using a BlendedSingleConnectionFactory, handle the ExceptionListener correctly private[jms] def stopSessions(): Unit = if (stopping.compareAndSet(false, true)) { val closeSessionFutures = jmsSessions.values.map { s => val f = s.closeSessionAsync() f.failed.foreach(e => settings.log.error(e)(s"Error closing jms session in JMS source stage [$id]")) f } Future .sequence(closeSessionFutures) .onComplete { _ => jmsConnection.foreach { connection => try { connection.close() } catch { case NonFatal(e) => settings.log.error(e)(s"Error closing JMS connection in Jms source stage [$id]") } finally { // By this time, after stopping the connection, closing sessions, all async message submissions to this // stage should have been invoked. We invoke markStopped as the last item so it gets delivered after // all JMS messages are delivered. This will allow the stage to complete after all pending messages // are delivered, thus preventing message loss due to premature stage completion. 
markStopped.invoke(Done) settings.log.debug(s"Successfully closed all sessions for Jms stage [$id][$settings]") } } } } private[jms] def abortSessions(ex: Throwable): Unit = if (stopping.compareAndSet(false, true)) { val abortSessionFutures = jmsSessions.values.map { s => val f = s.abortSessionAsync() f.failed.foreach(e => settings.log.error(e)(s"Error closing jms session in Jms source stage [$id][$settings]")) f } Future .sequence(abortSessionFutures) .onComplete { _ => jmsConnection.foreach { connection => try { connection.close() } catch { case NonFatal(e) => settings.log.error(e)(s"Error closing JMS connection in Jms source stage [$id]") } finally { markAborted.invoke(ex) } } } } // We expose the killswitch, so that the stage can be closed externally private[jms] def killSwitch = new KillSwitch { override def shutdown(): Unit = stopSessions() override def abort(ex: Throwable): Unit = abortSessions(ex) } override def postStop(): Unit = { stopSessions() } }
lefou/blended
blended.streams/src/main/scala/blended/streams/jms/JmsStageLogic.scala
Scala
apache-2.0
4,317
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution import org.apache.spark.rdd.RDD import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.codegen._ import org.apache.spark.sql.catalyst.expressions.codegen.Block._ import org.apache.spark.sql.catalyst.plans.physical.Partitioning import org.apache.spark.sql.execution.metric.SQLMetrics import org.apache.spark.sql.types._ /** * For lazy computing, be sure the generator.terminate() called in the very last * TODO reusing the CompletionIterator? */ private[execution] sealed case class LazyIterator(func: () => TraversableOnce[InternalRow]) extends Iterator[InternalRow] { lazy val results: Iterator[InternalRow] = func().toIterator override def hasNext: Boolean = results.hasNext override def next(): InternalRow = results.next() } /** * Applies a [[Generator]] to a stream of input rows, combining the * output of each into a new stream of rows. This operation is similar to a `flatMap` in functional * programming with one important additional feature, which allows the input rows to be joined with * their output. * * This operator supports whole stage code generation for generators that do not implement * terminate(). * * @param generator the generator expression * @param requiredChildOutput required attributes from child's output * @param outer when true, each input row will be output at least once, even if the output of the * given `generator` is empty. * @param generatorOutput the qualified output attributes of the generator of this node, which * constructed in analysis phase, and we can not change it, as the * parent node bound with it already. 
*/ case class GenerateExec( generator: Generator, requiredChildOutput: Seq[Attribute], outer: Boolean, generatorOutput: Seq[Attribute], child: SparkPlan) extends UnaryExecNode with CodegenSupport { override def output: Seq[Attribute] = requiredChildOutput ++ generatorOutput override lazy val metrics = Map( "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows")) override def producedAttributes: AttributeSet = AttributeSet(output) override def outputPartitioning: Partitioning = child.outputPartitioning lazy val boundGenerator: Generator = BindReferences.bindReference(generator, child.output) protected override def doExecute(): RDD[InternalRow] = { // boundGenerator.terminate() should be triggered after all of the rows in the partition val numOutputRows = longMetric("numOutputRows") child.execute().mapPartitionsWithIndexInternal { (index, iter) => val generatorNullRow = new GenericInternalRow(generator.elementSchema.length) val rows = if (requiredChildOutput.nonEmpty) { val pruneChildForResult: InternalRow => InternalRow = if (child.outputSet == AttributeSet(requiredChildOutput)) { identity } else { UnsafeProjection.create(requiredChildOutput, child.output) } val joinedRow = new JoinedRow iter.flatMap { row => // we should always set the left (required child output) joinedRow.withLeft(pruneChildForResult(row)) val outputRows = boundGenerator.eval(row) if (outer && outputRows.isEmpty) { joinedRow.withRight(generatorNullRow) :: Nil } else { outputRows.map(joinedRow.withRight) } } ++ LazyIterator(() => boundGenerator.terminate()).map { row => // we leave the left side as the last element of its child output // keep it the same as Hive does joinedRow.withRight(row) } } else { iter.flatMap { row => val outputRows = boundGenerator.eval(row) if (outer && outputRows.isEmpty) { Seq(generatorNullRow) } else { outputRows } } ++ LazyIterator(() => boundGenerator.terminate()) } // Convert the rows to unsafe rows. val proj = UnsafeProjection.create(output, output) proj.initialize(index) rows.map { r => numOutputRows += 1 proj(r) } } } override def supportCodegen: Boolean = false override def inputRDDs(): Seq[RDD[InternalRow]] = { child.asInstanceOf[CodegenSupport].inputRDDs() } protected override def doProduce(ctx: CodegenContext): String = { child.asInstanceOf[CodegenSupport].produce(ctx, this) } override def needCopyResult: Boolean = true override def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: ExprCode): String = { // Add input rows to the values when we are joining val values = if (requiredChildOutput.nonEmpty) { input } else { Seq.empty } boundGenerator match { case e: CollectionGenerator => codeGenCollection(ctx, e, values, row) case g => codeGenTraversableOnce(ctx, g, values, row) } } /** * Generate code for [[CollectionGenerator]] expressions. */ private def codeGenCollection( ctx: CodegenContext, e: CollectionGenerator, input: Seq[ExprCode], row: ExprCode): String = { // Generate code for the generator. val data = e.genCode(ctx) // Generate looping variables. val index = ctx.freshName("index") // Add a check if the generate outer flag is true. 
val checks = optionalCode(outer, s"($index == -1)") // Add position val position = if (e.position) { if (outer) { Seq(ExprCode( JavaCode.isNullExpression(s"$index == -1"), JavaCode.variable(index, IntegerType))) } else { Seq(ExprCode(FalseLiteral, JavaCode.variable(index, IntegerType))) } } else { Seq.empty } // Generate code for either ArrayData or MapData val (initMapData, updateRowData, values) = e.collectionType match { case ArrayType(st: StructType, nullable) if e.inline => val row = codeGenAccessor(ctx, data.value, "col", index, st, nullable, checks) val fieldChecks = checks ++ optionalCode(nullable, row.isNull) val columns = st.fields.toSeq.zipWithIndex.map { case (f, i) => codeGenAccessor( ctx, row.value, s"st_col${i}", i.toString, f.dataType, f.nullable, fieldChecks) } ("", row.code, columns) case ArrayType(dataType, nullable) => ("", "", Seq(codeGenAccessor(ctx, data.value, "col", index, dataType, nullable, checks))) case MapType(keyType, valueType, valueContainsNull) => // Materialize the key and the value arrays before we enter the loop. val keyArray = ctx.freshName("keyArray") val valueArray = ctx.freshName("valueArray") val initArrayData = s""" |ArrayData $keyArray = ${data.isNull} ? null : ${data.value}.keyArray(); |ArrayData $valueArray = ${data.isNull} ? null : ${data.value}.valueArray(); """.stripMargin val values = Seq( codeGenAccessor(ctx, keyArray, "key", index, keyType, nullable = false, checks), codeGenAccessor(ctx, valueArray, "value", index, valueType, valueContainsNull, checks)) (initArrayData, "", values) } // In case of outer=true we need to make sure the loop is executed at-least once when the // array/map contains no input. We do this by setting the looping index to -1 if there is no // input, evaluation of the array is prevented by a check in the accessor code. val numElements = ctx.freshName("numElements") val init = if (outer) { s"$numElements == 0 ? -1 : 0" } else { "0" } val numOutput = metricTerm(ctx, "numOutputRows") s""" |${data.code} |$initMapData |int $numElements = ${data.isNull} ? 0 : ${data.value}.numElements(); |for (int $index = $init; $index < $numElements; $index++) { | $numOutput.add(1); | $updateRowData | ${consume(ctx, input ++ position ++ values)} |} """.stripMargin } /** * Generate code for a regular [[TraversableOnce]] returning [[Generator]]. */ private def codeGenTraversableOnce( ctx: CodegenContext, e: Expression, input: Seq[ExprCode], row: ExprCode): String = { // Generate the code for the generator val data = e.genCode(ctx) // Generate looping variables. val iterator = ctx.freshName("iterator") val hasNext = ctx.freshName("hasNext") val current = ctx.freshName("row") // Add a check if the generate outer flag is true. val checks = optionalCode(outer, s"!$hasNext") val values = e.dataType match { case ArrayType(st: StructType, nullable) => st.fields.toSeq.zipWithIndex.map { case (f, i) => codeGenAccessor(ctx, current, s"st_col${i}", s"$i", f.dataType, f.nullable, checks) } } // In case of outer=true we need to make sure the loop is executed at-least-once when the // iterator contains no input. We do this by adding an 'outer' variable which guarantees // execution of the first iteration even if there is no input. Evaluation of the iterator is // prevented by checks in the next() and accessor code. 
val numOutput = metricTerm(ctx, "numOutputRows") if (outer) { val outerVal = ctx.freshName("outer") s""" |${data.code} |scala.collection.Iterator<InternalRow> $iterator = ${data.value}.toIterator(); |boolean $outerVal = true; |while ($iterator.hasNext() || $outerVal) { | $numOutput.add(1); | boolean $hasNext = $iterator.hasNext(); | InternalRow $current = (InternalRow)($hasNext? $iterator.next() : null); | $outerVal = false; | ${consume(ctx, input ++ values)} |} """.stripMargin } else { s""" |${data.code} |scala.collection.Iterator<InternalRow> $iterator = ${data.value}.toIterator(); |while ($iterator.hasNext()) { | $numOutput.add(1); | InternalRow $current = (InternalRow)($iterator.next()); | ${consume(ctx, input ++ values)} |} """.stripMargin } } /** * Generate accessor code for ArrayData and InternalRows. */ private def codeGenAccessor( ctx: CodegenContext, source: String, name: String, index: String, dt: DataType, nullable: Boolean, initialChecks: Seq[String]): ExprCode = { val value = ctx.freshName(name) val javaType = CodeGenerator.javaType(dt) val getter = CodeGenerator.getValue(source, dt, index) val checks = initialChecks ++ optionalCode(nullable, s"$source.isNullAt($index)") if (checks.nonEmpty) { val isNull = ctx.freshName("isNull") val code = code""" |boolean $isNull = ${checks.mkString(" || ")}; |$javaType $value = $isNull ? ${CodeGenerator.defaultValue(dt)} : $getter; """.stripMargin ExprCode(code, JavaCode.isNullVariable(isNull), JavaCode.variable(value, dt)) } else { ExprCode(code"$javaType $value = $getter;", FalseLiteral, JavaCode.variable(value, dt)) } } private def optionalCode(condition: Boolean, code: => String): Seq[String] = { if (condition) Seq(code) else Seq.empty } }
bravo-zhang/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/GenerateExec.scala
Scala
apache-2.0
12,294
trait Assoc[T] {
  type U
  def foo(t: T): U
}

trait Link[T, A]

case class Foo(i: Int)
object Foo {
  println(s"Foo companion")
  erased implicit val barLink: Link[Foo, FooAssoc.type] = null
}

implicit object FooAssoc extends Assoc[Foo] {
  println(s"FooAssoc")
  type U = Int
  def foo(t: Foo): Int = t.i
}

import compiletime.summonFrom

transparent inline def link[T]: Any = summonFrom {
  case _: Link[T, s] =>
    summonFrom {
      case stuff: s => stuff
    }
}

object Test {
  println(s"Test")
  def main(args: Array[String]): Unit = {
    val foo = Foo(23)
    println(s"foo: $foo")
    val assoc = link[Foo]
    val res: Int = assoc.foo(foo)
    println(s"assoc: ${res}")
  }
}
dotty-staging/dotty
tests/run-custom-args/companion-loading.scala
Scala
apache-2.0
708
package com.jsuereth.pgp
package cli

import sbt._
import sbt.complete._
import sbt.complete.DefaultParsers._
import CommonParsers._

case class ExportPublicKey(id: String) extends PgpCommand {
  def run(ctx: PgpCommandContext): Unit = {
    val key = (ctx.publicKeyRing.findPubKeyRing(id) getOrElse sys.error("Could not find key: " + id))
    ctx.output(key.saveToString)
  }
  override def isReadOnly: Boolean = true
}

object ExportPublicKey {
  def parser(ctx: PgpStaticContext): Parser[ExportPublicKey] = {
    (token("export-pub-key") ~ Space) ~> existingKeyIdOrUser(ctx) map ExportPublicKey.apply
  }
}
voetha/sbt-pgp
pgp-plugin/src/main/scala/com/jsuereth/pgp/cli/ExportKey.scala
Scala
bsd-3-clause
616
package com.socrata.querycoordinator.caching import scala.language.implicitConversions import scala.collection.immutable.{SortedMap, SortedSet} import java.nio.charset.StandardCharsets import java.io.{ByteArrayOutputStream, DataOutputStream} import java.security.MessageDigest import com.rojoma.json.v3.ast.{JObject, JString} import com.rojoma.json.v3.io.CompactJsonWriter import com.socrata.soql.stdlib.{Context, UserContext} import com.socrata.soql.types.SoQLFloatingTimestamp object Hasher { trait ImplicitlyByteable { def asBytes: Array[Byte] } object ImplicitlyByteable { implicit def implicitlyByteable(s: String): ImplicitlyByteable = new ImplicitlyByteable { override def asBytes: Array[Byte] = s.getBytes(StandardCharsets.UTF_8) } implicit def implicitlyByteable(bs: Array[Byte]): ImplicitlyByteable = new ImplicitlyByteable { override def asBytes: Array[Byte] = bs.clone() } implicit def implicitlyBytable(optStr: Option[String]): ImplicitlyByteable = new ImplicitlyByteable { override def asBytes: Array[Byte] = optStr.toString.getBytes(StandardCharsets.UTF_8) } implicit def implicitlyBytable(n: Long): ImplicitlyByteable = new ImplicitlyByteable { override def asBytes: Array[Byte] = { val os = new Array[Byte](8) os(0) = (n >> 56).toByte os(1) = (n >> 48).toByte os(2) = (n >> 40).toByte os(3) = (n >> 32).toByte os(4) = (n >> 24).toByte os(5) = (n >> 16).toByte os(6) = (n >> 8).toByte os(7) = n.toByte os } } implicit def implicitlyByteable(ctx: Context): ImplicitlyByteable = new ImplicitlyByteable { override def asBytes: Array[Byte] = { val baos = new ByteArrayOutputStream val dos = new DataOutputStream(baos) val Context(system, UserContext(text, bool, num, float, fixed)) = ctx // n.b., 255 and 254 are bytes that do not appear in UTF-8-encoded text dos.writeInt(system.size) for(k <- system.keys.to[SortedSet]) { dos.write(k.getBytes(StandardCharsets.UTF_8)) dos.write(255) dos.write(system(k).getBytes(StandardCharsets.UTF_8)) dos.write(255) } dos.writeInt(text.size) for(k <- text.keys.to[SortedSet]) { dos.write(k.getBytes(StandardCharsets.UTF_8)) dos.write(255) dos.write(text(k).value.getBytes(StandardCharsets.UTF_8)) dos.write(255) } dos.writeInt(bool.size) for(k <- bool.keys.to[SortedSet]) { dos.write(k.getBytes(StandardCharsets.UTF_8)) dos.write(if(bool(k).value) 255 else 254) } dos.writeInt(num.size) for(k <- num.keys.to[SortedSet]) { dos.write(k.getBytes(StandardCharsets.UTF_8)) dos.write(255) dos.write(num(k).toString.getBytes(StandardCharsets.UTF_8)) // ick dos.write(255) } dos.writeInt(float.size) for(k <- float.keys.to[SortedSet]) { dos.write(k.getBytes(StandardCharsets.UTF_8)) dos.write(255) dos.write(SoQLFloatingTimestamp.StringRep(float(k).value).getBytes(StandardCharsets.UTF_8)) // blech dos.write(255) } dos.writeInt(fixed.size) for(k <- fixed.keys.to[SortedSet]) { dos.write(k.getBytes(StandardCharsets.UTF_8)) dos.write(255) dos.writeLong(fixed(k).value.getMillis) // fixed size, doesn't need a terminator } dos.flush() baos.toByteArray } } } def hash(items: ImplicitlyByteable*): Array[Byte] = { val md = MessageDigest.getInstance("SHA-256") val lenBuf = new Array[Byte](4) items.foreach { item => val bs = item.asBytes val len = bs.length lenBuf(0) = (len >> 24).toByte lenBuf(1) = (len >> 16).toByte lenBuf(2) = (len >> 8).toByte lenBuf(3) = len.toByte md.update(lenBuf) md.update(bs) } md.digest() } }
socrata-platform/query-coordinator
query-coordinator/src/main/scala/com/socrata/querycoordinator/caching/Hasher.scala
Scala
apache-2.0
4,027
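A minimal usage sketch for the Hasher entry above (assuming it is on the classpath): heterogeneous inputs are converted via the ImplicitlyByteable instances, length-prefixed, and folded into a single SHA-256 digest.

import com.socrata.querycoordinator.caching.Hasher

// String, Long and Option[String] all have ImplicitlyByteable conversions defined above.
val digest: Array[Byte] = Hasher.hash("select * from t", 42L, Some("en_US"))

// Render the 32-byte digest as 64 hex characters, e.g. for use as a cache key.
val hex: String = digest.map(b => f"$b%02x").mkString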
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.planner.plan.rules.physical.stream import org.apache.flink.table.planner.calcite.FlinkTypeFactory import org.apache.flink.table.planner.plan.`trait`.FlinkRelDistribution import org.apache.flink.table.planner.plan.nodes.FlinkConventions import org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalRank import org.apache.flink.table.planner.plan.nodes.physical.stream.{StreamPhysicalDeduplicate, StreamPhysicalRank} import org.apache.flink.table.runtime.operators.rank.{ConstantRankRange, RankType} import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall} import org.apache.calcite.rel.`type`.RelDataType import org.apache.calcite.rel.convert.ConverterRule import org.apache.calcite.rel.{RelCollation, RelNode} /** * Rule that matches [[FlinkLogicalRank]] which is sorted by time attribute and * limits 1 and its rank type is ROW_NUMBER, and converts it to [[StreamPhysicalDeduplicate]]. * * NOTES: Queries that can be converted to [[StreamPhysicalDeduplicate]] could be converted to * [[StreamPhysicalRank]] too. [[StreamPhysicalDeduplicate]] is more efficient than * [[StreamPhysicalRank]] due to mini-batch and less state access. * * e.g. * 1. {{{ * SELECT a, b, c FROM ( * SELECT a, b, c, proctime, * ROW_NUMBER() OVER (PARTITION BY a ORDER BY proctime ASC) as row_num * FROM MyTable * ) WHERE row_num <= 1 * }}} will be converted to StreamExecDeduplicate which keeps first row in proctime. * * 2. {{{ * SELECT a, b, c FROM ( * SELECT a, b, c, rowtime, * ROW_NUMBER() OVER (PARTITION BY a ORDER BY rowtime DESC) as row_num * FROM MyTable * ) WHERE row_num <= 1 * }}} will be converted to StreamExecDeduplicate which keeps last row in rowtime. 
*/ class StreamPhysicalDeduplicateRule extends ConverterRule( classOf[FlinkLogicalRank], FlinkConventions.LOGICAL, FlinkConventions.STREAM_PHYSICAL, "StreamPhysicalDeduplicateRule") { override def matches(call: RelOptRuleCall): Boolean = { val rank: FlinkLogicalRank = call.rel(0) StreamPhysicalDeduplicateRule.canConvertToDeduplicate(rank) } override def convert(rel: RelNode): RelNode = { val rank = rel.asInstanceOf[FlinkLogicalRank] val requiredDistribution = if (rank.partitionKey.isEmpty) { FlinkRelDistribution.SINGLETON } else { FlinkRelDistribution.hash(rank.partitionKey.toList) } val requiredTraitSet = rel.getCluster.getPlanner.emptyTraitSet() .replace(FlinkConventions.STREAM_PHYSICAL) .replace(requiredDistribution) val convInput: RelNode = RelOptRule.convert(rank.getInput, requiredTraitSet) // order by timeIndicator desc ==> lastRow, otherwise is firstRow val fieldCollation = rank.orderKey.getFieldCollations.get(0) val isLastRow = fieldCollation.direction.isDescending val fieldType = rank.getInput().getRowType.getFieldList .get(fieldCollation.getFieldIndex).getType val isRowtime = FlinkTypeFactory.isRowtimeIndicatorType(fieldType) val providedTraitSet = rel.getTraitSet.replace(FlinkConventions.STREAM_PHYSICAL) new StreamPhysicalDeduplicate( rel.getCluster, providedTraitSet, convInput, rank.partitionKey.toArray, isRowtime, isLastRow) } } object StreamPhysicalDeduplicateRule { val RANK_INSTANCE = new StreamPhysicalDeduplicateRule /** * Whether the given rank could be converted to [[StreamPhysicalDeduplicate]]. * * Returns true if the given rank is sorted by time attribute and limits 1 * and its RankFunction is ROW_NUMBER, else false. * * @param rank The [[FlinkLogicalRank]] node * @return True if the input rank could be converted to [[StreamPhysicalDeduplicate]] */ def canConvertToDeduplicate(rank: FlinkLogicalRank): Boolean = { val sortCollation = rank.orderKey val rankRange = rank.rankRange val isRowNumberType = rank.rankType == RankType.ROW_NUMBER val isLimit1 = rankRange match { case rankRange: ConstantRankRange => rankRange.getRankStart == 1 && rankRange.getRankEnd == 1 case _ => false } val inputRowType = rank.getInput.getRowType val isSortOnTimeAttribute = sortOnTimeAttribute(sortCollation, inputRowType) !rank.outputRankNumber && isLimit1 && isSortOnTimeAttribute && isRowNumberType } private def sortOnTimeAttribute( sortCollation: RelCollation, inputRowType: RelDataType): Boolean = { if (sortCollation.getFieldCollations.size() != 1) { false } else { val firstSortField = sortCollation.getFieldCollations.get(0) val fieldType = inputRowType.getFieldList.get(firstSortField.getFieldIndex).getType FlinkTypeFactory.isProctimeIndicatorType(fieldType) || FlinkTypeFactory.isRowtimeIndicatorType(fieldType) } } }
tillrohrmann/flink
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/stream/StreamPhysicalDeduplicateRule.scala
Scala
apache-2.0
5,650
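The convertibility check above reduces to three conditions: the rank function is ROW_NUMBER, the rank range is the constant 1..1, and there is exactly one sort field which is a proctime or rowtime attribute. The following framework-free sketch mirrors that predicate using simplified, hypothetical stand-in types (SortField, RankNode); it illustrates the logic of canConvertToDeduplicate but is not the Flink planner API.

sealed trait RankKind
case object RowNumber extends RankKind
case object OtherRank extends RankKind

final case class SortField(descending: Boolean, isTimeAttribute: Boolean)
final case class RankNode(
    kind: RankKind,
    rankStart: Long,
    rankEnd: Long,
    outputRankNumber: Boolean,
    orderKey: List[SortField])

object DeduplicateCheckSketch {
  // Mirrors canConvertToDeduplicate: ROW_NUMBER, constant rank range 1..1,
  // no rank number in the output, and a single sort field on a time attribute.
  def canConvertToDeduplicate(rank: RankNode): Boolean = {
    val isRowNumber = rank.kind == RowNumber
    val isLimit1 = rank.rankStart == 1L && rank.rankEnd == 1L
    val sortOnTime = rank.orderKey match {
      case single :: Nil => single.isTimeAttribute
      case _             => false
    }
    !rank.outputRankNumber && isRowNumber && isLimit1 && sortOnTime
  }

  def main(args: Array[String]): Unit = {
    val dedupLike = RankNode(RowNumber, 1L, 1L, outputRankNumber = false,
      orderKey = List(SortField(descending = true, isTimeAttribute = true)))
    println(canConvertToDeduplicate(dedupLike)) // prints: true
  }
}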
package org.jetbrains.plugins.scala.highlighter import junit.framework.TestCase class ScalaSyntaxHighlighterTest extends TestCase { def testScalaSyntaxHighlighterObjectFieldsAreSuccessfullyInitialized(): Unit = ScalaSyntaxHighlighter.toString }
JetBrains/intellij-scala
scala/scala-impl/test/org/jetbrains/plugins/scala/highlighter/ScalaSyntaxHighlighterTest.scala
Scala
apache-2.0
253
package auth

import org.scalatest.{FlatSpec, Matchers}
import twitter4j.{Query, QueryResult, RateLimitStatus, TwitterFactory}

/**
 * The last test is volatile and depends on the live API, but the
 * authentication should be able to create application-like Twitter
 * instances rather than user-like authentications (application limits
 * are higher).
 *
 * Created by @davidelnunes on 05-02-2015.
 */
class TestTwitterAuth extends FlatSpec with Matchers {

  val twitterAuth: ToolConfig = ToolConfig("twitter-tools")

  "TwitterAuth" should "read the API keys from a configuration file" in {
    twitterAuth.apiConfig.get(ToolConfig.ConsumerKeyK) should be ('defined)
    twitterAuth.apiConfig.get(ToolConfig.ConsumerSecretK) should be ('defined)
    twitterAuth.apiConfig.get(ToolConfig.AccessKeyK) should be ('defined)
    twitterAuth.apiConfig.get(ToolConfig.AccessSecretK) should be ('defined)
  }

  "The authentication object" should "be able to retrieve a token" in {
    val token = twitterAuth.oauth2Token
    token should be ('defined)
  }

  "The authentication object" should "provide a configuration for the application" in {
    val cfg = twitterAuth.twitterCfg
    val tf = new TwitterFactory(cfg)
    val twitter = tf.getInstance()
  }

  "Twitter Client" should "be authenticated as an application and give you appropriate limits" in {
    val cfg = twitterAuth.twitterCfg
    val tf = new TwitterFactory(cfg)
    val twitter = tf.getInstance()

    val status: RateLimitStatus = twitter.getRateLimitStatus().get("/search/tweets")
    // current API says that
    status.getLimit should be (450)
  }

  "One Query" should "decrease the remaining queries by 1" in {
    val cfg = twitterAuth.twitterCfg
    val tf = new TwitterFactory(cfg)
    val twitter = tf.getInstance()

    val query: Query = new Query("lang:en AND " + "\"hello world\"")
    query.setCount(100)

    val status1: RateLimitStatus = twitter.getRateLimitStatus().get("/search/tweets")
    val result: QueryResult = twitter.search(query)
    result.getCount should be (100)
    val status2: RateLimitStatus = twitter.getRateLimitStatus().get("/search/tweets")
    // current API says that
    status2.getRemaining should be < (status1.getRemaining)
  }
}
davidelnunes/TwitterTools
src/test/scala/auth/TestTwitterAuth.scala
Scala
gpl-3.0
2,229
package com.arcusys.learn.scorm.manifest.sequencing.storage.impl import com.arcusys.learn.storage.impl.{ EntityStorageExt, KeyedEntityStorageExt } import com.arcusys.valamis.lesson.scorm.model.manifest.ChildrenSelection import com.arcusys.valamis.lesson.scorm.storage.sequencing.ChildrenSelectionStorage /** * User: Yulia.Glushonkova * Date: 02.04.13 */ trait ChildrenSelectionEntityStorage extends ChildrenSelectionStorage with EntityStorageExt[ChildrenSelection] { def create(sequencingID: Int, entity: ChildrenSelection) { create(entity, "sequencingID" -> sequencingID) } def get(sequencingID: Int): Option[ChildrenSelection] = getOne("sequencingID" -> sequencingID) def delete(sequencingID: Int) { delete("sequencingID" -> sequencingID) } }
ViLPy/Valamis
learn-persistence-api/src/main/scala/com/arcusys/learn/scorm/manifest/sequencing/storage/impl/ChildrenSelectionEntityStorage.scala
Scala
lgpl-3.0
762
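The trait above adapts a generic keyed storage to a single "sequencingID" key with create/get/delete operations. As a rough illustration of that shape, here is a hypothetical in-memory variant; ChildrenSelectionRow and its fields are invented for the sketch and do not match the real ChildrenSelection model.

import scala.collection.mutable

// Hypothetical model row; the real ChildrenSelection has different fields.
final case class ChildrenSelectionRow(selectionCount: Option[Int], reorderChildren: Boolean)

class InMemoryChildrenSelectionStorage {
  private val bySequencingId = mutable.Map.empty[Int, ChildrenSelectionRow]

  def create(sequencingID: Int, entity: ChildrenSelectionRow): Unit =
    bySequencingId.update(sequencingID, entity)

  def get(sequencingID: Int): Option[ChildrenSelectionRow] =
    bySequencingId.get(sequencingID)

  def delete(sequencingID: Int): Unit =
    bySequencingId.remove(sequencingID)
}

object InMemoryChildrenSelectionStorageDemo {
  def main(args: Array[String]): Unit = {
    val storage = new InMemoryChildrenSelectionStorage
    storage.create(42, ChildrenSelectionRow(Some(3), reorderChildren = false))
    println(storage.get(42)) // Some(ChildrenSelectionRow(Some(3),false))
    storage.delete(42)
    println(storage.get(42)) // None
  }
}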
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.deploy.master import com.codahale.metrics.{Gauge, MetricRegistry} import org.apache.spark.metrics.source.Source private[spark] class MasterSource(val master: Master) extends Source { override val metricRegistry = new MetricRegistry() override val sourceName = "master" // Gauge for worker numbers in cluster metricRegistry.register(MetricRegistry.name("workers"), new Gauge[Int] { override def getValue: Int = master.workers.size }) // Gauge for alive worker numbers in cluster metricRegistry.register(MetricRegistry.name("aliveWorkers"), new Gauge[Int]{ override def getValue: Int = master.workers.filter(_.state == WorkerState.ALIVE).size }) // Gauge for application numbers in cluster metricRegistry.register(MetricRegistry.name("apps"), new Gauge[Int] { override def getValue: Int = master.apps.size }) // Gauge for waiting application numbers in cluster metricRegistry.register(MetricRegistry.name("waitingApps"), new Gauge[Int] { override def getValue: Int = master.waitingApps.size }) }
ArvinDevel/onlineAggregationOnSparkV2
core/src/main/scala/org/apache/spark/deploy/master/MasterSource.scala
Scala
apache-2.0
1,872
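The gauges above follow the standard Dropwizard/Codahale pattern: register a Gauge whose getValue re-reads live state every time the metric is sampled. A standalone sketch of the same pattern, assuming metrics-core is on the classpath (as it already is for the file above) and using a plain mutable set in place of the Spark master's worker list:

import com.codahale.metrics.{Gauge, MetricRegistry}
import scala.collection.mutable

object GaugeSketch {
  def main(args: Array[String]): Unit = {
    val registry = new MetricRegistry()
    val workers = mutable.Set("worker-1", "worker-2") // stand-in for master.workers

    // The gauge re-reads the underlying collection every time it is sampled.
    val name = MetricRegistry.name("sketch", "workers")
    registry.register(name, new Gauge[Int] {
      override def getValue: Int = workers.size
    })

    println(registry.getGauges.get(name).getValue) // 2
    workers += "worker-3"
    println(registry.getGauges.get(name).getValue) // 3
  }
}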
import gh2013.events._ import gh2013.models.EventBody import net.liftweb.json.JsonAST.JValue package object gh2013 { def parse(event: JValue): Option[GH2013Event] = { val parsers = Seq( parser(PushEventParser)(_), parser(WatchEventParser)(_), parser(CreateEventParser)(_), parser(IssuesEventParser)(_), parser(ForkEventParser)(_), PublicEventParser.parse(_), parser(PullRequestEventParser)(_), parser(DeleteEventParser)(_), GistEventParser.parse(_), FollowEventParser.parse(_), parser(GollumEventParser)(_), parser(CommitCommentEventParser)(_), parser(MemberEventParser)(_), parser(DownloadEventParser)(_), // parser(ForkApplyEventParser)(_), parser(IssueCommentEventParser)(_) ) //println(parsers.map(p => p(event))) parsers .toStream .flatMap(parser => parser(event)) .headOption } def parser[E,P](e: EventParser[E,P])(json: JValue): Option[GH2013Event] = { val `type` = gh3.node2String(json)("type") if(`type`.isEmpty || `type`.get != e.name) return None val event = EventBody(json) val payload = e.parsePayload(json \\ "payload") if(event.isDefined && payload.isDefined) Some(e.make(event.get, payload.get)) else None } }
mgoeminne/github_etl
src/main/scala/gh2013/package.scala
Scala
mit
1,384
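The parse function above evaluates its candidate parsers lazily and keeps the first success: toStream.flatMap(...).headOption stops as soon as one parser returns Some. A self-contained sketch of that first-success combinator over plain Option-returning functions (all names here are illustrative):

import scala.util.Try

object FirstSuccessSketch {
  // Lazily tries each parser and keeps the first Some, like gh2013.parse above.
  def firstSuccess[A, B](input: A, parsers: Seq[A => Option[B]]): Option[B] =
    parsers.toStream.flatMap(p => p(input)).headOption

  def main(args: Array[String]): Unit = {
    val parseInt: String => Option[Any] = s => Try(s.toInt).toOption
    val parseBool: String => Option[Any] = s => Try(s.toBoolean).toOption
    println(firstSuccess("true", Seq(parseInt, parseBool))) // Some(true)
    println(firstSuccess("oops", Seq(parseInt, parseBool))) // None
  }
}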
package ch.epfl.lamp.grading import org.scalatest.{Tag, FunSuiteLike} import java.security._ import java.util.concurrent._ trait GradingSuite extends FunSuiteLike { /** * For the real grading, ScalaTest is executed in a separate JVM by the ScalaTestRunner * with a security manager enabled. You can run the separte JVM using the `scalaTest` task. * In this case we run tests in a security manager, and using a test timeout. * * However, you can still run the tests in SBT's JVM using the standard `test` task. In * that case, we can't use the security manager (it has not been activated in the command * line), and also we don't use the timeout. */ val securityEnabled: Boolean = util.Properties.propIsSet("java.security.manager") // Timeout per test, defined in Settings.scala. There's also a global timeout // when running Scalatest in a separate JVM, see ScalaTestRunner val individualTestTimeout: Int = util.Properties.propOrEmpty("scalatest.individualTestTimeout").toInt // List of files that are readable under the security manager val readableFiles: List[String] = util.Properties.propOrEmpty("scalatest.readableFiles").split(":").filterNot(_.isEmpty).toList val defaultWeight: Int = util.Properties.propOrEmpty("scalatest.defaultWeight").toInt /** * Run `task` and abort if it takes too long. Seems the only way to do it * is using (deprecated) Thread.stop() * http://stackoverflow.com/questions/5715235/java-set-timeout-on-a-certain-block-of-code */ def timeoutTask(task: => Unit): Unit = { val executor = Executors.newSingleThreadExecutor() val t = new Thread { override def run(): Unit = task } val future: Future[Unit] = executor.submit(new Callable[Unit] { override def call(): Unit = t.run() }) try { future.get(individualTestTimeout, TimeUnit.SECONDS) } catch { case to: TimeoutException => t.stop() future.cancel(true) throw to } finally { executor.shutdown() } } def runWithoutPrivileges(op: => Unit): Unit = { val action = new PrivilegedAction[Unit] { def run { try { op } catch { case td: java.lang.ThreadDeath => // appears when there are timeouts. 
no need to do anything, timeouts // are caught below () case err: StackOverflowError => // re-throw as error - gives nicer feedback output val trace = err.getStackTrace.take(20).mkString( "Stack trace:\\n", "\\n", "\\n\\nReported through:") throw new Exception( s"Error occurred during test execution: $err\\n$trace") } } } val originalContext = AccessController.getContext val combiner = new DomainCombiner { def combine(p1: Array[ProtectionDomain], p2: Array[ProtectionDomain]): Array[ProtectionDomain] = { // revoke most permissions val permissions = new Permissions() permissions.add(new java.util.PropertyPermission("*", "read")) for (file <- readableFiles) permissions.add(new java.io.FilePermission(file, "read")) permissions.add(new java.lang.reflect.ReflectPermission("suppressAccessChecks")) permissions.add(new java.lang.RuntimePermission("getenv.*", "read")) permissions.add(new java.lang.RuntimePermission("setContextClassLoader")) permissions.add(new java.lang.RuntimePermission("modifyThread")) permissions.add(new java.lang.RuntimePermission("accessDeclaredMembers")) Array(new ProtectionDomain(null, permissions)) } } val context = new AccessControlContext(originalContext, combiner) try { timeoutTask(AccessController.doPrivileged(action, context)) } catch { case to: TimeoutException => throw new Exception( s"Test timeout: aborted after $individualTestTimeout " + "seconds; Check for infinite loops!") } } private var totalWeight: Int = 0 override def suiteName = { assert(!super.suiteName.contains("::")) s"${super.suiteName}::$totalWeight" } def test(testName: String, weight: Int, testTags: Tag*)(testFun: => Unit): Unit = { assert(!testName.contains("::")) val name = s"${super.suiteName}::$testName::$weight" super.test(name, testTags: _*) { if (securityEnabled) runWithoutPrivileges(testFun) else testFun } totalWeight += weight } override def test(testName: String, testTags: Tag*)(testFun: => Unit): Unit = { test(testName, defaultWeight, testTags: _*)(testFun) } def ignore(testName: String, weight: Int, testTags: Tag*)(testFun: => Unit): Unit = { val name = weight.toString + "\\n" + testName super.ignore(name, testTags: _*) { if (securityEnabled) runWithoutPrivileges(testFun) else testFun } } override def ignore(testName: String, testTags: Tag*)(testFun: => Unit): Unit = { ignore(testName, defaultWeight, testTags: _*)(testFun) } }
sbt-coursera/scala-grading
runtime/src/main/scala/ch/epfl/lamp/grading/GradingSuite.scala
Scala
bsd-3-clause
5,062
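timeoutTask above bounds a block of work by submitting it to a single-thread executor and waiting with Future.get(timeout, unit). The standalone sketch below shows just that timeout mechanism, leaving out the security-manager and Thread.stop() parts; runWithTimeout is an illustrative name, not part of the grading framework.

import java.util.concurrent.{Callable, Executors, TimeUnit, TimeoutException}

object TimeoutSketch {
  def runWithTimeout(seconds: Long)(task: => Unit): Unit = {
    val executor = Executors.newSingleThreadExecutor()
    val future = executor.submit(new Callable[Unit] {
      override def call(): Unit = task
    })
    try future.get(seconds, TimeUnit.SECONDS)
    catch {
      case _: TimeoutException =>
        future.cancel(true) // interrupts the worker; the task must be interruptible to really stop
        throw new Exception(s"aborted after $seconds seconds")
    } finally executor.shutdown()
  }

  def main(args: Array[String]): Unit = {
    runWithTimeout(5) { println("fast task finishes fine") }
    try runWithTimeout(1) { Thread.sleep(10000) }
    catch { case e: Exception => println(e.getMessage) }
  }
}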
trait Symbols { trait Symbol } object Types { self: Symbols => // As of 11.10.2009, this would multi-resolve to scala.Symbol and Symbols.Symbol. trait B extends <ref>Symbol }
LPTK/intellij-scala
testdata/resolve/nonlocal/selfTypeShadow.scala
Scala
apache-2.0
199
package com.prezi.haskell.gradle.tasks import java.io.File import com.prezi.haskell.gradle.ApiHelper._ import com.prezi.haskell.gradle.external.SnapshotVersions import com.prezi.haskell.gradle.model.{Sandbox, StackYamlWriter} import com.prezi.haskell.gradle.util.FileLock import org.gradle.api.tasks.TaskAction import org.gradle.api.{DefaultTask, GradleException} import scala.collection.JavaConverters._ import resource._ class GenerateStackYaml extends DefaultTask with HaskellProjectSupport with HaskellDependencies with UsingHaskellTools with UsingGit with DependsOnStoreDependentSandboxes with TaskLogging { private var targetFile_ : Option[File] = None findCabalFile() match { case Some(cabalFile) => getInputs.file(cabalFile) case None => } def targetFile = targetFile_ def targetFile_=(value: Option[File]): Unit = { targetFile_ = value if (value.isDefined) { getOutputs.file(value.get) } } @TaskAction def run(): Unit = { val fileLock = new FileLock(new File(getProject.getRootProject.getBuildDir, "generate-yaml.lock")) fileLock.lock() try { needsConfigurationSet needsGitSet needsToolsSet if (targetFile.isEmpty) { throw new IllegalStateException("targetFile is not specified") } debug(s"GenerateStackYaml dependentSandboxes: $dependentSandboxes") generateContent(targetFile.get, dependentSandboxes) } finally { fileLock.release() } } private def generateContent(target: File, sandboxes: List[Sandbox]): Unit = { for (builder <- managed(new StackYamlWriter(target))) { val pkgFlags = haskellExtension .getPackageFlags .asScala .toMap .filter { case (_, value) => !value.isEmpty } .map { case (key, value) => (key, value.asScala.toMap) } if (pkgFlags.nonEmpty) { builder.flags(pkgFlags) } builder.packages(List(".")) builder.extraPackageDbs(sandboxes.map(_.packageDb.getAbsolutePath)) val isOffline = getProject.getGradle.getStartParameter.isOffline val snapshotVersions = new SnapshotVersions(isOffline, haskellExtension.getOverriddenSnapshotVersionsCacheDir.map(path => new File(path)), haskellExtension.getStackRoot, getProject.exec, tools.get, git.get) val deps = snapshotVersions.run(haskellExtension.snapshotId, getCabalFile()) builder.extraDeps(deps) val binPath = getProject.getBuildDir </> "sandbox" </> "files" </> "bin" binPath.mkdirs() builder.localBinPath(binPath.getAbsolutePath) builder.ghcVersion(haskellExtension.parsedGHCVersion) } } private def findCabalFile(): Option[File] = getProject.getProjectDir.listFiles().find(_.getName.endsWith(".cabal")) private def getCabalFile(): File = findCabalFile() match { case Some(file) => file case None => throw new GradleException(s"Could not find any .cabal files in ${getProject.getRootDir.getAbsolutePath}") } }
prezi/gradle-haskell-plugin
src/main/scala/com/prezi/haskell/gradle/tasks/GenerateStackYaml.scala
Scala
apache-2.0
3,005
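The task above serializes concurrent stack.yaml generation across subprojects with a lock file (via the plugin's own FileLock helper). The same idea can be expressed with plain java.nio file locks; the sketch below is illustrative and independent of the plugin's helper class.

import java.nio.channels.FileChannel
import java.nio.file.{Files, Path, StandardOpenOption}

object LockFileSketch {
  // Holds an exclusive OS-level lock on lockFile while body runs.
  def withFileLock[A](lockFile: Path)(body: => A): A = {
    val channel = FileChannel.open(lockFile, StandardOpenOption.CREATE, StandardOpenOption.WRITE)
    val lock = channel.lock() // blocks until no other process holds the lock
    try body
    finally {
      lock.release()
      channel.close()
    }
  }

  def main(args: Array[String]): Unit = {
    val lockFile = Files.createTempFile("generate-yaml", ".lock")
    withFileLock(lockFile) {
      println("only one process generates stack.yaml at a time")
    }
  }
}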
package com.eclipsesource.schema import com.eclipsesource.schema.drafts.Version7 import com.eclipsesource.schema.test.JsonSpec import org.specs2.mutable.Specification class IfThenElseSpec extends Specification with JsonSpec { import Version7._ implicit val validator = SchemaValidator(Some(Version7)) validate("if-then-else", "draft7") }
eclipsesource/play-json-schema-validator
src/test/scala/com/eclipsesource/schema/IfThenElseSpec.scala
Scala
apache-2.0
346
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.server import kafka.utils.Logging class KafkaServerStartable(val serverConfig: KafkaConfig) extends Logging { private val server = new KafkaServer(serverConfig) def startup() { try { server.startup() } catch { case e: Throwable => fatal("Fatal error during KafkaServerStartable startup. Prepare to shutdown", e) // KafkaServer already calls shutdown() internally, so this is purely for logging & the exit code System.exit(1) } } def shutdown() { try { server.shutdown() } catch { case e: Throwable => fatal("Fatal error during KafkaServerStable shutdown. Prepare to halt", e) // Calling exit() can lead to deadlock as exit() can be called multiple times. Force exit. Runtime.getRuntime.halt(1) } } /** * Allow setting broker state from the startable. * This is needed when a custom kafka server startable want to emit new states that it introduces. */ def setServerState(newState: Byte) { server.brokerState.newState(newState) } def awaitShutdown() = server.awaitShutdown }
usakey/kafka
core/src/main/scala/kafka/server/KafkaServerStartable.scala
Scala
apache-2.0
1,941
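KafkaServerStartable above is meant to be driven from a launcher: start the broker, register a shutdown hook, then block on awaitShutdown. A hedged sketch of such a launcher follows; the Properties-based KafkaConfig constructor and the property keys are assumptions that depend on the Kafka version and on a local setup.

import java.util.Properties
import kafka.server.{KafkaConfig, KafkaServerStartable}

object KafkaStartableSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put("zookeeper.connect", "localhost:2181") // hypothetical local setup
    props.put("broker.id", "0")
    props.put("log.dirs", "/tmp/kafka-logs")

    // Assumption: this Kafka version exposes a Properties-based KafkaConfig constructor.
    val startable = new KafkaServerStartable(new KafkaConfig(props))

    Runtime.getRuntime.addShutdownHook(new Thread {
      override def run(): Unit = startable.shutdown()
    })

    startable.startup()
    startable.awaitShutdown()
  }
}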
package co.spendabit.webapp.forms.ui.bootstrap import co.spendabit.XMLHelpers import co.spendabit.webapp.forms.controls._ import co.spendabit.webapp.forms.v2.{BaseWebForm, WebForm1} import org.scalatest.FunSuite class HorizontalFormTests extends FunSuite with XMLHelpers { test("column widths are properly calculated/rendered") { class Renderer extends HorizontalForm { override protected val leftColumnWidth: Int = 3 } val f = new BaseWebForm[String] with WebForm1[String] { val action = "/go" val method = POST val fields = TextInput(label = "Color", name = "color") } val markup = f.html(new Renderer) val form = markup \\\\ "form" val formGroup = form.head.child.filter(_.isInstanceOf[xml.Elem]).head val Seq(column1, column2): Seq[xml.Node] = formGroup.child.filter(_.isInstanceOf[xml.Elem]) assert(getAttr(column1, "class").get.contains("col-sm-3")) assert(getAttr(column2, "class").get.contains("col-sm-9")) } }
spendabit/webapp-tools
test/co/spendabit/webapp/forms/ui/bootstrap/HorizontalFormTests.scala
Scala
unlicense
992
package debop4s.timeperiod.base

import debop4s.timeperiod._
import debop4s.timeperiod.tests.AbstractTimeFunSuite
import debop4s.timeperiod.utils.Times._

/**
 * debop4s.timeperiod.tests.debop4s.redis.base.TimePeriodContainerTest
 *
 * @author 배성혁 sunghyouk.bae@gmail.com
 * @since 2014. 2. 19. 4:11 PM
 */
class TimePeriodContainerFunSuite extends AbstractTimeFunSuite {

  test("constructor") {
    val period1 = TimeRange(asDate(2011, 4, 15), asDate(2011, 4, 20))
    val period2 = TimeRange(asDate(2011, 4, 22), asDate(2011, 4, 25))

    val container = TimePeriodContainer(period1, period2)
    log.trace(s"container=$container")
    container.size should equal(2)

    // Duplicate values are not allowed
    val container2 = TimePeriodContainer(period1, period2, container)
    assert(container2.length == 2)
  }
}
debop/debop4s
debop4s-timeperiod/src/test/scala/debop4s/timeperiod/base/TimePeriodContainerFunSuite.scala
Scala
apache-2.0
843
/*********************************************************************** * Copyright (c) 2013-2019 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 * which accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. ***********************************************************************/ package org.locationtech.geomesa.accumulo.tools.stats import com.beust.jcommander.Parameters import org.locationtech.geomesa.accumulo.data.AccumuloDataStore import org.locationtech.geomesa.accumulo.tools.stats.AccumuloStatsAnalyzeCommand.AccumuloStatsAnalyzeParams import org.locationtech.geomesa.accumulo.tools.{AccumuloDataStoreCommand, AccumuloDataStoreParams} import org.locationtech.geomesa.tools.RequiredTypeNameParam import org.locationtech.geomesa.tools.stats.StatsAnalyzeCommand import org.locationtech.geomesa.tools.stats.StatsAnalyzeCommand.StatsAnalyzeParams class AccumuloStatsAnalyzeCommand extends StatsAnalyzeCommand[AccumuloDataStore] with AccumuloDataStoreCommand{ override val params = new AccumuloStatsAnalyzeParams } object AccumuloStatsAnalyzeCommand { @Parameters(commandDescription = "Analyze statistics on a GeoMesa feature type") class AccumuloStatsAnalyzeParams extends StatsAnalyzeParams with AccumuloDataStoreParams with RequiredTypeNameParam }
elahrvivaz/geomesa
geomesa-accumulo/geomesa-accumulo-tools/src/main/scala/org/locationtech/geomesa/accumulo/tools/stats/AccumuloStatsAnalyzeCommand.scala
Scala
apache-2.0
1,448
package demo package components import japgolly.scalajs.react._ import japgolly.scalajs.react.vdom.prefix_<^._ import scala.scalajs.js object CodeExample { object Style { val pageBodyContent = Seq(^.borderRadius := "2px", ^.boxShadow := "0 1px 4px rgba(223, 228, 228, 0.79)", ^.maxWidth := "1024px") val contentDemo = Seq(^.padding := "30px") val contentCode = Seq(^.borderTop := "solid 1px #e0e0e0" ) val title = Seq( ^.paddingBottom := "15px") } val component = ReactComponentB[Props]("codeexample") .render((P, C) => { <.div( P.title.nonEmpty ?= <.h3(P.title,Style.title), <.div(Style.pageBodyContent)( <.div(Style.contentDemo, ^.key := "dan")( C ), <.pre(Style.contentCode, ^.key := "code")( CodeHighLighter(P.code) ) ) ) }) .build case class Props(code: String,title: String) def apply(code: String, title: String = "", ref: js.UndefOr[String] = "", key: js.Any = {})(children: ReactNode*) = component.set(key, ref)(Props(code,title), children) }
tpdi/scalajs-react-components
demo/src/main/scala/demo/components/CodeExample.scala
Scala
apache-2.0
1,098
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.planner.runtime.stream.sql import org.apache.flink.api.common.time.Time import org.apache.flink.api.common.typeinfo.TypeInformation import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2} import org.apache.flink.api.java.typeutils.RowTypeInfo import org.apache.flink.api.scala._ import org.apache.flink.streaming.api.TimeCharacteristic import org.apache.flink.streaming.api.scala.DataStream import org.apache.flink.table.api.{Types, _} import org.apache.flink.table.api.bridge.scala._ import org.apache.flink.table.api.internal.TableEnvironmentInternal import org.apache.flink.table.planner.factories.TestValuesTableFactory import org.apache.flink.table.planner.functions.aggfunctions.{ListAggWithRetractAggFunction, ListAggWsWithRetractAggFunction} import org.apache.flink.table.planner.plan.utils.JavaUserDefinedAggFunctions.VarSumAggFunction import org.apache.flink.table.planner.runtime.batch.sql.agg.{MyPojoAggFunction, VarArgsAggFunction} import org.apache.flink.table.planner.runtime.utils.StreamingWithAggTestBase.AggMode import org.apache.flink.table.planner.runtime.utils.StreamingWithMiniBatchTestBase.MiniBatchMode import org.apache.flink.table.planner.runtime.utils.StreamingWithStateTestBase.StateBackendMode import org.apache.flink.table.planner.runtime.utils.TimeTestUtil.TimestampAndWatermarkWithOffset import org.apache.flink.table.planner.runtime.utils.UserDefinedFunctionTestUtils._ import org.apache.flink.table.planner.runtime.utils._ import org.apache.flink.table.planner.utils.DateTimeTestUtil.{localDate, localDateTime, localTime => mLocalTime} import org.apache.flink.table.runtime.typeutils.BigDecimalTypeInfo import org.apache.flink.types.{Row, RowKind} import org.junit.Assert.assertEquals import org.junit._ import org.junit.runner.RunWith import org.junit.runners.Parameterized import java.lang.{Integer => JInt, Long => JLong} import java.math.{BigDecimal => JBigDecimal} import scala.collection.{Seq, mutable} import scala.util.Random @RunWith(classOf[Parameterized]) class AggregateITCase( aggMode: AggMode, miniBatch: MiniBatchMode, backend: StateBackendMode) extends StreamingWithAggTestBase(aggMode, miniBatch, backend) { val data = List( (1000L, 1, "Hello"), (2000L, 2, "Hello"), (3000L, 3, "Hello"), (4000L, 4, "Hello"), (5000L, 5, "Hello"), (6000L, 6, "Hello"), (7000L, 7, "Hello World"), (8000L, 8, "Hello World"), (20000L, 20, "Hello World")) @Test def testEmptyInputAggregation(): Unit = { val data = new mutable.MutableList[(Int, Int)] data .+= ((1, 1)) data .+= ((2, 2)) data .+= ((3, 3)) val t = failingDataSource(data).toTable(tEnv, 'a, 'b) tEnv.registerTable("T", t) val t1 = tEnv.sqlQuery( "select sum(a), avg(a), min(a), count(a), count(1) from T where a > 9999 group by b") val sink = 
new TestingRetractSink t1.toRetractStream[Row].addSink(sink) env.execute() val expected = List() assertEquals(expected, sink.getRetractResults) } @Test def testShufflePojo(): Unit = { val data = new mutable.MutableList[(Int, Int)] data .+= ((1, 1)) data .+= ((2, 2)) data .+= ((3, 3)) val t = failingDataSource(data).toTable(tEnv, 'a, 'b) tEnv.registerTable("T", t) tEnv.registerFunction("pojoFunc", MyToPojoFunc) val t1 = tEnv.sqlQuery( "select sum(a), avg(a), min(a), count(a), count(1) from T group by pojoFunc(b)") val sink = new TestingRetractSink t1.toRetractStream[Row].addSink(sink) env.execute() val expected = List( "1,1,1,1,1", "2,2,2,1,1", "3,3,3,1,1") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Ignore("[FLINK-12215] Fix this when introduce SqlProcessFunction.") @Test def testEmptyInputAggregationWithoutGroupBy(): Unit = { val data = new mutable.MutableList[(Int, Int)] data .+= ((1, 1)) data .+= ((2, 2)) data .+= ((3, 3)) val t = failingDataSource(data).toTable(tEnv, 'a, 'b) tEnv.registerTable("T", t) val t1 = tEnv.sqlQuery( "select sum(a), avg(a), min(a), count(a), count(1) from T where a > 9999") val sink = new TestingRetractSink t1.toRetractStream[Row].addSink(sink).setParallelism(1) env.execute() val expected = List("null,null,null,0,0") assertEquals(expected, sink.getRetractResults) } @Test def testAggregationWithoutWatermark(): Unit = { // NOTE: Different from AggregateITCase, we do not set stream time characteristic // of environment to event time, so that emitWatermark() actually does nothing. env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime) val data = new mutable.MutableList[(Int, Int)] data .+= ((1, 1)) data .+= ((2, 2)) data .+= ((3, 3)) val t = failingDataSource(data).toTable(tEnv, 'a, 'b) tEnv.registerTable("T", t) val t1 = tEnv.sqlQuery( "select sum(a), avg(a), min(a), count(a), count(1) from T") val sink = new TestingRetractSink t1.toRetractStream[Row].addSink(sink).setParallelism(1) env.execute() val expected = List("6,2,1,3,3") assertEquals(expected, sink.getRetractResults) } @Test def testDistinctGroupBy(): Unit = { val sqlQuery = "SELECT b, " + " SUM(DISTINCT (a * 3)), " + " COUNT(DISTINCT SUBSTRING(c FROM 1 FOR 2))," + " COUNT(DISTINCT c) " + "FROM MyTable " + "GROUP BY b" val t = failingDataSource(TestData.tupleData3).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("MyTable", t) val result = tEnv.sqlQuery(sqlQuery).toRetractStream[Row] val sink = new TestingRetractSink result.addSink(sink) env.execute() val expected = List( "1,3,1,1", "2,15,1,2", "3,45,3,3", "4,102,1,4", "5,195,1,5", "6,333,1,6") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testCountDistinct(): Unit = { val ids = List( 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5) val dateTimes = List( "1970-01-01 00:00:01", "1970-01-01 00:00:02", null, "1970-01-01 00:00:04", "1970-01-01 00:00:05", "1970-01-01 00:00:06", "1970-01-01 00:00:07", null, null, "1970-01-01 00:00:10", "1970-01-01 00:00:11", "1970-01-01 00:00:11", "1970-01-01 00:00:13", "1970-01-01 00:00:14", "1970-01-01 00:00:15") val dates = List( "1970-01-01", "1970-01-02", null, "1970-01-04", "1970-01-05", "1970-01-06", "1970-01-07", null, null, "1970-01-10", "1970-01-11", "1970-01-11", "1970-01-13", "1970-01-14", "1970-01-15") val times = List( "00:00:01", "00:00:02", null, "00:00:04", "00:00:05", "00:00:06", "00:00:07", null, null, "00:00:10", "00:00:11", "00:00:11", "00:00:13", "00:00:14", "00:00:15") val integers = List( "1", "2", null, "4", "5", "6", "7", null, null, "10", "11", "11", 
"13", "14", "15") val chars = List( "A", "B", null, "D", "E", "F", "H", null, null, "K", "L", "L", "N", "O", "P") val data = new mutable.MutableList[Row] for (i <- ids.indices) { val v = integers(i) val decimal = if (v == null) null else new JBigDecimal(v) val int = if (v == null) null else JInt.valueOf(v) val long = if (v == null) null else JLong.valueOf(v) data.+=(Row.of( Int.box(ids(i)), localDateTime(dateTimes(i)), localDate(dates(i)), mLocalTime(times(i)), decimal, int, long, chars(i))) } val inputs = util.Random.shuffle(data) val rowType = new RowTypeInfo( Types.INT, Types.LOCAL_DATE_TIME, Types.LOCAL_DATE, Types.LOCAL_TIME, Types.DECIMAL, Types.INT, Types.LONG, Types.STRING) val t = failingDataSource(inputs)(rowType).toTable(tEnv, 'id, 'a, 'b, 'c, 'd, 'e, 'f, 'g) tEnv.createTemporaryView("T", t) val t1 = tEnv.sqlQuery( s""" |SELECT | id, | count(distinct a), | count(distinct b), | count(distinct c), | count(distinct d), | count(distinct e), | count(distinct f), | count(distinct g) |FROM T GROUP BY id """.stripMargin) val sink = new TestingRetractSink t1.toRetractStream[Row].addSink(sink) env.execute() val expected = List( "1,1,1,1,1,1,1,1", "2,1,1,1,1,1,1,1", "3,3,3,3,3,3,3,3", "4,2,2,2,2,2,2,2", "5,4,4,4,4,4,4,4") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testDistinctWithRetract(): Unit = { // this case covers LongArrayValueWithRetractionGenerator and LongValueWithRetractionGenerator val data = new mutable.MutableList[(Int, Long, String)] data.+=((1, 1L, "A")) data.+=((1, 1L, "A")) data.+=((1, 1L, "A")) data.+=((2, 2L, "B")) data.+=((3, 2L, "B")) data.+=((4, 3L, "C")) data.+=((5, 3L, "C")) data.+=((6, 3L, "C")) data.+=((7, 4L, "B")) data.+=((8, 4L, "A")) data.+=((9, 4L, "D")) data.+=((10, 4L, "E")) data.+=((11, 5L, "A")) data.+=((12, 5L, "B")) // b, count(a) as cnt // 1, 3 // 2, 2 // 3, 3 // 4, 4 // 5, 2 val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("T", t) val sql = """ |SELECT | count(distinct cnt), | sum(distinct cnt), | max(distinct cnt), | min(distinct cnt), | avg(distinct cnt), | count(distinct max_a) |FROM ( | SELECT b, count(a) as cnt, max(a) as max_a | FROM T | GROUP BY b) """.stripMargin val t1 = tEnv.sqlQuery(sql) val sink = new TestingRetractSink t1.toRetractStream[Row].addSink(sink).setParallelism(1) env.execute() val expected = List("3,9,4,2,3,5") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testDistinctAggregateMoreThan64(): Unit = { // this case is used to cover DistinctAggCodeGen#LongArrayValueWithoutRetractionGenerator val data = new mutable.MutableList[(Int, Int)] for (i <- 0 until 100) { for (j <- 0 until 100 - i) { data.+=((j, i)) } } val t = failingDataSource(Random.shuffle(data)).toTable(tEnv, 'a, 'b) tEnv.registerTable("T", t) val distincts = for (i <- 0 until 100) yield { s"count(distinct a) filter (where b = $i)" } val sql = s""" |SELECT | ${distincts.mkString(", ")} |FROM T """.stripMargin val t1 = tEnv.sqlQuery(sql) val sink = new TestingRetractSink t1.toRetractStream[Row].addSink(sink).setParallelism(1) env.execute() val expected = List((1 to 100).reverse.mkString(",")) assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testDistinctAggWithNullValues(): Unit = { val data = new mutable.MutableList[(Int, Long, String)] data.+=((1, 1L, "A")) data.+=((2, 2L, "B")) data.+=((3, 2L, "B")) data.+=((4, 3L, "C")) data.+=((5, 3L, "C")) data.+=((6, 3L, null)) data.+=((7, 3L, "C")) data.+=((8, 4L, "B")) data.+=((9, 4L, null)) data.+=((10, 4L, null)) 
data.+=((11, 4L, "A")) data.+=((12, 4L, "D")) data.+=((13, 4L, null)) data.+=((14, 4L, "E")) data.+=((15, 5L, "A")) data.+=((16, 5L, null)) data.+=((17, 5L, "B")) val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("T", t) tEnv.registerFunction("CntNullNonNull", new CountNullNonNull) val t1 = tEnv.sqlQuery( "SELECT b, count(*), CntNullNonNull(DISTINCT c) FROM T GROUP BY b") val sink = new TestingRetractSink t1.toRetractStream[Row].addSink(sink) env.execute() val expected = List("1,1,1|0", "2,2,1|0", "3,4,1|1", "4,7,4|1", "5,3,2|1") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testGroupByAgg(): Unit = { val data = new mutable.MutableList[(Int, Long, String)] data.+=((1, 1L, "A")) data.+=((2, 2L, "B")) data.+=((3, 2L, "B")) data.+=((4, 3L, "C")) data.+=((5, 3L, "C")) data.+=((6, 3L, "C")) data.+=((7, 4L, "B")) data.+=((8, 4L, "A")) data.+=((9, 4L, "D")) data.+=((10, 4L, "E")) data.+=((11, 5L, "A")) data.+=((12, 5L, "B")) val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("T", t) val t1 = tEnv.sqlQuery("SELECT b, count(c), sum(a) FROM T GROUP BY b") val sink = new TestingRetractSink t1.toRetractStream[Row].addSink(sink) env.execute() val expected = List("1,1,1", "2,2,5", "3,3,15", "4,4,34", "5,2,23") assertEquals(expected.sorted, sink.getRetractResults.sorted) } def testCountWithNullableIfCall(): Unit = { val data = new mutable.MutableList[(Int, Long, String)] data.+=((1, 1L, "A")) data.+=((2, 2L, "B")) data.+=((3, 2L, "B")) data.+=((4, 3L, "C")) data.+=((5, 3L, "C")) data.+=((6, 3L, "C")) data.+=((7, 4L, "B")) data.+=((8, 4L, "A")) data.+=((9, 4L, "D")) data.+=((10, 4L, "E")) data.+=((11, 5L, "A")) data.+=((12, 5L, "B")) val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("T", t) val sql = s""" |select | b | ,count(1) | ,count(if(c in ('A', 'B'), cast(null as integer), 1)) as cnt | ,count(if(c not in ('A', 'B'), 1, cast(null as integer))) as cnt1 |from T |group by b """.stripMargin val t1 = tEnv.sqlQuery(sql) val sink = new TestingRetractSink t1.toRetractStream[Row].addSink(sink) env.execute() val expected = List("1,1,0,0", "2,2,0,0", "3,3,3,3", "4,4,2,2", "5,2,0,0") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testNestedGroupByAgg(): Unit = { val data = new mutable.MutableList[(Int, Long, String)] data.+=((1, 1L, "A")) data.+=((2, 2L, "B")) data.+=((3, 2L, "B")) data.+=((4, 3L, "C")) data.+=((5, 3L, "C")) data.+=((6, 3L, "C")) data.+=((7, 4L, "B")) data.+=((8, 4L, "A")) data.+=((9, 4L, "D")) data.+=((10, 4L, "E")) data.+=((11, 5L, "A")) data.+=((12, 5L, "B")) val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("T", t) val sql = """ |SELECT sum(b), count(a), max(a), min(a), c |FROM ( | SELECT b, count(c) as c, sum(a) as a | FROM T | GROUP BY b) |GROUP BY c """.stripMargin val t1 = tEnv.sqlQuery(sql) val sink = new TestingRetractSink t1.toRetractStream[Row].addSink(sink) env.execute() val expected = List("1,1,1,1,1", "3,1,15,15,3", "4,1,34,34,4", "7,2,23,5,2") assertEquals(expected.sorted, sink.getRetractResults.sorted) } /** test unbounded groupBy (without window) **/ @Test def testUnboundedGroupBy(): Unit = { val t = failingDataSource(TestData.tupleData3).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("MyTable", t) val sqlQuery = "SELECT b, COUNT(a) FROM MyTable GROUP BY b" val sink = new TestingRetractSink tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink) env.execute() val expected = List("1,1", "2,2", "3,3", "4,4", "5,5", 
"6,6") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testWindowWithUnboundedAgg(): Unit = { val t = failingDataSource(TestData.tupleData5.map { case (a, b, c, d, e) => (b, a, c, d, e) }).assignTimestampsAndWatermarks( new TimestampAndWatermarkWithOffset[(Long, Int, Int, String, Long)](0L)) .toTable(tEnv, 'rowtime.rowtime, 'a, 'c, 'd, 'e) tEnv.registerTable("MyTable", t) val innerSql = """ |SELECT a, | SUM(DISTINCT e) b, | MIN(DISTINCT e) c, | COUNT(DISTINCT e) d |FROM MyTable |GROUP BY a, TUMBLE(rowtime, INTERVAL '0.005' SECOND) """.stripMargin val sqlQuery = "SELECT c, MAX(a), COUNT(DISTINCT d) FROM (" + innerSql + ") GROUP BY c" val results = tEnv.sqlQuery(sqlQuery).toRetractStream[Row] val sink = new TestingRetractSink results.addSink(sink) env.execute() val expected = List( "1,5,3", "2,5,2") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testListAggWithNullData(): Unit = { val dataWithNull = List( (1, 1, null), (2, 1, null), (3, 1, null)) val t: DataStream[(Int, Int, String)] = failingDataSource(dataWithNull) val streamTable = t.toTable(tEnv, 'id, 'len, 'content) tEnv.registerTable("T", streamTable) val sqlQuery = s""" |SELECT len, listagg(content, '#') FROM T GROUP BY len """.stripMargin val sink = new TestingRetractSink tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink) env.execute() val expected = List("1,null") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testListAggWithoutDelimiterTreatNull(): Unit = { val dataWithNull = List( (1, 1, null), (2, 1, null), (3, 1, null)) val t: DataStream[(Int, Int, String)] = failingDataSource(dataWithNull) val streamTable = t.toTable(tEnv, 'id, 'len, 'content) tEnv.registerTable("T", streamTable) val sqlQuery = s""" |SELECT len, listagg(content) FROM T GROUP BY len """.stripMargin val sink = new TestingRetractSink tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink) env.execute() val expected = List("1,null") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testListAggWithDistinct(): Unit = { val data = new mutable.MutableList[(Int, Long, String)] data.+=((1, 1L, "A")) data.+=((2, 2L, "B")) data.+=((3, 2L, "B")) data.+=((4, 3L, "C")) data.+=((5, 3L, "C")) data.+=((6, 3L, "A")) data.+=((7, 4L, "EF")) data.+=((1, 1L, "A")) data.+=((8, 4L, "EF")) data.+=((8, 4L, null)) val sqlQuery = "SELECT b, LISTAGG(DISTINCT c, '#') FROM MyTable GROUP BY b" tEnv.registerTable("MyTable", failingDataSource(data).toTable(tEnv).as("a", "b", "c")) val sink = new TestingRetractSink tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink).setParallelism(1) env.execute() val expected = List("1,A", "2,B", "3,C#A", "4,EF") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testUnboundedGroupByCollect(): Unit = { val sqlQuery = "SELECT b, COLLECT(a) FROM MyTable GROUP BY b" val t = failingDataSource(TestData.tupleData3).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("MyTable", t) val sink = new TestingRetractSink tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink) env.execute() // TODO: [BLINK-16716210] the string result of collect is not determinist // TODO: sort the map result in the future val expected = List( "1,{1=1}", "2,{2=1, 3=1}", "3,{4=1, 5=1, 6=1}", "4,{7=1, 8=1, 9=1, 10=1}", "5,{11=1, 12=1, 13=1, 14=1, 15=1}", "6,{16=1, 17=1, 18=1, 19=1, 20=1, 21=1}") assertMapStrEquals(expected.sorted.toString, sink.getRetractResults.sorted.toString) } @Test def testUnboundedGroupByCollectWithObject(): Unit = { val sqlQuery = "SELECT 
b, COLLECT(c) FROM MyTable GROUP BY b" val data = List( (1, 1, List(12, "45.6")), (2, 2, List(12, "45.612")), (3, 2, List(13, "41.6")), (4, 3, List(14, "45.2136")), (5, 3, List(18, "42.6")) ) tEnv.registerTable("MyTable", failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)) val sink = new TestingRetractSink tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink) env.execute() val expected = List( "1,{List(12, 45.6)=1}", "2,{List(13, 41.6)=1, List(12, 45.612)=1}", "3,{List(18, 42.6)=1, List(14, 45.2136)=1}") assertMapStrEquals(expected.sorted.toString, sink.getRetractResults.sorted.toString) } @Test def testGroupBySingleValue(): Unit = { val data = new mutable.MutableList[(Int, Long, String)] data.+=((1, 1L, "A")) data.+=((2, 2L, "B")) data.+=((3, 2L, "B")) data.+=((4, 3L, "C")) data.+=((5, 3L, "C")) data.+=((6, 3L, "C")) data.+=((6, 3L, "C")) data.+=((6, 3L, "C")) data.+=((6, 3L, "C")) data.+=((6, 3L, "C")) data.+=((6, 3L, "C")) data.+=((6, 3L, "C")) data.+=((6, 3L, "C")) data.+=((7, 4L, "B")) data.+=((8, 4L, "A")) data.+=((9, 4L, "D")) data.+=((10, 4L, "E")) data.+=((11, 5L, "A")) data.+=((12, 5L, "B")) val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("T1", t) tEnv.registerTable("T2", t) val t1 = tEnv.sqlQuery("SELECT * FROM T2 WHERE T2.a < (SELECT count(*) * 0.3 FROM T1)") val sink = new TestingRetractSink t1.toRetractStream[Row].addSink(sink).setParallelism(1) env.execute() val expected = List("1,1,A", "2,2,B", "3,2,B", "4,3,C", "5,3,C") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testPojoField(): Unit = { val data = Seq( (1, new MyPojo(5, 105)), (1, new MyPojo(6, 11)), (1, new MyPojo(7, 12))) val t = failingDataSource(data).toTable(tEnv, 'a, 'b) tEnv.registerTable("MyTable", t) tEnv.registerFunction("pojoFunc", new MyPojoAggFunction) tEnv.registerFunction("pojoToInt", MyPojoFunc) val sql = "SELECT pojoToInt(pojoFunc(b)) FROM MyTable group by a" val sink = new TestingRetractSink tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink) env.execute() val expected = List("128") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testDecimalSum(): Unit = { val data = new mutable.MutableList[Row] data.+=(Row.of(BigDecimal(1).bigDecimal)) data.+=(Row.of(BigDecimal(2).bigDecimal)) data.+=(Row.of(BigDecimal(2).bigDecimal)) data.+=(Row.of(BigDecimal(3).bigDecimal)) val rowType = new RowTypeInfo(BigDecimalTypeInfo.of(7, 2)) val t = failingDataSource(data)(rowType).toTable(tEnv, 'd) tEnv.registerTable("T", t) val sql = """ |select c, sum(d) from ( | select d, count(d) c from T group by d |) group by c """.stripMargin val sink = new TestingRetractSink tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink) env.execute() val expected = List("1,4.00", "2,2.00") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testDifferentTypesSumWithRetract(): Unit = { val data = List( (1.toByte, 1.toShort, 1, 1L, 1.0F, 1.0, "a"), (2.toByte, 2.toShort, 2, 2L, 2.0F, 2.0, "a"), (3.toByte, 3.toShort, 3, 3L, 3.0F, 3.0, "a"), (3.toByte, 3.toShort, 3, 3L, 3.0F, 3.0, "a"), (1.toByte, 1.toShort, 1, 1L, 1.0F, 1.0, "b"), (2.toByte, 2.toShort, 2, 2L, 2.0F, 2.0, "b"), (3.toByte, 3.toShort, 3, 3L, 3.0F, 3.0, "c"), (3.toByte, 3.toShort, 3, 3L, 3.0F, 3.0, "c") ) val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c, 'd, 'e, 'f, 'g) tEnv.registerTable("T", t) // We use sub-query + limit here to ensure retraction val sql = """ |SELECT sum(a), sum(b), sum(c), sum(d), sum(e), sum(f), sum(h) FROM ( | SELECT *, CAST(c AS DECIMAL(3, 2)) 
AS h FROM T LIMIT 8 |) GROUP BY g """.stripMargin val sink = new TestingRetractSink tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink) env.execute() val expected = List("9,9,9,9,9.0,9.0,9.00", "3,3,3,3,3.0,3.0,3.00", "6,6,6,6,6.0,6.0,6.00") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testAggAfterUnion(): Unit = { val data = List( (1L, 1, "Hello"), (2L, 2, "Hello"), (2L, 3, "Hello"), (3L, 4, "Hello"), (3L, 5, "Hello"), (7L, 6, "Hello"), (7L, 7, "Hello World"), (7L, 8, "Hello World"), (10L, 20, "Hello World")) val t1 = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("T1", t1) val t2 = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("T2", t2) val sql = """ |SELECT a, sum(b), count(distinct c) |FROM ( | SELECT * FROM T1 | UNION ALL | SELECT * FROM T2 |) GROUP BY a """.stripMargin val sink = new TestingRetractSink tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink) env.execute() val expected = List("1,2,1", "2,10,1", "3,18,1", "7,42,2", "10,40,1") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testVarArgsNoGroupBy(): Unit = { val data = List( (1, 1L, "5", "3"), (1, 22L, "15", "13"), (3, 33L, "25", "23")) val t = failingDataSource(data).toTable(tEnv, 'id, 's, 's1, 's2) tEnv.registerTable("MyTable", t) tEnv.registerFunction("func", new VarArgsAggFunction) val sql = "SELECT func(s, s1, s2) FROM MyTable" val sink = new TestingRetractSink tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink).setParallelism(1) env.execute() val expected = List("140") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testVarArgsWithGroupBy(): Unit = { val data = List( (1, 1L, "5", "3"), (1, 22L, "15", "13"), (3, 33L, "25", "23")) val t = failingDataSource(data).toTable(tEnv, 'id, 's, 's1, 's2) tEnv.registerTable("MyTable", t) tEnv.registerFunction("func", new VarArgsAggFunction) val sink = new TestingRetractSink tEnv .sqlQuery("SELECT id, func(s, s1, s2) FROM MyTable group by id") .toRetractStream[Row] .addSink(sink) env.execute() val expected = List("1,59", "3,81") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testMinMaxWithBinaryString(): Unit = { val data = new mutable.MutableList[(Int, Long, String)] data.+=((1, 1L, "A")) data.+=((2, 2L, "B")) data.+=((3, 2L, "BC")) data.+=((4, 3L, "C")) data.+=((5, 3L, "CD")) data.+=((6, 3L, "DE")) data.+=((7, 4L, "EF")) data.+=((8, 4L, "FG")) data.+=((9, 4L, "HI")) data.+=((10, 4L, "IJ")) val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("T", t) val sql = """ |SELECT b, min(c), max(c) |FROM ( | SELECT a, b, listagg(c) as c | FROM T | GROUP BY a, b) |GROUP BY b """.stripMargin val sink = new TestingRetractSink tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink) env.execute() val expected = List("1,A,A", "2,B,BC", "3,C,DE", "4,EF,IJ") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testBigDataOfMinMaxWithBinaryString(): Unit = { val data = new mutable.MutableList[(Int, Long, String)] for (i <- 0 until 100) { data.+=((i % 10, i, i.toString)) } val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("T", t) val sql = """ |SELECT a, min(b), max(c), min(c) FROM T GROUP BY a """.stripMargin val sink = new TestingRetractSink tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink) env.execute() val expected = List("0,0,90,0", "1,1,91,1", "2,2,92,12", "3,3,93,13", "4,4,94,14", "5,5,95,15", "6,6,96,16", "7,7,97,17", "8,8,98,18", "9,9,99,19") 
assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testAggWithFilterClause(): Unit = { val data = new mutable.MutableList[(Int, Long, String, Boolean)] data.+=((1, 5L, "B", true)) data.+=((1, 4L, "C", false)) data.+=((1, 2L, "A", true)) data.+=((2, 1L, "A", true)) data.+=((2, 2L, "B", false)) data.+=((1, 6L, "A", true)) data.+=((2, 2L, "B", false)) data.+=((3, 5L, "B", true)) data.+=((2, 3L, "C", true)) data.+=((2, 3L, "D", true)) val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c, 'd) tEnv.registerTable("T", t) // test declarative and imperative aggregates val sql = """ |SELECT | a, | sum(b) filter (where c = 'A'), | count(distinct c) filter (where d is true), | max(b) |FROM T GROUP BY a """.stripMargin val sink = new TestingRetractSink tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink) env.execute() val expected = List("1,8,2,6", "2,1,3,3", "3,null,1,5") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testMinMaxWithDecimal(): Unit = { val data = new mutable.MutableList[Row] data.+=(Row.of(BigDecimal(1).bigDecimal)) data.+=(Row.of(BigDecimal(2).bigDecimal)) data.+=(Row.of(BigDecimal(2).bigDecimal)) data.+=(Row.of(BigDecimal(4).bigDecimal)) data.+=(Row.of(BigDecimal(3).bigDecimal)) // a, count(a) as cnt // 1, 1 // 2, 2 // 4, 1 // 3, 1 // // cnt, min(a), max(a) // 1, 1, 4 // 2, 2, 2 val rowType = new RowTypeInfo(BigDecimalTypeInfo.of(7, 2)) val t = failingDataSource(data)(rowType).toTable(tEnv, 'a) tEnv.registerTable("T", t) val sql = """ |select cnt, min(a), max(a) from ( | select a, count(a) as cnt from T group by a |) group by cnt """.stripMargin val sink = new TestingRetractSink tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink) env.execute() val expected = List("1,1.00,4.00", "2,2.00,2.00") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testCollectOnClusteredFields(): Unit = { val data = List( (1, 1, (12, "45.6")), (2, 2, (12, "45.612")), (3, 2, (13, "41.6")), (4, 3, (14, "45.2136")), (5, 3, (18, "42.6")) ) tEnv.registerTable("src", env.fromCollection(data).toTable(tEnv, 'a, 'b, 'c)) val sql = "SELECT a, b, COLLECT(c) as `set` FROM src GROUP BY a, b" val view1 = tEnv.sqlQuery(sql) tEnv.registerTable("v1", view1) val toCompositeObj = ToCompositeObj tEnv.registerFunction("toCompObj", toCompositeObj) val sql1 = s""" |SELECT | a, b, COLLECT(toCompObj(t.sid, 'a', 100, t.point)) as info |from ( | select | a, b, uuid() as u, V.sid, V.point | from | v1, unnest(v1.`set`) as V(sid, point) |) t |group by t.a, t.b, t.u """.stripMargin val sink = new TestingRetractSink tEnv.sqlQuery(sql1).toRetractStream[Row].addSink(sink) env.execute() val expected = List( "1,1,{CompositeObj(12,a,100,45.6)=1}", "2,2,{CompositeObj(12,a,100,45.612)=1}", "3,2,{CompositeObj(13,a,100,41.6)=1}", "4,3,{CompositeObj(14,a,100,45.2136)=1}", "5,3,{CompositeObj(18,a,100,42.6)=1}") assertEquals(expected.sorted, sink.getRetractResults.sorted) } /** Test LISTAGG **/ @Test def testListAgg(): Unit = { tEnv.registerFunction("listagg_retract", new ListAggWithRetractAggFunction) tEnv.registerFunction("listagg_ws_retract", new ListAggWsWithRetractAggFunction) val sqlQuery = s""" |SELECT | listagg(c), listagg(c, '-'), listagg_retract(c), listagg_ws_retract(c, '+') |FROM MyTable |GROUP BY c |""".stripMargin val data = new mutable.MutableList[(Int, Long, String)] for (i <- 0 until 10) { data.+=((i, 1L, "Hi")) } val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("MyTable", t) val sink = new TestingRetractSink 
tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink) env.execute() val expected = List("Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi-Hi-Hi-Hi-Hi-Hi-Hi-Hi-Hi-Hi," + "Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi+Hi+Hi+Hi+Hi+Hi+Hi+Hi+Hi+Hi") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testSTDDEV(): Unit = { val sqlQuery = "SELECT STDDEV_SAMP(a), STDDEV_POP(a) FROM MyTable GROUP BY c" val data = new mutable.MutableList[(Double, Long, String)] for (i <- 0 until 10) { data.+=((i, 1L, "Hi")) } val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("MyTable", t) val sink = new TestingRetractSink tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink) env.execute() val expected = List("3.0276503540974917,2.8722813232690143") assertEquals(expected.sorted, sink.getRetractResults.sorted) } /** test VAR_POP **/ @Test def testVAR_POP(): Unit = { val sqlQuery = "SELECT VAR_POP(a) FROM MyTable GROUP BY c" val data = new mutable.MutableList[(Int, Long, String)] data.+=((2900, 1L, "Hi")) data.+=((2500, 1L, "Hi")) data.+=((2600, 1L, "Hi")) data.+=((3100, 1L, "Hello")) data.+=((11000, 1L, "Hello")) val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("MyTable", t) val sink = new TestingRetractSink tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink) env.execute() // TODO: define precise behavior of VAR_POP() val expected = List(15602500.toString, 28889.toString) assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testLongVarargsAgg(): Unit = { tEnv.registerFunction("var_sum", new VarSumAggFunction) val sqlQuery = s"SELECT a, " + s"var_sum(${0.until(260).map(_ => "b").mkString(",")}) from MyTable group by a" val data = Seq[(Int, Int)]((1, 1), (2,2)) val t = failingDataSource(data).toTable(tEnv, 'a, 'b) tEnv.registerTable("MyTable", t) val sink = new TestingRetractSink tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink) env.execute() val expected = List("1,260", "2,520") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testCountDistinctWithBinaryRowSource(): Unit = { // this case is failed before, because of object reuse problem val data = (0 until 100).map {i => ("1", "1", s"${i%50}", "1")}.toList // use BinaryRowData source here for StringData reuse val t = failingBinaryRowSource(data).toTable(tEnv, 'a, 'b, 'c, 'd) tEnv.registerTable("src", t) val sql = s""" |SELECT | a, | b, | COUNT(distinct c) as uv |FROM ( | SELECT | a, b, c, d | FROM | src where b <> '' | UNION ALL | SELECT | a, 'ALL' as b, c, d | FROM | src where b <> '' |) t |GROUP BY | a, b """.stripMargin val t1 = tEnv.sqlQuery(sql) val sink = new TestingRetractSink t1.toRetractStream[Row].addSink(sink) env.execute("test") val expected = List("1,1,50", "1,ALL,50") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testDistinctWithMultiFilter(): Unit = { val t = failingDataSource(TestData.tupleData3).toTable(tEnv).as("a", "b", "c") tEnv.registerTable("MyTable", t) val sqlQuery = s""" |SELECT | b, | SUM(DISTINCT (a * 3)), | COUNT(DISTINCT SUBSTRING(c FROM 1 FOR 2)), | COUNT(DISTINCT c), | COUNT(DISTINCT c) filter (where MOD(a, 3) = 0), | COUNT(DISTINCT c) filter (where MOD(a, 3) = 1) |FROM MyTable |GROUP BY b """.stripMargin val result = tEnv.sqlQuery(sqlQuery).toRetractStream[Row] val sink = new TestingRetractSink result.addSink(sink) env.execute() val expected = List( "1,3,1,1,0,1", "2,15,1,2,1,0", "3,45,3,3,1,1", "4,102,1,4,1,2", "5,195,1,5,2,1", "6,333,1,6,2,2") assertEquals(expected.sorted, 
sink.getRetractResults.sorted) } @Test def testPruneUselessAggCall(): Unit = { val data = new mutable.MutableList[(Int, Long, String)] data .+= ((1, 1L, "Hi")) data .+= ((2, 2L, "Hello")) data .+= ((3, 2L, "Hello world")) val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("T", t) val t1 = tEnv.sqlQuery( "select a from (select b, max(a) as a, count(*), max(c) as c from T group by b) T1") val sink = new TestingRetractSink t1.toRetractStream[Row].addSink(sink).setParallelism(1) env.execute() val expected = List("1", "3") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testGenericTypesWithoutStateClean(): Unit = { // because we don't provide a way to disable state cleanup. // TODO verify all tests with state cleanup closed. tEnv.getConfig.setIdleStateRetentionTime(Time.days(0), Time.days(0)) val t = failingDataSource(Seq(1, 2, 3)).toTable(tEnv, 'a) val results = t .select(new GenericAggregateFunction()('a)) .toRetractStream[Row] val sink = new TestingRetractSink results.addSink(sink).setParallelism(1) env.execute() } @Test def testConstantGroupKeyWithUpsertSink(): Unit = { val data = new mutable.MutableList[(Int, Long, String)] data.+=((1, 1L, "A")) data.+=((2, 2L, "B")) data.+=((3, 2L, "B")) data.+=((4, 3L, "C")) data.+=((5, 3L, "C")) val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c) tEnv.registerTable("MyTable", t) val tableSink = new TestingUpsertTableSink(Array(0)).configure( Array[String]("c", "bMax"), Array[TypeInformation[_]](Types.STRING, Types.LONG)) tEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal("testSink", tableSink) execInsertSqlAndWaitResult( """ |insert into testSink |select c, max(b) from | (select b, c, true as f from MyTable) t |group by c, f """.stripMargin) val expected = List("A,1", "B,2", "C,3") assertEquals(expected.sorted, tableSink.getUpsertResults.sorted) } @Test def testAggregateOnChangelogSource(): Unit = { val dataId = TestValuesTableFactory.registerChangelogData(TestData.userChangelog) val ddl = s""" |CREATE TABLE user_logs ( | user_id STRING, | user_name STRING, | email STRING, | balance DECIMAL(18,2) |) WITH ( | 'connector' = 'values', | 'data-id' = '$dataId', | 'changelog-mode' = 'I,UA,UB,D' |) |""".stripMargin tEnv.executeSql(ddl) val query = s""" |SELECT count(*), sum(balance), max(email) |FROM user_logs |""".stripMargin val result = tEnv.sqlQuery(query).toRetractStream[Row] val sink = new TestingRetractSink() result.addSink(sink).setParallelism(result.parallelism) env.execute() val expected = Seq("3,29.39,tom123@gmail.com") assertEquals(expected.sorted, sink.getRetractResults.sorted) } @Test def testAggregateOnInsertDeleteChangelogSource(): Unit = { // only contains INSERT and DELETE val userChangelog = TestData.userChangelog.map { tuple => tuple.f0 match { case RowKind.INSERT | RowKind.DELETE => tuple case RowKind.UPDATE_BEFORE => JTuple2.of(RowKind.DELETE, tuple.f1) case RowKind.UPDATE_AFTER => JTuple2.of(RowKind.INSERT, tuple.f1) } } val dataId = TestValuesTableFactory.registerChangelogData(userChangelog) val ddl = s""" |CREATE TABLE user_logs ( | user_id STRING, | user_name STRING, | email STRING, | balance DECIMAL(18,2) |) WITH ( | 'connector' = 'values', | 'data-id' = '$dataId', | 'changelog-mode' = 'I,D' |) |""".stripMargin tEnv.executeSql(ddl) val query = s""" |SELECT count(*), sum(balance), max(email) |FROM user_logs |""".stripMargin val result = tEnv.sqlQuery(query).toRetractStream[Row] val sink = new TestingRetractSink() 
result.addSink(sink).setParallelism(result.parallelism) env.execute() val expected = Seq("3,29.39,tom123@gmail.com") assertEquals(expected.sorted, sink.getRetractResults.sorted) } }
GJL/flink
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/stream/sql/AggregateITCase.scala
Scala
apache-2.0
41,064
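The testSTDDEV case in the record above expects the string "3.0276503540974917,2.8722813232690143" for the inputs 0.0 through 9.0. As a sanity check, here is a minimal plain-Scala sketch (independent of Flink; the object name is chosen here for illustration) that reproduces those two figures as the sample and population standard deviations:

object StddevCheck extends App {
  val xs = (0 until 10).map(_.toDouble)
  val mean = xs.sum / xs.size
  val sumSqDiff = xs.map(x => (x - mean) * (x - mean)).sum

  // STDDEV_POP divides by n, STDDEV_SAMP by n - 1 (Bessel's correction).
  val stddevPop  = math.sqrt(sumSqDiff / xs.size)
  val stddevSamp = math.sqrt(sumSqDiff / (xs.size - 1))

  // Prints the sample/population pair the test above expects.
  println(s"$stddevSamp,$stddevPop")
}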
/* ,i::, :;;;;;;; ;:,,::;. 1ft1;::;1tL t1;::;1, :;::; _____ __ ___ __ fCLff ;:: tfLLC / ___/ / |/ /____ _ _____ / /_ CLft11 :,, i1tffLi \\__ \\ ____ / /|_/ // __ `// ___// __ \\ 1t1i .;; .1tf ___/ //___// / / // /_/ // /__ / / / / CLt1i :,: .1tfL. /____/ /_/ /_/ \\__,_/ \\___//_/ /_/ Lft1,:;: , 1tfL: ;it1i ,,,:::;;;::1tti s_mach.explain_json .t1i .,::;;; ;1tt Copyright (c) 2016 S-Mach, Inc. Lft11ii;::;ii1tfL: Author: lance.gatlin@gmail.com .L1 1tt1ttt,,Li ...1LLLL... */ package s_mach.explain_json import org.scalatest.{FlatSpec, Matchers} import s_mach.explain_json.JsonExplanationNode._ import s_mach.explain_json.impl.JsonExplanationOps._ import s_mach.string.CharGroup._ class JsonExplanationOpsTest extends FlatSpec with Matchers { import TestDI._ "explainCharGroup" should "print an i18n explanation for a character group" in { explainCharGroup(UnicodeLetter) shouldBe "unicode letters" explainCharGroup(UppercaseLetter) shouldBe "uppercase letters" explainCharGroup(LowercaseLetter) shouldBe "lowercase letters" explainCharGroup(Letter) shouldBe "letters" explainCharGroup(WordLetter) shouldBe "word letters" explainCharGroup(Digit) shouldBe "digits" explainCharGroup(Underscore) shouldBe "underscores" explainCharGroup(Hyphen) shouldBe "hyphens" explainCharGroup(Space) shouldBe "spaces" explainCharGroup(Whitespace) shouldBe "whitespace" } "explainCharGroups" should "print an i18n explanation for a seq of character groups" in { explainCharGroups(Seq(Letter,Digit,Space)) shouldBe "must contain only letters, digits or spaces" } "printJsonType" should "print an i18n string for a JsonType" in { printJsonType(JsonBoolean()) shouldBe "boolean" printJsonType(JsonString()) shouldBe "string" printJsonType(JsonObject()) shouldBe "object" printJsonType(JsonNumber()) shouldBe "number" printJsonType(JsonInteger()) shouldBe "integer" printJsonType(JsonArray()) shouldBe "array" } "printJsonTypeRemark" should "print an i18n type remark for a JsonType" in { printJsonTypeRemark(JsonBoolean()) shouldBe "must be boolean" } "printJsonRuleRemark" should "print an i18n rule remark for a JsonRule" in { printJsonRuleRemark(JsonRule.Maximum(BigDecimal("0"),true)) shouldBe "must be less than 0" printJsonRuleRemark(JsonRule.Maximum(BigDecimal("0"),false)) shouldBe "must be less than or equal to 0" printJsonRuleRemark(JsonRule.Minimum(BigDecimal("0"),true)) shouldBe "must be greater than 0" printJsonRuleRemark(JsonRule.Minimum(BigDecimal("0"),false)) shouldBe "must be greater than or equal to 0" printJsonRuleRemark(JsonRule.StringMaxLength(64)) shouldBe "must not be longer than 64 characters" printJsonRuleRemark(JsonRule.StringMinLength(64)) shouldBe "must have at least 64 characters" printJsonRuleRemark(JsonRule.StringPattern("[A-Za-z0-9_]+")) shouldBe "must match regex pattern '[A-Za-z0-9_]+'" } }
S-Mach/s_mach.explain
explain_json/src/test/scala/s_mach/explain_json/JsonExplanationOpsTest.scala
Scala
mit
3,222
package au.com.dius.pact.provider

import au.com.dius.pact.model.{OptionalBody, Request}

object EnterStateRequest {
  def apply(url: String, state: String): Request = {
    new Request("POST", url, null, null,
      OptionalBody.body("{\"state\": \"" + state + "\"}"), null)
  }
}
Fitzoh/pact-jvm
pact-jvm-provider/src/main/scala/au/com/dius/pact/provider/EnterStateRequest.scala
Scala
apache-2.0
276
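A hypothetical usage sketch for the helper above; the endpoint URL and state name are invented for illustration. It exercises only the apply method shown in the record, which builds a POST Request whose JSON body carries the provider state:

import au.com.dius.pact.model.Request
import au.com.dius.pact.provider.EnterStateRequest

object EnterStateRequestExample extends App {
  // Invented provider-state endpoint and state name.
  val request: Request = EnterStateRequest("http://localhost:8080/setup", "user 1234 exists")

  // The helper builds a POST Request whose JSON body is {"state": "user 1234 exists"}.
  println(request)
}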
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package top

import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.parsing.statements.PatVarDef

import scala.annotation.tailrec

/**
 * @author Alexander Podkhalyuzin
 * Date: 06.02.2008
 */

/*
 * EarlyDef ::= '{' [PatVarDef {semi PatVarDef}] '}' 'with'
 */
object EarlyDef extends ParsingRule {
  override def parse(implicit builder: ScalaPsiBuilder): Boolean = {
    val earlyMarker = builder.mark()
    //Look for {
    builder.getTokenType match {
      case ScalaTokenTypes.tLBRACE =>
        builder.advanceLexer() //Ate {
        builder.enableNewlines()
      case _ =>
        builder error ScalaBundle.message("unreachable.error")
        earlyMarker.drop()
        return false
    }
    //this method parses PatVarDef {semi PatVarDef} recursively
    @tailrec
    def parseSub(): Boolean = {
      builder.getTokenType match {
        case ScalaTokenTypes.tRBRACE =>
          builder.advanceLexer() //Ate }
          true
        case _ =>
          if (PatVarDef()) {
            builder.getTokenType match {
              case ScalaTokenTypes.tRBRACE =>
                builder.advanceLexer() //Ate }
                true
              case ScalaTokenTypes.tSEMICOLON =>
                builder.advanceLexer() //Ate semicolon
                parseSub()
              case _ =>
                if (builder.newlineBeforeCurrentToken) {
                  parseSub()
                } else {
                  false
                }
            }
          } else {
            false
          }
      }
    }
    if (!parseSub()) {
      builder.restoreNewlinesState()
      builder error ScalaBundle.message("unreachable.error")
      earlyMarker.rollbackTo()
      return false
    }
    builder.restoreNewlinesState()
    //finally look for 'with' keyword
    builder.getTokenType match {
      case ScalaTokenTypes.kWITH =>
        earlyMarker.done(ScalaElementType.EARLY_DEFINITIONS)
        builder.advanceLexer() //Ate with
        true
      case _ =>
        builder error ScalaBundle.message("unreachable.error")
        earlyMarker.rollbackTo()
        false
    }
  }
}
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/parser/parsing/top/EarlyDef.scala
Scala
apache-2.0
2,316
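For context, the production parsed above corresponds to Scala 2 early definitions: a block of vals between 'extends' and 'with' that is initialized before the parent constructors run. A small, self-contained sketch of the source construct this rule recognizes (my own example, not from the plugin; early initializers are deprecated in Scala 2.13):

// The rule above recognizes Scala 2 "early definitions".
trait Named {
  val name: String
  println(s"initialized with name = $name")
}

// Without the early definition, `name` would still be null when the
// trait body's println runs; with it, "Ada" is printed.
class Person extends { val name = "Ada" } with Named

object EarlyDefDemo extends App {
  new Person // prints: initialized with name = Ada
}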
/*
 * Copyright 2016 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package reactivemongo

import reactivemongo.core.nodeset.Authenticate

import scala.concurrent.ExecutionContext

import reactivemongo.api.{MongoConnectionOptions, FailoverStrategy, DB, MongoDriver}

object ReactiveMongoHelper {
  @deprecated(message = "use case class constructor that takes MongoConnectionOptions")
  def apply(
    dbName: String,
    servers: Seq[String],
    auth: Seq[Authenticate],
    nbChannelsPerNode: Option[Int],
    failoverStrategy: Option[FailoverStrategy]): ReactiveMongoHelper = {
    val mongoOpts = nbChannelsPerNode.map { n =>
      MongoConnectionOptions().copy(nbChannelsPerNode = n)
    }.getOrElse(MongoConnectionOptions())
    this(dbName, servers, auth, failoverStrategy, mongoOpts)
  }
}

case class ReactiveMongoHelper(
  dbName: String,
  servers: Seq[String],
  auth: Seq[Authenticate],
  failoverStrategy: Option[FailoverStrategy],
  connectionOptions: MongoConnectionOptions = MongoConnectionOptions()) {

  implicit val ec: ExecutionContext = ExecutionContext.Implicits.global

  lazy val driver = new MongoDriver

  lazy val connection = driver.connection(
    servers,
    authentications = auth,
    options = connectionOptions
  )

  lazy val db = failoverStrategy match {
    case Some(fs: FailoverStrategy) => DB(dbName, connection, fs)
    case None => DB(dbName, connection)
  }
}
hmrc/reactivemongo-json
src/main/scala/reactivemongo/ReactiveMongoHelper.scala
Scala
apache-2.0
2,066
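A hedged usage sketch for the case class above, with made-up connection settings; it relies only on the constructor and lazy vals shown in the record (the underlying driver API is the legacy ReactiveMongo one this helper targets):

import reactivemongo.ReactiveMongoHelper

object MongoHelperExample extends App {
  // Made-up connection settings: no authentication, default failover
  // strategy and default MongoConnectionOptions.
  val helper = ReactiveMongoHelper(
    dbName = "test-db",
    servers = Seq("localhost:27017"),
    auth = Seq.empty,
    failoverStrategy = None
  )

  // driver, connection and db are lazy vals in the case class above,
  // so nothing is created until they are first touched.
  println(s"Helper configured for '${helper.dbName}' on ${helper.servers.mkString(", ")}")
  val db = helper.db // forces driver + connection creation
}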
/* * The Bluejelly project, Copyright 2012. * * This source code is distributed under the terms of * the BSD license, see the LICENSE file for details. */ import sbt._ import Keys._ import sbtassembly.Plugin._ import AssemblyKeys._ object BluejellyBuild extends Build { import Dist._ override lazy val settings = super.settings ++ Seq( name := "bluejelly", version := "1.0", scalacOptions ++= Seq("-deprecation","-unchecked","-feature") ) lazy val bluejelly = Project( id = "bluejelly", base = file(".")) aggregate(runtime,utils,asm,l4,bjc) lazy val runtime = Project( id = "bluejelly-runtime", base = file("bluejelly-runtime"), settings = Project.defaultSettings ++ assemblySettings ++ Seq( libraryDependencies ++= Seq(Deps.asmWeb, Deps.junit), fork in Test := true, resourceGenerators in Compile <+= ( resourceManaged in Compile, version) map { (dir,v) => Resources.genCfg(dir, "brt-cfg.properties", "brt.version=%s" format(v)) }, compile in Compile <<= ( streams, sourceDirectory in Compile, classDirectory in Compile, dependencyClasspath in Compile, compile in Compile) map { (s,sd,cd,cp,a) => val classPath = cd +: (cp.files filter {_.toString contains "asm"}) Prims.gen(s.log, sd, cd, classPath) a }, test in assembly := {}, assembleArtifact in packageScala := false, jarName in assembly := "%s-%s.jar" format (name.value,version.value), distTask ) ) lazy val utils = Project( id = "bluejelly-utils", base = file("bluejelly-utils") ) lazy val asm = Project( id = "bluejelly-asm", base = file("bluejelly-asm"), settings = Project.defaultSettings ++ assemblySettings ++ Seq( libraryDependencies ++= Seq(Deps.asmWeb,Deps.scalaTest), resourceGenerators in Compile <+= ( resourceManaged in Compile, version) map { (dir,v) => Resources.genCfg(dir, "bas-cfg.properties", "bas.version=%s" format(v)) }, resourceGenerators in Test <+= ( streams, resourceManaged in Test, classDirectory in (runtime,Compile), dependencyClasspath in Test) map { (s,dir,cd,cp) => val out = dir / "brt.properties" Resources.genBrtProps(out, cd, cp.files) Seq(out) }, test in assembly := {}, jarName in assembly := "%s-%s.jar" format (name.value,version.value), distTask ) ) dependsOn(utils, runtime % "test->compile") lazy val l4 = Project( id = "bluejelly-l4", base = file("bluejelly-l4"), settings = Project.defaultSettings ++ assemblySettings ++ Seq( libraryDependencies += Deps.scalaTest, resourceGenerators in Compile <+= ( resourceManaged in Compile, version) map { (dir,v) => Resources.genCfg(dir, "l4c-cfg.properties", "l4c.version=%s" format(v)) }, resourceGenerators in Test <+= ( streams, resourceManaged in Test, classDirectory in (runtime,Compile), dependencyClasspath in Test) map { (s,dir,cd,cp) => val out = dir / "brt.properties" Resources.genBrtProps(out, cd, cp.files) Seq(out) }, test in assembly := {}, jarName in assembly := "%s-%s.jar" format (name.value,version.value), distTask ) ) dependsOn(asm, utils, runtime % "test->compile") lazy val bjc = Project( id = "bluejelly-bjc", base = file("bluejelly-bjc"), settings = Project.defaultSettings ++ assemblySettings ++ Seq( initialize ~= { _ => sys.props("scalac.patmat.analysisBudget") = "off" }, libraryDependencies += Deps.scalaTest, test in assembly := {}, jarName in assembly := "%s-%s.jar" format (name.value,version.value), distTask ) ) dependsOn(l4,utils) /** * Bag of dependency specifications. 
*/ private object Deps { val asmWeb = "asm" % "asm-all" % "3.2" val junit = "com.novocode" % "junit-interface" % "0.10-M1" % "test" val scalaTest = "org.scalatest" %% "scalatest" % "1.9.1" % "test" } /** * Auxiliary code generating resources. */ private object Resources { // Generate properties file for bluejelly-runtinme classpath, // user for testing compiled code def genBrtProps(out:File, brtBin:File, brtCp:Seq[File]) { val sep = System getProperty "path.separator" val cp = brtBin +: (brtCp filter {_.toString contains "asm-all"}) IO.write(out, "cp=%s" format (cp mkString sep)) } // Generate a configuration properties file with the given contents def genCfg(dir:File, name:String, contents:String):Seq[File] = { val f = dir / name IO.write(f, contents) Seq(f) } } /** * Transform Runtime primitive classes. */ private object Prims { private val sep = System getProperty "path.separator" // Expand this to include further primitive modules private val prims = Seq("Int","Double","BigInt") def gen(log:Logger, src:File, bin:File, cp:Seq[File]) { cleanup(log, bin) compile(log, src, bin, cp) transform(log, cp) } private def cleanup(log:Logger, bin:File) { val primFiles = prims map { n => bin / "bluejelly" / (n + ".class") } IO.delete(primFiles) //log.info("Removed prims") } private def compile(log:Logger, src:File, bin:File, cp:Seq[File]) { val cps = cp mkString sep val sources = prims map { n => src / "java" / "bluejelly" / (n + ".java")} val cmd = Seq("javac","-cp",cps,"-d",bin.toString) ++ sources map {_.toString} //log.info("Executing: " + (cmd mkString " ")) val status = cmd.! log.info("Prims.compile returned with status: " + status) } private def transform(log:Logger, cp:Seq[File]) { import scala.sys.process._ val cps = cp mkString sep val gen = "bluejelly.PrimTransformer" val cmd = Seq("java","-cp",cps,gen) ++ prims //log.info("Executing: " + (cmd mkString " ")) val status = cmd.! log.info("Prims.gen returned with status: " + status) } } /* * Custom task designed to create a distribution zip file for some app, * consisting of the app's jar and a launcher script assumed to live * under source-directory/scripts. * * Caveat: IO.zip loses the executable flag of the launcher script :( */ private object Dist { val dist = TaskKey[Unit]("dist","generate distribution zip for the current project") val distTask = dist <<= ( assembly,sourceDirectory,name,version,target) map { (f,d,n,v,t) => val files:Seq[File] = IO.listFiles(d / "scripts") :+ f val contents = files pair Path.flat val out = t / ("%s-%s.zip" format (n,v)) IO.zip(contents map { case (f,p) => (f, "%s-%s/%s" format (n,v,p)) }, out) } } }
ppedemon/Bluejelly
project/Build.scala
Scala
bsd-3-clause
6,934
package learningconcurrency.ch2

import scala.collection._
import parallelprogramming._

object SynchronizedBadPool extends App {
  private val tasks = mutable.Queue[() => Unit]()

  val worker = new Thread {
    def poll() = tasks.synchronized {
      if (tasks.nonEmpty) Some(tasks.dequeue()) else None
    }

    override def run(): Unit = while (true) poll() match {
      case Some(task) => task()
      case None =>
    }
  }
  worker.setName("worker")
  worker.setDaemon(true)
  worker.start()

  def asynchronous(body: => Unit) = tasks.synchronized {
    tasks.enqueue(() => body)
  }

  asynchronous { log("Hello ") }
  asynchronous { log("world!") }

  Thread.sleep(5000)
}
tomduhourq/learning-concurrent-programming
src/main/scala/learningconcurrency/ch2/SynchronizedBadPool.scala
Scala
apache-2.0
681
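The pool above busy-spins in poll() whenever the queue is empty, which is what makes it a "bad" pool. A minimal alternative sketch, not taken from the source, that uses wait/notify as a guarded block so the worker sleeps until a task is enqueued (the book's log helper is replaced with println to keep it self-contained):

import scala.collection.mutable

object SynchronizedGuardedPool extends App {
  private val tasks = mutable.Queue[() => Unit]()

  object worker extends Thread {
    setDaemon(true)

    // Block until a task is available instead of spinning.
    def poll(): () => Unit = tasks.synchronized {
      while (tasks.isEmpty) tasks.wait()
      tasks.dequeue()
    }

    override def run(): Unit = while (true) poll()()
  }
  worker.start()

  def asynchronous(body: => Unit): Unit = tasks.synchronized {
    tasks.enqueue(() => body)
    tasks.notify() // wake the worker if it is waiting
  }

  asynchronous { println("Hello ") }
  asynchronous { println("world!") }

  Thread.sleep(1000)
}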
package im.mange.jetpac

trait Fillable extends Identifiable {
  def fill(content: Renderable) = element.fill(content)
  def empty = element.empty
}
alltonp/jetboot
src/main/scala/im/mange/jetpac/Fillable.scala
Scala
apache-2.0
149
package unfiltered.filter

import org.specs2.mutable._

object AsyncSpec extends Specification with unfiltered.specs2.jetty.Served {
  import unfiltered.response._
  import unfiltered.request._
  import unfiltered.request.{Path => UFPath}

  object APlan extends async.Plan {
    def intent = {
      case GET(UFPath("/pass")) => Pass
      case req@GET(UFPath("/")) => req.respond(ResponseString("test") ~> Ok)
    }
  }

  def setup = _.plan(APlan).plan(Planify {
    case GET(UFPath("/pass")) => ResponseString("pass") ~> Ok
  })

  "An Async Filter Server" should {
    "respond to requests" in {
      http(host).as_string must_== "test"
    }
    "pass upstream on Pass, respond in last handler" in {
      http(req(host / "pass")).as_string must_== "pass"
    }
  }
}
omarkilani/unfiltered
filter-async/src/test/scala/AsyncSpec.scala
Scala
mit
783
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.ml.regression import scala.util.Random import org.apache.spark.SparkConf import org.apache.spark.ml.classification.LogisticRegressionSuite._ import org.apache.spark.ml.feature.{Instance, OffsetInstance} import org.apache.spark.ml.feature.{LabeledPoint, RFormula} import org.apache.spark.ml.linalg.{BLAS, DenseVector, Vector, Vectors} import org.apache.spark.ml.param.{ParamMap, ParamsSuite} import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTest, MLTestingUtils} import org.apache.spark.ml.util.TestingUtils._ import org.apache.spark.mllib.random._ import org.apache.spark.serializer.KryoSerializer import org.apache.spark.sql.{DataFrame, Row} import org.apache.spark.sql.functions._ import org.apache.spark.sql.types.FloatType class GeneralizedLinearRegressionSuite extends MLTest with DefaultReadWriteTest { import testImplicits._ private val seed: Int = 42 @transient var datasetGaussianIdentity: DataFrame = _ @transient var datasetGaussianLog: DataFrame = _ @transient var datasetGaussianInverse: DataFrame = _ @transient var datasetBinomial: DataFrame = _ @transient var datasetPoissonLog: DataFrame = _ @transient var datasetPoissonLogWithZero: DataFrame = _ @transient var datasetPoissonIdentity: DataFrame = _ @transient var datasetPoissonSqrt: DataFrame = _ @transient var datasetGammaInverse: DataFrame = _ @transient var datasetGammaIdentity: DataFrame = _ @transient var datasetGammaLog: DataFrame = _ override def beforeAll(): Unit = { super.beforeAll() import GeneralizedLinearRegressionSuite._ datasetGaussianIdentity = generateGeneralizedLinearRegressionInput( intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5), xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01, family = "gaussian", link = "identity").toDF() datasetGaussianLog = generateGeneralizedLinearRegressionInput( intercept = 0.25, coefficients = Array(0.22, 0.06), xMean = Array(2.9, 10.5), xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01, family = "gaussian", link = "log").toDF() datasetGaussianInverse = generateGeneralizedLinearRegressionInput( intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5), xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01, family = "gaussian", link = "inverse").toDF() datasetBinomial = { val nPoints = 10000 val coefficients = Array(-0.57997, 0.912083, -0.371077, -0.819866, 2.688191) val xMean = Array(5.843, 3.057, 3.758, 1.199) val xVariance = Array(0.6856, 0.1899, 3.116, 0.581) val testData = generateMultinomialLogisticInput(coefficients, xMean, xVariance, addIntercept = true, nPoints, seed) testData.toDF() } datasetPoissonLog = generateGeneralizedLinearRegressionInput( intercept = 0.25, coefficients = Array(0.22, 
0.06), xMean = Array(2.9, 10.5), xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01, family = "poisson", link = "log").toDF() datasetPoissonLogWithZero = Seq( LabeledPoint(0.0, Vectors.dense(18, 1.0)), LabeledPoint(1.0, Vectors.dense(12, 0.0)), LabeledPoint(0.0, Vectors.dense(15, 0.0)), LabeledPoint(0.0, Vectors.dense(13, 2.0)), LabeledPoint(0.0, Vectors.dense(15, 1.0)), LabeledPoint(1.0, Vectors.dense(16, 1.0)) ).toDF() datasetPoissonIdentity = generateGeneralizedLinearRegressionInput( intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5), xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01, family = "poisson", link = "identity").toDF() datasetPoissonSqrt = generateGeneralizedLinearRegressionInput( intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5), xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01, family = "poisson", link = "sqrt").toDF() datasetGammaInverse = generateGeneralizedLinearRegressionInput( intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5), xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01, family = "gamma", link = "inverse").toDF() datasetGammaIdentity = generateGeneralizedLinearRegressionInput( intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5), xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01, family = "gamma", link = "identity").toDF() datasetGammaLog = generateGeneralizedLinearRegressionInput( intercept = 0.25, coefficients = Array(0.22, 0.06), xMean = Array(2.9, 10.5), xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01, family = "gamma", link = "log").toDF() } /** * Enable the ignored test to export the dataset into CSV format, * so we can validate the training accuracy compared with R's glm and glmnet package. 
*/ ignore("export test data into CSV format") { datasetGaussianIdentity.rdd.map { case Row(label: Double, features: Vector) => label + "," + features.toArray.mkString(",") }.repartition(1).saveAsTextFile( "target/tmp/GeneralizedLinearRegressionSuite/datasetGaussianIdentity") datasetGaussianLog.rdd.map { case Row(label: Double, features: Vector) => label + "," + features.toArray.mkString(",") }.repartition(1).saveAsTextFile( "target/tmp/GeneralizedLinearRegressionSuite/datasetGaussianLog") datasetGaussianInverse.rdd.map { case Row(label: Double, features: Vector) => label + "," + features.toArray.mkString(",") }.repartition(1).saveAsTextFile( "target/tmp/GeneralizedLinearRegressionSuite/datasetGaussianInverse") datasetBinomial.rdd.map { case Row(label: Double, features: Vector) => label + "," + features.toArray.mkString(",") }.repartition(1).saveAsTextFile( "target/tmp/GeneralizedLinearRegressionSuite/datasetBinomial") datasetPoissonLog.rdd.map { case Row(label: Double, features: Vector) => label + "," + features.toArray.mkString(",") }.repartition(1).saveAsTextFile( "target/tmp/GeneralizedLinearRegressionSuite/datasetPoissonLog") datasetPoissonLogWithZero.rdd.map { case Row(label: Double, features: Vector) => label + "," + features.toArray.mkString(",") }.repartition(1).saveAsTextFile( "target/tmp/GeneralizedLinearRegressionSuite/datasetPoissonLogWithZero") datasetPoissonIdentity.rdd.map { case Row(label: Double, features: Vector) => label + "," + features.toArray.mkString(",") }.repartition(1).saveAsTextFile( "target/tmp/GeneralizedLinearRegressionSuite/datasetPoissonIdentity") datasetPoissonSqrt.rdd.map { case Row(label: Double, features: Vector) => label + "," + features.toArray.mkString(",") }.repartition(1).saveAsTextFile( "target/tmp/GeneralizedLinearRegressionSuite/datasetPoissonSqrt") datasetGammaInverse.rdd.map { case Row(label: Double, features: Vector) => label + "," + features.toArray.mkString(",") }.repartition(1).saveAsTextFile( "target/tmp/GeneralizedLinearRegressionSuite/datasetGammaInverse") datasetGammaIdentity.rdd.map { case Row(label: Double, features: Vector) => label + "," + features.toArray.mkString(",") }.repartition(1).saveAsTextFile( "target/tmp/GeneralizedLinearRegressionSuite/datasetGammaIdentity") datasetGammaLog.rdd.map { case Row(label: Double, features: Vector) => label + "," + features.toArray.mkString(",") }.repartition(1).saveAsTextFile( "target/tmp/GeneralizedLinearRegressionSuite/datasetGammaLog") } test("params") { ParamsSuite.checkParams(new GeneralizedLinearRegression) val model = new GeneralizedLinearRegressionModel("genLinReg", Vectors.dense(0.0), 0.0) ParamsSuite.checkParams(model) } test("generalized linear regression: default params") { val glr = new GeneralizedLinearRegression assert(glr.getLabelCol === "label") assert(glr.getFeaturesCol === "features") assert(glr.getPredictionCol === "prediction") assert(glr.getFitIntercept) assert(glr.getTol === 1E-6) assert(!glr.isDefined(glr.weightCol)) assert(glr.getRegParam === 0.0) assert(glr.getSolver == "irls") assert(glr.getVariancePower === 0.0) // TODO: Construct model directly instead of via fitting. 
val model = glr.setFamily("gaussian").setLink("identity") .fit(datasetGaussianIdentity) MLTestingUtils.checkCopyAndUids(glr, model) assert(model.hasSummary) val copiedModel = model.copy(ParamMap.empty) assert(copiedModel.hasSummary) model.setSummary(None) assert(!model.hasSummary) assert(model.getFeaturesCol === "features") assert(model.getPredictionCol === "prediction") assert(model.intercept !== 0.0) assert(model.hasParent) assert(model.getFamily === "gaussian") assert(model.getLink === "identity") } test("prediction on single instance") { val glr = new GeneralizedLinearRegression val model = glr.setFamily("gaussian").setLink("identity") .fit(datasetGaussianIdentity) testPredictionModelSinglePrediction(model, datasetGaussianIdentity) } test("generalized linear regression: gaussian family against glm") { /* R code: f1 <- data$V1 ~ data$V2 + data$V3 - 1 f2 <- data$V1 ~ data$V2 + data$V3 data <- read.csv("path", header=FALSE) for (formula in c(f1, f2)) { model <- glm(formula, family="gaussian", data=data) print(as.vector(coef(model))) } [1] 2.2958751 0.8088523 [1] 2.5009266 2.1997901 0.5999522 data <- read.csv("path", header=FALSE) model1 <- glm(f1, family=gaussian(link=log), data=data, start=c(0,0)) model2 <- glm(f2, family=gaussian(link=log), data=data, start=c(0,0,0)) print(as.vector(coef(model1))) print(as.vector(coef(model2))) [1] 0.23063118 0.07995495 [1] 0.25016124 0.21995737 0.05999335 data <- read.csv("path", header=FALSE) for (formula in c(f1, f2)) { model <- glm(formula, family=gaussian(link=inverse), data=data) print(as.vector(coef(model))) } [1] 2.3320341 0.8121904 [1] 2.2837064 2.2487147 0.6120262 */ val expected = Seq( Vectors.dense(0.0, 2.2958751, 0.8088523), Vectors.dense(2.5009266, 2.1997901, 0.5999522), Vectors.dense(0.0, 0.23063118, 0.07995495), Vectors.dense(0.25016124, 0.21995737, 0.05999335), Vectors.dense(0.0, 2.3320341, 0.8121904), Vectors.dense(2.2837064, 2.2487147, 0.6120262)) import GeneralizedLinearRegression._ var idx = 0 for ((link, dataset) <- Seq(("identity", datasetGaussianIdentity), ("log", datasetGaussianLog), ("inverse", datasetGaussianInverse))) { for (fitIntercept <- Seq(false, true)) { val trainer = new GeneralizedLinearRegression().setFamily("gaussian").setLink(link) .setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction").setTol(1e-3) val model = trainer.fit(dataset) val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1)) assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with gaussian family, " + s"$link link and fitIntercept = $fitIntercept.") val familyLink = FamilyAndLink(trainer) testTransformer[(Double, Vector)](dataset, model, "features", "prediction", "linkPrediction") { case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) => val eta = BLAS.dot(features, model.coefficients) + model.intercept val prediction2 = familyLink.fitted(eta) val linkPrediction2 = eta assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " + s"gaussian family, $link link and fitIntercept = $fitIntercept.") assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " + s"GLM with gaussian family, $link link and fitIntercept = $fitIntercept.") } idx += 1 } } } test("generalized linear regression: gaussian family against glmnet") { /* R code: library(glmnet) data <- read.csv("path", header=FALSE) label = data$V1 features = as.matrix(data.frame(data$V2, data$V3)) for (intercept in c(FALSE, TRUE)) { for (lambda in c(0.0, 0.1, 1.0)) { model <- 
glmnet(features, label, family="gaussian", intercept=intercept, lambda=lambda, alpha=0, thresh=1E-14) print(as.vector(coef(model))) } } [1] 0.0000000 2.2958757 0.8088521 [1] 0.0000000 2.2128149 0.8310136 [1] 0.0000000 1.7174260 0.9611137 [1] 2.5009266 2.1997901 0.5999522 [1] 3.1113269 2.0932659 0.5712717 [1] 6.7604302 1.4578902 0.3994153 */ val expected = Seq( Vectors.dense(0.0, 2.2958757, 0.8088521), Vectors.dense(0.0, 2.2128149, 0.8310136), Vectors.dense(0.0, 1.7174260, 0.9611137), Vectors.dense(2.5009266, 2.1997901, 0.5999522), Vectors.dense(3.1113269, 2.0932659, 0.5712717), Vectors.dense(6.7604302, 1.4578902, 0.3994153)) var idx = 0 for (fitIntercept <- Seq(false, true); regParam <- Seq(0.0, 0.1, 1.0)) { val trainer = new GeneralizedLinearRegression().setFamily("gaussian") .setFitIntercept(fitIntercept).setRegParam(regParam).setTol(1e-3) val model = trainer.fit(datasetGaussianIdentity) val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1)) assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with gaussian family, " + s"fitIntercept = $fitIntercept and regParam = $regParam.") idx += 1 } } test("generalized linear regression: binomial family against glm") { /* R code: f1 <- data$V1 ~ data$V2 + data$V3 + data$V4 + data$V5 - 1 f2 <- data$V1 ~ data$V2 + data$V3 + data$V4 + data$V5 data <- read.csv("path", header=FALSE) for (formula in c(f1, f2)) { model <- glm(formula, family="binomial", data=data) print(as.vector(coef(model))) } [1] -0.3560284 1.3010002 -0.3570805 -0.7406762 [1] 2.8367406 -0.5896187 0.8931655 -0.3925169 -0.7996989 for (formula in c(f1, f2)) { model <- glm(formula, family=binomial(link=probit), data=data) print(as.vector(coef(model))) } [1] -0.2134390 0.7800646 -0.2144267 -0.4438358 [1] 1.6995366 -0.3524694 0.5332651 -0.2352985 -0.4780850 for (formula in c(f1, f2)) { model <- glm(formula, family=binomial(link=cloglog), data=data) print(as.vector(coef(model))) } [1] -0.2832198 0.8434144 -0.2524727 -0.5293452 [1] 1.5063590 -0.4038015 0.6133664 -0.2687882 -0.5541758 */ val expected = Seq( Vectors.dense(0.0, -0.3560284, 1.3010002, -0.3570805, -0.7406762), Vectors.dense(2.8367406, -0.5896187, 0.8931655, -0.3925169, -0.7996989), Vectors.dense(0.0, -0.2134390, 0.7800646, -0.2144267, -0.4438358), Vectors.dense(1.6995366, -0.3524694, 0.5332651, -0.2352985, -0.4780850), Vectors.dense(0.0, -0.2832198, 0.8434144, -0.2524727, -0.5293452), Vectors.dense(1.5063590, -0.4038015, 0.6133664, -0.2687882, -0.5541758)) import GeneralizedLinearRegression._ var idx = 0 for ((link, dataset) <- Seq(("logit", datasetBinomial), ("probit", datasetBinomial), ("cloglog", datasetBinomial))) { for (fitIntercept <- Seq(false, true)) { val trainer = new GeneralizedLinearRegression().setFamily("binomial").setLink(link) .setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction").setTol(1e-3) val model = trainer.fit(dataset) val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1), model.coefficients(2), model.coefficients(3)) assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with binomial family, " + s"$link link and fitIntercept = $fitIntercept.") val familyLink = FamilyAndLink(trainer) testTransformer[(Double, Vector)](dataset, model, "features", "prediction", "linkPrediction") { case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) => val eta = BLAS.dot(features, model.coefficients) + model.intercept val prediction2 = familyLink.fitted(eta) val linkPrediction2 = eta 
assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " + s"binomial family, $link link and fitIntercept = $fitIntercept.") assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " + s"GLM with binomial family, $link link and fitIntercept = $fitIntercept.") } idx += 1 } } } test("generalized linear regression: poisson family against glm") { /* R code: f1 <- data$V1 ~ data$V2 + data$V3 - 1 f2 <- data$V1 ~ data$V2 + data$V3 data <- read.csv("path", header=FALSE) for (formula in c(f1, f2)) { model <- glm(formula, family="poisson", data=data) print(as.vector(coef(model))) } [1] 0.22999393 0.08047088 [1] 0.25022353 0.21998599 0.05998621 data <- read.csv("path", header=FALSE) for (formula in c(f1, f2)) { model <- glm(formula, family=poisson(link=identity), data=data) print(as.vector(coef(model))) } [1] 2.2929501 0.8119415 [1] 2.5012730 2.1999407 0.5999107 data <- read.csv("path", header=FALSE) for (formula in c(f1, f2)) { model <- glm(formula, family=poisson(link=sqrt), data=data) print(as.vector(coef(model))) } [1] 2.2958947 0.8090515 [1] 2.5000480 2.1999972 0.5999968 */ val expected = Seq( Vectors.dense(0.0, 0.22999393, 0.08047088), Vectors.dense(0.25022353, 0.21998599, 0.05998621), Vectors.dense(0.0, 2.2929501, 0.8119415), Vectors.dense(2.5012730, 2.1999407, 0.5999107), Vectors.dense(0.0, 2.2958947, 0.8090515), Vectors.dense(2.5000480, 2.1999972, 0.5999968)) import GeneralizedLinearRegression._ var idx = 0 for ((link, dataset) <- Seq(("log", datasetPoissonLog), ("identity", datasetPoissonIdentity), ("sqrt", datasetPoissonSqrt))) { for (fitIntercept <- Seq(false, true)) { val trainer = new GeneralizedLinearRegression().setFamily("poisson").setLink(link) .setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction").setTol(1e-3) val model = trainer.fit(dataset) val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1)) assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with poisson family, " + s"$link link and fitIntercept = $fitIntercept.") val familyLink = FamilyAndLink(trainer) testTransformer[(Double, Vector)](dataset, model, "features", "prediction", "linkPrediction") { case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) => val eta = BLAS.dot(features, model.coefficients) + model.intercept val prediction2 = familyLink.fitted(eta) val linkPrediction2 = eta assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " + s"poisson family, $link link and fitIntercept = $fitIntercept.") assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " + s"GLM with poisson family, $link link and fitIntercept = $fitIntercept.") } idx += 1 } } } test("generalized linear regression: poisson family against glm (with zero values)") { /* R code: f1 <- data$V1 ~ data$V2 + data$V3 - 1 f2 <- data$V1 ~ data$V2 + data$V3 data <- read.csv("path", header=FALSE) for (formula in c(f1, f2)) { model <- glm(formula, family="poisson", data=data) print(as.vector(coef(model))) } [1] -0.0457441 -0.6833928 [1] 1.8121235 -0.1747493 -0.5815417 R code for deivance calculation: data = cbind(y=c(0,1,0,0,0,1), x1=c(18, 12, 15, 13, 15, 16), x2=c(1,0,0,2,1,1)) summary(glm(y~x1+x2, family=poisson, data=data.frame(data)))$deviance [1] 3.70055 summary(glm(y~x1+x2-1, family=poisson, data=data.frame(data)))$deviance [1] 3.809296 */ val expected = Seq( Vectors.dense(0.0, -0.0457441, -0.6833928), Vectors.dense(1.8121235, -0.1747493, -0.5815417)) val residualDeviancesR = 
Array(3.809296, 3.70055) var idx = 0 val link = "log" val dataset = datasetPoissonLogWithZero for (fitIntercept <- Seq(false, true)) { val trainer = new GeneralizedLinearRegression().setFamily("poisson").setLink(link) .setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction").setTol(1e-3) val model = trainer.fit(dataset) val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1)) assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with poisson family, " + s"$link link and fitIntercept = $fitIntercept (with zero values).") assert(model.summary.deviance ~== residualDeviancesR(idx) absTol 1E-3) idx += 1 } } test("generalized linear regression: gamma family against glm") { /* R code: f1 <- data$V1 ~ data$V2 + data$V3 - 1 f2 <- data$V1 ~ data$V2 + data$V3 data <- read.csv("path", header=FALSE) for (formula in c(f1, f2)) { model <- glm(formula, family="Gamma", data=data) print(as.vector(coef(model))) } [1] 2.3392419 0.8058058 [1] 2.3507700 2.2533574 0.6042991 data <- read.csv("path", header=FALSE) for (formula in c(f1, f2)) { model <- glm(formula, family=Gamma(link=identity), data=data) print(as.vector(coef(model))) } [1] 2.2908883 0.8147796 [1] 2.5002406 2.1998346 0.6000059 data <- read.csv("path", header=FALSE) for (formula in c(f1, f2)) { model <- glm(formula, family=Gamma(link=log), data=data) print(as.vector(coef(model))) } [1] 0.22958970 0.08091066 [1] 0.25003210 0.21996957 0.06000215 */ val expected = Seq( Vectors.dense(0.0, 2.3392419, 0.8058058), Vectors.dense(2.3507700, 2.2533574, 0.6042991), Vectors.dense(0.0, 2.2908883, 0.8147796), Vectors.dense(2.5002406, 2.1998346, 0.6000059), Vectors.dense(0.0, 0.22958970, 0.08091066), Vectors.dense(0.25003210, 0.21996957, 0.06000215)) import GeneralizedLinearRegression._ var idx = 0 for ((link, dataset) <- Seq(("inverse", datasetGammaInverse), ("identity", datasetGammaIdentity), ("log", datasetGammaLog))) { for (fitIntercept <- Seq(false, true)) { val trainer = new GeneralizedLinearRegression().setFamily("Gamma").setLink(link) .setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction").setTol(1e-3) val model = trainer.fit(dataset) val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1)) assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with gamma family, " + s"$link link and fitIntercept = $fitIntercept.") val familyLink = FamilyAndLink(trainer) testTransformer[(Double, Vector)](dataset, model, "features", "prediction", "linkPrediction") { case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) => val eta = BLAS.dot(features, model.coefficients) + model.intercept val prediction2 = familyLink.fitted(eta) val linkPrediction2 = eta assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " + s"gamma family, $link link and fitIntercept = $fitIntercept.") assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " + s"GLM with gamma family, $link link and fitIntercept = $fitIntercept.") } idx += 1 } } } test("generalized linear regression: tweedie family against glm") { /* R code: library(statmod) df <- as.data.frame(matrix(c( 1.0, 1.0, 0.0, 5.0, 0.5, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, 1.0, 2.0, 1.0, 3.0, 3.0), 4, 4, byrow = TRUE)) f1 <- V1 ~ -1 + V3 + V4 f2 <- V1 ~ V3 + V4 for (f in c(f1, f2)) { for (lp in c(0, 1, -1)) for (vp in c(1.6, 2.5)) { model <- glm(f, df, family = tweedie(var.power = vp, link.power = lp)) print(as.vector(coef(model))) } } [1] 0.1496480 -0.0122283 [1] 
0.1373567 -0.0120673 [1] 0.3919109 0.1846094 [1] 0.3684426 0.1810662 [1] 0.1759887 0.2195818 [1] 0.1108561 0.2059430 [1] -1.3163732 0.4378139 0.2464114 [1] -1.4396020 0.4817364 0.2680088 [1] -0.7090230 0.6256309 0.3294324 [1] -0.9524928 0.7304267 0.3792687 [1] 2.1188978 -0.3360519 -0.2067023 [1] 2.1659028 -0.3499170 -0.2128286 */ val datasetTweedie = Seq( Instance(1.0, 1.0, Vectors.dense(0.0, 5.0)), Instance(0.5, 1.0, Vectors.dense(1.0, 2.0)), Instance(1.0, 1.0, Vectors.dense(2.0, 1.0)), Instance(2.0, 1.0, Vectors.dense(3.0, 3.0)) ).toDF() val expected = Seq( Vectors.dense(0, 0.149648, -0.0122283), Vectors.dense(0, 0.1373567, -0.0120673), Vectors.dense(0, 0.3919109, 0.1846094), Vectors.dense(0, 0.3684426, 0.1810662), Vectors.dense(0, 0.1759887, 0.2195818), Vectors.dense(0, 0.1108561, 0.205943), Vectors.dense(-1.3163732, 0.4378139, 0.2464114), Vectors.dense(-1.439602, 0.4817364, 0.2680088), Vectors.dense(-0.709023, 0.6256309, 0.3294324), Vectors.dense(-0.9524928, 0.7304267, 0.3792687), Vectors.dense(2.1188978, -0.3360519, -0.2067023), Vectors.dense(2.1659028, -0.349917, -0.2128286)) import GeneralizedLinearRegression._ var idx = 0 for (fitIntercept <- Seq(false, true); linkPower <- Seq(0.0, 1.0, -1.0); variancePower <- Seq(1.6, 2.5)) { val trainer = new GeneralizedLinearRegression().setFamily("tweedie") .setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction") .setVariancePower(variancePower).setLinkPower(linkPower).setTol(1e-4) val model = trainer.fit(datasetTweedie) val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1)) assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with tweedie family, " + s"linkPower = $linkPower, fitIntercept = $fitIntercept " + s"and variancePower = $variancePower.") val familyLink = FamilyAndLink(trainer) testTransformer[(Double, Double, Vector)](datasetTweedie, model, "features", "prediction", "linkPrediction") { case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) => val eta = BLAS.dot(features, model.coefficients) + model.intercept val prediction2 = familyLink.fitted(eta) val linkPrediction2 = eta assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " + s"tweedie family, linkPower = $linkPower, fitIntercept = $fitIntercept " + s"and variancePower = $variancePower.") assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " + s"GLM with tweedie family, linkPower = $linkPower, fitIntercept = $fitIntercept " + s"and variancePower = $variancePower.") } idx += 1 } } test("generalized linear regression: tweedie family against glm (default power link)") { /* R code: library(statmod) df <- as.data.frame(matrix(c( 1.0, 1.0, 0.0, 5.0, 0.5, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, 1.0, 2.0, 1.0, 3.0, 3.0), 4, 4, byrow = TRUE)) var.power <- c(0, 1, 2, 1.5) f1 <- V1 ~ -1 + V3 + V4 f2 <- V1 ~ V3 + V4 for (f in c(f1, f2)) { for (vp in var.power) { model <- glm(f, df, family = tweedie(var.power = vp)) print(as.vector(coef(model))) } } [1] 0.4310345 0.1896552 [1] 0.15776482 -0.01189032 [1] 0.1468853 0.2116519 [1] 0.2282601 0.2132775 [1] -0.5158730 0.5555556 0.2936508 [1] -1.2689559 0.4230934 0.2388465 [1] 2.137852 -0.341431 -0.209090 [1] 1.5953393 -0.1884985 -0.1106335 */ val datasetTweedie = Seq( Instance(1.0, 1.0, Vectors.dense(0.0, 5.0)), Instance(0.5, 1.0, Vectors.dense(1.0, 2.0)), Instance(1.0, 1.0, Vectors.dense(2.0, 1.0)), Instance(2.0, 1.0, Vectors.dense(3.0, 3.0)) ).toDF() val expected = Seq( Vectors.dense(0, 0.4310345, 0.1896552), 
Vectors.dense(0, 0.15776482, -0.01189032), Vectors.dense(0, 0.1468853, 0.2116519), Vectors.dense(0, 0.2282601, 0.2132775), Vectors.dense(-0.515873, 0.5555556, 0.2936508), Vectors.dense(-1.2689559, 0.4230934, 0.2388465), Vectors.dense(2.137852, -0.341431, -0.20909), Vectors.dense(1.5953393, -0.1884985, -0.1106335)) import GeneralizedLinearRegression._ var idx = 0 for (fitIntercept <- Seq(false, true)) { for (variancePower <- Seq(0.0, 1.0, 2.0, 1.5)) { val trainer = new GeneralizedLinearRegression().setFamily("tweedie") .setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction") .setVariancePower(variancePower).setTol(1e-3) val model = trainer.fit(datasetTweedie) val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1)) assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with tweedie family, " + s"fitIntercept = $fitIntercept and variancePower = $variancePower.") val familyLink = FamilyAndLink(trainer) testTransformer[(Double, Double, Vector)](datasetTweedie, model, "features", "prediction", "linkPrediction") { case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) => val eta = BLAS.dot(features, model.coefficients) + model.intercept val prediction2 = familyLink.fitted(eta) val linkPrediction2 = eta assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " + s"tweedie family, fitIntercept = $fitIntercept " + s"and variancePower = $variancePower.") assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " + s"GLM with tweedie family, fitIntercept = $fitIntercept " + s"and variancePower = $variancePower.") } idx += 1 } } } test("generalized linear regression: intercept only") { /* R code: library(statmod) y <- c(1.0, 0.5, 0.7, 0.3) w <- c(1, 2, 3, 4) for (fam in list(binomial(), Gamma(), gaussian(), poisson(), tweedie(1.6))) { model1 <- glm(y ~ 1, family = fam) model2 <- glm(y ~ 1, family = fam, weights = w) print(as.vector(c(coef(model1), coef(model2)))) } [1] 0.5108256 0.1201443 [1] 1.600000 1.886792 [1] 0.625 0.530 [1] -0.4700036 -0.6348783 [1] 1.325782 1.463641 */ val dataset = Seq( Instance(1.0, 1.0, Vectors.zeros(0)), Instance(0.5, 2.0, Vectors.zeros(0)), Instance(0.7, 3.0, Vectors.zeros(0)), Instance(0.3, 4.0, Vectors.zeros(0)) ).toDF() val expected = Seq(0.5108256, 0.1201443, 1.600000, 1.886792, 0.625, 0.530, -0.4700036, -0.6348783, 1.325782, 1.463641) var idx = 0 for (family <- GeneralizedLinearRegression.supportedFamilyNames.sortWith(_ < _)) { for (useWeight <- Seq(false, true)) { val trainer = new GeneralizedLinearRegression().setFamily(family) if (useWeight) trainer.setWeightCol("weight") if (family == "tweedie") trainer.setVariancePower(1.6) val model = trainer.fit(dataset) val actual = model.intercept assert(actual ~== expected(idx) absTol 1E-3, "Model mismatch: intercept only GLM with " + s"useWeight = $useWeight and family = $family.") assert(model.coefficients === new DenseVector(Array.empty[Double])) idx += 1 } } // throw exception for empty model val trainer = new GeneralizedLinearRegression().setFitIntercept(false) withClue("Specified model is empty with neither intercept nor feature") { intercept[IllegalArgumentException] { trainer.fit(dataset) } } } test("generalized linear regression with weight and offset") { /* R code: library(statmod) df <- as.data.frame(matrix(c( 0.2, 1.0, 2.0, 0.0, 5.0, 0.5, 2.1, 0.5, 1.0, 2.0, 0.9, 0.4, 1.0, 2.0, 1.0, 0.7, 0.7, 0.0, 3.0, 3.0), 4, 5, byrow = TRUE)) families <- list(binomial, Gamma, gaussian, poisson, 
tweedie(1.5)) f1 <- V1 ~ -1 + V4 + V5 f2 <- V1 ~ V4 + V5 for (f in c(f1, f2)) { for (fam in families) { model <- glm(f, df, family = fam, weights = V2, offset = V3) print(as.vector(coef(model))) } } [1] 0.9419107 -0.6864404 [1] -0.2869094 0.7857710 [1] 0.5169222 -0.3344444 [1] 0.1812436 -0.6568422 [1] 0.1055254 0.2979113 [1] -0.2147117 0.9911750 -0.6356096 [1] 0.3390397 -0.3406099 0.6870259 [1] -0.05990345 0.53188982 -0.32118415 [1] -1.5616130 0.6646470 -0.3192581 [1] 0.3665034 0.1039416 0.1484616 */ val dataset = Seq( OffsetInstance(0.2, 1.0, 2.0, Vectors.dense(0.0, 5.0)), OffsetInstance(0.5, 2.1, 0.5, Vectors.dense(1.0, 2.0)), OffsetInstance(0.9, 0.4, 1.0, Vectors.dense(2.0, 1.0)), OffsetInstance(0.7, 0.7, 0.0, Vectors.dense(3.0, 3.0)) ).toDF() val expected = Seq( Vectors.dense(0, 0.9419107, -0.6864404), Vectors.dense(0, -0.2869094, 0.785771), Vectors.dense(0, 0.5169222, -0.3344444), Vectors.dense(0, 0.1812436, -0.6568422), Vectors.dense(0, 0.1055254, 0.2979113), Vectors.dense(-0.2147117, 0.991175, -0.6356096), Vectors.dense(0.3390397, -0.3406099, 0.6870259), Vectors.dense(-0.05990345, 0.53188982, -0.32118415), Vectors.dense(-1.561613, 0.664647, -0.3192581), Vectors.dense(0.3665034, 0.1039416, 0.1484616)) import GeneralizedLinearRegression._ var idx = 0 for (fitIntercept <- Seq(false, true)) { for (family <- GeneralizedLinearRegression.supportedFamilyNames.sortWith(_ < _)) { val trainer = new GeneralizedLinearRegression().setFamily(family) .setFitIntercept(fitIntercept).setOffsetCol("offset") .setWeightCol("weight").setLinkPredictionCol("linkPrediction") if (family == "tweedie") trainer.setVariancePower(1.5) val model = trainer.fit(dataset) val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1)) assert(actual ~= expected(idx) absTol 1e-4, s"Model mismatch: GLM with family = $family," + s" and fitIntercept = $fitIntercept.") val familyLink = FamilyAndLink(trainer) testTransformer[(Double, Double, Double, Vector)](dataset, model, "features", "offset", "prediction", "linkPrediction") { case Row(features: DenseVector, offset: Double, prediction1: Double, linkPrediction1: Double) => val eta = BLAS.dot(features, model.coefficients) + model.intercept + offset val prediction2 = familyLink.fitted(eta) val linkPrediction2 = eta assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " + s"family = $family, and fitIntercept = $fitIntercept.") assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " + s"GLM with family = $family, and fitIntercept = $fitIntercept.") } idx += 1 } } } test("glm summary: gaussian family with weight and offset") { /* R code: A <- matrix(c(0, 1, 2, 3, 5, 7, 11, 13), 4, 2) b <- c(17, 19, 23, 29) w <- c(1, 2, 3, 4) off <- c(2, 3, 1, 4) df <- as.data.frame(cbind(A, b)) */ val dataset = Seq( OffsetInstance(17.0, 1.0, 2.0, Vectors.dense(0.0, 5.0).toSparse), OffsetInstance(19.0, 2.0, 3.0, Vectors.dense(1.0, 7.0)), OffsetInstance(23.0, 3.0, 1.0, Vectors.dense(2.0, 11.0)), OffsetInstance(29.0, 4.0, 4.0, Vectors.dense(3.0, 13.0)) ).toDF() /* R code: model <- glm(formula = "b ~ .", family = "gaussian", data = df, weights = w, offset = off) summary(model) Deviance Residuals: 1 2 3 4 0.9600 -0.6788 -0.5543 0.4800 Coefficients: Estimate Std. 
Error t value Pr(>|t|) (Intercept) 5.5400 4.8040 1.153 0.455 V1 -0.9600 2.7782 -0.346 0.788 V2 1.7000 0.9798 1.735 0.333 (Dispersion parameter for gaussian family taken to be 1.92) Null deviance: 152.10 on 3 degrees of freedom Residual deviance: 1.92 on 1 degrees of freedom AIC: 13.238 Number of Fisher Scoring iterations: 2 residuals(model, type = "pearson") 1 2 3 4 0.9600000 -0.6788225 -0.5542563 0.4800000 residuals(model, type = "working") 1 2 3 4 0.96 -0.48 -0.32 0.24 residuals(model, type = "response") 1 2 3 4 0.96 -0.48 -0.32 0.24 */ val trainer = new GeneralizedLinearRegression() .setWeightCol("weight").setOffsetCol("offset") val model = trainer.fit(dataset) val coefficientsR = Vectors.dense(Array(-0.96, 1.7)) val interceptR = 5.54 val devianceResidualsR = Array(0.96, -0.67882, -0.55426, 0.48) val pearsonResidualsR = Array(0.96, -0.67882, -0.55426, 0.48) val workingResidualsR = Array(0.96, -0.48, -0.32, 0.24) val responseResidualsR = Array(0.96, -0.48, -0.32, 0.24) val seCoefR = Array(2.7782, 0.9798, 4.804) val tValsR = Array(-0.34555, 1.73506, 1.15321) val pValsR = Array(0.78819, 0.33286, 0.45478) val dispersionR = 1.92 val nullDevianceR = 152.1 val residualDevianceR = 1.92 val residualDegreeOfFreedomNullR = 3 val residualDegreeOfFreedomR = 1 val aicR = 13.23758 assert(model.hasSummary) val summary = model.summary assert(summary.isInstanceOf[GeneralizedLinearRegressionTrainingSummary]) val devianceResiduals = summary.residuals() .select(col("devianceResiduals")) .collect() .map(_.getDouble(0)) val pearsonResiduals = summary.residuals("pearson") .select(col("pearsonResiduals")) .collect() .map(_.getDouble(0)) val workingResiduals = summary.residuals("working") .select(col("workingResiduals")) .collect() .map(_.getDouble(0)) val responseResiduals = summary.residuals("response") .select(col("responseResiduals")) .collect() .map(_.getDouble(0)) assert(model.coefficients ~== coefficientsR absTol 1E-3) assert(model.intercept ~== interceptR absTol 1E-3) devianceResiduals.zip(devianceResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } pearsonResiduals.zip(pearsonResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } workingResiduals.zip(workingResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } responseResiduals.zip(responseResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } summary.coefficientStandardErrors.zip(seCoefR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) } summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) } summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) } assert(summary.dispersion ~== dispersionR absTol 1E-3) assert(summary.nullDeviance ~== nullDevianceR absTol 1E-3) assert(summary.deviance ~== residualDevianceR absTol 1E-3) assert(summary.residualDegreeOfFreedom === residualDegreeOfFreedomR) assert(summary.residualDegreeOfFreedomNull === residualDegreeOfFreedomNullR) assert(summary.aic ~== aicR absTol 1E-3) assert(summary.solver === "irls") val summary2: GeneralizedLinearRegressionSummary = model.evaluate(dataset) assert(summary.predictions.columns.toSet === summary2.predictions.columns.toSet) assert(summary.predictionCol === summary2.predictionCol) assert(summary.rank === summary2.rank) assert(summary.degreesOfFreedom === summary2.degreesOfFreedom) assert(summary.residualDegreeOfFreedom === summary2.residualDegreeOfFreedom) assert(summary.residualDegreeOfFreedomNull === summary2.residualDegreeOfFreedomNull) assert(summary.nullDeviance === summary2.nullDeviance) 
assert(summary.deviance === summary2.deviance) assert(summary.dispersion === summary2.dispersion) assert(summary.aic === summary2.aic) } test("glm summary: binomial family with weight and offset") { /* R code: df <- as.data.frame(matrix(c( 0.2, 1.0, 2.0, 0.0, 5.0, 0.5, 2.1, 0.5, 1.0, 2.0, 0.9, 0.4, 1.0, 2.0, 1.0, 0.7, 0.7, 0.0, 3.0, 3.0), 4, 5, byrow = TRUE)) */ val dataset = Seq( OffsetInstance(0.2, 1.0, 2.0, Vectors.dense(0.0, 5.0)), OffsetInstance(0.5, 2.1, 0.5, Vectors.dense(1.0, 2.0)), OffsetInstance(0.9, 0.4, 1.0, Vectors.dense(2.0, 1.0)), OffsetInstance(0.7, 0.7, 0.0, Vectors.dense(3.0, 3.0)) ).toDF() /* R code: model <- glm(formula = "V1 ~ V4 + V5", family = "binomial", data = df, weights = V2, offset = V3) summary(model) Deviance Residuals: 1 2 3 4 0.002584 -0.003800 0.012478 -0.001796 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) -0.2147 3.5687 -0.060 0.952 V4 0.9912 1.2344 0.803 0.422 V5 -0.6356 0.9669 -0.657 0.511 (Dispersion parameter for binomial family taken to be 1) Null deviance: 2.17560881 on 3 degrees of freedom Residual deviance: 0.00018005 on 1 degrees of freedom AIC: 10.245 Number of Fisher Scoring iterations: 4 residuals(model, type = "pearson") 1 2 3 4 0.002586113 -0.003799744 0.012372235 -0.001796892 residuals(model, type = "working") 1 2 3 4 0.006477857 -0.005244163 0.063541250 -0.004691064 residuals(model, type = "response") 1 2 3 4 0.0010324375 -0.0013110318 0.0060225522 -0.0009832738 */ val trainer = new GeneralizedLinearRegression() .setFamily("Binomial") .setWeightCol("weight") .setOffsetCol("offset") val model = trainer.fit(dataset) val coefficientsR = Vectors.dense(Array(0.99117, -0.63561)) val interceptR = -0.21471 val devianceResidualsR = Array(0.00258, -0.0038, 0.01248, -0.0018) val pearsonResidualsR = Array(0.00259, -0.0038, 0.01237, -0.0018) val workingResidualsR = Array(0.00648, -0.00524, 0.06354, -0.00469) val responseResidualsR = Array(0.00103, -0.00131, 0.00602, -0.00098) val seCoefR = Array(1.23439, 0.9669, 3.56866) val tValsR = Array(0.80297, -0.65737, -0.06017) val pValsR = Array(0.42199, 0.51094, 0.95202) val dispersionR = 1.0 val nullDevianceR = 2.17561 val residualDevianceR = 0.00018 val residualDegreeOfFreedomNullR = 3 val residualDegreeOfFreedomR = 1 val aicR = 10.24453 val summary = model.summary val devianceResiduals = summary.residuals() .select(col("devianceResiduals")) .collect() .map(_.getDouble(0)) val pearsonResiduals = summary.residuals("pearson") .select(col("pearsonResiduals")) .collect() .map(_.getDouble(0)) val workingResiduals = summary.residuals("working") .select(col("workingResiduals")) .collect() .map(_.getDouble(0)) val responseResiduals = summary.residuals("response") .select(col("responseResiduals")) .collect() .map(_.getDouble(0)) assert(model.coefficients ~== coefficientsR absTol 1E-3) assert(model.intercept ~== interceptR absTol 1E-3) devianceResiduals.zip(devianceResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } pearsonResiduals.zip(pearsonResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } workingResiduals.zip(workingResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } responseResiduals.zip(responseResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } summary.coefficientStandardErrors.zip(seCoefR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) } summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) } summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) } assert(summary.dispersion === dispersionR) 
assert(summary.nullDeviance ~== nullDevianceR absTol 1E-3) assert(summary.deviance ~== residualDevianceR absTol 1E-3) assert(summary.residualDegreeOfFreedom === residualDegreeOfFreedomR) assert(summary.residualDegreeOfFreedomNull === residualDegreeOfFreedomNullR) assert(summary.aic ~== aicR absTol 1E-3) assert(summary.solver === "irls") } test("glm summary: poisson family with weight and offset") { /* R code: A <- matrix(c(0, 1, 2, 3, 5, 7, 11, 13), 4, 2) b <- c(2, 8, 3, 9) w <- c(1, 2, 3, 4) off <- c(2, 3, 1, 4) df <- as.data.frame(cbind(A, b)) */ val dataset = Seq( OffsetInstance(2.0, 1.0, 2.0, Vectors.dense(0.0, 5.0).toSparse), OffsetInstance(8.0, 2.0, 3.0, Vectors.dense(1.0, 7.0)), OffsetInstance(3.0, 3.0, 1.0, Vectors.dense(2.0, 11.0)), OffsetInstance(9.0, 4.0, 4.0, Vectors.dense(3.0, 13.0)) ).toDF() /* R code: model <- glm(formula = "b ~ .", family = "poisson", data = df, weights = w, offset = off) summary(model) Deviance Residuals: 1 2 3 4 -2.0480 1.2315 1.8293 -0.7107 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) -4.5678 1.9625 -2.328 0.0199 V1 -2.8784 1.1683 -2.464 0.0137 V2 0.8859 0.4170 2.124 0.0336 (Dispersion parameter for poisson family taken to be 1) Null deviance: 22.5585 on 3 degrees of freedom Residual deviance: 9.5622 on 1 degrees of freedom AIC: 51.242 Number of Fisher Scoring iterations: 5 residuals(model, type = "pearson") 1 2 3 4 -1.7480418 1.3037611 2.0750099 -0.6972966 residuals(model, type = "working") 1 2 3 4 -0.6891489 0.3833588 0.9710682 -0.1096590 residuals(model, type = "response") 1 2 3 4 -4.433948 2.216974 1.477983 -1.108487 */ val trainer = new GeneralizedLinearRegression() .setFamily("Poisson") .setWeightCol("weight") .setOffsetCol("offset") val model = trainer.fit(dataset) val coefficientsR = Vectors.dense(Array(-2.87843, 0.88589)) val interceptR = -4.56784 val devianceResidualsR = Array(-2.04796, 1.23149, 1.82933, -0.71066) val pearsonResidualsR = Array(-1.74804, 1.30376, 2.07501, -0.6973) val workingResidualsR = Array(-0.68915, 0.38336, 0.97107, -0.10966) val responseResidualsR = Array(-4.43395, 2.21697, 1.47798, -1.10849) val seCoefR = Array(1.16826, 0.41703, 1.96249) val tValsR = Array(-2.46387, 2.12428, -2.32757) val pValsR = Array(0.01374, 0.03365, 0.01993) val dispersionR = 1.0 val nullDevianceR = 22.55853 val residualDevianceR = 9.5622 val residualDegreeOfFreedomNullR = 3 val residualDegreeOfFreedomR = 1 val aicR = 51.24218 val summary = model.summary val devianceResiduals = summary.residuals() .select(col("devianceResiduals")) .collect() .map(_.getDouble(0)) val pearsonResiduals = summary.residuals("pearson") .select(col("pearsonResiduals")) .collect() .map(_.getDouble(0)) val workingResiduals = summary.residuals("working") .select(col("workingResiduals")) .collect() .map(_.getDouble(0)) val responseResiduals = summary.residuals("response") .select(col("responseResiduals")) .collect() .map(_.getDouble(0)) assert(model.coefficients ~== coefficientsR absTol 1E-3) assert(model.intercept ~== interceptR absTol 1E-3) devianceResiduals.zip(devianceResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } pearsonResiduals.zip(pearsonResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } workingResiduals.zip(workingResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } responseResiduals.zip(responseResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } summary.coefficientStandardErrors.zip(seCoefR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) } summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 
absTol 1E-3) } summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) } assert(summary.dispersion === dispersionR) assert(summary.nullDeviance ~== nullDevianceR absTol 1E-3) assert(summary.deviance ~== residualDevianceR absTol 1E-3) assert(summary.residualDegreeOfFreedom === residualDegreeOfFreedomR) assert(summary.residualDegreeOfFreedomNull === residualDegreeOfFreedomNullR) assert(summary.aic ~== aicR absTol 1E-3) assert(summary.solver === "irls") } test("glm summary: gamma family with weight and offset") { /* R code: A <- matrix(c(0, 5, 1, 2, 2, 1, 3, 3), 4, 2, byrow = TRUE) b <- c(1, 2, 1, 2) w <- c(1, 2, 3, 4) off <- c(0, 0.5, 1, 0) df <- as.data.frame(cbind(A, b)) */ val dataset = Seq( OffsetInstance(1.0, 1.0, 0.0, Vectors.dense(0.0, 5.0)), OffsetInstance(2.0, 2.0, 0.5, Vectors.dense(1.0, 2.0)), OffsetInstance(1.0, 3.0, 1.0, Vectors.dense(2.0, 1.0)), OffsetInstance(2.0, 4.0, 0.0, Vectors.dense(3.0, 3.0)) ).toDF() /* R code: model <- glm(formula = "b ~ .", family = "Gamma", data = df, weights = w, offset = off) summary(model) Deviance Residuals: 1 2 3 4 -0.17095 0.19867 -0.23604 0.03241 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -0.56474 0.23866 -2.366 0.255 V1 0.07695 0.06931 1.110 0.467 V2 0.28068 0.07320 3.835 0.162 (Dispersion parameter for Gamma family taken to be 0.1212174) Null deviance: 2.02568 on 3 degrees of freedom Residual deviance: 0.12546 on 1 degrees of freedom AIC: 0.93388 Number of Fisher Scoring iterations: 4 residuals(model, type = "pearson") 1 2 3 4 -0.16134949 0.20807694 -0.22544551 0.03258777 residuals(model, type = "working") 1 2 3 4 0.135315831 -0.084390309 0.113219135 -0.008279688 residuals(model, type = "response") 1 2 3 4 -0.1923918 0.2565224 -0.1496381 0.0320653 */ val trainer = new GeneralizedLinearRegression() .setFamily("Gamma") .setWeightCol("weight") .setOffsetCol("offset") val model = trainer.fit(dataset) val coefficientsR = Vectors.dense(Array(0.07695, 0.28068)) val interceptR = -0.56474 val devianceResidualsR = Array(-0.17095, 0.19867, -0.23604, 0.03241) val pearsonResidualsR = Array(-0.16135, 0.20808, -0.22545, 0.03259) val workingResidualsR = Array(0.13532, -0.08439, 0.11322, -0.00828) val responseResidualsR = Array(-0.19239, 0.25652, -0.14964, 0.03207) val seCoefR = Array(0.06931, 0.0732, 0.23866) val tValsR = Array(1.11031, 3.83453, -2.3663) val pValsR = Array(0.46675, 0.16241, 0.25454) val dispersionR = 0.12122 val nullDevianceR = 2.02568 val residualDevianceR = 0.12546 val residualDegreeOfFreedomNullR = 3 val residualDegreeOfFreedomR = 1 val aicR = 0.93388 val summary = model.summary val devianceResiduals = summary.residuals() .select(col("devianceResiduals")) .collect() .map(_.getDouble(0)) val pearsonResiduals = summary.residuals("pearson") .select(col("pearsonResiduals")) .collect() .map(_.getDouble(0)) val workingResiduals = summary.residuals("working") .select(col("workingResiduals")) .collect() .map(_.getDouble(0)) val responseResiduals = summary.residuals("response") .select(col("responseResiduals")) .collect() .map(_.getDouble(0)) assert(model.coefficients ~== coefficientsR absTol 1E-3) assert(model.intercept ~== interceptR absTol 1E-3) devianceResiduals.zip(devianceResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } pearsonResiduals.zip(pearsonResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } workingResiduals.zip(workingResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } responseResiduals.zip(responseResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) 
} summary.coefficientStandardErrors.zip(seCoefR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) } summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) } summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) } assert(summary.dispersion ~== dispersionR absTol 1E-3) assert(summary.nullDeviance ~== nullDevianceR absTol 1E-3) assert(summary.deviance ~== residualDevianceR absTol 1E-3) assert(summary.residualDegreeOfFreedom === residualDegreeOfFreedomR) assert(summary.residualDegreeOfFreedomNull === residualDegreeOfFreedomNullR) assert(summary.aic ~== aicR absTol 1E-3) assert(summary.solver === "irls") } test("glm summary: tweedie family with weight and offset") { /* R code: df <- as.data.frame(matrix(c( 1.0, 1.0, 1.0, 0.0, 5.0, 0.5, 2.0, 3.0, 1.0, 2.0, 1.0, 3.0, 2.0, 2.0, 1.0, 0.0, 4.0, 0.0, 3.0, 3.0), 4, 5, byrow = TRUE)) */ val dataset = Seq( OffsetInstance(1.0, 1.0, 1.0, Vectors.dense(0.0, 5.0)), OffsetInstance(0.5, 2.0, 3.0, Vectors.dense(1.0, 2.0)), OffsetInstance(1.0, 3.0, 2.0, Vectors.dense(2.0, 1.0)), OffsetInstance(0.0, 4.0, 0.0, Vectors.dense(3.0, 3.0)) ).toDF() /* R code: library(statmod) model <- glm(V1 ~ V4 + V5, data = df, weights = V2, offset = V3, family = tweedie(var.power = 1.6, link.power = 0.0)) summary(model) Deviance Residuals: 1 2 3 4 0.8917 -2.1396 1.2252 -1.7946 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -0.03047 3.65000 -0.008 0.995 V4 -1.14577 1.41674 -0.809 0.567 V5 -0.36585 0.97065 -0.377 0.771 (Dispersion parameter for Tweedie family taken to be 6.334961) Null deviance: 12.784 on 3 degrees of freedom Residual deviance: 10.095 on 1 degrees of freedom AIC: NA Number of Fisher Scoring iterations: 18 residuals(model, type = "pearson") 1 2 3 4 1.1472554 -1.4642569 1.4935199 -0.8025842 residuals(model, type = "working") 1 2 3 4 1.3624928 -0.8322375 0.9894580 -1.0000000 residuals(model, type = "response") 1 2 3 4 0.57671828 -2.48040354 0.49735052 -0.01040646 */ val trainer = new GeneralizedLinearRegression() .setFamily("tweedie") .setVariancePower(1.6) .setLinkPower(0.0) .setWeightCol("weight") .setOffsetCol("offset") val model = trainer.fit(dataset) val coefficientsR = Vectors.dense(Array(-1.14577, -0.36585)) val interceptR = -0.03047 val devianceResidualsR = Array(0.89171, -2.13961, 1.2252, -1.79463) val pearsonResidualsR = Array(1.14726, -1.46426, 1.49352, -0.80258) val workingResidualsR = Array(1.36249, -0.83224, 0.98946, -1) val responseResidualsR = Array(0.57672, -2.4804, 0.49735, -0.01041) val seCoefR = Array(1.41674, 0.97065, 3.65) val tValsR = Array(-0.80873, -0.37691, -0.00835) val pValsR = Array(0.56707, 0.77053, 0.99468) val dispersionR = 6.33496 val nullDevianceR = 12.78358 val residualDevianceR = 10.09488 val residualDegreeOfFreedomNullR = 3 val residualDegreeOfFreedomR = 1 val summary = model.summary val devianceResiduals = summary.residuals() .select(col("devianceResiduals")) .collect() .map(_.getDouble(0)) val pearsonResiduals = summary.residuals("pearson") .select(col("pearsonResiduals")) .collect() .map(_.getDouble(0)) val workingResiduals = summary.residuals("working") .select(col("workingResiduals")) .collect() .map(_.getDouble(0)) val responseResiduals = summary.residuals("response") .select(col("responseResiduals")) .collect() .map(_.getDouble(0)) assert(model.coefficients ~== coefficientsR absTol 1E-3) assert(model.intercept ~== interceptR absTol 1E-3) devianceResiduals.zip(devianceResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } 
pearsonResiduals.zip(pearsonResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } workingResiduals.zip(workingResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } responseResiduals.zip(responseResidualsR).foreach { x => assert(x._1 ~== x._2 absTol 1E-3) } summary.coefficientStandardErrors.zip(seCoefR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) } summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) } summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) } assert(summary.dispersion ~== dispersionR absTol 1E-3) assert(summary.nullDeviance ~== nullDevianceR absTol 1E-3) assert(summary.deviance ~== residualDevianceR absTol 1E-3) assert(summary.residualDegreeOfFreedom === residualDegreeOfFreedomR) assert(summary.residualDegreeOfFreedomNull === residualDegreeOfFreedomNullR) assert(summary.solver === "irls") } test("glm handle collinear features") { val collinearInstances = Seq( Instance(1.0, 1.0, Vectors.dense(1.0, 2.0)), Instance(2.0, 1.0, Vectors.dense(2.0, 4.0)), Instance(3.0, 1.0, Vectors.dense(3.0, 6.0)), Instance(4.0, 1.0, Vectors.dense(4.0, 8.0)) ).toDF() val trainer = new GeneralizedLinearRegression() val model = trainer.fit(collinearInstances) // to make it clear that underlying WLS did not solve analytically intercept[UnsupportedOperationException] { model.summary.coefficientStandardErrors } intercept[UnsupportedOperationException] { model.summary.pValues } intercept[UnsupportedOperationException] { model.summary.tValues } } test("read/write") { def checkModelData( model: GeneralizedLinearRegressionModel, model2: GeneralizedLinearRegressionModel): Unit = { assert(model.intercept === model2.intercept) assert(model.coefficients.toArray === model2.coefficients.toArray) } val glr = new GeneralizedLinearRegression() testEstimatorAndModelReadWrite(glr, datasetPoissonLog, GeneralizedLinearRegressionSuite.allParamSettings, GeneralizedLinearRegressionSuite.allParamSettings, checkModelData) } test("should support all NumericType labels and weights, and not support other types") { val glr = new GeneralizedLinearRegression().setMaxIter(1) MLTestingUtils.checkNumericTypes[ GeneralizedLinearRegressionModel, GeneralizedLinearRegression]( glr, spark, isClassification = false) { (expected, actual) => assert(expected.intercept === actual.intercept) assert(expected.coefficients === actual.coefficients) } } test("glm accepts Dataset[LabeledPoint]") { val context = spark import context.implicits._ new GeneralizedLinearRegression() .setFamily("gaussian") .fit(datasetGaussianIdentity.as[LabeledPoint]) } test("glm summary: feature name") { // dataset1 with no attribute val dataset1 = Seq( Instance(2.0, 1.0, Vectors.dense(0.0, 5.0)), Instance(8.0, 2.0, Vectors.dense(1.0, 7.0)), Instance(3.0, 3.0, Vectors.dense(2.0, 11.0)), Instance(9.0, 4.0, Vectors.dense(3.0, 13.0)), Instance(2.0, 5.0, Vectors.dense(2.0, 3.0)) ).toDF() // dataset2 with attribute val datasetTmp = Seq( (2.0, 1.0, 0.0, 5.0), (8.0, 2.0, 1.0, 7.0), (3.0, 3.0, 2.0, 11.0), (9.0, 4.0, 3.0, 13.0), (2.0, 5.0, 2.0, 3.0) ).toDF("y", "w", "x1", "x2") val formula = new RFormula().setFormula("y ~ x1 + x2") val dataset2 = formula.fit(datasetTmp).transform(datasetTmp) val expectedFeature = Seq(Array("features_0", "features_1"), Array("x1", "x2")) var idx = 0 for (dataset <- Seq(dataset1, dataset2)) { val model = new GeneralizedLinearRegression().fit(dataset) model.summary.featureNames.zip(expectedFeature(idx)) .foreach{ x => assert(x._1 === x._2) } idx += 1 } } test("glm summary: coefficient 
with statistics") { /* R code: A <- matrix(c(0, 1, 2, 3, 2, 5, 7, 11, 13, 3), 5, 2) b <- c(2, 8, 3, 9, 2) df <- as.data.frame(cbind(A, b)) model <- glm(formula = "b ~ .", data = df) summary(model) Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 0.7903 4.0129 0.197 0.862 V1 0.2258 2.1153 0.107 0.925 V2 0.4677 0.5815 0.804 0.506 */ val dataset = Seq( Instance(2.0, 1.0, Vectors.dense(0.0, 5.0)), Instance(8.0, 2.0, Vectors.dense(1.0, 7.0)), Instance(3.0, 3.0, Vectors.dense(2.0, 11.0)), Instance(9.0, 4.0, Vectors.dense(3.0, 13.0)), Instance(2.0, 5.0, Vectors.dense(2.0, 3.0)) ).toDF() val expectedFeature = Seq(Array("features_0", "features_1"), Array("(Intercept)", "features_0", "features_1")) val expectedEstimate = Seq(Vectors.dense(0.2884, 0.538), Vectors.dense(0.7903, 0.2258, 0.4677)) val expectedStdError = Seq(Vectors.dense(1.724, 0.3787), Vectors.dense(4.0129, 2.1153, 0.5815)) var idx = 0 for (fitIntercept <- Seq(false, true)) { val trainer = new GeneralizedLinearRegression() .setFamily("gaussian") .setFitIntercept(fitIntercept) val model = trainer.fit(dataset) val coefficientsWithStatistics = model.summary.coefficientsWithStatistics coefficientsWithStatistics.map(_._1).zip(expectedFeature(idx)).foreach { x => assert(x._1 === x._2, "Feature name mismatch in coefficientsWithStatistics") } assert(Vectors.dense(coefficientsWithStatistics.map(_._2)) ~= expectedEstimate(idx) absTol 1E-3, "Coefficients mismatch in coefficientsWithStatistics") assert(Vectors.dense(coefficientsWithStatistics.map(_._3)) ~= expectedStdError(idx) absTol 1E-3, "Standard error mismatch in coefficientsWithStatistics") idx += 1 } } test("generalized linear regression: regularization parameter") { /* R code: a1 <- c(0, 1, 2, 3) a2 <- c(5, 2, 1, 3) b <- c(1, 0, 1, 0) data <- as.data.frame(cbind(a1, a2, b)) df <- suppressWarnings(createDataFrame(data)) for (regParam in c(0.0, 0.1, 1.0)) { model <- spark.glm(df, b ~ a1 + a2, regParam = regParam) print(as.vector(summary(model)$aic)) } [1] 12.88188 [1] 12.92681 [1] 13.32836 */ val dataset = Seq( LabeledPoint(1, Vectors.dense(5, 0)), LabeledPoint(0, Vectors.dense(2, 1)), LabeledPoint(1, Vectors.dense(1, 2)), LabeledPoint(0, Vectors.dense(3, 3)) ).toDF() val expected = Seq(12.88188, 12.92681, 13.32836) var idx = 0 for (regParam <- Seq(0.0, 0.1, 1.0)) { val trainer = new GeneralizedLinearRegression() .setRegParam(regParam) .setLabelCol("label") .setFeaturesCol("features") val model = trainer.fit(dataset) val actual = model.summary.aic assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with regParam = $regParam.") idx += 1 } } test("evaluate with labels that are not doubles") { // Evaulate with a dataset that contains Labels not as doubles to verify correct casting val dataset = Seq( Instance(17.0, 1.0, Vectors.dense(0.0, 5.0).toSparse), Instance(19.0, 1.0, Vectors.dense(1.0, 7.0)), Instance(23.0, 1.0, Vectors.dense(2.0, 11.0)), Instance(29.0, 1.0, Vectors.dense(3.0, 13.0)) ).toDF() val trainer = new GeneralizedLinearRegression() .setMaxIter(1) val model = trainer.fit(dataset) assert(model.hasSummary) val summary = model.summary val longLabelDataset = dataset.select(col(model.getLabelCol).cast(FloatType), col(model.getFeaturesCol)) val evalSummary = model.evaluate(longLabelDataset) // The calculations below involve pattern matching with Label as a double assert(evalSummary.nullDeviance === summary.nullDeviance) assert(evalSummary.deviance === summary.deviance) assert(evalSummary.aic === summary.aic) } test("SPARK-23131 Kryo raises StackOverflow during 
serializing GLR model") { val conf = new SparkConf(false) val ser = new KryoSerializer(conf).newInstance() val trainer = new GeneralizedLinearRegression() val model = trainer.fit(Seq(Instance(1.0, 1.0, Vectors.dense(1.0, 7.0))).toDF) ser.serialize[GeneralizedLinearRegressionModel](model) } } object GeneralizedLinearRegressionSuite { /** * Mapping from all Params to valid settings which differ from the defaults. * This is useful for tests which need to exercise all Params, such as save/load. * This excludes input columns to simplify some tests. */ val allParamSettings: Map[String, Any] = Map( "family" -> "poisson", "link" -> "log", "fitIntercept" -> true, "maxIter" -> 2, // intentionally small "tol" -> 0.8, "regParam" -> 0.01, "predictionCol" -> "myPrediction", "variancePower" -> 1.0) def generateGeneralizedLinearRegressionInput( intercept: Double, coefficients: Array[Double], xMean: Array[Double], xVariance: Array[Double], nPoints: Int, seed: Int, noiseLevel: Double, family: String, link: String): Seq[LabeledPoint] = { val rnd = new Random(seed) def rndElement(i: Int) = { (rnd.nextDouble() - 0.5) * math.sqrt(12.0 * xVariance(i)) + xMean(i) } val (generator, mean) = family match { case "gaussian" => (new StandardNormalGenerator, 0.0) case "poisson" => (new PoissonGenerator(1.0), 1.0) case "gamma" => (new GammaGenerator(1.0, 1.0), 1.0) } generator.setSeed(seed) (0 until nPoints).map { _ => val features = Vectors.dense(coefficients.indices.map(rndElement).toArray) val eta = BLAS.dot(Vectors.dense(coefficients), features) + intercept val mu = link match { case "identity" => eta case "log" => math.exp(eta) case "sqrt" => math.pow(eta, 2.0) case "inverse" => 1.0 / eta } val label = mu + noiseLevel * (generator.nextValue() - mean) // Return LabeledPoints with DenseVector LabeledPoint(label, features) } } }
shuangshuangwang/spark
mllib/src/test/scala/org/apache/spark/ml/regression/GeneralizedLinearRegressionSuite.scala
Scala
apache-2.0
70,068
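A minimal usage sketch of the estimator exercised by the test suite above. It assumes a SparkSession with its implicits already imported and a DataFrame `df` that has "label", "features", "weight" and "offset" columns; those names and the DataFrame itself are illustrative assumptions, not part of the suite.

import org.apache.spark.ml.regression.GeneralizedLinearRegression

val glr = new GeneralizedLinearRegression()
  .setFamily("poisson")       // families and weight/offset columns as in the tests above
  .setWeightCol("weight")
  .setOffsetCol("offset")

val model = glr.fit(df)       // `df` is a hypothetical training DataFrame
val summary = model.summary   // training summary, the object the assertions above inspect
println(s"AIC=${summary.aic} deviance=${summary.deviance} dof=${summary.residualDegreeOfFreedom}")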
package org.gDanix.podapp_ng_server.controllers.v0

import org.gDanix.podapp_ng_server.controllers.v0.common.{ CommonResponses, ExecutionContext, Sessions, Masters, Subjects, Teachings }

import play.api.mvc.Action
import play.api.mvc.Results.Ok
import play.api.libs.json.{ JsArray, Json }

import scala.concurrent.Future

/*
 * TODO: Finish implementation
 */
trait GetTeaching extends CommonResponses with ExecutionContext with Sessions with Masters with Subjects with Teachings {
  def teachingResponse(responsible: Int, groups: Int, teachingData: JsArray) =
    Ok(Json.obj("status" -> "OK", "responsible" -> responsible, "groups" -> groups, "teachingData" -> teachingData))

  /*
   * Get teaching load implementation
   */
  def getTeaching(sessionId: Int, masterCode: Int, subjectCode: Int, year: Int) = Action.async { request =>
    sessions
      .retrieve(sessionId)
      .flatMap {
        case None => throw NotLoggedInException
        case Some(session) => masters.retrieveWithUserAccess(session.usuario).map { _.exists(_.codigo == masterCode) }
      }
      .flatMap {
        case false => throw OperationDeniedException
        case true =>
          for {
            docencia <- subjects.retrieve(subjectCode, year.toShort)
            imparte <- teachings.retrieveFromSubjectAndYear(subjectCode, year.toShort)
          } yield (docencia, imparte)
      }
      .map {
        case (Some(docencia), imparte) =>
          teachingResponse(docencia.responsable.getOrElse(-1), docencia.grupos,
            imparte.foldLeft(Json.arr()) { case (array, elem) =>
              array :+ Json.obj("professor" -> elem.profesor, "group" -> elem.group, "credits" -> elem.creditos)
            })
        case (None, _) => throw InternalErrorException
      }
      .recover {
        case e: ResultException => e.getResult
      }

    Future(Ok)
  }
}
gDanix/PODApp-ng-server
app/org/gDanix/podapp_ng_server/controllers/v0/GetTeaching.scala
Scala
apache-2.0
1,821
package views.changekeeper

/**
 * The following are used to deduplicate code across views and tests; they are not used in view-model mappings.
 */
object VrmLocked {
  final val BuyAnotherVehicleId = "buyAnotherVehicle"
  final val ExitId = "exit"
}
dvla/vehicles-change-keeper-online
app/views/changekeeper/VrmLocked.scala
Scala
mit
250
package com.github.gigurra.math

/**
 * Created by johan on 2016-12-04.
 */
object perp {

  /////////////////////////////////////
  // Scalar

  def apply(minInput: Float, maxInput: Float, exponent: Float, input: Float, minOutput: Float, maxOutput: Float, clamp: Boolean): Float = {
    apply(
      delta = input - minInput,
      range = maxInput - minInput,
      exponent = exponent,
      minOutput = minOutput,
      maxOutput = maxOutput,
      clamp = clamp
    )
  }

  def apply(minInput: Float, maxInput: Float, exponent: Float, input: Float, minOutput: Float, maxOutput: Float): Float = {
    apply(minInput, maxInput, exponent, input, minOutput, maxOutput, clamp = true)
  }

  def apply(delta: Float, range: Float, exponent: Float, minOutput: Float, maxOutput: Float, clamp: Boolean): Float = {
    if (clamp && delta < 0.0f) {
      minOutput
    } else if (clamp && delta > range) {
      maxOutput
    } else {
      minOutput + ((maxOutput - minOutput) * math.pow(delta / range, exponent).toFloat)
    }
  }

  def apply(delta: Float, range: Float, exponent: Float, minOutput: Float, maxOutput: Float): Float = {
    apply(delta, range, exponent, minOutput, maxOutput, clamp = true)
  }

  /////////////////////////////////////
  // Vec2

  def apply(delta: Float, range: Float, exponent: Float, minOutput: Vec2, maxOutput: Vec2, clamp: Boolean): Vec2 = {
    if (clamp && delta < 0.0f) {
      minOutput
    } else if (clamp && delta > range) {
      maxOutput
    } else {
      minOutput + ((maxOutput - minOutput) * math.pow(delta / range, exponent).toFloat)
    }
  }

  def apply(delta: Float, range: Float, exponent: Float, minOutput: Vec2, maxOutput: Vec2): Vec2 = {
    apply(delta, range, exponent, minOutput, maxOutput, clamp = true)
  }

  /////////////////////////////////////
  // Vec3

  def apply(delta: Float, range: Float, exponent: Float, minOutput: Vec3, maxOutput: Vec3, clamp: Boolean): Vec3 = {
    if (clamp && delta < 0.0f) {
      minOutput
    } else if (clamp && delta > range) {
      maxOutput
    } else {
      minOutput + ((maxOutput - minOutput) * math.pow(delta / range, exponent).toFloat)
    }
  }

  def apply(delta: Float, range: Float, exponent: Float, minOutput: Vec3, maxOutput: Vec3): Vec3 = {
    apply(delta, range, exponent, minOutput, maxOutput, clamp = true)
  }

  /////////////////////////////////////
  // Vec4

  def apply(delta: Float, range: Float, exponent: Float, minOutput: Vec4, maxOutput: Vec4, clamp: Boolean): Vec4 = {
    if (clamp && delta < 0.0f) {
      minOutput
    } else if (clamp && delta > range) {
      maxOutput
    } else {
      minOutput + ((maxOutput - minOutput) * math.pow(delta / range, exponent).toFloat)
    }
  }

  def apply(delta: Float, range: Float, exponent: Float, minOutput: Vec4, maxOutput: Vec4): Vec4 = {
    apply(delta, range, exponent, minOutput, maxOutput, clamp = true)
  }
}
GiGurra/scala-libgurra
src/main/scala/com/github/gigurra/math/perp.scala
Scala
mit
3,464
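A small usage sketch of the scalar overloads above; the numbers are chosen only to make the arithmetic easy to follow.

import com.github.gigurra.math.perp

// Map input 5.0 from the range [0, 10] onto [0, 100] with a quadratic curve:
// delta/range = 0.5, 0.5^2 = 0.25, so the result is 25.0f.
val y = perp(minInput = 0.0f, maxInput = 10.0f, exponent = 2.0f, input = 5.0f, minOutput = 0.0f, maxOutput = 100.0f)

// Out-of-range inputs are clamped by default, so this yields maxOutput = 100.0f.
val clamped = perp(0.0f, 10.0f, 2.0f, 15.0f, 0.0f, 100.0f)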
package pl.touk.nussknacker.engine.build

import cats.data.NonEmptyList
import pl.touk.nussknacker.engine.api._
import pl.touk.nussknacker.engine.api.process.ProcessName
import pl.touk.nussknacker.engine.build.GraphBuilder.Creator
import pl.touk.nussknacker.engine.graph.EspProcess
import pl.touk.nussknacker.engine.graph.expression.Expression

class ProcessMetaDataBuilder private[build](metaData: MetaData) {

  def parallelism(p: Int): ProcessMetaDataBuilder = {
    val newTypeSpecificData = metaData.typeSpecificData match {
      case s: StreamMetaData => s.copy(parallelism = Some(p))
      case l: LiteStreamMetaData => l.copy(parallelism = Some(p))
      case other => throw new IllegalArgumentException(s"Given execution engine: ${other.getClass.getSimpleName} doesn't support parallelism parameter")
    }
    new ProcessMetaDataBuilder(metaData.copy(typeSpecificData = newTypeSpecificData))
  }

  //TODO: exception when non-streaming process?
  def stateOnDisk(useStateOnDisk: Boolean) =
    new ProcessMetaDataBuilder(metaData.copy(typeSpecificData =
      metaData.typeSpecificData.asInstanceOf[StreamMetaData].copy(spillStateToDisk = Some(useStateOnDisk))))

  //TODO: exception when non-request-response process?
  def path(p: Option[String]) =
    new ProcessMetaDataBuilder(metaData.copy(typeSpecificData = RequestResponseMetaData(p)))

  def subprocessVersions(subprocessVersions: Map[String, Long]) =
    new ProcessMetaDataBuilder(metaData.copy(subprocessVersions = subprocessVersions))

  def additionalFields(description: Option[String] = None, properties: Map[String, String] = Map.empty) =
    new ProcessMetaDataBuilder(metaData.copy(
      additionalFields = Some(ProcessAdditionalFields(description, properties)))
    )

  def source(id: String, typ: String, params: (String, Expression)*): ProcessGraphBuilder =
    new ProcessGraphBuilder(GraphBuilder.source(id, typ, params: _*).creator
      .andThen(r => EspProcess(metaData, NonEmptyList.of(r))))

  class ProcessGraphBuilder private[ProcessMetaDataBuilder](val creator: Creator[EspProcess])
    extends GraphBuilder[EspProcess] {

    override def build(inner: Creator[EspProcess]) = new ProcessGraphBuilder(inner)
  }
}

object ScenarioBuilder {

  def streaming(id: String) =
    new ProcessMetaDataBuilder(MetaData(id, StreamMetaData()))

  def streamingLite(id: String) =
    new ProcessMetaDataBuilder(MetaData(id, LiteStreamMetaData()))

  def requestResponse(id: String) =
    new ProcessMetaDataBuilder(MetaData(id, RequestResponseMetaData(None)))
}

@deprecated("use ScenarioBuilder streaming method", "1.3")
object EspProcessBuilder {

  def id(id: String) =
    new ProcessMetaDataBuilder(MetaData(id, StreamMetaData()))
}

@deprecated("use ScenarioBuilder streamingLite method", "1.3")
object StreamingLiteScenarioBuilder {

  def id(id: String) =
    new ProcessMetaDataBuilder(MetaData(id, LiteStreamMetaData()))
}

@deprecated("use ScenarioBuilder requestResponse method", "1.3")
object RequestResponseScenarioBuilder {

  def id(id: ProcessName) =
    new ProcessMetaDataBuilder(MetaData(id.value, RequestResponseMetaData(None)))

  def id(id: String) =
    new ProcessMetaDataBuilder(MetaData(id, RequestResponseMetaData(None)))
}
TouK/nussknacker
scenario-api/src/main/scala/pl/touk/nussknacker/engine/build/ScenarioBuilder.scala
Scala
apache-2.0
3,256
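A minimal sketch of the builder above. ScenarioBuilder.streaming, parallelism and source are defined in this file; the node id and source type strings are placeholders, and the sink call that would terminate the graph lives on GraphBuilder, which is not shown here.

import pl.touk.nussknacker.engine.build.ScenarioBuilder

val partialGraph = ScenarioBuilder
  .streaming("sample-scenario")      // MetaData with StreamMetaData
  .parallelism(2)                    // only valid for stream-based scenarios
  .source("start", "kafka-source")   // returns a ProcessGraphBuilder; a sink call from GraphBuilder completes it into an EspProcess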
package com.github.rbobin.playjsonmatch.utils

case class JsMatchException(message: String) extends RuntimeException
rbobin/play-json-match
src/main/scala/com/github/rbobin/playjsonmatch/utils/JsMatchException.scala
Scala
mit
117
package com.twitter.finagle.util

import scala.collection.mutable

trait Drv extends (Rng => Int)

/**
 * Create discrete random variables representing arbitrary distributions.
 */
object Drv {
  private val ε = 0.01

  /**
   * A Drv using the Aliasing method [1]: a distribution is described
   * by a set of probabilities and aliases. In order to pick a value
   * j in distribution Pr(Y = j), j=1..n, we first pick a random
   * integer in the uniform distribution over 1..n. We then inspect
   * the probability table whose value represents a biased coin; the
   * random integer is returned with this probability, otherwise the
   * index in the alias table is chosen.
   *
   * "It is a peculiar way to throw dice, but the results are
   * indistinguishable from the real thing." -Knuth (TAOCP Vol. 2;
   * 3.4.1 p.121).
   *
   * [1] Alastair J. Walker. 1977. An Efficient Method for Generating
   * Discrete Random Variables with General Distributions. ACM Trans.
   * Math. Softw. 3, 3 (September 1977), 253-256.
   * DOI=10.1145/355744.355749
   * https://doi.acm.org/10.1145/355744.355749
   *
   * Package private for testing.
   */
  private[util] case class Aliased(alias: IndexedSeq[Int], prob: IndexedSeq[Double]) extends Drv {
    require(prob.size == alias.size)
    private[this] val N = alias.size

    def apply(rng: Rng): Int = {
      val i = rng.nextInt(N)
      val p = prob(i)
      if (p == 1 || rng.nextDouble() < p) i
      else alias(i)
    }
  }

  /**
   * Generate probability and alias tables in the manner of Vose
   * [1]. This algorithm is simple, efficient, and intuitive. Vose's
   * algorithm is O(n) in the distribution size. The paper below
   * contains correctness and complexity proofs.
   *
   * [1] Michael D. Vose. 1991. A Linear Algorithm for Generating Random
   * Numbers with a Given Distribution. IEEE Trans. Softw. Eng. 17, 9
   * (September 1991), 972-975. DOI=10.1109/32.92917
   * https://dx.doi.org/10.1109/32.92917
   *
   * Package private for testing.
   */
  private[util] def newVose(dist: Seq[Double]): Drv = {
    val N = dist.size
    val alias = new Array[Int](N)
    val prob = new Array[Double](N)
    val small = mutable.Queue[Int]()
    val large = mutable.Queue[Int]()
    val p = new Array[Double](N)
    dist.copyToArray(p, 0, N)

    for (i <- p.indices) {
      p(i) *= N
      if (p(i) < 1) small.enqueue(i)
      else large.enqueue(i)
    }

    while (large.nonEmpty && small.nonEmpty) {
      val s = small.dequeue()
      val l = large.dequeue()

      prob(s) = p(s)
      alias(s) = l
      p(l) = (p(s) + p(l)) - 1D // Same as p(l)-(1-p(s)), but more stable

      if (p(l) < 1) small.enqueue(l)
      else large.enqueue(l)
    }

    while (large.nonEmpty) prob(large.dequeue()) = 1
    while (small.nonEmpty) prob(small.dequeue()) = 1

    Aliased(alias, prob)
  }

  /**
   * Create a new Drv representing the passed in distribution of
   * probabilities. These must add up to 1, however we cannot
   * reliably test for this due to numerical stability issues: we're
   * operating on the honor system.
   */
  def apply(dist: Seq[Double]): Drv = {
    require(dist.nonEmpty)
    val sum = dist.sum
    if (!(sum < 1 + ε && sum > 1 - ε))
      throw new AssertionError("Bad sum %.001f".format(sum))

    newVose(dist)
  }

  /**
   * Create a probability distribution based on a set of weights
   * (ratios).
   */
  def fromWeights(weights: Seq[Double]): Drv = {
    require(weights.nonEmpty)
    val sum = weights.sum
    if (sum == 0)
      Drv(Seq.fill(weights.size) { 1D / weights.size })
    else
      Drv(weights.map(_ / sum))
  }
}
luciferous/finagle
finagle-core/src/main/scala/com/twitter/finagle/util/Drv.scala
Scala
apache-2.0
3,639
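A small usage sketch of the alias-method sampler above. Drv and fromWeights come from this file; Rng lives elsewhere in com.twitter.finagle.util, and the Rng.threadLocal constructor used here is an assumption about that API rather than something shown in this record.

import com.twitter.finagle.util.{Drv, Rng}

val drv = Drv(Seq(0.1, 0.3, 0.6))            // probabilities must sum to ~1 (checked loosely against ε)
val weighted = Drv.fromWeights(Seq(1.0, 3.0, 6.0)) // same distribution built from raw weights
val rng = Rng.threadLocal                     // assumed Rng constructor; any Rng instance works
val sample = drv(rng)                         // returns 0, 1 or 2 with the given probabilities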
package idv.brianhsu.maidroid.plurk.adapter

import idv.brianhsu.maidroid.plurk.R

import org.bone.soplurk.model.Icon

import idv.brianhsu.maidroid.plurk.fragment.EmoticonFragment
import idv.brianhsu.maidroid.plurk.view.IconGrid
import idv.brianhsu.maidroid.plurk.util.EmoticonTabs

import android.app.Activity
import android.view.ViewGroup
import android.view.View
import android.support.v4.view.PagerAdapter

class IconPagerAdapter(activity: Activity with EmoticonFragment.Listener,
                       tabs: EmoticonTabs) extends PagerAdapter {

  val orderedTab = Vector(tabs.customPage, tabs.basicPage, tabs.morePage, tabs.hiddenPage)

  val tabsGrid = orderedTab.map { icons =>
    val iconGrid = new IconGrid(activity, icons)
    iconGrid.setOnIconClickListener(activity.onIconSelected _)
    iconGrid
  }

  override def getCount = 4

  override def instantiateItem(container: ViewGroup, position: Int): Object = {
    val iconGrid = tabsGrid(position)
    iconGrid.setTag(position)
    container.addView(iconGrid)
    position.toString
  }

  override def destroyItem(container: ViewGroup, position: Int, obj: Object) {
    container.removeView(tabsGrid(position))
  }

  override def isViewFromObject(view: View, obj: Object) = {
    view.getTag.toString == obj.toString
  }

  override def getPageTitle(position: Int) = position match {
    case 0 => activity.getString(R.string.adapterIconPagerCustom)
    case 1 => activity.getString(R.string.adapterIconPagerOften)
    case 2 => activity.getString(R.string.adapterIconPagerMore)
    case 3 => activity.getString(R.string.adapterIconPagerHidden)
  }
}
brianhsu/MaidroidPlurk
src/main/scala/adapter/IconPagerAdapter.scala
Scala
gpl-3.0
1,621
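A hypothetical wiring sketch for the adapter above: `activity` is assumed to be an Activity mixing in EmoticonFragment.Listener, `tabs` an EmoticonTabs instance, and `viewPager` a support-v4 ViewPager obtained from the layout; none of these are defined in this record.

val adapter = new IconPagerAdapter(activity, tabs)
viewPager.setAdapter(adapter)   // the four emoticon pages then render via instantiateItem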
package org.bitcoins.core.protocol.transaction import org.bitcoins.core.currency.CurrencyUnit import org.bitcoins.core.number.{Int32, UInt32} import org.bitcoins.core.protocol.script.{EmptyScriptWitness, ScriptWitness} import org.bitcoins.core.util.BytesUtil import org.bitcoins.core.wallet.builder.RawTxBuilder import org.bitcoins.crypto._ import scodec.bits.ByteVector /** Created by chris on 7/14/15. */ sealed abstract class Transaction extends NetworkElement { override lazy val byteSize = bytes.length /** The `sha256(sha256(tx))` of this transaction, * Note that this is the little endian encoding of the hash, NOT the big endian encoding shown in block * explorers. See * [[https://bitcoin.stackexchange.com/questions/2063/why-does-the-bitcoin-protocol-use-the-little-endian-notation this link]] * for more info */ def txId: DoubleSha256Digest = CryptoUtil.doubleSHA256(bytes) /** This is the BIG ENDIAN encoding for the txid. This is commonly used for * RPC interfaces and block explorers, this encoding is NOT used at the protocol level * For more info see: * [[https://bitcoin.stackexchange.com/questions/2063/why-does-the-bitcoin-protocol-use-the-little-endian-notation]] */ def txIdBE: DoubleSha256DigestBE = txId.flip /** The version number for this transaction */ def version: Int32 /** The inputs for this transaction */ def inputs: Seq[TransactionInput] /** The outputs for this transaction */ def outputs: Seq[TransactionOutput] /** The locktime for this transaction */ def lockTime: UInt32 /** This is used to indicate how 'expensive' the transction is on the blockchain. * This use to be a simple calculation before segwit (BIP141). Each byte in the transaction * counted as 4 'weight' units. Now with segwit, the * [[org.bitcoins.core.protocol.transaction.TransactionWitness TransactionWitness]] * is counted as 1 weight unit per byte, * while other parts of the transaction (outputs, inputs, locktime etc) count as 4 weight units. * As we add more witness versions, this may be subject to change. * [[https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#Transaction_size_calculations BIP 141]] * [[https://github.com/bitcoin/bitcoin/blob/5961b23898ee7c0af2626c46d5d70e80136578d3/src/consensus/validation.h#L96]] */ def weight: Long /** The transaction's virtual size * [[https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#Transaction_size_calculations]] */ def vsize: Long = Math.ceil(weight / 4.0).toLong /** Base transaction size is the size of the transaction serialised with the witness data stripped * [[https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#Transaction_size_calculations]] */ def baseSize: Long = this match { case btx: NonWitnessTransaction => btx.byteSize case wtx: WitnessTransaction => BaseTransaction(wtx.version, wtx.inputs, wtx.outputs, wtx.lockTime).baseSize } def totalSize: Long = bytes.size /** Determines if this transaction is a coinbase transaction. 
*/ def isCoinbase: Boolean = inputs.size match { case 1 => inputs.head match { case _: CoinbaseInput => true case _: TransactionInput => false } case _: Int => false } /** Updates the input at the given index and returns the new transaction with that input updated */ def updateInput(idx: Int, i: TransactionInput): Transaction = { val updatedInputs = inputs.updated(idx, i) this match { case _: NonWitnessTransaction => BaseTransaction(version, updatedInputs, outputs, lockTime) case wtx: WitnessTransaction => WitnessTransaction(version, updatedInputs, outputs, lockTime, wtx.witness) } } lazy val totalOutput: CurrencyUnit = outputs.map(_.value).sum def toBaseTx: BaseTransaction = { BaseTransaction(version, inputs, outputs, lockTime) } } object Transaction extends Factory[Transaction] { def newBuilder: RawTxBuilder = RawTxBuilder() override def fromBytes(bytes: ByteVector): Transaction = { //see BIP141 for marker/flag bytes //https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#transaction-id if ( bytes(4) == WitnessTransaction.marker && bytes( 5) == WitnessTransaction.flag ) { //this throw/catch is _still_ necessary for the case where we have unsigned base transactions //with zero inputs and 1 output which is serialized as "0001" at bytes 4 and 5. //these transactions will not have a script witness associated with them making them invalid //witness transactions (you need to have a witness to be considered a witness tx) //see: https://github.com/bitcoin-s/bitcoin-s/blob/01d89df1b7c6bc4b1594406d54d5e6019705c654/core-test/src/test/scala/org/bitcoins/core/protocol/transaction/TransactionTest.scala#L88 try { WitnessTransaction.fromBytes(bytes) } catch { case scala.util.control.NonFatal(_) => BaseTransaction.fromBytes(bytes) } } else { BaseTransaction.fromBytes(bytes) } } } sealed abstract class NonWitnessTransaction extends Transaction { override def weight: Long = byteSize * 4 override lazy val bytes: ByteVector = { val versionBytes = version.bytes.reverse val inputBytes = BytesUtil.writeCmpctSizeUInt(inputs) val outputBytes = BytesUtil.writeCmpctSizeUInt(outputs) val lockTimeBytes = lockTime.bytes.reverse versionBytes ++ inputBytes ++ outputBytes ++ lockTimeBytes } } case class BaseTransaction( version: Int32, inputs: Seq[TransactionInput], outputs: Seq[TransactionOutput], lockTime: UInt32) extends NonWitnessTransaction object BaseTransaction extends Factory[BaseTransaction] { override def fromBytes(bytes: ByteVector): BaseTransaction = { val versionBytes = bytes.take(4) val version = Int32(versionBytes.reverse) val txInputBytes = bytes.slice(4, bytes.size) val (inputs, outputBytes) = BytesUtil.parseCmpctSizeUIntSeq(txInputBytes, TransactionInput) val (outputs, lockTimeBytes) = BytesUtil.parseCmpctSizeUIntSeq(outputBytes, TransactionOutput) val lockTime = UInt32(lockTimeBytes.take(4).reverse) BaseTransaction(version, inputs, outputs, lockTime) } def unapply(tx: NonWitnessTransaction): Option[ (Int32, Seq[TransactionInput], Seq[TransactionOutput], UInt32)] = { Some((tx.version, tx.inputs, tx.outputs, tx.lockTime)) } } case object EmptyTransaction extends NonWitnessTransaction { override def txId: DoubleSha256Digest = DoubleSha256Digest.empty override def version: Int32 = TransactionConstants.version override def inputs: Vector[TransactionInput] = Vector.empty override def outputs: Vector[TransactionOutput] = Vector.empty override def lockTime: UInt32 = TransactionConstants.lockTime } case class WitnessTransaction( version: Int32, inputs: Seq[TransactionInput], outputs: 
Seq[TransactionOutput], lockTime: UInt32, witness: TransactionWitness) extends Transaction { require( inputs.length == witness.length, s"Must have same amount of inputs and witnesses in witness tx, inputs=${inputs.length} witnesses=${witness.length}" ) /** The txId for the witness transaction from satoshi's original serialization */ override def txId: DoubleSha256Digest = { toBaseTx.txId } /** The witness transaction id as defined by * [[https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#transaction-id BIP141]] */ def wTxId: DoubleSha256Digest = CryptoUtil.doubleSHA256(bytes) /** Returns the big endian encoding of the wtxid */ def wTxIdBE: DoubleSha256DigestBE = wTxId.flip /** Weight calculation in bitcoin for witness txs * [[https://github.com/bitcoin/bitcoin/blob/5961b23898ee7c0af2626c46d5d70e80136578d3/src/consensus/validation.h#L96]] */ override def weight: Long = { toBaseTx.byteSize * 3 + byteSize } /** Writes a [[org.bitcoins.core.protocol.transaction.WitnessTransaction WitnessTransaction]] to a hex string * This is unique from BaseTransaction.bytes in the fact * that it adds a 'marker' and 'flag' to indicate that this tx is a * [[org.bitcoins.core.protocol.transaction.WitnessTransaction WitnessTransaction]] and has extra * witness data attached to it. * See [[https://github.com/bitcoin/bips/blob/master/bip-0144.mediawiki BIP144]] for more info. * Functionality inside of Bitcoin Core: * [[https://github.com/bitcoin/bitcoin/blob/e8cfe1ee2d01c493b758a67ad14707dca15792ea/src/primitives/transaction.h#L282-L287s]] */ override lazy val bytes: ByteVector = { val versionBytes = version.bytes.reverse val inputBytes = BytesUtil.writeCmpctSizeUInt(inputs) val outputBytes = BytesUtil.writeCmpctSizeUInt(outputs) val witnessBytes = witness.bytes val lockTimeBytes = lockTime.bytes.reverse // notice we use the old serialization format if all witnesses are empty // https://github.com/bitcoin/bitcoin/blob/e8cfe1ee2d01c493b758a67ad14707dca15792ea/src/primitives/transaction.h#L276-L281 if (witness.exists(_ != EmptyScriptWitness)) { val witConstant = ByteVector(0.toByte, 1.toByte) versionBytes ++ witConstant ++ inputBytes ++ outputBytes ++ witnessBytes ++ lockTimeBytes } else toBaseTx.bytes } /** Updates the [[org.bitcoins.core.protocol.script.ScriptWitness ScriptWitness]] at the given index and * returns a new [[org.bitcoins.core.protocol.transaction.WitnessTransaction WitnessTransaction]] * with it's witness vector updated */ def updateWitness(idx: Int, scriptWit: ScriptWitness): WitnessTransaction = { val txWit = witness.updated(idx, scriptWit) WitnessTransaction(version, inputs, outputs, lockTime, txWit) } } object WitnessTransaction extends Factory[WitnessTransaction] { /** This read function is unique to BaseTransaction.fromBytes * in the fact that it reads a 'marker' and 'flag' byte to indicate that this tx is a * [[org.bitcoins.core.protocol.transaction.WitnessTransaction WitnessTransaction]]. * See [[https://github.com/bitcoin/bips/blob/master/bip-0144.mediawiki BIP144 ]] for more details. 
* Functionality inside of Bitcoin Core: * [[https://github.com/bitcoin/bitcoin/blob/e8cfe1ee2d01c493b758a67ad14707dca15792ea/src/primitives/transaction.h#L244-L251]] */ override def fromBytes(bytes: ByteVector): WitnessTransaction = { val versionBytes = bytes.take(4) val version = Int32(versionBytes.reverse) val marker = bytes(4) require( marker.toInt == 0, "Incorrect marker for witness transaction, the marker MUST be 0 for the marker according to BIP141, got: " + marker) val flag = bytes(5) require( flag.toInt != 0, "Incorrect flag for witness transaction, this must NOT be 0 according to BIP141, got: " + flag) val txInputBytes = bytes.slice(6, bytes.size) val (inputs, outputBytes) = BytesUtil.parseCmpctSizeUIntSeq(txInputBytes, TransactionInput) val (outputs, witnessBytes) = BytesUtil.parseCmpctSizeUIntSeq(outputBytes, TransactionOutput) val witness = TransactionWitness(witnessBytes, inputs.size) val lockTimeBytes = witnessBytes.drop(witness.byteSize) val lockTime = UInt32(lockTimeBytes.take(4).reverse) WitnessTransaction(version, inputs, outputs, lockTime, witness) } def toWitnessTx(tx: Transaction): WitnessTransaction = tx match { case btx: NonWitnessTransaction => WitnessTransaction(btx.version, btx.inputs, btx.outputs, btx.lockTime, EmptyWitness.fromInputs(btx.inputs)) case wtx: WitnessTransaction => wtx } val marker: Byte = 0.toByte val flag: Byte = 1.toByte /** These bytes -- at index 4 & 5 in a witness transaction -- are used to indicate a witness tx * @see BIP141 https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#transaction-id */ val witBytes: ByteVector = ByteVector(marker, flag) }
bitcoin-s/bitcoin-s
core/src/main/scala/org/bitcoins/core/protocol/transaction/Transaction.scala
Scala
mit
12,356
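A tiny round-trip sketch using only types defined above: serialize the degenerate EmptyTransaction and parse it back through Transaction.fromBytes. Because it has zero inputs and zero outputs, the marker/flag check fails and parsing resolves to a BaseTransaction.

import org.bitcoins.core.protocol.transaction.{EmptyTransaction, Transaction}

val bytes = EmptyTransaction.bytes          // version ++ 0 inputs ++ 0 outputs ++ locktime
val parsed = Transaction.fromBytes(bytes)   // a BaseTransaction in this case
println(s"weight=${parsed.weight} vsize=${parsed.vsize} txid=${parsed.txIdBE.hex}")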
package org.http4s
package headers

import java.time.{Instant, ZoneId, ZonedDateTime}

class DateSpec extends HeaderLaws {
  checkAll("Date", headerLaws(Date))

  val gmtDate = ZonedDateTime.of(1994, 11, 6, 8, 49, 37, 0, ZoneId.of("GMT"))

  "render" should {
    "format GMT date according to RFC 1123" in {
      Date(Instant.from(gmtDate)).renderString must_== "Date: Sun, 06 Nov 1994 08:49:37 GMT"
    }
    "format UTC date according to RFC 1123" in {
      val utcDate = ZonedDateTime.of(1994, 11, 6, 8, 49, 37, 0, ZoneId.of("UTC"))
      Date(Instant.from(utcDate)).renderString must_== "Date: Sun, 06 Nov 1994 08:49:37 GMT"
    }
  }

  "fromDate" should {
    "accept format RFC 1123" in {
      Date.parse("Sun, 06 Nov 1994 08:49:37 GMT").map(_.date) must be_\/-(Instant.from(gmtDate))
    }
    "accept format RFC 1036" in {
      Date.parse("Sunday, 06-Nov-94 08:49:37 GMT").map(_.date) must be_\/-(Instant.from(gmtDate))
    }
    "accept format ANSI date" in {
      Date.parse("Sun Nov 6 08:49:37 1994").map(_.date) must be_\/-(Instant.from(gmtDate))
      Date.parse("Sun Nov 16 08:49:37 1994").map(_.date) must be_\/-(Instant.from(gmtDate.plusDays(10)))
    }
  }
}
m4dc4p/http4s
tests/src/test/scala/org/http4s/headers/DateSpec.scala
Scala
apache-2.0
1,184
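A usage sketch mirroring the expectations above. In this http4s version Date.parse returns a scalaz-style disjunction (hence the be_\/- matchers), so map is used to reach the parsed Instant; the epoch value is the same timestamp the spec renders.

import java.time.Instant
import org.http4s.headers.Date

val rendered = Date(Instant.ofEpochSecond(784111777L)).renderString
// "Date: Sun, 06 Nov 1994 08:49:37 GMT"

val roundTripped = Date.parse("Sun, 06 Nov 1994 08:49:37 GMT").map(_.date)
// \/-(1994-11-06T08:49:37Z) on success, a ParseFailure on the left otherwise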
/* * Copyright 2001-2013 Artima, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.scalatest package prop import org.scalactic.anyvals._ class PropertyCheckConfigurationHelperSuite extends FunSuite with Matchers { import org.scalatest.prop.Configuration._ val DefaultMinSuccessful: PosInt = 9 val PassedMinSuccessful: PosInt = 3 val DefaultMinSize: PosZInt = 99 val PassedMinSize: PosZInt = 33 val DefaultWorkers: PosInt = 99 val PassedWorkers: PosInt = 33 val DefaultSizeRange: PosZInt = 0 val PassedSizeRange: PosZInt = 10 val DefaultMaxDiscardedFactor: PosZDouble = 1.0 val PassedMaxDiscardedFactor: PosZDouble = 0.5 val defaultConfig = PropertyCheckConfiguration( minSuccessful = DefaultMinSuccessful, maxDiscardedFactor = DefaultMaxDiscardedFactor, minSize = DefaultMinSize, sizeRange = DefaultSizeRange, workers = DefaultWorkers ) // minSuccessful test("getParams returns passed minSuccessful config param") { val params = getParams(Seq(MinSuccessful(PassedMinSuccessful)), defaultConfig) params.minSuccessfulTests should equal (PassedMinSuccessful.value) } test("getParams throws IAE if passed multiple minSuccessful config params") { intercept[IllegalArgumentException] { getParams(Seq(MinSuccessful(33), MinSuccessful(34)), defaultConfig) } } test("getParams returns default minSuccessful config param if none passed") { val params = getParams(Seq(Workers(DefaultWorkers)), defaultConfig) params.minSuccessfulTests should equal (DefaultMinSuccessful.value) } def maxDiscardRatio(maxDiscardedTests: Int, minSuccessfulTests: Int): Float = (maxDiscardedTests: Float)/(minSuccessfulTests: Float) // maxDiscarded test("getParams returns passed maxDiscarded config param") { val params = getParams(Seq(MaxDiscardedFactor(PassedMaxDiscardedFactor)), defaultConfig) params.maxDiscardRatio should equal (PassedMaxDiscardedFactor.value) } test("getParams throws IAE if passed multiple maxDiscarded config params") { intercept[IllegalArgumentException] { getParams(Seq(MaxDiscardedFactor(33.0), MaxDiscardedFactor(34.0)), defaultConfig) } } test("getParams returns default maxDiscarded config param if none passed") { val params = getParams(Seq(MinSuccessful(PassedMinSuccessful)), defaultConfig) params.maxDiscardRatio should equal (DefaultMaxDiscardedFactor.value) } // minSize test("getParams returns passed minSize config param") { val params = getParams(Seq(MinSize(PassedMinSize)), defaultConfig) params.minSize should equal (PassedMinSize.value) } test("getParams throws IAE if passed multiple minSize config params") { intercept[IllegalArgumentException] { getParams(Seq(MinSize(33), MinSize(34)), defaultConfig) } } test("getParams returns default minSize config param if none passed") { val params = getParams(Seq(MinSuccessful(PassedMinSuccessful)), defaultConfig) params.minSize should equal (DefaultMinSize.value) } // sizeRange test("getParams returns passed sizeRange config param") { val params = getParams(Seq(SizeRange(PassedSizeRange)), defaultConfig) params.maxSize should equal (DefaultMinSize + PassedSizeRange) } 
test("getParams returns passed minSize and sizeRange config param") { val params = getParams(Seq(MinSize(PassedMinSize), SizeRange(PassedSizeRange)), defaultConfig) params.maxSize should equal (PassedMinSize + PassedSizeRange) } test("getParams throws IAE if passed multiple maxSize config params") { intercept[IllegalArgumentException] { getParams(Seq(MaxSize(33), MaxSize(34)), defaultConfig) } intercept[IllegalArgumentException] { getParams(Seq(MaxSize(33), SizeRange(34)), defaultConfig) } intercept[IllegalArgumentException] { getParams(Seq(SizeRange(33), SizeRange(34)), defaultConfig) } } test("getParams returns default sizeRange config if none passed") { val params = getParams(Seq(MinSuccessful(PassedMinSuccessful)), defaultConfig) params.maxSize should equal (DefaultMinSize + DefaultSizeRange) } // workers test("getParams returns passed workers config param") { val params = getParams(Seq(Workers(PassedWorkers)), defaultConfig) params.workers should equal (PassedWorkers.value) } test("getParams throws IAE if passed multiple workers config params") { intercept[IllegalArgumentException] { getParams(Seq(Workers(33), Workers(34)), defaultConfig) } } test("getParams returns default workers config param if none passed") { val params = getParams(Seq(MinSuccessful(PassedMinSuccessful)), defaultConfig) params.workers should equal (DefaultWorkers.value) } test("getParams returns all default if no config params passed") { val params = getParams(Seq(), defaultConfig) params.minSuccessfulTests should equal (DefaultMinSuccessful.value) params.maxDiscardRatio should equal (DefaultMaxDiscardedFactor.value) params.minSize should equal (DefaultMinSize.value) params.maxSize should equal (DefaultMinSize.value + DefaultSizeRange.value) params.workers should equal (DefaultWorkers.value) } test("getParams returns all passed if all config params passed") { val params = getParams(Seq(MinSuccessful(PassedMinSuccessful), MaxDiscardedFactor(PassedMaxDiscardedFactor), MinSize(PassedMinSize), SizeRange(PassedSizeRange), Workers(PassedWorkers)), defaultConfig) params.minSuccessfulTests should equal (PassedMinSuccessful.value) params.maxDiscardRatio should equal (PassedMaxDiscardedFactor.value) params.minSize should equal (PassedMinSize.value) params.maxSize should equal (PassedMinSize.value + PassedSizeRange.value) params.workers should equal (PassedWorkers.value) } }
SRGOM/scalatest
scalatest-test/src/test/scala/org/scalatest/prop/PropertyCheckConfigurationHelperSuite.scala
Scala
apache-2.0
6,364
/* * Library of Proximal Algorithms adapted from https://github.com/cvxgrp/proximal * In-place modifications which later should be BLAS-ed when applicable for more efficiency * @author debasish83 */ package breeze.optimize.proximal import breeze.numerics.signum import scala.math.max import scala.math.min import scala.math.sqrt import scala.math.abs import scala.Double.NegativeInfinity import scala.Double.PositiveInfinity import breeze.linalg._ import spire.syntax.cfor._ import breeze.linalg.norm trait Proximal { def prox(x: DenseVector[Double], rho: Double = 1.0) def valueAt(x: DenseVector[Double]) = 0.0 } case class ProjectIdentity() extends Proximal { def prox(x: DenseVector[Double], rho: Double = 1.0) {} } //TO DO: //1. Implement the binary search algorithm from http://see.stanford.edu/materials/lsocoee364b/hw4sol.pdf and compare performance //2. Implement randomized O(n) algorithm from Duchi et al's paper Efficient Projections onto the l1-Ball for Learning in High Dimensions case class ProjectProbabilitySimplex(s: Double) extends Proximal { require(s > 0, s"Proximal:ProjectProbabilitySimplex Radius s must be strictly positive") def prox(x: DenseVector[Double], rho: Double = 1.0) = { val sorted = x.data.sorted(Ordering[Double].reverse) val cum = sorted.scanLeft(0.0)(_ + _).slice(1, x.length + 1) val cs = DenseVector(cum.zipWithIndex.map { elem => (elem._1 - s) / (elem._2 + 1)}) val ndx = (DenseVector(sorted) - cs).data.filter { elem => elem >= 0.0}.length - 1 cforRange(0 until x.length) { i => x.update(i, max(x(i) - cs(ndx), 0.0)) } } } /** * Projection formula from Duchi et al's paper Efficient Projections onto the l1-Ball for Learning in High Dimensions * */ case class ProjectL1(s: Double) extends Proximal { val projectSimplex = ProjectProbabilitySimplex(s) def prox(x: DenseVector[Double], rho: Double = 1.0): Unit = { val u = x.mapValues { _.abs } projectSimplex.prox(u, rho) cforRange(0 until x.length) { i => x.update(i, signum(x(i)) * u(i)) } } } case class ProjectBox(l: DenseVector[Double], u: DenseVector[Double]) extends Proximal { def prox(x: DenseVector[Double], rho: Double = 0.0) = { cforRange(0 until x.length) { i => x.update(i, max(l(i), min(x(i), u(i))))} } } case class ProjectPos() extends Proximal { def prox(x: DenseVector[Double], rho: Double = 0.0) = { cforRange(0 until x.length) { i => x.update(i, max(0, x(i)))} } } case class ProjectSoc() extends Proximal { def prox(x: DenseVector[Double], rho: Double = 0.0) = { var nx: Double = 0.0 val n = x.length cforRange(1 until n) { i => nx += x(i) * x(i) } nx = sqrt(nx) if (nx > x(0)) { if (nx <= -x(0)) { cforRange(0 until n ) { i => x(i) = 0 } } else { val alpha = 0.5 * (1 + x(0) / nx) x.update(0, alpha * nx) cforRange(1 until n) { i => x.update(i, alpha * x(i)) } } } } } //Projection onto Affine set //Let C = { x \in R^{n} | Ax = b } where A \in R^{m x n} //If A is full rank matrix then the projection is given by v - A'(Av - b) where A' is the cached Moore-Penrose pseudo-inverse of A case class ProjectEquality(Aeq: DenseMatrix[Double], beq: DenseVector[Double]) extends Proximal { val invAeq = pinv(Aeq) def prox(x: DenseVector[Double], rho: Double = 0.0) = { val Av = Aeq*x Av -= beq x += invAeq*Av } } //Projection onto hyper-plane is a special case of projection onto affine set and is given by //x + ((b - a'x)/||a||_2^2)a case class ProjectHyperPlane(a: DenseVector[Double], b: Double) extends Proximal { val at = a.t def prox(x: DenseVector[Double], rho: Double = 0.0) = { val atx = at * x val anorm = norm(a, 2) val scale = (b - 
atx) / (anorm * anorm) val ascaled = a * scale x += ascaled } } case class ProximalL1(var lambda: Double = 1.0) extends Proximal { def setLambda(lambda: Double) = { this.lambda = lambda this } def prox(x: DenseVector[Double], rho: Double) = { cforRange(0 until x.length) { i => x.update(i, max(0, x(i) - lambda / rho) - max(0, -x(i) - lambda / rho)) } } override def valueAt(x: DenseVector[Double]) = { lambda * x.foldLeft(0.0) { (agg, entry) => agg + abs(entry)} } } case class ProximalL2() extends Proximal { def prox(x: DenseVector[Double], rho: Double) = { val xnorm = norm(x) cforRange(0 until x.length) { i => if (xnorm >= 1 / rho) x.update(i, x(i) * (1 - 1 / (rho * xnorm))) else x.update(i, 0) } } } // f = (1/2)||.||_2^2 case class ProximalSumSquare() extends Proximal { def prox(x: DenseVector[Double], rho: Double) = { cforRange(0 until x.length) { i => x.update(i, x(i) * (rho / (1 + rho))) } } } // f = -sum(log(x)) case class ProximalLogBarrier() extends Proximal { def prox(x: DenseVector[Double], rho: Double) = { cforRange(0 until x.length) { i => x.update(i, 0.5 * (x(i) + sqrt(x(i) * x(i) + 4 / rho))) } } } // f = huber = x^2 if |x|<=1, 2|x| - 1 otherwise case class ProximalHuber() extends Proximal { def proxScalar(v: Double, rho: Double, oracle: Double => Double, l: Double, u: Double, x0: Double): Double = { val MAX_ITER = 1000 val tol = 1e-8 var g: Double = 0.0 var x = max(l, min(x0, u)) var lIter = l var uIter = u var iter = 0 while (iter < MAX_ITER && u - l > tol) { g = -1 / x + rho * (x - v) if (g > 0) { lIter = max(lIter, x - g / rho) uIter = x } else if (g < 0) { lIter = x uIter = min(uIter, x - g / rho) } x = (lIter + uIter) / 2 iter = iter + 1 } x } def proxSeparable(x: DenseVector[Double], rho: Double, oracle: Double => Double, l: Double, u: Double) = { x.map(proxScalar(_, rho, oracle, l, u, 0)) cforRange(0 until x.length) { i => x.update(i, proxScalar(x(i), rho, oracle, l, u, 0)) } } def subgradHuber(x: Double): Double = { if (abs(x) <= 1) { 2 * x } else { val projx = if (x > 0) x else -x 2 * projx } } def prox(x: DenseVector[Double], rho: Double) = { proxSeparable(x, rho, subgradHuber, NegativeInfinity, PositiveInfinity) } } // f = c'*x case class ProximalLinear(c: DenseVector[Double]) extends Proximal { def prox(x: DenseVector[Double], rho: Double) = { cforRange(0 until x.length) { i => x.update(i, x(i) - c(i) / rho) } } } // f = c'*x + I(x >= 0) case class ProximalLp(c: DenseVector[Double]) extends Proximal { def prox(x: DenseVector[Double], rho: Double) = { cforRange(0 until x.length) { i => x.update(i, max(0, x(i) - c(i) / rho)) } } }
chen0031/breeze
math/src/main/scala/breeze/optimize/proximal/Proximal.scala
Scala
apache-2.0
6,768
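A quick sketch of two operators defined above; note that prox mutates its input vector in place.

import breeze.linalg.DenseVector
import breeze.optimize.proximal.{ProximalL1, ProjectBox}

val x = DenseVector(0.9, -0.2, 1.5)
ProximalL1(lambda = 1.0).prox(x, rho = 2.0)   // soft-thresholding by lambda/rho = 0.5
// x is now approximately DenseVector(0.4, 0.0, 1.0)

val y = DenseVector(-1.0, 0.5, 3.0)
ProjectBox(DenseVector(0.0, 0.0, 0.0), DenseVector(1.0, 1.0, 1.0)).prox(y)
// y is clipped elementwise into [0, 1]: DenseVector(0.0, 0.5, 1.0)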
/**
 * Licensed to Big Data Genomics (BDG) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The BDG licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.bdgenomics.adam.models

import org.apache.avro.generic.IndexedRecord
import org.bdgenomics.formats.avro.{ AlignmentRecord, NucleotideContigFragment, Contig }
import org.bdgenomics.adam.rdd.ADAMContext._
import htsjdk.samtools.{ SamReader, SAMFileHeader, SAMSequenceRecord, SAMSequenceDictionary }
import scala.collection._

/**
 * A SequenceDictionary holds the reference sequences (name, length and optional metadata)
 * declared in the header of a BAM file, or the combined result of merging multiple such
 * dictionaries.
 */
object SequenceDictionary {
  def apply(): SequenceDictionary = new SequenceDictionary()
  def apply(records: SequenceRecord*): SequenceDictionary = new SequenceDictionary(records.toVector)
  def apply(dict: SAMSequenceDictionary): SequenceDictionary = {
    new SequenceDictionary(dict.getSequences.map(SequenceRecord.fromSAMSequenceRecord).toVector)
  }
  def apply(header: SAMFileHeader): SequenceDictionary = SequenceDictionary(header.getSequenceDictionary)
  def apply(reader: SamReader): SequenceDictionary = SequenceDictionary(reader.getFileHeader)

  def toSAMSequenceDictionary(dictionary: SequenceDictionary): SAMSequenceDictionary = {
    new SAMSequenceDictionary(dictionary.records.map(SequenceRecord.toSAMSequenceRecord).toList)
  }

  /**
   * Extracts a SAM sequence dictionary from a SAM file header and returns an
   * ADAM sequence dictionary.
   *
   * @see fromSAMSequenceDictionary
   *
   * @param header SAM file header.
   * @return Returns an ADAM style sequence dictionary.
   */
  def fromSAMHeader(header: SAMFileHeader): SequenceDictionary = {
    val samDict = header.getSequenceDictionary
    fromSAMSequenceDictionary(samDict)
  }

  /**
   * Converts a picard/samtools SAMSequenceDictionary into an ADAM sequence dictionary.
   *
   * @see fromSAMHeader
   * @see fromVCFHeader
   *
   * @param samDict SAM style sequence dictionary.
   * @return Returns an ADAM style sequence dictionary.
   */
  def fromSAMSequenceDictionary(samDict: SAMSequenceDictionary): SequenceDictionary = {
    val samDictRecords: List[SAMSequenceRecord] = samDict.getSequences
    new SequenceDictionary(samDictRecords.map(SequenceRecord.fromSAMSequenceRecord).toVector)
  }

  def fromSAMReader(samReader: SamReader): SequenceDictionary =
    fromSAMHeader(samReader.getFileHeader)
}

class SequenceDictionary(val records: Vector[SequenceRecord]) extends Serializable {
  def this() = this(Vector.empty[SequenceRecord])

  private val byName: Map[String, SequenceRecord] = records.view.map(r => r.name -> r).toMap
  assert(byName.size == records.length, "SequenceRecords with duplicate names aren't permitted")

  def isCompatibleWith(that: SequenceDictionary): Boolean = {
    for (record <- that.records) {
      val myRecord = byName.get(record.name)
      if (myRecord.isDefined && myRecord.get != record)
        return false
    }
    true
  }

  def apply(name: String): Option[SequenceRecord] = byName.get(name)
  def containsRefName(name: String): Boolean = byName.contains(name)

  def +(record: SequenceRecord): SequenceDictionary = this ++ SequenceDictionary(record)
  def ++(that: SequenceDictionary): SequenceDictionary = {
    new SequenceDictionary(records ++ that.records.filter(r => !byName.contains(r.name)))
  }

  override def hashCode = records.hashCode()
  override def equals(o: Any) = o match {
    case that: SequenceDictionary => records.equals(that.records)
    case _                        => false
  }

  /**
   * Converts this ADAM style sequence dictionary into a SAM style sequence dictionary.
   *
   * @return Returns a SAM formatted sequence dictionary.
   */
  def toSAMSequenceDictionary: SAMSequenceDictionary = {
    import SequenceRecord._
    new SAMSequenceDictionary(records.sorted.map(_.toSAMSequenceRecord).toList)
  }

  override def toString: String = {
    records.map(_.toString).fold("SequenceDictionary{")(_ + "\n" + _) + "}"
  }
}

object SequenceOrdering extends Ordering[SequenceRecord] {
  def compare(a: SequenceRecord, b: SequenceRecord): Int = {
    a.name.compareTo(b.name)
  }
}

/**
 * Utility class used by SequenceDictionary; represents a single reference sequence
 * (its name, length and optional metadata).
 */
class SequenceRecord(
    val name: String,
    val length: Long,
    val url: Option[String] = None,
    val md5: Option[String] = None,
    val refseq: Option[String] = None,
    val genbank: Option[String] = None,
    val assembly: Option[String] = None,
    val species: Option[String] = None) extends Serializable {

  assert(name != null && !name.isEmpty, "SequenceRecord.name is null or empty")
  assert(length > 0, "SequenceRecord.length <= 0")

  override def toString: String = "%s->%s".format(name, length)

  /**
   * Converts this sequence record into a SAM sequence record.
   *
   * @return A SAM formatted sequence record.
   */
  def toSAMSequenceRecord: SAMSequenceRecord = {
    val rec = new SAMSequenceRecord(name.toString, length.toInt)

    // set md5 if available
    md5.foreach(s => rec.setAttribute(SAMSequenceRecord.MD5_TAG, s.toUpperCase))

    // set URL if available
    url.foreach(rec.setAttribute(SAMSequenceRecord.URI_TAG, _))

    // set species if available
    species.foreach(rec.setAttribute(SAMSequenceRecord.SPECIES_TAG, _))

    // set assembly if available
    assembly.foreach(rec.setAssembly)

    // set refseq accession number if available
    refseq.foreach(rec.setAttribute("REFSEQ", _))

    // set genbank accession number if available
    genbank.foreach(rec.setAttribute("GENBANK", _))

    // return record
    rec
  }

  override def equals(o: Any): Boolean = o match {
    case that: SequenceRecord =>
      name == that.name && length == that.length && optionEq(md5, that.md5) && optionEq(url, that.url)
    case _ => false
  }

  // No md5/url is "equal" to any md5/url in this setting
  private def optionEq(o1: Option[String], o2: Option[String]) = (o1, o2) match {
    case (Some(c1), Some(c2)) => c1 == c2
    case _                    => true
  }
}

object SequenceRecord {
  val REFSEQ_TAG = "REFSEQ"
  val GENBANK_TAG = "GENBANK"

  implicit def ordering: Ordering[SequenceRecord] = SequenceOrdering

  def apply(name: String,
            length: Long,
            md5: String = null,
            url: String = null,
            refseq: String = null,
            genbank: String = null,
            assembly: String = null,
            species: String = null): SequenceRecord = {
    new SequenceRecord(
      name,
      length,
      Option(url).map(_.toString),
      Option(md5).map(_.toString),
      Option(refseq).map(_.toString),
      Option(genbank).map(_.toString),
      Option(assembly).map(_.toString),
      Option(species).map(_.toString))
  }

  /**
   * Generates a sequence record from a SAMSequenceRecord.
   *
   * @param record SAM sequence record input.
   * @return A new ADAM sequence record.
   */
  def fromSAMSequenceRecord(record: SAMSequenceRecord): SequenceRecord = {
    SequenceRecord(
      record.getSequenceName,
      record.getSequenceLength,
      md5 = record.getAttribute(SAMSequenceRecord.MD5_TAG),
      url = record.getAttribute(SAMSequenceRecord.URI_TAG),
      refseq = record.getAttribute(REFSEQ_TAG),
      genbank = record.getAttribute(GENBANK_TAG),
      assembly = record.getAssembly,
      species = record.getAttribute(SAMSequenceRecord.SPECIES_TAG))
  }

  def toSAMSequenceRecord(record: SequenceRecord): SAMSequenceRecord = {
    val sam = new SAMSequenceRecord(record.name, record.length.toInt)
    record.md5.foreach(v => sam.setAttribute(SAMSequenceRecord.MD5_TAG, v.toString))
    record.url.foreach(v => sam.setAttribute(SAMSequenceRecord.URI_TAG, v.toString))
    sam
  }

  def fromADAMContig(contig: Contig): SequenceRecord = {
    SequenceRecord(
      contig.getContigName.toString,
      contig.getContigLength,
      md5 = contig.getContigMD5,
      url = contig.getReferenceURL,
      assembly = contig.getAssembly,
      species = contig.getSpecies)
  }

  def toADAMContig(record: SequenceRecord): Contig = {
    val builder = Contig.newBuilder()
      .setContigName(record.name)
      .setContigLength(record.length)
    record.md5.foreach(builder.setContigMD5)
    record.url.foreach(builder.setReferenceURL)
    record.assembly.foreach(builder.setAssembly)
    record.species.foreach(builder.setSpecies)
    builder.build
  }

  def fromADAMContigFragment(fragment: NucleotideContigFragment): SequenceRecord = {
    fromADAMContig(fragment.getContig)
  }

  /**
   * Converts a Read into one or more SequenceRecords.
   * The reason that we can't simply use the "fromSpecificRecord" method, below, is that each Read
   * can (through the fact that it could be a pair of reads) contain 1 or 2 possible SequenceRecord entries
   * for the SequenceDictionary itself. Both have to be extracted, separately.
   *
   * @param rec The Read from which to extract the SequenceRecord entries
   * @return a list of all SequenceRecord entries derivable from this record.
   */
  def fromADAMRecord(rec: AlignmentRecord): Set[SequenceRecord] = {
    assert(rec != null, "Read was null")
    if (rec.getContig != null || rec.getMateContig != null) {
      // The contig is null for an unmapped read, so only the non-null contigs contribute records
      List(Option(rec.getContig), Option(rec.getMateContig))
        .flatten
        .map(fromADAMContig)
        .toSet
    } else
      Set()
  }

  def fromSpecificRecord(rec: IndexedRecord): SequenceRecord = {
    val schema = rec.getSchema
    if (schema.getField("referenceId") != null) {
      SequenceRecord(
        rec.get(schema.getField("referenceName").pos()).toString,
        rec.get(schema.getField("referenceLength").pos()).asInstanceOf[Long],
        url = rec.get(schema.getField("referenceUrl").pos()).toString)
    } else if (schema.getField("contig") != null) {
      val pos = schema.getField("contig").pos()
      fromADAMContig(rec.get(pos).asInstanceOf[Contig])
    } else {
      throw new AssertionError("Missing information to generate SequenceRecord")
    }
  }
}
VinACE/adam
adam-core/src/main/scala/org/bdgenomics/adam/models/SequenceDictionary.scala
Scala
apache-2.0
10,841
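// Illustrative usage sketch (not part of the VinACE/adam sources above): building and merging
// SequenceDictionary instances with the API defined in SequenceDictionary.scala. The reference
// names, lengths and md5 below are made-up example values, and the snippet assumes the classes
// above (and their htsjdk dependencies) are on the classpath.
object SequenceDictionaryExample extends App {
  import org.bdgenomics.adam.models.{ SequenceDictionary, SequenceRecord }

  val chr1 = SequenceRecord("chr1", 249250621L, md5 = "1b22b98cdeb4a9304cb5d48026a85128")
  val chr2 = SequenceRecord("chr2", 243199373L)

  // Records with distinct names can be combined; '++' keeps the left-hand record on a name clash.
  val dict = SequenceDictionary(chr1) ++ SequenceDictionary(chr2)

  println(dict.containsRefName("chr1")) // true
  println(dict("chr2").map(_.length))   // Some(243199373)

  // A dictionary that disagrees on a record it shares with this one is reported as incompatible.
  val conflicting = SequenceDictionary(SequenceRecord("chr1", 1000L))
  println(dict.isCompatibleWith(conflicting)) // false
}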
package org.eigengo.scalalp.streams

import java.util.zip.{ZipEntry, ZipFile}
import java.io.{InputStream, File}

class ZipArchive(file: File) {
  val zipFile = new ZipFile(file, ZipFile.OPEN_READ)

  /**
   * Apply ``operation`` to every entry in the zip file, collecting the result if the
   * operation is defined, skipping it if it is not defined.
   *
   * @param operation the operation to apply to every entry
   * @tparam B the return type of the operation
   * @return all ``x``s for which ``operation`` returns ``Some(x: B)``
   */
  def flatMap[B](operation: (ZipEntry, InputStream) => Option[B]): List[B] = {
    import scala.collection.JavaConversions._

    val entries = zipFile.entries().toList
    entries.flatMap { entry =>
      val is = zipFile.getInputStream(entry)
      val result = operation(entry, is)
      is.close()
      result
    }
  }
}
eigengo/scala-launchpad
src/main/scala/org/eigengo/scalalp/streams/ZipArchive.scala
Scala
apache-2.0
869
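// Illustrative usage sketch (not part of the eigengo/scala-launchpad sources above): applying
// the ZipArchive.flatMap defined in ZipArchive.scala to list non-directory entries. The archive
// path below is a made-up placeholder.
object ZipArchiveExample extends App {
  import java.io.File
  import org.eigengo.scalalp.streams.ZipArchive

  val archive = new ZipArchive(new File("/tmp/example.zip"))

  // Collect (name, uncompressed size) for every regular entry; directories yield None and are skipped.
  val sizes: List[(String, Long)] = archive.flatMap { (entry, _) =>
    if (entry.isDirectory) None else Some(entry.getName -> entry.getSize)
  }

  sizes.foreach { case (name, size) => println(s"$name: $size bytes") }
}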
/*
 * Copyright 2015 leon chen
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.moilioncircle.jsonpath

/**
 * Created by leon on 15-6-21.
 */
abstract class JSONType

case class JSONObject(obj: Map[String, _]) extends JSONType

case class JSONArray(list: List[_]) extends JSONType

import scala.annotation.switch

object JSONParser {
  def apply(json: String): JSONParser = new JSONParser(json.iterator)

  def apply(json: Iterator[Char]): JSONParser = new JSONParser(json)
}

class JSONParser(it: Iterator[Char]) {
  private var column: Int = 0
  private var row: Int = 0
  private var backChar: Option[(Char, Int, Int)] = None
  private val sb: StringBuilder = new StringBuilder

  def parse() = {
    try {
      next() match {
        case '{' => JSONObject(parseObject())
        case '[' => JSONArray(parseArray())
        case e   => throw JSONSyntaxException(s"expected ['[' , '{'] but '$e' at row $row,column $column")
      }
    } catch {
      case e: NoSuchElementException =>
        throw JSONSyntaxException(s"expected a char but stream ended at row $row,column $column")
    }
  }

  private def parseObject(): Map[String, Any] = {
    val map = Map.newBuilder[String, Any]
    next() match {
      case '}' => map.result()
      case c =>
        map += (c match {
          case '"' =>
            val key = parseString()
            next() match {
              case ':' => (key, parseValue(next()))
              case e   => throw JSONSyntaxException(s"expected ':' but '$e' at row $row,column $column")
            }
          case e => throw JSONSyntaxException(s"expected string but '$e' at row $row,column $column")
        })
        next() match {
          case ',' =>
            var ch = ','
            while (ch == ',') {
              map += (next() match {
                case '"' =>
                  val key = parseString()
                  next() match {
                    case ':' => (key, parseValue(next()))
                    case e   => throw JSONSyntaxException(s"expected ':' but '$e' at row $row,column $column")
                  }
                case e => throw JSONSyntaxException(s"expected string but '$e' at row $row,column $column")
              })
              ch = next()
            }
            ch match {
              case '}' => map.result()
              case e   => throw JSONSyntaxException(s"expected '}' but '$e' at row $row,column $column")
            }
          case '}' => map.result()
          case e   => throw JSONSyntaxException(s"expected [',' , '}'] but '$e' at row $row,column $column")
        }
    }
  }

  private def parseArray(): List[Any] = {
    val list = List.newBuilder[Any]
    next() match {
      case ']' => list.result()
      case ch =>
        list += parseValue(ch)
        next() match {
          case ',' =>
            var ch = ','
            while (ch == ',') {
              list += parseValue(next())
              ch = next()
            }
            ch match {
              case ']' => list.result()
              case e   => throw JSONSyntaxException(s"expected ']' but '$e' at row $row,column $column")
            }
          case ']' => list.result()
          case e   => throw JSONSyntaxException(s"expected [',' , ']'] but '$e' at row $row,column $column")
        }
    }
  }

  private def parseValue(ch: Char): Any = {
    (ch: @switch) match {
      case '"' => parseString()
      case n @ ('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | '-') =>
        scala.BigDecimal(parseNumber(n))
      case 't' =>
        parseTrue(ch)
        true
      case 'f' =>
        parseFalse(ch)
        false
      case 'n' =>
        parseNull(ch)
        null
      case '{' => JSONObject(parseObject())
      case '[' => JSONArray(parseArray())
      case e   => throw JSONSyntaxException(s"expected [string , number , null , true , false , jsonObject , jsonArray] but '$e' at row $row,column $column")
    }
  }

  @inline private def parseNull(ch: Char): Unit = {
    ch match {
      case 'n' => nextChar() match {
        case 'u' => nextChar() match {
          case 'l' => nextChar() match {
            case 'l' =>
            case e   => throw JSONLexerException(s"expected null but '$e' at row $row,column $column")
          }
          case e => throw JSONLexerException(s"expected null but '$e' at row $row,column $column")
        }
        case e => throw JSONLexerException(s"expected null but '$e' at row $row,column $column")
      }
    }
  }

  @inline private def parseFalse(ch: Char): Unit = {
    ch match {
      case 'f' => nextChar() match {
        case 'a' => nextChar() match {
          case 'l' => nextChar() match {
            case 's' => nextChar() match {
              case 'e' =>
              case e   => throw JSONLexerException(s"expected false but '$e' at row $row,column $column")
            }
            case e => throw JSONLexerException(s"expected false but '$e' at row $row,column $column")
          }
          case e => throw JSONLexerException(s"expected false but '$e' at row $row,column $column")
        }
        case e => throw JSONLexerException(s"expected false but '$e' at row $row,column $column")
      }
    }
  }

  @inline private def parseTrue(ch: Char): Unit = {
    ch match {
      case 't' => nextChar() match {
        case 'r' => nextChar() match {
          case 'u' => nextChar() match {
            case 'e' =>
            case e   => throw JSONLexerException(s"expected true but '$e' at row $row,column $column")
          }
          case e => throw JSONLexerException(s"expected true but '$e' at row $row,column $column")
        }
        case e => throw JSONLexerException(s"expected true but '$e' at row $row,column $column")
      }
    }
  }

  @inline private def parseString(): String = {
    sb.setLength(0)
    var next = nextChar()
    while (next != '"') {
      next match {
        case '\\' =>
          next = nextChar()
          (next: @switch) match {
            case '"' =>
              sb.append('\"')
              next = nextChar()
            case '\\' =>
              sb.append('\\')
              next = nextChar()
            case '/' =>
              sb.append('/')
              next = nextChar()
            case 'b' =>
              sb.append('\b')
              next = nextChar()
            case 'f' =>
              sb.append('\f')
              next = nextChar()
            case 'F' =>
              sb.append('\f')
              next = nextChar()
            case 'n' =>
              sb.append('\n')
              next = nextChar()
            case 'r' =>
              sb.append('\r')
              next = nextChar()
            case 't' =>
              sb.append('\t')
              next = nextChar()
            case 'u' =>
              val s = Integer.valueOf(new String(Array(nextChar(), nextChar(), nextChar(), nextChar())), 16).toChar
              sb.append(s)
              next = nextChar()
            case e =>
              sb.append('\\')
              sb.append(e)
              next = nextChar()
          }
        case e =>
          sb.append(next)
          next = nextChar()
      }
    }
    sb.toString()
  }

  @inline private def parseNumber(ch: Char): String = {
    sb.setLength(0)
    var next = ch
    next match {
      case '-' =>
        sb.append('-')
        next = nextChar()
      case _ =>
    }
    next match {
      case '0' =>
        sb.append('0')
        next = nextChar()
      case ch if ch > '0' && ch <= '9' =>
        sb.append(next)
        next = nextChar()
        while (parseDigit(next, sb)) {
          next = nextChar()
        }
    }
    next match {
      case '.' =>
        sb.append(next)
        next = nextChar()
        while (parseDigit(next, sb)) {
          next = nextChar()
        }
      case _ =>
    }
    if (next == 'e' || next == 'E') {
      sb.append(next)
      next = nextChar()
      if (next == '+' || next == '-') {
        sb.append(next)
        next = nextChar()
      }
      while (parseDigit(next, sb)) {
        next = nextChar()
      }
    }
    back(next)
    sb.toString
  }

  @inline private def parseDigit(c: Char, sb: StringBuilder): Boolean = {
    c match {
      case c if c >= '0' && c <= '9' =>
        sb.append(c)
        true
      case e => false
    }
  }

  @inline private def nextChar(): Char = {
    if (backChar.nonEmpty) {
      column = backChar.get._2
      row = backChar.get._3
      val c = backChar.get._1
      backChar = None
      c
    } else {
      column += 1
      it.next()
    }
  }

  @inline private def next(): Char = {
    if (backChar.nonEmpty) {
      column = backChar.get._2
      row = backChar.get._3
      val c = backChar.get._1
      backChar = None
      c
    } else {
      var c = it.next()
      while (ignoreLetter(c)) {
        c = it.next()
      }
      column += 1
      c
    }
  }

  @inline private def ignoreLetter(c: Char): Boolean = {
    (c: @switch) match {
      case ' ' =>
        column += 1
        true
      case '\r' =>
        column += 1
        true
      case '\n' =>
        row += 1
        column = 0
        true
      case '\t' =>
        column += 4
        true
      case _ => false
    }
  }

  @inline private def back(char: Char): Unit = {
    var c = char
    while (ignoreLetter(c)) {
      c = it.next()
    }
    column += 1
    backChar = Some((c, column, row))
  }
}
leonchen83/jsonpath
src/main/scala/com/moilioncircle/jsonpath/JSONParser.scala
Scala
apache-2.0
10,017
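// Illustrative usage sketch (not part of the leonchen83/jsonpath sources above): parsing a small
// JSON document with the JSONParser defined in JSONParser.scala and pattern matching on the
// resulting JSONObject/JSONArray wrappers. The input string is a made-up example.
object JSONParserExample extends App {
  import com.moilioncircle.jsonpath.{ JSONArray, JSONObject, JSONParser }

  val json = """{"name": "adam", "tags": ["scala", "genomics"], "stars": 42}"""

  JSONParser(json).parse() match {
    case JSONObject(obj) =>
      // Strings stay Strings, numbers become scala.BigDecimal, nested arrays become JSONArray.
      println(obj("name"))
      obj("tags") match {
        case JSONArray(tags) => println(tags.mkString(", "))
        case other           => println(s"unexpected: $other")
      }
      println(obj("stars"))
    case JSONArray(list) =>
      println(s"top-level array with ${list.size} elements")
  }
}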
package molecule

import sbt._

object Dependencies {

  object Test {
    type MM = String => ModuleID

    // Sort by artifact ID.
    lazy val junit = "junit" % "junit" % "4.11"
    lazy val specs: MM = sv => "org.scala-tools.testing" % "specs" % specsVersion(sv) cross specsCross
    //lazy val specs2: MM = sv => "org.specs2" %% "specs2" % specs2Version(sv)
    lazy val specs2: MM = sv => "org.specs2" % "specs2_2.9.2" % specs2Version(sv)
    lazy val scalatest: MM = sv => "org.scalatest" %% "scalatest" % scalatestVersion(sv) % "test"

    private val scalatestVersion: String => String = {
      case sv if sv startsWith "2.8." => "1.8"
      case sv if sv startsWith "2.9." => "1.9.1" // 1.9.2
      case sv if sv startsWith "2.10" => "1.9.1" // 2.2.4
      case _                          => "2.2.4"
    }

    private val specsCross = CrossVersion.binaryMapped {
      case "2.8.2"  => "2.8.1" // _2.8.2 published with bad checksum
      case "2.9.2"  => "2.9.1"
      case "2.10.0" => "2.10" // sbt bug?
      case bin      => bin
    }

    private val specsVersion: String => String = {
      case sv if sv startsWith "2.8." => "1.6.8"
      case "2.9.0-1"                  => "1.6.8"
      case _                          => "1.6.9"
    }

    private val specs2Version: String => String = {
      case sv if sv startsWith "2.8." => "1.5"
      case "2.9.0-1"                  => "1.8.2"
      case sv if sv startsWith "2.9." => "1.12.3"
      case _                          => "1.13"
    }
  }

  object Compilation {
    import molecule.Compiler.Keys._

    // Compiler plugins
    val genjavadoc = compilerPlugin("com.typesafe.genjavadoc" %% "genjavadoc-plugin" % "0.3" cross CrossVersion.full) // ApacheV2

    lazy val mbench = "com.github.sbocq" %% "mbench" % "0.2.4"

    lazy val scalaActors: Test.MM = {
      case versionXYZ("2", "10", _) => "org.scala-lang" % "scala-actors" % "2.10.3"
      case _                        => "org.scala-lang" % "scala-actors" % "2.11.5"
    }
  }
}
molecule-labs/molecule
project/Dependencies.scala
Scala
apache-2.0
1,880
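// Illustrative usage sketch (not part of the molecule-labs/molecule build above): wiring the
// cross-version-aware helpers from Dependencies into an sbt 0.13-style project definition. The
// project name and settings below are placeholders; only Dependencies itself comes from the
// file above, and versionXYZ / Compiler.Keys are assumed to be defined elsewhere in that build.
import sbt._
import Keys._
import molecule.Dependencies

object ExampleBuild extends Build {
  lazy val exampleProject = Project("example", file("example")).settings(
    // Plain ModuleIDs can be added directly ...
    libraryDependencies += Dependencies.Test.junit % "test",
    // ... while the MM helpers are functions of the Scala version being built against.
    libraryDependencies += Dependencies.Test.specs2(scalaVersion.value) % "test",
    libraryDependencies += Dependencies.Compilation.mbench
  )
}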