code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package org.jetbrains.plugins.scala.codeInspection.bundled
import com.intellij.codeInspection.ProblemsHolder
import com.intellij.psi.PsiElement
/**
 * Base contract for inspections that are bundled and registered programmatically
 * rather than through plugin.xml.
 *
 * User: Dmitry.Naydanov
 * Date: 03.10.16.
 */
abstract class BundledInspectionBase {
  /** Stable identifier of this inspection; the fully-qualified class name is unique per subclass. */
  final def getId: String = this.getClass.getName
  /** Short, human-readable name of the inspection. */
  def getName: String
  /** Longer description of what the inspection detects. */
  def getDescription: String
  /**
   * Partial function applied to PSI elements; matching elements register
   * their problems on the given [[ProblemsHolder]].
   */
  def actionFor(holder: ProblemsHolder): PartialFunction[PsiElement, Any]
}
} | ilinum/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/bundled/BundledInspectionBase.scala | Scala | apache-2.0 | 425 |
package com.collective.modelmatrix
import java.sql.Timestamp
import java.time.Instant
import org.apache.spark.sql.types._
import scodec.bits.ByteVector
import slick.driver.PostgresDriver.api._
package object catalog {

  /** Slick column mapping: persist [[java.time.Instant]] as a SQL TIMESTAMP. */
  implicit val instantColumnType =
    MappedColumnType.base[Instant, java.sql.Timestamp](
      instant => Timestamp.from(instant),
      _.toInstant
    )

  /**
   * Slick column mapping: persist a Spark SQL [[DataType]] as a short string tag.
   *
   * NOTE(review): only the five listed types round-trip; any other DataType
   * (e.g. FloatType, BooleanType) or unknown tag throws a MatchError —
   * confirm this is the intended closed set for the model-matrix catalog.
   */
  implicit val dataTypeColumnType =
    MappedColumnType.base[DataType, String]({
      case ShortType => "short"
      case IntegerType => "integer"
      case LongType => "long"
      case DoubleType => "double"
      case StringType => "string"
    }, {
      case "short" => ShortType
      case "integer" => IntegerType
      case "long" => LongType
      case "double" => DoubleType
      case "string" => StringType
    })

  /** Slick column mapping: persist a scodec [[ByteVector]] as a raw byte array. */
  implicit val byteVectorColumnType =
    MappedColumnType.base[ByteVector, Array[Byte]](
      _.toArray,
      ByteVector.apply
    )
}
| codeaudit/modelmatrix | modelmatrix-core/src/main/scala/com/collective/modelmatrix/catalog/package.scala | Scala | apache-2.0 | 949 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.manager.utils.zero90
import java.nio.ByteBuffer
import kafka.common.{KafkaException, OffsetAndMetadata}
import kafka.coordinator.group.{BaseKey, GroupMetadataKey, GroupTopicPartition, OffsetKey}
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.protocol.types.Type._
import org.apache.kafka.common.protocol.types.{ArrayOf, Field, Schema, Struct}
import org.apache.kafka.common.requests.DescribeGroupsResponse
import scala.collection.Map
/*
Borrowed from kafka 0.9.0.0 GroupMetadataManager
*/
object GroupMetadataManager {

  // Key schema versions used on the __consumer_offsets topic:
  // versions 0/1 are offset-commit keys, version 2 is a group-metadata key.
  private val CURRENT_OFFSET_KEY_SCHEMA_VERSION = 1.toShort
  private val CURRENT_GROUP_KEY_SCHEMA_VERSION = 2.toShort

  // Offset-commit key: (group, topic, partition).
  private val OFFSET_COMMIT_KEY_SCHEMA = new Schema(new Field("group", STRING),
    new Field("topic", STRING),
    new Field("partition", INT32))
  private val OFFSET_KEY_GROUP_FIELD = OFFSET_COMMIT_KEY_SCHEMA.get("group")
  private val OFFSET_KEY_TOPIC_FIELD = OFFSET_COMMIT_KEY_SCHEMA.get("topic")
  private val OFFSET_KEY_PARTITION_FIELD = OFFSET_COMMIT_KEY_SCHEMA.get("partition")

  // Offset value schema v0: a single timestamp.
  private val OFFSET_COMMIT_VALUE_SCHEMA_V0 = new Schema(new Field("offset", INT64),
    new Field("metadata", STRING, "Associated metadata.", ""),
    new Field("timestamp", INT64))
  private val OFFSET_VALUE_OFFSET_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("offset")
  private val OFFSET_VALUE_METADATA_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("metadata")
  private val OFFSET_VALUE_TIMESTAMP_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("timestamp")

  // Offset value schema v1: separate commit and expiration timestamps.
  private val OFFSET_COMMIT_VALUE_SCHEMA_V1 = new Schema(new Field("offset", INT64),
    new Field("metadata", STRING, "Associated metadata.", ""),
    new Field("commit_timestamp", INT64),
    new Field("expire_timestamp", INT64))
  private val OFFSET_VALUE_OFFSET_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("offset")
  private val OFFSET_VALUE_METADATA_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("metadata")
  private val OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("commit_timestamp")
  private val OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("expire_timestamp")

  // Group-metadata key: just the group id.
  private val GROUP_METADATA_KEY_SCHEMA = new Schema(new Field("group", STRING))
  private val GROUP_KEY_GROUP_FIELD = GROUP_METADATA_KEY_SCHEMA.get("group")

  // Per-member payload schemas; v1 adds a rebalance_timeout field.
  private val MEMBER_METADATA_V0 = new Schema(
    new Field("member_id", STRING),
    new Field("client_id", STRING),
    new Field("client_host", STRING),
    new Field("session_timeout", INT32),
    new Field("subscription", BYTES),
    new Field("assignment", BYTES))
  private val MEMBER_METADATA_V1 = new Schema(
    new Field("member_id", STRING),
    new Field("client_id", STRING),
    new Field("client_host", STRING),
    new Field("session_timeout", INT32),
    new Field("rebalance_timeout", INT32),
    new Field("subscription", BYTES),
    new Field("assignment", BYTES))
  private val MEMBER_METADATA_MEMBER_ID_V0 = MEMBER_METADATA_V0.get("member_id")
  private val MEMBER_METADATA_CLIENT_ID_V0 = MEMBER_METADATA_V0.get("client_id")
  private val MEMBER_METADATA_CLIENT_HOST_V0 = MEMBER_METADATA_V0.get("client_host")
  private val MEMBER_METADATA_SESSION_TIMEOUT_V0 = MEMBER_METADATA_V0.get("session_timeout")
  private val MEMBER_METADATA_SUBSCRIPTION_V0 = MEMBER_METADATA_V0.get("subscription")
  private val MEMBER_METADATA_ASSIGNMENT_V0 = MEMBER_METADATA_V0.get("assignment")
  private val MEMBER_METADATA_MEMBER_ID_V1 = MEMBER_METADATA_V1.get("member_id")
  private val MEMBER_METADATA_CLIENT_ID_V1 = MEMBER_METADATA_V1.get("client_id")
  private val MEMBER_METADATA_CLIENT_HOST_V1 = MEMBER_METADATA_V1.get("client_host")
  private val MEMBER_METADATA_SESSION_TIMEOUT_V1 = MEMBER_METADATA_V1.get("session_timeout")
  private val MEMBER_METADATA_REBALANCE_TIMEOUT_V1 = MEMBER_METADATA_V1.get("rebalance_timeout")
  private val MEMBER_METADATA_SUBSCRIPTION_V1 = MEMBER_METADATA_V1.get("subscription")
  private val MEMBER_METADATA_ASSIGNMENT_V1 = MEMBER_METADATA_V1.get("assignment")

  // Group value schemas; v1 allows null protocol/leader (e.g. for empty groups).
  private val GROUP_METADATA_VALUE_SCHEMA_V0 = new Schema(
    new Field("protocol_type", STRING),
    new Field("generation", INT32),
    new Field("protocol", STRING),
    new Field("leader", STRING),
    new Field("members", new ArrayOf(MEMBER_METADATA_V0)))
  private val GROUP_METADATA_VALUE_SCHEMA_V1 = new Schema(
    new Field("protocol_type", STRING),
    new Field("generation", INT32),
    new Field("protocol", NULLABLE_STRING),
    new Field("leader", NULLABLE_STRING),
    new Field("members", new ArrayOf(MEMBER_METADATA_V1)))
  private val GROUP_METADATA_PROTOCOL_TYPE_V0 = GROUP_METADATA_VALUE_SCHEMA_V0.get("protocol_type")
  private val GROUP_METADATA_GENERATION_V0 = GROUP_METADATA_VALUE_SCHEMA_V0.get("generation")
  private val GROUP_METADATA_PROTOCOL_V0 = GROUP_METADATA_VALUE_SCHEMA_V0.get("protocol")
  private val GROUP_METADATA_LEADER_V0 = GROUP_METADATA_VALUE_SCHEMA_V0.get("leader")
  private val GROUP_METADATA_MEMBERS_V0 = GROUP_METADATA_VALUE_SCHEMA_V0.get("members")
  private val GROUP_METADATA_PROTOCOL_TYPE_V1 = GROUP_METADATA_VALUE_SCHEMA_V1.get("protocol_type")
  private val GROUP_METADATA_GENERATION_V1 = GROUP_METADATA_VALUE_SCHEMA_V1.get("generation")
  private val GROUP_METADATA_PROTOCOL_V1 = GROUP_METADATA_VALUE_SCHEMA_V1.get("protocol")
  private val GROUP_METADATA_LEADER_V1 = GROUP_METADATA_VALUE_SCHEMA_V1.get("leader")
  private val GROUP_METADATA_MEMBERS_V1 = GROUP_METADATA_VALUE_SCHEMA_V1.get("members")

  // map of key-schema versions to key schemas
  private val MESSAGE_TYPE_SCHEMAS = Map(
    0 -> OFFSET_COMMIT_KEY_SCHEMA,
    1 -> OFFSET_COMMIT_KEY_SCHEMA,
    2 -> GROUP_METADATA_KEY_SCHEMA)

  // map of version to offset value schemas
  private val OFFSET_VALUE_SCHEMAS = Map(
    0 -> OFFSET_COMMIT_VALUE_SCHEMA_V0,
    1 -> OFFSET_COMMIT_VALUE_SCHEMA_V1)
  private val CURRENT_OFFSET_VALUE_SCHEMA_VERSION = 1.toShort

  // map of version to group metadata value schemas
  private val GROUP_VALUE_SCHEMAS = Map(0 -> GROUP_METADATA_VALUE_SCHEMA_V0,1 -> GROUP_METADATA_VALUE_SCHEMA_V1)
  // NOTE(review): writes use value-schema version 0 even though v1 exists; this matches
  // the borrowed kafka code — confirm before changing (reads support both versions below).
  private val CURRENT_GROUP_VALUE_SCHEMA_VERSION = 0.toShort

  private val CURRENT_OFFSET_KEY_SCHEMA = schemaForKey(CURRENT_OFFSET_KEY_SCHEMA_VERSION)
  private val CURRENT_GROUP_KEY_SCHEMA = schemaForKey(CURRENT_GROUP_KEY_SCHEMA_VERSION)
  private val CURRENT_OFFSET_VALUE_SCHEMA = schemaForOffset(CURRENT_OFFSET_VALUE_SCHEMA_VERSION)
  private val CURRENT_GROUP_VALUE_SCHEMA = schemaForGroup(CURRENT_GROUP_VALUE_SCHEMA_VERSION)

  /** Resolves the key schema for the given version, failing on unknown versions. */
  private def schemaForKey(version: Int) = {
    val schemaOpt = MESSAGE_TYPE_SCHEMAS.get(version)
    schemaOpt match {
      case Some(schema) => schema
      case _ => throw new KafkaException("Unknown offset schema version " + version)
    }
  }

  /** Resolves the offset value schema for the given version, failing on unknown versions. */
  private def schemaForOffset(version: Int) = {
    val schemaOpt = OFFSET_VALUE_SCHEMAS.get(version)
    schemaOpt match {
      case Some(schema) => schema
      case _ => throw new KafkaException("Unknown offset schema version " + version)
    }
  }

  /** Resolves the group metadata value schema for the given version, failing on unknown versions. */
  private def schemaForGroup(version: Int) = {
    val schemaOpt = GROUP_VALUE_SCHEMAS.get(version)
    schemaOpt match {
      case Some(schema) => schema
      case _ => throw new KafkaException("Unknown group metadata version " + version)
    }
  }

  /**
   * Generates the key for offset commit message for given (group, topic, partition).
   *
   * NOTE(review): the `versionId` parameter is unused — the key is always written with
   * CURRENT_OFFSET_KEY_SCHEMA_VERSION; confirm whether it can be dropped.
   *
   * @return key for offset commit message (2-byte version prefix followed by the struct)
   */
  private def offsetCommitKey(group: String, topic: String, partition: Int, versionId: Short = 0): Array[Byte] = {
    val key = new Struct(CURRENT_OFFSET_KEY_SCHEMA)
    key.set(OFFSET_KEY_GROUP_FIELD, group)
    key.set(OFFSET_KEY_TOPIC_FIELD, topic)
    key.set(OFFSET_KEY_PARTITION_FIELD, partition)
    val byteBuffer = ByteBuffer.allocate(2 /* version */ + key.sizeOf)
    byteBuffer.putShort(CURRENT_OFFSET_KEY_SCHEMA_VERSION)
    key.writeTo(byteBuffer)
    byteBuffer.array()
  }

  /**
   * Generates the key for group metadata message for given group.
   *
   * @return key bytes for group metadata message (2-byte version prefix followed by the struct)
   */
  private def groupMetadataKey(group: String): Array[Byte] = {
    val key = new Struct(CURRENT_GROUP_KEY_SCHEMA)
    key.set(GROUP_KEY_GROUP_FIELD, group)
    val byteBuffer = ByteBuffer.allocate(2 /* version */ + key.sizeOf)
    byteBuffer.putShort(CURRENT_GROUP_KEY_SCHEMA_VERSION)
    key.writeTo(byteBuffer)
    byteBuffer.array()
  }

  /**
   * Generates the payload for offset commit message from given offset and metadata.
   *
   * @param offsetAndMetadata consumer's current offset and metadata
   * @return payload for offset commit message
   */
  private def offsetCommitValue(offsetAndMetadata: OffsetAndMetadata): Array[Byte] = {
    // generate commit value with schema version 1
    val value = new Struct(CURRENT_OFFSET_VALUE_SCHEMA)
    value.set(OFFSET_VALUE_OFFSET_FIELD_V1, offsetAndMetadata.offset)
    value.set(OFFSET_VALUE_METADATA_FIELD_V1, offsetAndMetadata.metadata)
    value.set(OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1, offsetAndMetadata.commitTimestamp)
    value.set(OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1, offsetAndMetadata.expireTimestamp)
    val byteBuffer = ByteBuffer.allocate(2 /* version */ + value.sizeOf)
    byteBuffer.putShort(CURRENT_OFFSET_VALUE_SCHEMA_VERSION)
    value.writeTo(byteBuffer)
    byteBuffer.array()
  }

  /**
   * Decodes the offset messages' key.
   *
   * @param buffer input byte-buffer (positioned at the 2-byte version prefix)
   * @return a [[BaseKey]]: an [[OffsetKey]] for versions 0/1, a [[GroupMetadataKey]] for version 2
   */
  def readMessageKey(buffer: ByteBuffer): BaseKey = {
    val version = buffer.getShort
    val keySchema = schemaForKey(version)
    val key = keySchema.read(buffer).asInstanceOf[Struct]
    if (version <= CURRENT_OFFSET_KEY_SCHEMA_VERSION) {
      // version 0 and 1 refer to offset
      val group = key.get(OFFSET_KEY_GROUP_FIELD).asInstanceOf[String]
      val topic = key.get(OFFSET_KEY_TOPIC_FIELD).asInstanceOf[String]
      val partition = key.get(OFFSET_KEY_PARTITION_FIELD).asInstanceOf[Int]
      OffsetKey(version, GroupTopicPartition(group, new TopicPartition(topic, partition)))
    } else if (version == CURRENT_GROUP_KEY_SCHEMA_VERSION) {
      // version 2 refers to group metadata
      val group = key.get(GROUP_KEY_GROUP_FIELD).asInstanceOf[String]
      GroupMetadataKey(version, group)
    } else {
      throw new IllegalStateException("Unknown version " + version + " for group metadata message")
    }
  }

  /**
   * Decodes the offset messages' payload and retrieves offset and metadata from it.
   *
   * @param buffer input byte-buffer; null denotes a tombstone and yields null
   * @return an offset-metadata object from the message
   */
  def readOffsetMessageValue(buffer: ByteBuffer): OffsetAndMetadata = {
    if(buffer == null) { // tombstone
      null
    } else {
      val version = buffer.getShort
      val valueSchema = schemaForOffset(version)
      val value = valueSchema.read(buffer).asInstanceOf[Struct]
      if (version == 0) {
        // v0 carries a single timestamp
        val offset = value.get(OFFSET_VALUE_OFFSET_FIELD_V0).asInstanceOf[Long]
        val metadata = value.get(OFFSET_VALUE_METADATA_FIELD_V0).asInstanceOf[String]
        val timestamp = value.get(OFFSET_VALUE_TIMESTAMP_FIELD_V0).asInstanceOf[Long]
        OffsetAndMetadata(offset, metadata, timestamp)
      } else if (version == 1) {
        // v1 distinguishes commit and expiration timestamps
        val offset = value.get(OFFSET_VALUE_OFFSET_FIELD_V1).asInstanceOf[Long]
        val metadata = value.get(OFFSET_VALUE_METADATA_FIELD_V1).asInstanceOf[String]
        val commitTimestamp = value.get(OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1).asInstanceOf[Long]
        val expireTimestamp = value.get(OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1).asInstanceOf[Long]
        OffsetAndMetadata(offset, metadata, commitTimestamp, expireTimestamp)
      } else {
        throw new IllegalStateException("Unknown offset message version")
      }
    }
  }

  /**
   * Decodes the group metadata messages' payload and retrieves its member metadata from it.
   *
   * @param buffer input byte-buffer; null denotes a tombstone and yields null
   * @return a group metadata object from the message
   */
  def readGroupMessageValue(groupId: String, buffer: ByteBuffer): GroupMetadata = {
    if(buffer == null) { // tombstone
      null
    } else {
      val version = buffer.getShort
      val valueSchema = schemaForGroup(version)
      val value = valueSchema.read(buffer).asInstanceOf[Struct]
      if (version == 0) {
        val protocolType = value.get(GROUP_METADATA_PROTOCOL_TYPE_V0).asInstanceOf[String]
        val generationId = value.get(GROUP_METADATA_GENERATION_V0).asInstanceOf[Int]
        val leaderId = value.get(GROUP_METADATA_LEADER_V0).asInstanceOf[String]
        val protocol = value.get(GROUP_METADATA_PROTOCOL_V0).asInstanceOf[String]
        val group = new GroupMetadata(groupId, protocolType, generationId, leaderId, protocol)
        // rebuild each member from its serialized subscription/assignment blobs
        value.getArray(GROUP_METADATA_MEMBERS_V0).foreach {
          case memberMetadataObj =>
            val memberMetadata = memberMetadataObj.asInstanceOf[Struct]
            val memberId = memberMetadata.get(MEMBER_METADATA_MEMBER_ID_V0).asInstanceOf[String]
            val clientId = memberMetadata.get(MEMBER_METADATA_CLIENT_ID_V0).asInstanceOf[String]
            val clientHost = memberMetadata.get(MEMBER_METADATA_CLIENT_HOST_V0).asInstanceOf[String]
            val subscription = ConsumerProtocol.deserializeSubscription(memberMetadata.get(MEMBER_METADATA_SUBSCRIPTION_V0).asInstanceOf[ByteBuffer])
            val assignment = ConsumerProtocol.deserializeAssignment(memberMetadata.get(MEMBER_METADATA_ASSIGNMENT_V0).asInstanceOf[ByteBuffer])
            import collection.JavaConverters._
            val member = new MemberMetadata(
              memberId
              , groupId
              , clientId
              , clientHost
              //, sessionTimeout
              , List((group.protocol, subscription.topics().asScala.toSet))
              , assignment.partitions().asScala.map(tp => tp.topic() -> tp.partition()).toSet
            )
            group.add(memberId, member)
        }
        group
      } else if (version == 1){
        val protocolType = value.get(GROUP_METADATA_PROTOCOL_TYPE_V1).asInstanceOf[String]
        val generationId = value.get(GROUP_METADATA_GENERATION_V1).asInstanceOf[Int]
        val leaderId = value.get(GROUP_METADATA_LEADER_V1).asInstanceOf[String]
        val protocol = value.get(GROUP_METADATA_PROTOCOL_V1).asInstanceOf[String]
        val group = new GroupMetadata(groupId, protocolType, generationId, leaderId, protocol)
        // same as v0 but reading v1 member fields (rebalance_timeout is skipped)
        value.getArray(GROUP_METADATA_MEMBERS_V1).foreach {
          case memberMetadataObj =>
            val memberMetadata = memberMetadataObj.asInstanceOf[Struct]
            val memberId = memberMetadata.get(MEMBER_METADATA_MEMBER_ID_V1).asInstanceOf[String]
            val clientId = memberMetadata.get(MEMBER_METADATA_CLIENT_ID_V1).asInstanceOf[String]
            val clientHost = memberMetadata.get(MEMBER_METADATA_CLIENT_HOST_V1).asInstanceOf[String]
            //val sessionTimeout = memberMetadata.get(MEMBER_METADATA_SESSION_TIMEOUT_V0).asInstanceOf[Int]
            val subscription = ConsumerProtocol.deserializeSubscription(memberMetadata.get(MEMBER_METADATA_SUBSCRIPTION_V1).asInstanceOf[ByteBuffer])
            val assignment = ConsumerProtocol.deserializeAssignment(memberMetadata.get(MEMBER_METADATA_ASSIGNMENT_V1).asInstanceOf[ByteBuffer])
            import collection.JavaConverters._
            val member = new MemberMetadata(
              memberId
              , groupId
              , clientId
              , clientHost
              //, sessionTimeout
              , List((group.protocol, subscription.topics().asScala.toSet))
              , assignment.partitions().asScala.map(tp => tp.topic() -> tp.partition()).toSet
            )
            group.add(memberId, member)
        }
        group
      } else {
        throw new IllegalStateException("Unknown group metadata message version")
      }
    }
  }
}
/**
 * Mutable in-memory view of a consumer group reconstructed from the
 * __consumer_offsets topic.
 */
case class GroupMetadata(groupId: String
                         , protocolType: String
                         , generationId: Int
                         , leaderId: String
                         , protocol: String
                        ) {

  // Registered members keyed by member id.
  private val members = collection.mutable.HashMap.empty[String, MemberMetadata]

  /** True while no member has been registered. */
  def isEmpty = members.isEmpty

  /** Snapshot of the metadata of every registered member. */
  def allMemberMetadata = members.valuesIterator.toList

  /** Registers a member; it must share at least one protocol with the group. */
  def add(memberId: String, member: MemberMetadata) {
    assert(supportsProtocols(member.protocols))
    members.update(memberId, member)
  }

  // Protocols supported by every current member (intersection across members).
  private def candidateProtocols =
    allMemberMetadata
      .map(_.protocols)
      .reduceLeft(_ intersect _)

  /** An empty group supports anything; otherwise the protocol sets must overlap. */
  def supportsProtocols(memberProtocols: Set[String]) =
    isEmpty || (memberProtocols intersect candidateProtocols).nonEmpty
}
object MemberMetadata {
  import collection.JavaConverters._

  /**
   * Builds a [[MemberMetadata]] from a DescribeGroups response entry by
   * deserializing the member's subscription and assignment blobs.
   */
  def from(groupId: String, groupSummary: DescribeGroupsResponse.GroupMetadata, memberSummary: DescribeGroupsResponse.GroupMember) : MemberMetadata = {
    val subscribed = ConsumerProtocol.deserializeSubscription(ByteBuffer.wrap(memberSummary.memberMetadata().array()))
    val assigned = ConsumerProtocol.deserializeAssignment(ByteBuffer.wrap(memberSummary.memberAssignment().array()))
    val subscribedTopics = subscribed.topics().asScala.toSet
    val assignedPartitions = assigned.partitions().asScala.map(tp => tp.topic() -> tp.partition()).toSet
    MemberMetadata(
      memberSummary.memberId
      , groupId
      , memberSummary.clientId
      , memberSummary.clientHost
      //, -1
      , List((groupSummary.protocol, subscribedTopics))
      , assignedPartitions
    )
  }
}
/**
 * Immutable description of a single consumer-group member.
 *
 * @param supportedProtocols (protocol name, subscribed topics) pairs
 * @param assignment         (topic, partition) pairs currently assigned to the member
 */
case class MemberMetadata(memberId: String
                          , groupId: String
                          , clientId: String
                          , clientHost: String
                          //, sessionTimeoutMs: Int
                          , supportedProtocols: List[(String, Set[String])]
                          , assignment: Set[(String, Int)]
                         ) {
  /** Names of all partition-assignment protocols this member supports. */
  def protocols = supportedProtocols.iterator.map { case (name, _) => name }.toSet
}
| krux/kafka-manager | app/kafka/manager/utils/zero90/GroupMetadataManager.scala | Scala | apache-2.0 | 18,939 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.metadata
import org.apache.flink.annotation.Experimental
import org.apache.flink.configuration.ConfigOption
import org.apache.flink.configuration.ConfigOptions.key
import org.apache.flink.table.planner.calcite.FlinkContext
import org.apache.flink.table.planner.plan.logical.{LogicalWindow, SlidingGroupWindow, TumblingGroupWindow}
import org.apache.flink.table.planner.plan.nodes.calcite.{Expand, Rank, WindowAggregate}
import org.apache.flink.table.planner.plan.nodes.physical.batch._
import org.apache.flink.table.planner.plan.stats.ValueInterval
import org.apache.flink.table.planner.plan.utils.AggregateUtil.{hasTimeIntervalType, toLong}
import org.apache.flink.table.planner.plan.utils.{FlinkRelMdUtil, SortUtil}
import org.apache.calcite.adapter.enumerable.EnumerableLimit
import org.apache.calcite.plan.volcano.RelSubset
import org.apache.calcite.rel.core._
import org.apache.calcite.rel.metadata._
import org.apache.calcite.rel.{RelNode, SingleRel}
import org.apache.calcite.rex.{RexLiteral, RexNode}
import org.apache.calcite.util._
import java.lang.{Double => JDouble, Long => JLong}
import scala.collection.JavaConversions._
/**
 * FlinkRelMdRowCount supplies an implementation of
* [[RelMetadataQuery#getRowCount]] for the standard logical algebra.
*/
class FlinkRelMdRowCount private extends MetadataHandler[BuiltInMetadata.RowCount] {
def getDef: MetadataDef[BuiltInMetadata.RowCount] = BuiltInMetadata.RowCount.DEF
def getRowCount(rel: TableScan, mq: RelMetadataQuery): JDouble = rel.estimateRowCount(mq)
def getRowCount(rel: Values, mq: RelMetadataQuery): JDouble = rel.estimateRowCount(mq)
def getRowCount(rel: Project, mq: RelMetadataQuery): JDouble = mq.getRowCount(rel.getInput)
def getRowCount(rel: Filter, mq: RelMetadataQuery): JDouble =
RelMdUtil.estimateFilteredRows(rel.getInput, rel.getCondition, mq)
def getRowCount(rel: Calc, mq: RelMetadataQuery): JDouble =
RelMdUtil.estimateFilteredRows(rel.getInput, rel.getProgram, mq)
def getRowCount(rel: Expand, mq: RelMetadataQuery): JDouble = rel.estimateRowCount(mq)
def getRowCount(rel: Exchange, mq: RelMetadataQuery): JDouble = rel.estimateRowCount(mq)
def getRowCount(rel: Rank, mq: RelMetadataQuery): JDouble = rel.estimateRowCount(mq)
def getRowCount(rel: Sort, mq: RelMetadataQuery): JDouble = {
getRowCountOfSort(rel, rel.offset, rel.fetch, mq)
}
def getRowCount(rel: EnumerableLimit, mq: RelMetadataQuery): JDouble = {
getRowCountOfSort(rel, rel.offset, rel.fetch, mq)
}
private def getRowCountOfSort(
rel: SingleRel,
offset: RexNode,
fetch: RexNode,
mq: RelMetadataQuery): JDouble = {
val inputRowCount = mq.getRowCount(rel.getInput)
if (inputRowCount == null) {
return null
}
val limitStart = SortUtil.getLimitStart(offset)
val rowCount = Math.max(inputRowCount - limitStart, 0D)
if (fetch != null) {
val limit = RexLiteral.intValue(fetch)
if (limit < rowCount) {
return limit.toDouble
}
}
rowCount
}
def getRowCount(rel: Aggregate, mq: RelMetadataQuery): JDouble = {
val (outputRowCnt, _) = getRowCountOfAgg(rel, rel.getGroupSet, rel.getGroupSets.size(), mq)
outputRowCnt
}
/**
* Get output rowCount and input rowCount of agg
*
* @param rel agg relNode
* @param groupSet agg groupSet
* @param groupSetsSize agg groupSets count
* @param mq metadata query
* @return a tuple, the first element is output rowCount, second one is input rowCount
*/
private def getRowCountOfAgg(
rel: SingleRel,
groupSet: ImmutableBitSet,
groupSetsSize: Int,
mq: RelMetadataQuery): (JDouble, JDouble) = {
val input = rel.getInput
val inputRowCount = mq.getRowCount(input)
if (groupSet.cardinality() == 0) {
return (1.0, inputRowCount)
}
// rowCount is the cardinality of the group by columns
val distinctRowCount = mq.getDistinctRowCount(input, groupSet, null)
val groupCount = groupSet.cardinality()
val d: JDouble = if (distinctRowCount == null) {
val ratio = FlinkRelMdUtil.getAggregationRatioIfNdvUnavailable(groupCount)
NumberUtil.multiply(inputRowCount, ratio)
} else {
NumberUtil.min(distinctRowCount, inputRowCount)
}
if (d != null) {
// Grouping sets multiply
(d * groupSetsSize, inputRowCount)
} else {
(null, inputRowCount)
}
}
def getRowCount(rel: BatchExecGroupAggregateBase, mq: RelMetadataQuery): JDouble = {
getRowCountOfBatchExecAgg(rel, mq)
}
private def getRowCountOfBatchExecAgg(rel: SingleRel, mq: RelMetadataQuery): JDouble = {
val input = rel.getInput
val (grouping, isFinal, isMerge) = rel match {
case agg: BatchExecGroupAggregateBase =>
(ImmutableBitSet.of(agg.getGrouping: _*), agg.isFinal, agg.isMerge)
case windowAgg: BatchExecWindowAggregateBase =>
(ImmutableBitSet.of(windowAgg.getGrouping: _*), windowAgg.isFinal, windowAgg.isMerge)
case _ => throw new IllegalArgumentException(s"Unknown aggregate type ${rel.getRelTypeName}!")
}
val ndvOfGroupKeysOnGlobalAgg: JDouble = if (grouping.isEmpty) {
1.0
} else {
// rowCount is the cardinality of the group by columns
val distinctRowCount = mq.getDistinctRowCount(input, grouping, null)
val childRowCount = mq.getRowCount(input)
if (distinctRowCount == null) {
if (isFinal && isMerge) {
// Avoid apply aggregation ratio twice when calculate row count of global agg
// which has local agg.
childRowCount
} else {
val ratio = FlinkRelMdUtil.getAggregationRatioIfNdvUnavailable(grouping.length)
NumberUtil.multiply(childRowCount, ratio)
}
} else {
NumberUtil.min(distinctRowCount, childRowCount)
}
}
if (isFinal) {
ndvOfGroupKeysOnGlobalAgg
} else {
val inputRowCnt = mq.getRowCount(input)
val config = rel.getCluster.getPlanner.getContext.unwrap(classOf[FlinkContext]).getTableConfig
val parallelism = (inputRowCnt /
config.getConfiguration.getLong(
FlinkRelMdRowCount.TABLE_OPTIMIZER_ROWS_PER_LOCALAGG) + 1).toInt
if (parallelism == 1) {
ndvOfGroupKeysOnGlobalAgg
} else if (grouping.isEmpty) {
// output rowcount of local agg is parallelism for agg which has no group keys
parallelism.toDouble
} else {
val distinctRowCount = mq.getDistinctRowCount(input, grouping, null)
if (distinctRowCount == null) {
ndvOfGroupKeysOnGlobalAgg
} else {
FlinkRelMdUtil.getRowCountOfLocalAgg(parallelism, inputRowCnt, ndvOfGroupKeysOnGlobalAgg)
}
}
}
}
def getRowCount(rel: WindowAggregate, mq: RelMetadataQuery): JDouble = {
val (ndvOfGroupKeys, inputRowCount) = getRowCountOfAgg(rel, rel.getGroupSet, 1, mq)
estimateRowCountOfWindowAgg(ndvOfGroupKeys, inputRowCount, rel.getWindow)
}
def getRowCount(rel: BatchExecWindowAggregateBase, mq: RelMetadataQuery): JDouble = {
val ndvOfGroupKeys = getRowCountOfBatchExecAgg(rel, mq)
val inputRowCount = mq.getRowCount(rel.getInput)
estimateRowCountOfWindowAgg(ndvOfGroupKeys, inputRowCount, rel.getWindow)
}
private def estimateRowCountOfWindowAgg(
ndv: JDouble,
inputRowCount: JDouble,
window: LogicalWindow): JDouble = {
if (ndv == null) {
null
} else {
// simply assume expand factor of TumblingWindow/SessionWindow/SlideWindowWithoutOverlap is 2
// SlideWindowWithOverlap is 4.
// Introduce expand factor here to distinguish output rowCount of normal agg with all kinds of
// window aggregates.
val expandFactorOfTumblingWindow = 2D
val expandFactorOfNoOverLapSlidingWindow = 2D
val expandFactorOfOverLapSlidingWindow = 4D
val expandFactorOfSessionWindow = 2D
window match {
case TumblingGroupWindow(_, _, size) if hasTimeIntervalType(size) =>
Math.min(expandFactorOfTumblingWindow * ndv, inputRowCount)
case SlidingGroupWindow(_, _, size, slide) if hasTimeIntervalType(size) =>
val sizeValue = toLong(size)
val slideValue = toLong(slide)
if (sizeValue > slideValue) {
// only slideWindow which has overlap may generates more records than input
expandFactorOfOverLapSlidingWindow * ndv
} else {
Math.min(expandFactorOfNoOverLapSlidingWindow * ndv, inputRowCount)
}
case _ => Math.min(expandFactorOfSessionWindow * ndv, inputRowCount)
}
}
}
def getRowCount(rel: Window, mq: RelMetadataQuery): JDouble = getRowCountOfOverAgg(rel, mq)
def getRowCount(rel: BatchExecOverAggregate, mq: RelMetadataQuery): JDouble =
getRowCountOfOverAgg(rel, mq)
private def getRowCountOfOverAgg(overAgg: SingleRel, mq: RelMetadataQuery): JDouble =
mq.getRowCount(overAgg.getInput)
def getRowCount(join: Join, mq: RelMetadataQuery): JDouble = {
join.getJoinType match {
case JoinRelType.SEMI | JoinRelType.ANTI =>
val semiJoinSelectivity = FlinkRelMdUtil.makeSemiAntiJoinSelectivityRexNode(mq, join)
val selectivity = mq.getSelectivity(join.getLeft, semiJoinSelectivity)
val leftRowCount = mq.getRowCount(join.getLeft)
return NumberUtil.multiply(leftRowCount, selectivity)
case _ => // do nothing
}
val leftChild = join.getLeft
val rightChild = join.getRight
val leftRowCount = mq.getRowCount(leftChild)
val rightRowCount = mq.getRowCount(rightChild)
if (leftRowCount == null || rightRowCount == null) {
return null
}
val joinInfo = JoinInfo.of(leftChild, rightChild, join.getCondition)
if (joinInfo.leftSet().nonEmpty) {
val innerJoinRowCount = getEquiInnerJoinRowCount(join, mq, leftRowCount, rightRowCount)
require(innerJoinRowCount != null)
// Make sure outputRowCount won't be too small based on join type.
join.getJoinType match {
case JoinRelType.INNER => innerJoinRowCount
case JoinRelType.LEFT =>
// All rows from left side should be in the result.
math.max(leftRowCount, innerJoinRowCount)
case JoinRelType.RIGHT =>
// All rows from right side should be in the result.
math.max(rightRowCount, innerJoinRowCount)
case JoinRelType.FULL =>
// T(A FULL JOIN B) = T(A LEFT JOIN B) + T(A RIGHT JOIN B) - T(A INNER JOIN B)
math.max(leftRowCount, innerJoinRowCount) +
math.max(rightRowCount, innerJoinRowCount) - innerJoinRowCount
}
} else {
val rexBuilder = join.getCluster.getRexBuilder
val crossJoin = copyJoinWithNewCondition(join, rexBuilder.makeLiteral(true))
val selectivity = mq.getSelectivity(crossJoin, join.getCondition)
(leftRowCount * rightRowCount) * selectivity
}
}
private def getEquiInnerJoinRowCount(
join: Join,
mq: RelMetadataQuery,
leftRowCount: JDouble,
rightRowCount: JDouble): JDouble = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
val leftChild = join.getLeft
val rightChild = join.getRight
val rexBuilder = join.getCluster.getRexBuilder
val condition = join.getCondition
val joinInfo = JoinInfo.of(leftChild, rightChild, condition)
// the leftKeys length equals to rightKeys, so it's ok to only check leftKeys length
require(joinInfo.leftKeys.nonEmpty)
val joinKeyDisjoint = joinInfo.leftKeys.zip(joinInfo.rightKeys).exists {
case (leftKey, rightKey) =>
val leftInterval = fmq.getColumnInterval(leftChild, leftKey)
val rightInterval = fmq.getColumnInterval(rightChild, rightKey)
if (leftInterval != null && rightInterval != null) {
!ValueInterval.isIntersected(leftInterval, rightInterval)
} else {
false
}
}
// One of the join key pairs is disjoint, thus the two sides of join is disjoint.
if (joinKeyDisjoint) {
return 0D
}
val leftKeySet = joinInfo.leftSet()
val rightKeySet = joinInfo.rightSet()
val leftNdv = fmq.getDistinctRowCount(leftChild, leftKeySet, null)
val rightNdv = fmq.getDistinctRowCount(rightChild, rightKeySet, null)
// estimate selectivity of non-equi
val selectivityOfNonEquiPred: JDouble = if (joinInfo.isEqui) {
1D
} else {
val nonEquiPred = joinInfo.getRemaining(rexBuilder)
val equiPred = RelMdUtil.minusPreds(rexBuilder, condition, nonEquiPred)
val joinWithOnlyEquiPred = copyJoinWithNewCondition(join, equiPred)
fmq.getSelectivity(joinWithOnlyEquiPred, nonEquiPred)
}
if (leftNdv != null && rightNdv != null) {
// selectivity of equi part is 1 / Max(leftNdv, rightNdv)
val selectivityOfEquiPred = Math.min(1D, 1D / Math.max(leftNdv, rightNdv))
return leftRowCount * rightRowCount * selectivityOfEquiPred * selectivityOfNonEquiPred
}
val leftKeysAreUnique = fmq.areColumnsUnique(leftChild, leftKeySet)
val rightKeysAreUnique = fmq.areColumnsUnique(rightChild, rightKeySet)
if (leftKeysAreUnique != null && rightKeysAreUnique != null &&
(leftKeysAreUnique || rightKeysAreUnique)) {
val outputRowCount = if (leftKeysAreUnique && rightKeysAreUnique) {
// if both leftKeys and rightKeys are both unique,
// rowCount = Min(leftRowCount) * selectivity of non-equi
Math.min(leftRowCount, rightRowCount) * selectivityOfNonEquiPred
} else if (leftKeysAreUnique) {
rightRowCount * selectivityOfNonEquiPred
} else {
leftRowCount * selectivityOfNonEquiPred
}
return outputRowCount
}
// if joinCondition has no ndv stats and no uniqueKeys stats,
// rowCount = (leftRowCount + rightRowCount) * join condition selectivity
val crossJoin = copyJoinWithNewCondition(join, rexBuilder.makeLiteral(true))
val selectivity = fmq.getSelectivity(crossJoin, condition)
(leftRowCount + rightRowCount) * selectivity
}
/**
 * Clones the given join, swapping in `newCondition` as its predicate while
 * keeping traits, inputs, join type and the semi-join flag untouched.
 */
private def copyJoinWithNewCondition(join: Join, newCondition: RexNode): Join = {
  val left = join.getLeft
  val right = join.getRight
  join.copy(join.getTraitSet, newCondition, left, right, join.getJoinType, join.isSemiJoinDone)
}
/**
 * Row count of a Union is the sum over all inputs. If any input's count is
 * unknown (null), the whole estimate is unknown.
 */
def getRowCount(rel: Union, mq: RelMetadataQuery): JDouble = {
  val rowCounts = rel.getInputs.map(mq.getRowCount)
  if (rowCounts.exists(_ == null)) {
    null
  } else {
    rowCounts.map(_.doubleValue()).sum
  }
}
/**
 * Estimates an Intersect as the smallest known input row count (it cannot
 * produce more rows than its smallest input). Stays null until a non-null
 * count has been seen; null counts encountered afterwards are ignored.
 */
def getRowCount(rel: Intersect, mq: RelMetadataQuery): JDouble = {
  var best: JDouble = null
  for (input <- rel.getInputs) {
    val rowCount = mq.getRowCount(input)
    if (best == null || (rowCount != null && rowCount < best)) {
      best = rowCount
    }
  }
  best
}
/**
 * Estimates a Minus the same way as Intersect: the smallest known input row
 * count. Unknown (null) counts are skipped once a known count has been seen.
 */
def getRowCount(rel: Minus, mq: RelMetadataQuery): JDouble = {
  rel.getInputs.map(mq.getRowCount).foldLeft(null: JDouble) { (best, rowCount) =>
    if (best == null || (rowCount != null && rowCount < best)) rowCount else best
  }
}
/**
 * Row count for a planner RelSubset: the minimum estimate over all member
 * rels, falling back to the best/original rel while CALCITE-1048 is open.
 * Returns a large default (1e6) when no member produced an estimate.
 */
def getRowCount(subset: RelSubset, mq: RelMetadataQuery): JDouble = {
  if (!Bug.CALCITE_1048_FIXED) {
    // Workaround: only consult the best (or original) rel of the subset.
    val rel = Util.first(subset.getBest, subset.getOriginal)
    return mq.getRowCount(rel)
  }
  val v = subset.getRels.foldLeft(null.asInstanceOf[JDouble]) {
    (min, rel) =>
      try {
        val rowCount = mq.getRowCount(rel)
        NumberUtil.min(min, rowCount)
      } catch {
        // ignore this rel; there will be other, non-cyclic ones
        case e: CyclicMetadataException => min
        // NOTE(review): deliberately swallows every Throwable (printing it) so a
        // single broken rel cannot abort estimation — fatal VM errors are also
        // swallowed here; confirm this breadth is intended.
        case e: Throwable =>
          e.printStackTrace()
          min
      }
  }
  // if set is empty, estimate large
  Util.first(v, 1e6d)
}
/**
 * Catch-all implementation for
 * [[BuiltInMetadata.RowCount#getRowCount()]],
 * invoked using reflection.
 *
 * Falls back to the relational operator's own estimate when no specialized
 * handler above matches the node type.
 *
 * @see org.apache.calcite.rel.metadata.RelMetadataQuery#getRowCount(RelNode)
 */
def getRowCount(rel: RelNode, mq: RelMetadataQuery): JDouble = rel.estimateRowCount(mq)
}
object FlinkRelMdRowCount {

  private val INSTANCE = new FlinkRelMdRowCount

  // Reflective metadata provider that routes RowCount queries to INSTANCE.
  val SOURCE: RelMetadataProvider = ReflectiveRelMetadataProvider.reflectiveSource(
    BuiltInMethod.ROW_COUNT.method, INSTANCE)

  // This is an experimental config and may be removed later.
  @Experimental
  val TABLE_OPTIMIZER_ROWS_PER_LOCALAGG: ConfigOption[JLong] =
    key("table.optimizer.rows-per-local-agg")
      .defaultValue(JLong.valueOf(1000000L))
      .withDescription("Sets estimated number of records that one local-agg processes. " +
        "Optimizer will infer whether to use local/global aggregate according to it.")
}
| jinglining/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdRowCount.scala | Scala | apache-2.0 | 17,761 |
package lv.ddgatve.velesanas.cleanup
import scala.xml.XML
import scala.util.matching.Regex
import scala.xml.Node
/**
 * Reads a screen-scraper configuration XML file and exposes the patterns and
 * extraction rules of one named profile.
 *
 * @param f       path of the configuration XML file (loaded eagerly)
 * @param profile id of the profile element to use; construction throws
 *                NoSuchElementException if no such profile exists
 */
class ConfigurationReader(f: String, profile: String) {

  /** Parsed configuration document. */
  val conf = XML.loadFile(f)

  // All named regex patterns, keyed by their `id` attribute. Built immutably
  // instead of mutating in a loop; kept a `var` to preserve the public setter.
  var patternMap: Map[String, Regex] =
    (conf \\ "profiles" \\ "patterns" \\ "pattern").map { p =>
      p.attribute("id").head.text -> p.text.trim.r
    }.toMap

  // The <profile> element whose id attribute matches the requested profile.
  val theProfile = ((conf \\ "profiles" \\ "profile") filter
    { _.attribute("id").head.text == profile }).head

  val urlPrefix = (theProfile \\ "urlPrefix").head.text.trim

  def getUrlPrefix(): String = urlPrefix

  def getPatternMap(): Map[String, Regex] = patternMap

  def getProfile = theProfile

  /**
   * Per-field extractors: (field name, pattern, capture-group index).
   * Throws NoSuchElementException when a referenced patternId is unknown.
   */
  def getIndividualExtractors(): List[(String, Regex, Int)] = {
    val items = theProfile \\ "individualExtractors" \\ "item"
    items.map { x =>
      (x.attribute("name").head.text,
        patternMap(x.attribute("patternId").head.text),
        x.attribute("group").head.text.toInt)
    }.toList
  }

  /** Pattern used to locate the result table in the fetched page. */
  def getTableExtractor(): Regex =
    patternMap((theProfile \\ "tableExtractor").head.attribute("patternId").head.text)

  /** Column names of the result table, in document order. */
  def getTableColumns(): List[String] =
    ((theProfile \\ "tableColumns" \\ "item") map (_.attribute("name").head.text)).toList

  /**
   * Clean-up rules: (pattern, occurrence count, replacement text).
   * NOTE(review): patternId is trimmed here but not in getIndividualExtractors
   * or getTableExtractor — presumably ids never carry whitespace; confirm
   * before unifying the behavior.
   */
  def getTidyPatterns(): List[(Regex, Int, String)] = {
    val items = theProfile \\ "tidyPatterns" \\ "item"
    items.map { x =>
      (patternMap(x.attribute("patternId").head.text.trim),
        x.attribute("num").head.text.trim.toInt,
        x.head.text.trim)
    }.toList
  }
}
} | kapsitis/ddgatve-screenscrapers | src/main/scala/lv/ddgatve/velesanas/cleanup/ConfigurationReader.scala | Scala | cc0-1.0 | 1,605 |
package akka.persistence.kafka
import juju.messages.DomainEvent
/**
 * Maps persisted events to Kafka topics. Domain events go to the shared
 * "events" topic plus a per-event topic prefixed with the bounded context;
 * everything else goes to "infrastructure".
 */
class DomainEventTopicMapper extends EventTopicMapper {

  def getBoundedContext(e: DomainEvent) = "finance"

  def getEventName(e: DomainEvent) = e.getClass.getSimpleName

  def topicsFor(event: Event): scala.collection.immutable.Seq[String] =
    event.data match {
      case e: DomainEvent =>
        val context = getBoundedContext(e)
        val name = getEventName(e)
        // Prefix with the bounded context unless it is absent/blank.
        val topic =
          if (context == null || context.trim.isEmpty) name
          else s"${context}_$name"
        List("events", topic)
      case _ =>
        List("infrastructure")
    }
}
| brokersquare/juju | juju-kafka/src/main/scala/akka/persistence/kafka/DomainEventTopicMapper.scala | Scala | apache-2.0 | 651 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.Locale
import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
import org.mockito.Mockito._
import org.apache.spark.TestUtils.{assertNotSpilled, assertSpilled}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
import org.apache.spark.sql.catalyst.expressions.{Ascending, GenericRow, SortOrder}
import org.apache.spark.sql.catalyst.plans.logical.Filter
import org.apache.spark.sql.execution.{BinaryExecNode, FilterExec, SortExec, SparkPlan}
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
import org.apache.spark.sql.execution.joins._
import org.apache.spark.sql.execution.python.BatchEvalPythonExec
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.StructType
class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlanHelper {
import testImplicits._
// Walks the physical plan bottom-up and, for every SortExec node, verifies via
// Mockito that cleanupResources() was invoked on the node and its row sorter.
// NOTE(review): `spy(s)` creates a fresh spy and `verify` runs immediately with
// no interactions recorded on that spy in between — it is unclear how this can
// observe calls made on the original SortExec; confirm the check actually fires.
private def attachCleanupResourceChecker(plan: SparkPlan): Unit = {
  // SPARK-21492: Check cleanupResources are finally triggered in SortExec node for every
  // test case
  plan.foreachUp {
    case s: SortExec =>
      val sortExec = spy(s)
      verify(sortExec, atLeastOnce).cleanupResources()
      verify(sortExec.rowSorter, atLeastOnce).cleanupResources()
    case _ =>
  }
}
// Wraps QueryTest.checkAnswer so every checked query also runs the SortExec
// cleanup-resource verification against its (pre-execution) spark plan.
override protected def checkAnswer(df: => DataFrame, rows: Seq[Row]): Unit = {
  attachCleanupResourceChecker(df.queryExecution.sparkPlan)
  super.checkAnswer(df, rows)
}
// Registers the shared temp views (testData, testData2, testData3, upperCaseData,
// lowerCaseData, allNulls, ...) referenced by the queries in the tests below.
setupTestData()
/** Size estimate (in bytes) taken from the optimized plan's statistics. */
def statisticSizeInByte(df: DataFrame): BigInt = {
  val optimized = df.queryExecution.optimizedPlan
  optimized.stats.sizeInBytes
}
// A simple equi-join should yield exactly one join-plan candidate from
// the planner's JoinSelection strategy.
test("equi-join is hash-join") {
  val x = testData2.as("x")
  val y = testData2.as("y")
  val join = x.join(y, $"x.a" === $"y.a", "inner").queryExecution.optimizedPlan
  val planned = spark.sessionState.planner.JoinSelection(join)
  assert(planned.size === 1)
}
/**
 * Runs the given SQL and asserts its physical plan contains exactly one join
 * operator whose concrete class is the expected one.
 *
 * @param pair (SQL string, expected physical join operator class)
 */
def assertJoin(pair: (String, Class[_ <: BinaryExecNode])): Any = {
  val (sqlString, c) = pair
  val df = sql(sqlString)
  val physical = df.queryExecution.sparkPlan
  // Collect every known join operator present in the plan.
  val operators = physical.collect {
    case j: BroadcastHashJoinExec => j
    case j: ShuffledHashJoinExec => j
    case j: CartesianProductExec => j
    case j: BroadcastNestedLoopJoinExec => j
    case j: SortMergeJoinExec => j
  }
  assert(operators.size === 1)
  val actual = operators.head
  if (actual.getClass != c) {
    fail(s"$sqlString expected operator: $c, but got $actual\n physical: \n$physical")
  }
}
test("join operator selection") {
spark.sharedState.cacheManager.clearCache()
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "0",
SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
Seq(
("SELECT * FROM testData LEFT SEMI JOIN testData2 ON key = a",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData LEFT SEMI JOIN testData2", classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData JOIN testData2", classOf[CartesianProductExec]),
("SELECT * FROM testData JOIN testData2 WHERE key = 2", classOf[CartesianProductExec]),
("SELECT * FROM testData LEFT JOIN testData2", classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData RIGHT JOIN testData2", classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData FULL OUTER JOIN testData2", classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData LEFT JOIN testData2 WHERE key = 2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData RIGHT JOIN testData2 WHERE key = 2",
classOf[CartesianProductExec]),
("SELECT * FROM testData FULL OUTER JOIN testData2 WHERE key = 2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData JOIN testData2 WHERE key > a", classOf[CartesianProductExec]),
("SELECT * FROM testData FULL OUTER JOIN testData2 WHERE key > a",
classOf[CartesianProductExec]),
("SELECT * FROM testData JOIN testData2 ON key = a", classOf[SortMergeJoinExec]),
("SELECT * FROM testData JOIN testData2 ON key = a and key = 2",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData JOIN testData2 ON key = a where key = 2",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData LEFT JOIN testData2 ON key = a", classOf[SortMergeJoinExec]),
("SELECT * FROM testData RIGHT JOIN testData2 ON key = a where key = 2",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData right join testData2 ON key = a and key = 2",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData full outer join testData2 ON key = a",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData left JOIN testData2 ON (key * a != key + a)",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData right JOIN testData2 ON (key * a != key + a)",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData full JOIN testData2 ON (key * a != key + a)",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData ANTI JOIN testData2 ON key = a", classOf[SortMergeJoinExec]),
("SELECT * FROM testData LEFT ANTI JOIN testData2", classOf[BroadcastNestedLoopJoinExec])
).foreach(assertJoin)
}
}
// ignore("SortMergeJoin shouldn't work on unsortable columns") {
// Seq(
// ("SELECT * FROM arrayData JOIN complexData ON data = a", classOf[ShuffledHashJoin])
// ).foreach { case (query, joinClass) => assertJoin(query, joinClass) }
// }
// After caching, testData's small statistics make it broadcastable, so each
// equi-join against it should be planned as a BroadcastHashJoinExec.
test("broadcasted hash join operator selection") {
  spark.sharedState.cacheManager.clearCache()
  sql("CACHE TABLE testData")
  Seq(
    ("SELECT * FROM testData join testData2 ON key = a",
      classOf[BroadcastHashJoinExec]),
    ("SELECT * FROM testData join testData2 ON key = a and key = 2",
      classOf[BroadcastHashJoinExec]),
    ("SELECT * FROM testData join testData2 ON key = a where key = 2",
      classOf[BroadcastHashJoinExec])
  ).foreach(assertJoin)
}
// Same as above but for outer joins: with both sides cached (and small),
// outer equi-joins should also pick BroadcastHashJoinExec.
test("broadcasted hash outer join operator selection") {
  spark.sharedState.cacheManager.clearCache()
  sql("CACHE TABLE testData")
  sql("CACHE TABLE testData2")
  Seq(
    ("SELECT * FROM testData LEFT JOIN testData2 ON key = a",
      classOf[BroadcastHashJoinExec]),
    ("SELECT * FROM testData RIGHT JOIN testData2 ON key = a where key = 2",
      classOf[BroadcastHashJoinExec]),
    ("SELECT * FROM testData right join testData2 ON key = a and key = 2",
      classOf[BroadcastHashJoinExec])
  ).foreach(assertJoin)
}
// Two equi-join conditions between the same pair of tables still produce a
// single join-plan candidate.
test("multiple-key equi-join is hash-join") {
  val x = testData2.as("x")
  val y = testData2.as("y")
  val join = x.join(y, ($"x.a" === $"y.a") && ($"x.b" === $"y.b")).queryExecution.optimizedPlan
  val planned = spark.sessionState.planner.JoinSelection(join)
  assert(planned.size === 1)
}
test("inner join where, one match per row") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
checkAnswer(
upperCaseData.join(lowerCaseData).where('n === 'N),
Seq(
Row(1, "A", 1, "a"),
Row(2, "B", 2, "b"),
Row(3, "C", 3, "c"),
Row(4, "D", 4, "d")
))
}
}
test("inner join ON, one match per row") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
checkAnswer(
upperCaseData.join(lowerCaseData, $"n" === $"N"),
Seq(
Row(1, "A", 1, "a"),
Row(2, "B", 2, "b"),
Row(3, "C", 3, "c"),
Row(4, "D", 4, "d")
))
}
}
test("inner join, where, multiple matches") {
val x = testData2.where($"a" === 1).as("x")
val y = testData2.where($"a" === 1).as("y")
checkAnswer(
x.join(y).where($"x.a" === $"y.a"),
Row(1, 1, 1, 1) ::
Row(1, 1, 1, 2) ::
Row(1, 2, 1, 1) ::
Row(1, 2, 1, 2) :: Nil
)
}
// The two sides are filtered to disjoint key ranges, so the equi-join must
// return no rows.
test("inner join, no matches") {
  val x = testData2.where($"a" === 1).as("x")
  val y = testData2.where($"a" === 2).as("y")
  checkAnswer(
    x.join(y).where($"x.a" === $"y.a"),
    Nil)
}
// SPARK-22141: both inputs reduce to empty relations (contradictory filters);
// empty-relation propagation must run before the cartesian-product check, so
// no join type should throw even without an explicit join condition.
test("SPARK-22141: Propagate empty relation before checking Cartesian products") {
  Seq("inner", "left", "right", "left_outer", "right_outer", "full_outer").foreach { joinType =>
    val x = testData2.where($"a" === 2 && !($"a" === 2)).as("x")
    val y = testData2.where($"a" === 1 && !($"a" === 1)).as("y")
    checkAnswer(x.join(y, Seq.empty, joinType), Nil)
  }
}
test("big inner join, 4 matches per row") {
val bigData = testData.union(testData).union(testData).union(testData)
val bigDataX = bigData.as("x")
val bigDataY = bigData.as("y")
checkAnswer(
bigDataX.join(bigDataY).where($"x.key" === $"y.key"),
testData.rdd.flatMap { row =>
Seq.fill(16)(new GenericRow(Seq(row, row).flatMap(_.toSeq).toArray))
}.collect().toSeq)
}
test("cartesian product join") {
withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
checkAnswer(
testData3.join(testData3),
Row(1, null, 1, null) ::
Row(1, null, 2, 2) ::
Row(2, 2, 1, null) ::
Row(2, 2, 2, 2) :: Nil)
checkAnswer(
testData3.as("x").join(testData3.as("y"), $"x.a" > $"y.a"),
Row(2, 2, 1, null) :: Nil)
}
withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "false") {
val e = intercept[Exception] {
checkAnswer(
testData3.join(testData3),
Row(1, null, 1, null) ::
Row(1, null, 2, 2) ::
Row(2, 2, 1, null) ::
Row(2, 2, 2, 2) :: Nil)
}
assert(e.getMessage.contains("Detected implicit cartesian product for INNER join " +
"between logical plans"))
}
}
test("left outer join") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
checkAnswer(
upperCaseData.join(lowerCaseData, $"n" === $"N", "left"),
Row(1, "A", 1, "a") ::
Row(2, "B", 2, "b") ::
Row(3, "C", 3, "c") ::
Row(4, "D", 4, "d") ::
Row(5, "E", null, null) ::
Row(6, "F", null, null) :: Nil)
checkAnswer(
upperCaseData.join(lowerCaseData, $"n" === $"N" && $"n" > 1, "left"),
Row(1, "A", null, null) ::
Row(2, "B", 2, "b") ::
Row(3, "C", 3, "c") ::
Row(4, "D", 4, "d") ::
Row(5, "E", null, null) ::
Row(6, "F", null, null) :: Nil)
checkAnswer(
upperCaseData.join(lowerCaseData, $"n" === $"N" && $"N" > 1, "left"),
Row(1, "A", null, null) ::
Row(2, "B", 2, "b") ::
Row(3, "C", 3, "c") ::
Row(4, "D", 4, "d") ::
Row(5, "E", null, null) ::
Row(6, "F", null, null) :: Nil)
checkAnswer(
upperCaseData.join(lowerCaseData, $"n" === $"N" && $"l" > $"L", "left"),
Row(1, "A", 1, "a") ::
Row(2, "B", 2, "b") ::
Row(3, "C", 3, "c") ::
Row(4, "D", 4, "d") ::
Row(5, "E", null, null) ::
Row(6, "F", null, null) :: Nil)
// Make sure we are choosing left.outputPartitioning as the
// outputPartitioning for the outer join operator.
checkAnswer(
sql(
"""
|SELECT l.N, count(*)
|FROM uppercasedata l LEFT OUTER JOIN allnulls r ON (l.N = r.a)
|GROUP BY l.N
""".stripMargin),
Row(
1, 1) ::
Row(2, 1) ::
Row(3, 1) ::
Row(4, 1) ::
Row(5, 1) ::
Row(6, 1) :: Nil)
checkAnswer(
sql(
"""
|SELECT r.a, count(*)
|FROM uppercasedata l LEFT OUTER JOIN allnulls r ON (l.N = r.a)
|GROUP BY r.a
""".stripMargin),
Row(null, 6) :: Nil)
}
}
test("right outer join") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
checkAnswer(
lowerCaseData.join(upperCaseData, $"n" === $"N", "right"),
Row(1, "a", 1, "A") ::
Row(2, "b", 2, "B") ::
Row(3, "c", 3, "C") ::
Row(4, "d", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
checkAnswer(
lowerCaseData.join(upperCaseData, $"n" === $"N" && $"n" > 1, "right"),
Row(null, null, 1, "A") ::
Row(2, "b", 2, "B") ::
Row(3, "c", 3, "C") ::
Row(4, "d", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
checkAnswer(
lowerCaseData.join(upperCaseData, $"n" === $"N" && $"N" > 1, "right"),
Row(null, null, 1, "A") ::
Row(2, "b", 2, "B") ::
Row(3, "c", 3, "C") ::
Row(4, "d", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
checkAnswer(
lowerCaseData.join(upperCaseData, $"n" === $"N" && $"l" > $"L", "right"),
Row(1, "a", 1, "A") ::
Row(2, "b", 2, "B") ::
Row(3, "c", 3, "C") ::
Row(4, "d", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
// Make sure we are choosing right.outputPartitioning as the
// outputPartitioning for the outer join operator.
checkAnswer(
sql(
"""
|SELECT l.a, count(*)
|FROM allnulls l RIGHT OUTER JOIN uppercasedata r ON (l.a = r.N)
|GROUP BY l.a
""".stripMargin),
Row(null,
6))
checkAnswer(
sql(
"""
|SELECT r.N, count(*)
|FROM allnulls l RIGHT OUTER JOIN uppercasedata r ON (l.a = r.N)
|GROUP BY r.N
""".stripMargin),
Row(1
, 1) ::
Row(2, 1) ::
Row(3, 1) ::
Row(4, 1) ::
Row(5, 1) ::
Row(6, 1) :: Nil)
}
}
test("full outer join") {
upperCaseData.where('N <= 4).createOrReplaceTempView("`left`")
upperCaseData.where('N >= 3).createOrReplaceTempView("`right`")
val left = UnresolvedRelation(TableIdentifier("left"))
val right = UnresolvedRelation(TableIdentifier("right"))
checkAnswer(
left.join(right, $"left.N" === $"right.N", "full"),
Row(1, "A", null, null) ::
Row(2, "B", null, null) ::
Row(3, "C", 3, "C") ::
Row(4, "D", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
checkAnswer(
left.join(right, ($"left.N" === $"right.N") && ($"left.N" =!= 3), "full"),
Row(1, "A", null, null) ::
Row(2, "B", null, null) ::
Row(3, "C", null, null) ::
Row(null, null, 3, "C") ::
Row(4, "D", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
checkAnswer(
left.join(right, ($"left.N" === $"right.N") && ($"right.N" =!= 3), "full"),
Row(1, "A", null, null) ::
Row(2, "B", null, null) ::
Row(3, "C", null, null) ::
Row(null, null, 3, "C") ::
Row(4, "D", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
// Make sure we are UnknownPartitioning as the outputPartitioning for the outer join
// operator.
checkAnswer(
sql(
"""
|SELECT l.a, count(*)
|FROM allNulls l FULL OUTER JOIN upperCaseData r ON (l.a = r.N)
|GROUP BY l.a
""".
stripMargin),
Row(null, 10))
checkAnswer(
sql(
"""
|SELECT r.N, count(*)
|FROM allNulls l FULL OUTER JOIN upperCaseData r ON (l.a = r.N)
|GROUP BY r.N
""".stripMargin),
Row
(1, 1) ::
Row(2, 1) ::
Row(3, 1) ::
Row(4, 1) ::
Row(5, 1) ::
Row(6, 1) ::
Row(null, 4) :: Nil)
checkAnswer(
sql(
"""
|SELECT l.N, count(*)
|FROM upperCaseData l FULL OUTER JOIN allNulls r ON (l.N = r.a)
|GROUP BY l.N
""".stripMargin),
Row(1
, 1) ::
Row(2, 1) ::
Row(3, 1) ::
Row(4, 1) ::
Row(5, 1) ::
Row(6, 1) ::
Row(null, 4) :: Nil)
checkAnswer(
sql(
"""
|SELECT r.a, count(*)
|FROM upperCaseData l FULL OUTER JOIN allNulls r ON (l.N = r.a)
|GROUP BY r.a
""".
stripMargin),
Row(null, 10))
}
// Existence joins (LEFT SEMI / LEFT ANTI) should be planned as broadcast hash
// joins when the broadcast threshold allows it, and as sort-merge joins when
// broadcasting is disabled.
test("broadcasted existence join operator selection") {
  spark.sharedState.cacheManager.clearCache()
  sql("CACHE TABLE testData")
  withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> Long.MaxValue.toString) {
    Seq(
      ("SELECT * FROM testData LEFT SEMI JOIN testData2 ON key = a",
        classOf[BroadcastHashJoinExec]),
      // Fixed: previously read "testData ANT JOIN testData2", which parses
      // "ANT" as a table alias and silently tests an INNER join instead of
      // the intended LEFT ANTI join.
      ("SELECT * FROM testData LEFT ANTI JOIN testData2 ON key = a",
        classOf[BroadcastHashJoinExec])
    ).foreach(assertJoin)
  }
  withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
    Seq(
      ("SELECT * FROM testData LEFT SEMI JOIN testData2 ON key = a",
        classOf[SortMergeJoinExec]),
      ("SELECT * FROM testData LEFT ANTI JOIN testData2 ON key = a",
        classOf[SortMergeJoinExec])
    ).foreach(assertJoin)
  }
}
test("cross join with broadcast") {
sql("CACHE TABLE testData")
val sizeInByteOfTestData = statisticSizeInByte(spark.table("testData"))
// we set the threshold is greater than statistic of the cached table testData
withSQLConf(
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> (sizeInByteOfTestData + 1).toString(),
SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
assert(statisticSizeInByte(spark.table("testData2")) >
spark.conf.get[Long](SQLConf.AUTO_BROADCASTJOIN_THRESHOLD))
assert(statisticSizeInByte(spark.table("testData")) <
spark.conf.get[Long](SQLConf.AUTO_BROADCASTJOIN_THRESHOLD))
Seq(
("SELECT * FROM testData LEFT SEMI JOIN testData2 ON key = a",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData LEFT SEMI JOIN testData2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData JOIN testData2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData JOIN testData2 WHERE key = 2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData LEFT JOIN testData2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData RIGHT JOIN testData2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData FULL OUTER JOIN testData2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData LEFT JOIN testData2 WHERE key = 2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData RIGHT JOIN testData2 WHERE key = 2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData FULL OUTER JOIN testData2 WHERE key = 2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData JOIN testData2 WHERE key > a",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData FULL OUTER JOIN testData2 WHERE key > a",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData left JOIN testData2 WHERE (key * a != key + a)",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData right JOIN testData2 WHERE (key * a != key + a)",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData full JOIN testData2 WHERE (key * a != key + a)",
classOf[BroadcastNestedLoopJoinExec])
).foreach(assertJoin)
checkAnswer(
sql(
"""
SELECT x.value, y.a, y.b FROM testData x JOIN testData2 y WHERE x.key = 2
""".stripMargin),
Row("2", 1, 1) ::
Row("2", 1, 2) ::
Row("2", 2, 1) ::
Row("2", 2, 2) ::
Row("2", 3, 1) ::
Row("2", 3, 2) :: Nil)
checkAnswer(
sql(
"""
SELECT x.value, y.a, y.b FROM testData x JOIN testData2 y WHERE x.key < y.a
""".stripMargin),
Row("1", 2, 1) ::
Row("1", 2, 2) ::
Row("1", 3, 1) ::
Row("1", 3, 2) ::
Row("2", 3, 1) ::
Row("2", 3, 2) :: Nil)
checkAnswer(
sql(
"""
SELECT x.value, y.a, y.b FROM testData x JOIN testData2 y ON x.key < y.a
""".stripMargin),
Row("1", 2, 1) ::
Row("1", 2, 2) ::
Row("1", 3, 1) ::
Row("1", 3, 2) ::
Row("2", 3, 1) ::
Row("2", 3, 2) :: Nil)
}
}
// LEFT SEMI join: each testData2 row is kept (once) iff its `a` value exists
// as a key in testData; no columns from the right side appear in the output.
test("left semi join") {
  val df = sql("SELECT * FROM testData2 LEFT SEMI JOIN testData ON key = a")
  checkAnswer(df,
    Row(1, 1) ::
    Row(1, 2) ::
    Row(2, 1) ::
    Row(2, 2) ::
    Row(3, 1) ::
    Row(3, 2) :: Nil)
}
test("cross join detection") {
testData.createOrReplaceTempView("A")
testData.createOrReplaceTempView("B")
testData2.createOrReplaceTempView("C")
testData3.createOrReplaceTempView("D")
upperCaseData.where('N >= 3).createOrReplaceTempView("`right`")
val cartesianQueries = Seq(
/** The following should error out since there is no explicit cross join */
"SELECT * FROM testData inner join testData2",
"SELECT * FROM testData left outer join testData2",
"SELECT * FROM testData right outer join testData2",
"SELECT * FROM testData full outer join testData2",
"SELECT * FROM testData, testData2",
"SELECT * FROM testData, testData2 where testData.key = 1 and testData2.a = 22",
/** The following should fail because after reordering there are cartesian products */
"select * from (A join B on (A.key = B.key)) join D on (A.key=D.a) join C",
"select * from ((A join B on (A.key = B.key)) join C) join D on (A.key = D.a)",
/** Cartesian product involving C, which is not involved in a CROSS join */
"select * from ((A join B on (A.key = B.key)) cross join D) join C on (A.key = D.a)");
def checkCartesianDetection(query: String): Unit = {
val e = intercept[Exception] {
checkAnswer(sql(query), Nil);
}
assert(e.getMessage.contains("Detected implicit cartesian product"))
}
withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "false") {
cartesianQueries.foreach(checkCartesianDetection)
}
// Check that left_semi, left_anti, existence joins without conditions do not throw
// an exception if cross joins are disabled
withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "false") {
checkAnswer(
sql("SELECT * FROM testData3 LEFT SEMI JOIN testData2"),
Row(1, null) :: Row (2, 2) :: Nil)
checkAnswer(
sql("SELECT * FROM testData3 LEFT ANTI JOIN testData2"),
Nil)
checkAnswer(
sql(
"""
|SELECT a FROM testData3
|WHERE
| EXISTS (SELECT * FROM testData)
|OR
| EXISTS (SELECT * FROM testData2)""".stripMargin),
Row(1) :: Row(2) :: Nil)
checkAnswer(
sql(
"""
|SELECT key FROM testData
|WHERE
| key IN (SELECT a FROM testData2)
|OR
| key IN (SELECT a FROM testData3)""".stripMargin),
Row(1) :: Row(2) :: Row(3) :: Nil)
}
}
test("test SortMergeJoin (without spill)") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "1",
SQLConf.SORT_MERGE_JOIN_EXEC_BUFFER_SPILL_THRESHOLD.key -> Int.MaxValue.toString) {
assertNotSpilled(sparkContext, "inner join") {
checkAnswer(
sql("SELECT * FROM testData JOIN testData2 ON key = a where key = 2"),
Row(2, "2", 2, 1) :: Row(2, "2", 2, 2) :: Nil
)
}
val expected = new ListBuffer[Row]()
expected.append(
Row(1, "1", 1, 1), Row(1, "1", 1, 2),
Row(2, "2", 2, 1), Row(2, "2", 2, 2),
Row(3, "3", 3, 1), Row(3, "3", 3, 2)
)
for (i <- 4 to 100) {
expected.append(Row(i, i.toString, null, null))
}
assertNotSpilled(sparkContext, "left outer join") {
checkAnswer(
sql(
"""
|SELECT
| big.key, big.value, small.a, small.b
|FROM
| testData big
|LEFT OUTER JOIN
| testData2 small
|ON
| big.key = small.a
""".stripMargin),
expected
)
}
assertNotSpilled(sparkContext, "right outer join") {
checkAnswer(
sql(
"""
|SELECT
| big.key, big.value, small.a, small.b
|FROM
| testData2 small
|RIGHT OUTER JOIN
| testData big
|ON
| big.key = small.a
""".stripMargin),
expected
)
}
}
}
test("test SortMergeJoin (with spill)") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "1",
SQLConf.SORT_MERGE_JOIN_EXEC_BUFFER_IN_MEMORY_THRESHOLD.key -> "0",
SQLConf.SORT_MERGE_JOIN_EXEC_BUFFER_SPILL_THRESHOLD.key -> "1") {
assertSpilled(sparkContext, "inner join") {
checkAnswer(
sql("SELECT * FROM testData JOIN testData2 ON key = a where key = 2"),
Row(2, "2", 2, 1) :: Row(2, "2", 2, 2) :: Nil
)
}
val expected = new ListBuffer[Row]()
expected.append(
Row(1, "1", 1, 1), Row(1, "1", 1, 2),
Row(2, "2", 2, 1), Row(2, "2", 2, 2),
Row(3, "3", 3, 1), Row(3, "3", 3, 2)
)
for (i <- 4 to 100) {
expected.append(Row(i, i.toString, null, null))
}
assertSpilled(sparkContext, "left outer join") {
checkAnswer(
sql(
"""
|SELECT
| big.key, big.value, small.a, small.b
|FROM
| testData big
|LEFT OUTER JOIN
| testData2 small
|ON
| big.key = small.a
""".stripMargin),
expected
)
}
assertSpilled(sparkContext, "right outer join") {
checkAnswer(
sql(
"""
|SELECT
| big.key, big.value, small.a, small.b
|FROM
| testData2 small
|RIGHT OUTER JOIN
| testData big
|ON
| big.key = small.a
""".stripMargin),
expected
)
}
// FULL OUTER JOIN still does not use [[ExternalAppendOnlyUnsafeRowArray]]
// so should not cause any spill
assertNotSpilled(sparkContext, "full outer join") {
checkAnswer(
sql(
"""
|SELECT
| big.key, big.value, small.a, small.b
|FROM
| testData2 small
|FULL OUTER JOIN
| testData big
|ON
| big.key = small.a
""".stripMargin),
expected
)
}
}
}
test("outer broadcast hash join should not throw NPE") {
withTempView("v1", "v2") {
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true") {
Seq(2 -> 2).toDF("x", "y").createTempView("v1")
spark.createDataFrame(
Seq(Row(1, "a")).asJava,
new StructType().add("i", "int", nullable = false).add("j", "string", nullable = false)
).createTempView("v2")
checkAnswer(
sql("select x, y, i, j from v1 left join v2 on x = i and y < length(j)"),
Row(2, 2, null, null)
)
}
}
}
test("test SortMergeJoin output ordering") {
val joinQueries = Seq(
"SELECT * FROM testData JOIN testData2 ON key = a",
"SELECT * FROM testData t1 JOIN " +
"testData2 t2 ON t1.key = t2.a JOIN testData3 t3 ON t2.a = t3.a",
"SELECT * FROM testData t1 JOIN " +
"testData2 t2 ON t1.key = t2.a JOIN " +
"testData3 t3 ON t2.a = t3.a JOIN " +
"testData t4 ON t1.key = t4.key")
def assertJoinOrdering(sqlString: String): Unit = {
val df = sql(sqlString)
val physical = df.queryExecution.sparkPlan
val physicalJoins = physical.collect {
case j: SortMergeJoinExec => j
}
val executed = df.queryExecution.executedPlan
val executedJoins = collect(executed) {
case j: SortMergeJoinExec => j
}
// This only applies to the above tested queries, in which a child SortMergeJoin always
// contains the SortOrder required by its parent SortMergeJoin. Thus, SortExec should never
// appear as parent of SortMergeJoin.
executed.foreach {
case s: SortExec => s.foreach {
case j: SortMergeJoinExec => fail(
s"No extra sort should be added since $j already satisfies the required ordering"
)
case _ =>
}
case _ =>
}
val joinPairs = physicalJoins.zip(executedJoins)
val numOfJoins = sqlString.split(" ").count(_.toUpperCase(Locale.ROOT) == "JOIN")
assert(joinPairs.size == numOfJoins)
joinPairs.foreach {
case(join1, join2) =>
val leftKeys = join1.leftKeys
val rightKeys = join1.rightKeys
val outputOrderingPhysical = join1.outputOrdering
val outputOrderingExecuted = join2.outputOrdering
// outputOrdering should always contain join keys
assert(
SortOrder.orderingSatisfies(
outputOrderingPhysical, leftKeys.map(SortOrder(_, Ascending))))
assert(
SortOrder.orderingSatisfies(
outputOrderingPhysical, rightKeys.map(SortOrder(_, Ascending))))
// outputOrdering should be consistent between physical plan and executed plan
assert(outputOrderingPhysical == outputOrderingExecuted,
s"Operator $join1 did not have the same output ordering in the physical plan as in " +
s"the executed plan.")
}
}
joinQueries.foreach(assertJoinOrdering)
}
test("SPARK-22445 Respect stream-side child's needCopyResult in BroadcastHashJoin") {
val df1 = Seq((2, 3), (2, 5), (2, 2), (3, 8), (2, 1)).toDF("k", "v1")
val df2 = Seq((2, 8), (3, 7), (3, 4), (1, 2)).toDF("k", "v2")
val df3 = Seq((1, 1), (3, 2), (4, 3), (5, 1)).toDF("k", "v3")
withSQLConf(
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.JOIN_REORDER_ENABLED.key -> "false") {
val df = df1.join(df2, "k").join(functions.broadcast(df3), "k")
val plan = df.queryExecution.sparkPlan
// Check if `needCopyResult` in `BroadcastHashJoin` is correct when smj->bhj
val joins = new collection.mutable.ArrayBuffer[BinaryExecNode]()
plan.foreachUp {
case j: BroadcastHashJoinExec => joins += j
case j: SortMergeJoinExec => joins += j
case _ =>
}
assert(joins.size == 2)
assert(joins(0).isInstanceOf[SortMergeJoinExec])
assert(joins(1).isInstanceOf[BroadcastHashJoinExec])
checkAnswer(df, Row(3, 8, 7, 2) :: Row(3, 8, 4, 2) :: Nil)
}
}
// SPARK-24495: duplicated equi-join keys (id = b1 AND id = b2) must both be
// kept by the planner; since b2 = -id, only id = 0 satisfies both conditions.
test("SPARK-24495: Join may return wrong result when having duplicated equal-join keys") {
  withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1",
    SQLConf.CONSTRAINT_PROPAGATION_ENABLED.key -> "false",
    SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
    val df1 = spark.range(0, 100, 1, 2)
    val df2 = spark.range(100).select($"id".as("b1"), (- $"id").as("b2"))
    val res = df1.join(df2, $"id" === $"b1" && $"id" === $"b2").select($"b1", $"b2", $"id")
    checkAnswer(res, Row(0, 0, 0))
  }
}
test("SPARK-27485: EnsureRequirements should not fail join with duplicate keys") {
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "2",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val tbl_a = spark.range(40)
.select($"id" as "x", $"id" % 10 as "y")
.repartition(2, $"x", $"y", $"x")
.as("tbl_a")
val tbl_b = spark.range(20)
.select($"id" as "x", $"id" % 2 as "y1", $"id" % 20 as "y2")
.as("tbl_b")
val res = tbl_a
.join(tbl_b,
$"tbl_a.x" === $"tbl_b.x" && $"tbl_a.y" === $"tbl_b.y1" && $"tbl_a.y" === $"tbl_b.y2")
.select($"tbl_a.x")
checkAnswer(res, Row(0L) :: Row(1L) :: Nil)
}
}
test("SPARK-26352: join reordering should not change the order of columns") {
withTable("tab1", "tab2", "tab3") {
spark.sql("select 1 as x, 100 as y").write.saveAsTable("tab1")
spark.sql("select 42 as i, 200 as j").write.saveAsTable("tab2")
spark.sql("select 1 as a, 42 as b").write.saveAsTable("tab3")
val df = spark.sql("""
with tmp as (select * from tab1 cross join tab2)
select * from tmp join tab3 on a = x and b = i
""")
checkAnswer(df, Row(1, 100, 42, 200, 1, 42))
}
}
  // In join keys, NaN must compare equal to NaN and 0.0 must compare equal to
  // -0.0 (see the expected answers below, where the 0.0 and -0.0 rows match
  // each other). Note that 0.0/0.0 evaluates to Double.NaN.
  test("NaN and -0.0 in join keys") {
    withTempView("v1", "v2", "v3", "v4") {
      Seq(Float.NaN -> Double.NaN, 0.0f -> 0.0, -0.0f -> -0.0).toDF("f", "d").createTempView("v1")
      Seq(Float.NaN -> Double.NaN, 0.0f -> 0.0, -0.0f -> -0.0).toDF("f", "d").createTempView("v2")

      // Plain column join keys: NaN matches NaN; the two zero rows of each
      // side match both zero rows of the other, giving 5 result rows.
      checkAnswer(
        sql(
          """
            |SELECT v1.f, v1.d, v2.f, v2.d
            |FROM v1 JOIN v2
            |ON v1.f = v2.f AND v1.d = v2.d
          """.stripMargin),
        Seq(
          Row(Float.NaN, Double.NaN, Float.NaN, Double.NaN),
          Row(0.0f, 0.0, 0.0f, 0.0),
          Row(0.0f, 0.0, -0.0f, -0.0),
          Row(-0.0f, -0.0, 0.0f, 0.0),
          Row(-0.0f, -0.0, -0.0f, -0.0)))

      // test with complicated join keys.
      checkAnswer(
        sql(
          """
            |SELECT v1.f, v1.d, v2.f, v2.d
            |FROM v1 JOIN v2
            |ON
            |  array(v1.f) = array(v2.f) AND
            |  struct(v1.d) = struct(v2.d) AND
            |  array(struct(v1.f, v1.d)) = array(struct(v2.f, v2.d)) AND
            |  struct(array(v1.f), array(v1.d)) = struct(array(v2.f), array(v2.d))
          """.stripMargin),
        Seq(
          Row(Float.NaN, Double.NaN, Float.NaN, Double.NaN),
          Row(0.0f, 0.0, 0.0f, 0.0),
          Row(0.0f, 0.0, -0.0f, -0.0),
          Row(-0.0f, -0.0, 0.0f, 0.0),
          Row(-0.0f, -0.0, -0.0f, -0.0)))

      // test with tables with complicated-type columns.
      Seq((Array(-0.0f, 0.0f), Tuple2(-0.0d, Double.NaN), Seq(Tuple2(-0.0d, Double.NaN))))
        .toDF("arr", "stru", "arrOfStru").createTempView("v3")
      Seq((Array(0.0f, -0.0f), Tuple2(0.0d, 0.0/0.0), Seq(Tuple2(0.0d, 0.0/0.0))))
        .toDF("arr", "stru", "arrOfStru").createTempView("v4")
      checkAnswer(
        sql(
          """
            |SELECT v3.arr, v3.stru, v3.arrOfStru, v4.arr, v4.stru, v4.arrOfStru
            |FROM v3 JOIN v4
            |ON v3.arr = v4.arr AND v3.stru = v4.stru AND v3.arrOfStru = v4.arrOfStru
          """.stripMargin),
        Seq(Row(
          Seq(-0.0f, 0.0f),
          Row(-0.0d, Double.NaN),
          Seq(Row(-0.0d, Double.NaN)),
          Seq(0.0f, -0.0f),
          Row(0.0d, 0.0/0.0),
          Seq(Row(0.0d, 0.0/0.0)))))
    }
  }
  // SPARK-28323: a PythonUDF used on both sides of an equi-join condition must
  // be usable as a join key; each side's UDF is evaluated by its own
  // BatchEvalPythonExec below the join.
  test("SPARK-28323: PythonUDF should be able to use in join condition") {
    import IntegratedUDFTestUtils._

    assume(shouldTestPythonUDFs)

    val pythonTestUDF = TestPythonUDF(name = "udf")
    val left = Seq((1, 2), (2, 3)).toDF("a", "b")
    val right = Seq((1, 2), (3, 4)).toDF("c", "d")
    val df = left.join(right, pythonTestUDF(left("a")) === pythonTestUDF(right.col("c")))

    val joinNode = find(df.queryExecution.executedPlan)(_.isInstanceOf[BroadcastHashJoinExec])
    assert(joinNode.isDefined)

    // There are two PythonUDFs which use attribute from left and right of join, individually.
    // So two PythonUDFs should be evaluated before the join operator, at left and right side.
    val pythonEvals = collect(joinNode.get) {
      case p: BatchEvalPythonExec => p
    }
    assert(pythonEvals.size == 2)

    checkAnswer(df, Row(1, 2, 1, 2) :: Nil)
  }

  // SPARK-28345: a filter over a cross join whose predicate is a PythonUDF
  // comparison should be pushed down and become the join condition, leaving
  // no FilterExec in the physical plan.
  test("SPARK-28345: PythonUDF predicate should be able to pushdown to join") {
    import IntegratedUDFTestUtils._

    assume(shouldTestPythonUDFs)

    val pythonTestUDF = TestPythonUDF(name = "udf")
    val left = Seq((1, 2), (2, 3)).toDF("a", "b")
    val right = Seq((1, 2), (3, 4)).toDF("c", "d")
    val df = left.crossJoin(right).where(pythonTestUDF(left("a")) === right.col("c"))

    // Before optimization, there is a logical Filter operator.
    val filterInAnalysis = df.queryExecution.analyzed.find(_.isInstanceOf[Filter])
    assert(filterInAnalysis.isDefined)

    // Filter predicate was pushdown as join condition. So there is no Filter exec operator.
    val filterExec = find(df.queryExecution.executedPlan)(_.isInstanceOf[FilterExec])
    assert(filterExec.isEmpty)

    checkAnswer(df, Row(1, 2, 1, 2) :: Nil)
  }

  // SPARK-21492: exercises the interpreted (non-codegen) path of the
  // shuffle-based join so that cleanupResource is hit without whole-stage
  // code generation.
  test("SPARK-21492: cleanupResource without code generation") {
    withSQLConf(
      SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false",
      SQLConf.SHUFFLE_PARTITIONS.key -> "1",
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
      val df1 = spark.range(0, 10, 1, 2)
      val df2 = spark.range(10).select($"id".as("b1"), (- $"id").as("b2"))
      val res = df1.join(df2, $"id" === $"b1" && $"id" === $"b2").select($"b1", $"b2", $"id")
      checkAnswer(res, Row(0, 0, 0))
    }
  }

  // SPARK-29850: with broadcast disabled, joining an empty relation against an
  // aggregated one must not leak memory; the join result is simply empty.
  test("SPARK-29850: sort-merge-join an empty table should not memory leak") {
    val df1 = spark.range(10).select($"id", $"id" % 3 as 'p)
      .repartition($"id").groupBy($"id").agg(Map("p" -> "max"))
    val df2 = spark.range(0)
    withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
      assert(df2.join(df1, "id").collect().isEmpty)
    }
  }
}
| ptkool/spark | sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala | Scala | apache-2.0 | 38,785 |
import java.security._
import scala.language.{ reflectiveCalls }
// Regression fixture (presumably for scala/bug#2318, per the file name --
// TODO confirm): structural-type method calls are implemented with
// reflection and must keep working when a restrictive SecurityManager is
// installed.
object Test {
  trait Bar { def bar: Unit }

  // Permissive SecurityManager: grants only the permission types this test
  // needs and delegates everything else to the default (denying) behaviour.
  object Mgr extends SecurityManager {
    override def checkPermission(perm: Permission) = perm match {
      case _: java.lang.RuntimePermission => ()
      case _: java.io.FilePermission => ()
      case x: java.security.SecurityPermission if x.getName contains ".networkaddress." => () // generality ftw
      case x: java.util.PropertyPermission if x.getName == "sun.net.inetaddr.ttl" => ()
      case _: java.lang.reflect.ReflectPermission => () // needed for LambdaMetaFactory
      case _ => super.checkPermission(perm)
    }
  }

  // Structural-type (reflective) call with no SecurityManager installed:
  // destroys the spawned process through a structural "Destroyable" type.
  def t1() = {
    val p = Runtime.getRuntime().exec("ls");
    type Destroyable = { def destroy() : Unit }
    def doDestroy( obj : Destroyable ) : Unit = obj.destroy();
    doDestroy( p );
  }

  // Installs the restrictive manager, then calls `bar` both directly on an
  // anonymous subclass and through a structural-type cast; the reflective
  // call must succeed under the SecurityManager.
  def t2() = {
    System.setSecurityManager(Mgr)

    val b = new Bar { def bar = println("bar") }
    b.bar
    val structural = b.asInstanceOf[{ def bar: Unit }]
    structural.bar
  }

  def main(args: Array[String]) {
    // figuring this will otherwise break on windows
    try t1()
    catch { case _: java.io.IOException => () }

    t2()
  }
}
| felixmulder/scala | test/files/run/t2318.scala | Scala | bsd-3-clause | 1,440 |
package ch.epfl
import java.util.logging.LogManager
import com.typesafe.config.ConfigFactory
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ArbitraryTypeReader._
package object telegram {

  // Re-read the java.util.logging configuration; runs once, as a side effect
  // of this package object's initialization.
  LogManager.getLogManager.readConfiguration()

  // Project version injected at compile time by the sbt build-info plugin.
  lazy val version = BuildInfo.version // sbt build info created at compile time

  // Typed configuration loaded from the "epflbot" section via Ficus readers.
  val Config = ConfigFactory.load().as[EPFLBotConfig]("epflbot")
}
| epflbot/epflbot | src/main/scala/ch/epfl/telegram/package.scala | Scala | apache-2.0 | 409 |
package scalafiddle.compiler.cache
import scala.tools.nsc
// LRU cache of interactive presentation-compiler instances
// (nsc.interactive.Global), registered under the "AutoComplete" name --
// presumably backing auto-completion requests; see LRUCache for eviction.
object AutoCompleteCache extends LRUCache[nsc.interactive.Global]("AutoComplete") {}
| scalafiddle/scalafiddle-core | compiler-server/src/main/scala/scalafiddle/compiler/cache/AutoCompleteCache.scala | Scala | apache-2.0 | 145 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package vta.core
import chisel3._
import chisel3.util._
import vta.util.config._
import vta.shell._
/** EventCounters.
*
* This unit contains all the event counting logic. One common event tracked in
* hardware is the number of clock cycles taken to achieve certain task. We
* can count the total number of clock cycles spent in a VTA run by checking
* launch and finish signals.
*
* The event counter value is passed to the VCR module via the ecnt port, so
* they can be accessed by the host. The number of event counters (nECnt) is
* defined in the Shell VCR module as a parameter, see VCRParams.
*
* If one would like to add an event counter, then the value of nECnt must be
* changed in VCRParams together with the corresponding counting logic here.
*/
class EventCounters(debug: Boolean = false)(implicit p: Parameters)
    extends Module {
  // VCR parameters (register width, number of event counters) from the shell.
  val vp = p(ShellKey).vcrParams
  val io = IO(new Bundle {
    val launch = Input(Bool())
    val finish = Input(Bool())
    // One valid/bits pair per event counter, nECnt in total.
    val ecnt = Vec(vp.nECnt, ValidIO(UInt(vp.regBits.W)))
  })
  // Free-running cycle counter: increments every clock while a run is active
  // (launch asserted and finish not yet seen), resets to zero otherwise.
  val cycle_cnt = RegInit(0.U(vp.regBits.W))
  when(io.launch && !io.finish) {
    cycle_cnt := cycle_cnt + 1.U
  }.otherwise {
    cycle_cnt := 0.U
  }
  // Event counter 0 reports the total cycle count; it is only valid on the
  // cycle finish is asserted.
  io.ecnt(0).valid := io.finish
  io.ecnt(0).bits := cycle_cnt
}
| Huyuwei/tvm | vta/hardware/chisel/src/main/scala/core/EventCounters.scala | Scala | apache-2.0 | 2,086 |
/*-------------------------------------------------------------------------*\\
** ScalaCheck **
** Copyright (c) 2007-2019 Rickard Nilsson. All rights reserved. **
** http://www.scalacheck.org **
** **
** This software is released under the terms of the Revised BSD License. **
** There is NO WARRANTY. See the file LICENSE for the full text. **
\\*------------------------------------------------------------------------ */
package org.scalacheck
import language.higherKinds
import language.implicitConversions
import rng.Seed
import util.Buildable
import util.SerializableCanBuildFroms._
import ScalaVersionSpecific._
import scala.annotation.tailrec
import scala.collection.immutable.TreeMap
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.duration.{Duration, FiniteDuration}
import java.util.{ Calendar, UUID }
sealed abstract class Gen[+T] extends Serializable { self =>

  //// Private interface ////

  import Gen.{R, gen}

  /** Just an alias */
  private type P = Gen.Parameters

  /** Should be a copy of R.sieve. Used internally in Gen when some generators
   *  with suchThat-clause are created (when R is not available). This method
   *  actually breaks covariance, but since this method will only ever be
   *  called with a value of exactly type T, it is OK. */
  private[scalacheck] def sieveCopy(x: Any): Boolean = true

  /** Core generation method: produces a full result (value, labels, sieve and
   *  next seed) for the given parameters and seed. */
  private[scalacheck] def doApply(p: P, seed: Seed): R[T]

  //// Public interface ////

  /** A class supporting filtered operations. Enables `for`-comprehension
   *  guards on generators. */
  final class WithFilter(p: T => Boolean) {
    def map[U](f: T => U): Gen[U] = Gen.this.suchThat(p).map(f)
    def flatMap[U](f: T => Gen[U]): Gen[U] = Gen.this.suchThat(p).flatMap(f)
    def withFilter(q: T => Boolean): WithFilter = Gen.this.withFilter(x => p(x) && q(x))
  }

  /** Evaluate this generator with the given parameters */
  def apply(p: Gen.Parameters, seed: Seed): Option[T] =
    doApply(p, seed).retrieve

  /** Like pureApply, but returns the full result object rather than just the
   *  generated value. Retries with the updated seed until a value passes the
   *  sieve, up to `retries` times, then throws [[Gen.RetrievalError]]. */
  def doPureApply(p: Gen.Parameters, seed: Seed, retries: Int = 100): Gen.R[T] = {
    @tailrec def loop(r: Gen.R[T], i: Int): Gen.R[T] =
      if (r.retrieve.isDefined) r
      else if (i > 0) loop(doApply(p, r.seed), i - 1)
      else throw new Gen.RetrievalError()
    loop(doApply(p, seed), retries)
  }

  /**
   * Evaluate this generator with the given parameters.
   *
   * The generator will attempt to generate a valid `T` value. If a
   * valid value is not produced it may retry several times,
   * determined by the `retries` parameter (which defaults to 100).
   *
   * If all the retries fail it will throw a `Gen.RetrievalError`
   * exception.
   */
  def pureApply(p: Gen.Parameters, seed: Seed, retries: Int = 100): T =
    doPureApply(p, seed, retries).retrieve.get

  /** Create a new generator by mapping the result of this generator */
  def map[U](f: T => U): Gen[U] = gen { (p, seed) => doApply(p, seed).map(f) }

  /** Create a new generator by flat-mapping the result of this generator */
  def flatMap[U](f: T => Gen[U]): Gen[U] = gen { (p, seed) =>
    val rt = doApply(p, seed)
    // Thread the seed produced by the outer generator into the inner one.
    rt.flatMap(t => f(t).doApply(p, rt.seed))
  }

  /** Create a new generator that uses this generator to produce a value
   *  that fulfills the given condition. If the condition is not fulfilled,
   *  the generator fails (returns None). Also, make sure that the provided
   *  test property is side-effect free, e.g. it should not use external vars. */
  def filter(p: T => Boolean): Gen[T] = suchThat(p)

  /** Create a new generator that uses this generator to produce a value
   *  that doesn't fulfill the given condition. If the condition is fulfilled,
   *  the generator fails (returns None). Also, make sure that the provided
   *  test property is side-effect free, e.g. it should not use external vars. */
  def filterNot(p: T => Boolean): Gen[T] = suchThat(x => !p(x))

  /** Creates a non-strict filtered version of this generator. */
  def withFilter(p: T => Boolean): WithFilter = new WithFilter(p)

  /** Create a new generator that uses this generator to produce a value
   *  that fulfills the given condition. If the condition is not fulfilled,
   *  the generator fails (returns None). Also, make sure that the provided
   *  test property is side-effect free, e.g. it should not use external vars.
   *  This method is identical to [Gen.filter]. */
  def suchThat(f: T => Boolean): Gen[T] = new Gen[T] {
    def doApply(p: P, seed: Seed) =
      p.useInitialSeed(seed) { (p0, s0) =>
        val res = Gen.this.doApply(p0, s0)
        // Conjoin the new predicate with the existing sieve.
        res.copy(s = { x:T => res.sieve(x) && f(x) })
      }
    override def sieveCopy(x: Any) =
      try Gen.this.sieveCopy(x) && f(x.asInstanceOf[T])
      catch { case _: java.lang.ClassCastException => false }
  }

  /** Thrown by retryUntil when the predicate is not satisfied within the
   *  allowed number of attempts. */
  case class RetryUntilException(n: Int) extends RuntimeException(s"retryUntil failed after $n attempts")

  /**
   * Create a generator that calls this generator repeatedly until the
   * given condition is fulfilled. The generated value is then
   * returned. Make sure that the provided test property is
   * side-effect free (it should not use external vars).
   *
   * If the generator fails more than maxTries, a RetryUntilException
   * will be thrown.
   */
  def retryUntil(p: T => Boolean, maxTries: Int): Gen[T] = {
    require(maxTries > 0)
    def loop(params: P, seed: Seed, tries: Int): R[T] =
      if (tries > maxTries) throw RetryUntilException(tries) else {
        val r = self.doApply(params, seed)
        // Re-run with the updated seed until the predicate holds.
        if (r.retrieve.exists(p)) r else loop(params, r.seed, tries + 1)
      }
    Gen.gen((params, seed) => loop(params, seed, 1))
  }

  /**
   * Create a generator that calls this generator repeatedly until the
   * given condition is fulfilled. The generated value is then
   * returned. Make sure that the provided test property is
   * side-effect free (it should not use external vars).
   *
   * If the generator fails more than 10000 times, a
   * RetryUntilException will be thrown. You can call `retryUntil`
   * with a second parameter to change this number.
   */
  def retryUntil(p: T => Boolean): Gen[T] =
    retryUntil(p, 10000)

  /** Generate a single value using default parameters and a fresh random
   *  seed; None if the generator failed. */
  def sample: Option[T] =
    doApply(Gen.Parameters.default, Seed.random()).retrieve

  /** Returns a new property that holds if and only if both this
   *  and the given generator generates the same result, or both
   *  generators generate no result. */
  def ==[U](g: Gen[U]) = Prop { prms =>
    // test equality using a random seed
    val seed = Seed.random()
    val lhs = doApply(prms, seed).retrieve
    val rhs = g.doApply(prms, seed).retrieve
    if (lhs == rhs) Prop.proved(prms) else Prop.falsified(prms)
  }

  /** Property that holds when, for all values of `g`, this generator never
   *  produces an equal value. */
  def !=[U](g: Gen[U]) = Prop.forAll(this)(r => Prop.forAll(g)(_ != r))

  /** Property that holds when the two generators produce different results
   *  from the same random seed. */
  def !==[U](g: Gen[U]) = Prop { prms =>
    // test inequality using a random seed
    val seed = Seed.random()
    val lhs = doApply(prms, seed).retrieve
    val rhs = g.doApply(prms, seed).retrieve
    if (lhs != rhs) Prop.proved(prms) else Prop.falsified(prms)
  }

  /** Put a label on the generator to make test reports clearer */
  def label(l: String): Gen[T] = new Gen[T] {
    def doApply(p: P, seed: Seed) =
      p.useInitialSeed(seed) { (p0, s0) =>
        val r = Gen.this.doApply(p0, s0)
        r.copy(l = r.labels + l)
      }
    override def sieveCopy(x: Any) = Gen.this.sieveCopy(x)
  }

  /** Put a label on the generator to make test reports clearer */
  def :|(l: String) = label(l)

  /** Put a label on the generator to make test reports clearer */
  def |:(l: String) = label(l)

  /** Put a label on the generator to make test reports clearer */
  def :|(l: Symbol) = label(l.name)

  /** Put a label on the generator to make test reports clearer */
  def |:(l: Symbol) = label(l.name)

  /** Perform some RNG perturbation before generating */
  def withPerturb(f: Seed => Seed): Gen[T] =
    Gen.gen((p, seed) => doApply(p, f(seed)))
}
object Gen extends GenArities with GenVersionSpecific {
//// Private interface ////
import Arbitrary.arbitrary
/** Just an alias */
private type P = Parameters
  /** Thrown by pureApply/doPureApply when no value could be generated within
   *  the allowed number of retries. */
  class RetrievalError extends RuntimeException("couldn't generate value")

  /** Internal generation result: an optional value plus its labels, a sieve
   *  predicate the value must pass, and the seed to use for the next
   *  generation step. */
  private[scalacheck] trait R[+T] {
    def labels: Set[String] = Set()
    def sieve[U >: T]: U => Boolean = _ => true
    protected def result: Option[T]
    def seed: Seed

    // The externally visible value: only defined if it passes the sieve.
    def retrieve: Option[T] = result.filter(sieve)

    def copy[U >: T](
      l: Set[String] = this.labels,
      s: U => Boolean = this.sieve,
      r: Option[U] = this.result,
      sd: Seed = this.seed
    ): R[U] = new R[U] {
      override val labels = l
      override def sieve[V >: U] = { (x: Any) =>
        // Values of the wrong runtime type simply fail the sieve.
        try s(x.asInstanceOf[U])
        catch { case _: java.lang.ClassCastException => false }
      }
      val seed = sd
      val result = r
    }

    def map[U](f: T => U): R[U] = r(retrieve.map(f), seed).copy(l = labels)

    def flatMap[U](f: T => R[U]): R[U] = retrieve match {
      case None => r(None, seed).copy(l = labels)
      case Some(t) =>
        // NOTE: this `r` shadows the Gen.r factory above; it is the result of f.
        val r = f(t)
        r.copy(l = labels ++ r.labels, sd = r.seed)
    }
  }

  /** Builds a plain R from an optional value and a seed. */
  private[scalacheck] def r[T](r: Option[T], sd: Seed): R[T] = new R[T] {
    val result = r
    val seed = sd
  }

  /** Generator factory method */
  private[scalacheck] def gen[T](f: (P, Seed) => R[T]): Gen[T] = new Gen[T] {
    def doApply(p: P, seed: Seed): R[T] = p.useInitialSeed(seed)(f)
  }
//// Public interface ////
  /** Generator parameters, used by [[org.scalacheck.Gen.apply]] */
  sealed abstract class Parameters extends Serializable { outer =>

    override def toString: String = {
      val sb = new StringBuilder
      sb.append("Parameters(")
      sb.append(s"size=$size, ")
      sb.append(s"initialSeed=$initialSeed, ")
      sb.append(s"useLegacyShrinking=$useLegacyShrinking)")
      sb.toString
    }

    /**
     * The size of the generated value. Generator implementations are
     * allowed to freely interpret (or ignore) this value. During test
     * execution, the value of this parameter is controlled by
     * [[Test.Parameters.minSize]] and [[Test.Parameters.maxSize]].
     */
    val size: Int

    // Private copy constructor; all the public with* methods delegate here.
    private[this] def cpy(
      size0: Int = outer.size,
      initialSeed0: Option[Seed] = outer.initialSeed,
      useLegacyShrinking0: Boolean = outer.useLegacyShrinking
    ): Parameters =
      new Parameters {
        val size: Int = size0
        val initialSeed: Option[Seed] = initialSeed0
        override val useLegacyShrinking: Boolean = useLegacyShrinking0
      }

    /**
     * Create a copy of this [[Gen.Parameters]] instance with
     * [[Gen.Parameters.size]] set to the specified value.
     */
    def withSize(size: Int): Parameters =
      cpy(size0 = size)

    /**
     * Optional fixed seed: when set, generation starts from this seed instead
     * of the one a generator is invoked with (see [[useInitialSeed]], which
     * also clears it so it is consumed at most once).
     */
    val initialSeed: Option[Seed]

    def withInitialSeed(o: Option[Seed]): Parameters =
      cpy(initialSeed0 = o)

    def withInitialSeed(seed: Seed): Parameters =
      cpy(initialSeed0 = Some(seed))

    def withInitialSeed(n: Long): Parameters =
      cpy(initialSeed0 = Some(Seed(n)))

    def withNoInitialSeed: Parameters =
      cpy(initialSeed0 = None)

    /** Run `f` with the effective seed: the configured initial seed if one is
     *  set (cleared from the parameters to avoid reuse), otherwise `seed`. */
    def useInitialSeed[A](seed: Seed)(f: (Parameters, Seed) => A): A =
      initialSeed match {
        case Some(s) => f(this.withNoInitialSeed, s)
        case None => f(this, seed)
      }

    // Whether the old (pre-seed-aware) shrinking behaviour is used; on by default.
    val useLegacyShrinking: Boolean = true

    def disableLegacyShrinking: Parameters =
      withLegacyShrinking(false)

    def enableLegacyShrinking: Parameters =
      withLegacyShrinking(true)

    def withLegacyShrinking(b: Boolean): Parameters =
      cpy(useLegacyShrinking0 = b)

    // no longer used, but preserved for binary compatibility
    @deprecated("cp is deprecated. use cpy.", "1.14.1")
    private case class cp(size: Int = size, initialSeed: Option[Seed] = None) extends Parameters
  }

  /** Provides methods for creating [[org.scalacheck.Gen.Parameters]] values */
  object Parameters {
    /** Default generator parameters instance: size 100, no fixed seed. */
    val default: Parameters = new Parameters {
      val size: Int = 100
      val initialSeed: Option[Seed] = None
    }
  }
  /** A wrapper type for range types */
  trait Choose[T] extends Serializable {
    /** Creates a generator that returns a value in the given inclusive range */
    def choose(min: T, max: T): Gen[T]
  }

  /** Provides implicit [[org.scalacheck.Gen.Choose]] instances */
  object Choose {

    class IllegalBoundsError[A](low: A, high: A)
      extends IllegalArgumentException(s"invalid bounds: low=$low, high=$high")

    /**
     * This method gets a ton of use -- so we want it to be as fast as
     * possible for many of our common cases.
     */
    private def chLng(l: Long, h: Long)(p: P, seed: Seed): R[Long] = {
      if (h < l) {
        throw new IllegalBoundsError(l, h)
      } else if (h == l) {
        // Degenerate range: constant value.
        const(l).doApply(p, seed)
      } else if (l == Long.MinValue && h == Long.MaxValue) {
        // Full Long range: use the raw random long directly.
        val (n, s) = seed.long
        r(Some(n), s)
      } else if (l == Int.MinValue && h == Int.MaxValue) {
        // Full Int range: truncate the random long to 32 bits.
        val (n, s) = seed.long
        r(Some(n.toInt.toLong), s)
      } else if (l == Short.MinValue && h == Short.MaxValue) {
        val (n, s) = seed.long
        r(Some(n.toShort.toLong), s)
      } else if (l == 0L && h == Char.MaxValue) {
        val (n, s) = seed.long
        r(Some(n.toChar.toLong), s)
      } else if (l == Byte.MinValue && h == Byte.MaxValue) {
        val (n, s) = seed.long
        r(Some(n.toByte.toLong), s)
      } else {
        val d = h - l + 1
        if (d <= 0) {
          // The range width overflowed a Long (more than 2^63 values):
          // fall back to rejection sampling.
          var tpl = seed.long
          var n = tpl._1
          var s = tpl._2
          while (n < l || n > h) {
            tpl = s.long
            n = tpl._1
            s = tpl._2
          }
          r(Some(n), s)
        } else {
          // Map a non-negative random long into [l, h] via modulus.
          val (n, s) = seed.long
          r(Some(l + (n & 0x7fffffffffffffffL) % d), s)
        }
      }
    }

    private def chDbl(l: Double, h: Double)(p: P, seed: Seed): R[Double] = {
      val d = h - l
      if (d < 0) {
        throw new IllegalBoundsError(l, h)
      } else if (d > Double.MaxValue) {
        // Width overflows a Double: split the range at zero and recurse on a
        // randomly chosen half.
        val (x, seed2) = seed.long
        if (x < 0) chDbl(l, 0d)(p, seed2) else chDbl(0d, h)(p, seed2)
      } else if (d == 0) {
        r(Some(l), seed)
      } else {
        // Scale a uniform [0, 1) double into [l, h).
        val (n, s) = seed.double
        r(Some(n * (h-l) + l), s)
      }
    }

    implicit val chooseLong: Choose[Long] =
      new Choose[Long] {
        def choose(low: Long, high: Long): Gen[Long] =
          if (low > high) throw new IllegalBoundsError(low, high)
          else gen(chLng(low,high))
      }
    // The fixed-width integral instances all reuse the Long implementation.
    implicit val chooseInt: Choose[Int] =
      Choose.xmap[Long, Int](_.toInt, _.toLong)
    implicit val chooseShort: Choose[Short] =
      Choose.xmap[Long, Short](_.toShort, _.toLong)
    implicit val chooseChar: Choose[Char] =
      Choose.xmap[Long, Char](_.toChar, _.toLong)
    implicit val chooseByte: Choose[Byte] =
      Choose.xmap[Long, Byte](_.toByte, _.toLong)

    implicit val chooseDouble: Choose[Double] =
      new Choose[Double] {
        def choose(low: Double, high: Double) =
          if (low > high) throw new IllegalBoundsError(low, high)
          // Infinite bounds get a 10% weight so they appear but do not dominate.
          else if (low == Double.NegativeInfinity)
            frequency(1 -> const(Double.NegativeInfinity),
              9 -> choose(Double.MinValue, high))
          else if (high == Double.PositiveInfinity)
            frequency(1 -> const(Double.PositiveInfinity),
              9 -> choose(low, Double.MaxValue))
          else gen(chDbl(low,high))
      }
    implicit val chooseFloat: Choose[Float] =
      Choose.xmap[Double, Float](_.toFloat, _.toDouble)

    implicit val chooseFiniteDuration: Choose[FiniteDuration] =
      Choose.xmap[Long, FiniteDuration](Duration.fromNanos, _.toNanos)

    /** Transform a Choose[T] to a Choose[U] where T and U are two isomorphic
     *  types whose relationship is described by the provided transformation
     *  functions. (exponential functor map) */
    def xmap[T, U](from: T => U, to: U => T)(implicit c: Choose[T]): Choose[U] =
      new Choose[U] {
        def choose(low: U, high: U): Gen[U] =
          c.choose(to(low), to(high)).map(from)
      }
  }
//// Various Generator Combinators ////
  /** A generator that always generates the given value. Note that it does not
   *  consume any randomness: the seed is returned unchanged. */
  implicit def const[T](x: T): Gen[T] = gen((p, seed) => r(Some(x), seed))

  /** A generator that never generates a value */
  def fail[T]: Gen[T] = gen((p, seed) => failed[T](seed))

  /** A result that never contains a value */
  private[scalacheck] def failed[T](seed0: Seed): R[T] =
    new R[T] {
      val result: Option[T] = None
      // The sieve rejects everything, so retrieve is always None.
      override def sieve[U >: T]: U => Boolean = _ => false
      val seed = seed0
    }

  /** A generator that generates a random value in the given (inclusive)
   *  range. If the range is invalid, an IllegalBoundsError exception will be
   *  thrown. */
  def choose[T](min: T, max: T)(implicit c: Choose[T]): Gen[T] =
    c.choose(min, max)
  /** Sequences generators. If any of the given generators fails, the
   *  resulting generator will also fail. */
  def sequence[C,T](gs: Traversable[Gen[T]])(implicit b: Buildable[T,C]): Gen[C] = {
    val g = gen { (p, seed) =>
      // Fold left over the generators, threading the seed from each result
      // into the next and appending values to an accumulator Vector.
      gs.foldLeft(r(Some(Vector.empty[T]), seed)) {
        case (rs,g) =>
          val rt = g.doApply(p, rs.seed)
          rt.flatMap(t => rs.map(_ :+ t)).copy(sd = rt.seed)
      }
    }
    // Convert the accumulated Vector into the requested container type.
    g.map(b.fromIterable)
  }
  /** Monadic recursion on Gen
   * This is a stack-safe loop that is the same as:
   *
   * {{{
   *
   * fn(a).flatMap {
   *   case Left(a) => tailRec(a)(fn)
   *   case Right(b) => Gen.const(b)
   * }
   *
   * }}}
   *
   * which is useful for doing monadic loops without blowing up the
   * stack
   */
  def tailRecM[A, B](a0: A)(fn: A => Gen[Either[A, B]]): Gen[B] = {
    @tailrec
    def tailRecMR(a: A, seed: Seed, labs: Set[String])(fn: (A, Seed) => R[Either[A, B]]): R[B] = {
      val re = fn(a, seed)
      // Accumulate labels across iterations and thread the updated seed.
      val nextLabs = labs | re.labels
      re.retrieve match {
        case None => r(None, re.seed).copy(l = nextLabs)
        case Some(Right(b)) => r(Some(b), re.seed).copy(l = nextLabs)
        case Some(Left(a)) => tailRecMR(a, re.seed, nextLabs)(fn)
      }
    }

    // This is the "Reader-style" approach to making a stack-safe loop:
    // we put one outer closure around an explicitly tailrec loop
    gen[B] { (p: P, seed: Seed) =>
      tailRecMR(a0, seed, Set.empty) { (a, seed) => fn(a).doApply(p, seed) }
    }
  }
  /** Wraps a generator lazily. The given parameter is only evaluated once,
   *  and not until the wrapper generator is evaluated. */
  def lzy[T](g: => Gen[T]): Gen[T] = {
    // Memoized: the by-name argument is forced at most once.
    lazy val h = g
    gen { (p, seed) => h.doApply(p, seed) }
  }

  /** Wraps a generator for later evaluation. The given parameter is
   *  evaluated each time the wrapper generator is evaluated. */
  def delay[T](g: => Gen[T]): Gen[T] =
    gen { (p, seed) => g.doApply(p, seed) }

  /** Creates a generator that can access its generation parameters */
  def parameterized[T](f: Parameters => Gen[T]): Gen[T] =
    gen { (p, seed) => f(p).doApply(p, seed) }

  /** Creates a generator that can access its generation size */
  def sized[T](f: Int => Gen[T]): Gen[T] =
    gen { (p, seed) => f(p.size).doApply(p, seed) }

  /** A generator that returns the current generation size */
  lazy val size: Gen[Int] = sized { sz => sz }

  /** Creates a resized version of a generator */
  def resize[T](s: Int, g: Gen[T]) = gen((p, seed) => g.doApply(p.withSize(s), seed))
  /** Picks a random value from a list. Throws IllegalArgumentException on an
   *  empty collection. */
  def oneOf[T](xs: Iterable[T]): Gen[T] =
    if (xs.isEmpty) {
      throw new IllegalArgumentException("oneOf called on empty collection")
    } else {
      // Materialize once so indexing is O(1) regardless of the input type.
      val vector = xs.toVector
      choose(0, vector.size - 1).map(vector(_))
    }

  /** Picks a random value from a list.
   *  @todo Remove this overloaded method in the next major release. See #438.
   */
  def oneOf[T](xs: Seq[T]): Gen[T] =
    oneOf(xs: Iterable[T])

  /** Picks a random value from a list */
  def oneOf[T](t0: T, t1: T, tn: T*): Gen[T] = oneOf(t0 +: t1 +: tn)

  /** Picks a random generator from a list */
  def oneOf[T](g0: Gen[T], g1: Gen[T], gn: Gen[T]*): Gen[T] = {
    val gs = g0 +: g1 +: gn
    // The sieve accepts a value if any of the source generators would.
    choose(0,gs.size-1).flatMap(gs(_)).suchThat(x => gs.exists(_.sieveCopy(x)))
  }

  /** Makes a generator result optional. Either `Some(T)` or `None` will be
   *  provided; `None` is produced with weight 1 in 10. */
  def option[T](g: Gen[T]): Gen[Option[T]] =
    frequency(1 -> const(None), 9 -> some(g))

  /** A generator that returns `Some(T)` */
  def some[T](g: Gen[T]): Gen[Option[T]] =
    g.map(Some.apply)

  /** Generates a `Left` of `T` or a `Right` of `U` with equal probability. */
  def either[T, U](gt: Gen[T], gu: Gen[U]): Gen[Either[T, U]] =
    oneOf(gt.map(Left(_)), gu.map(Right(_)))
  /** Chooses one of the given generators with a weighted random distribution.
   *  Entries with non-positive weights are ignored; if no entry has a positive
   *  weight, an IllegalArgumentException is thrown. */
  def frequency[T](gs: (Int, Gen[T])*): Gen[T] = {
    val filtered = gs.iterator.filter(_._1 > 0).toVector
    if (filtered.isEmpty) {
      throw new IllegalArgumentException("no items with positive weights")
    } else {
      // Build a TreeMap keyed by cumulative weight; a uniform draw in
      // [1, total] then selects the first bucket at or above it.
      var total = 0L
      val builder = TreeMap.newBuilder[Long, Gen[T]]
      filtered.foreach { case (weight, value) =>
        total += weight
        builder += ((total, value))
      }
      val tree = builder.result
      choose(1L, total).flatMap(r => tree.rangeFrom(r).head._2).suchThat { x =>
        // A value is acceptable if any of the source generators accepts it.
        gs.exists(_._2.sieveCopy(x))
      }
    }
  }

  /** Implicit convenience method for using the `frequency` method
   *  like this:
   *  {{{
   *   frequency((1, "foo"), (3, "bar"))
   *  }}}
   */
  implicit def freqTuple[T](t: (Int,T)): (Int,Gen[T]) = (t._1, const(t._2))
//// List Generators ////
  /** Generates a container of any Traversable type for which there exists an
   *  implicit [[org.scalacheck.util.Buildable]] instance. The elements in the
   *  container will be generated by the given generator. The size of the
   *  generated container is limited by `n`. Depending on what kind of container
   *  that is generated, the resulting container may contain fewer elements than
   *  `n`, but not more. If the given generator fails generating a value, the
   *  complete container generator will also fail. */
  def buildableOfN[C,T](n: Int, g: Gen[T])(implicit
    evb: Buildable[T,C], evt: C => Traversable[T]
  ): Gen[C] =
    sequence[C,T](Traversable.fill(n)(g)) suchThat { c =>
      // TODO: Can we guarantee c.size == n (See issue #89)?
      evt(c).forall(g.sieveCopy)
    }

  /** Generates a container of any Traversable type for which there exists an
   *  implicit [[org.scalacheck.util.Buildable]] instance. The elements in the
   *  container will be generated by the given generator. The size of the
   *  container is bounded by the size parameter used when generating values. */
  def buildableOf[C,T](g: Gen[T])(implicit
    evb: Buildable[T,C], evt: C => Traversable[T]
  ): Gen[C] =
    // Pick a length in [0, size], then delegate to the fixed-size variant.
    sized(s => choose(0, s max 0).flatMap(buildableOfN[C,T](_,g))) suchThat { c =>
      if (c == null) g.sieveCopy(null) else evt(c).forall(g.sieveCopy)
    }

  /** Generates a non-empty container of any Traversable type for which there
   *  exists an implicit [[org.scalacheck.util.Buildable]] instance. The
   *  elements in the container will be generated by the given generator. The
   *  size of the container is bounded by the size parameter used when
   *  generating values. */
  def nonEmptyBuildableOf[C,T](g: Gen[T])(implicit
    evb: Buildable[T,C], evt: C => Traversable[T]
  ): Gen[C] =
    sized(s => choose(1, s max 1).flatMap(buildableOfN[C,T](_,g))) suchThat(c => evt(c).size > 0)

  /** A convenience method for calling `buildableOfN[C[T],T](n,g)`. */
  def containerOfN[C[_],T](n: Int, g: Gen[T])(implicit
    evb: Buildable[T,C[T]], evt: C[T] => Traversable[T]
  ): Gen[C[T]] = buildableOfN[C[T],T](n,g)

  /** A convenience method for calling `buildableOf[C[T],T](g)`. */
  def containerOf[C[_],T](g: Gen[T])(implicit
    evb: Buildable[T,C[T]], evt: C[T] => Traversable[T]
  ): Gen[C[T]] = buildableOf[C[T],T](g)

  /** A convenience method for calling `nonEmptyBuildableOf[C[T],T](g)`. */
  def nonEmptyContainerOf[C[_],T](g: Gen[T])(implicit
    evb: Buildable[T,C[T]], evt: C[T] => Traversable[T]
  ): Gen[C[T]] = nonEmptyBuildableOf[C[T],T](g)

  /** Generates a list of random length. The maximum length depends on the
   *  size parameter. This method is equal to calling
   *  `containerOf[List,T](g)`. */
  def listOf[T](g: => Gen[T]) = buildableOf[List[T],T](g)

  /** Generates a non-empty list of random length. The maximum length depends
   *  on the size parameter. This method is equal to calling
   *  `nonEmptyContainerOf[List,T](g)`. */
  def nonEmptyListOf[T](g: => Gen[T]) = nonEmptyBuildableOf[List[T],T](g)

  /** Generates a list with at most the given number of elements. This method
   *  is equal to calling `containerOfN[List,T](n,g)`. */
  def listOfN[T](n: Int, g: Gen[T]) = buildableOfN[List[T],T](n,g)

  /** Generates a map of random length. The maximum length depends on the
   *  size parameter. This method is equal to calling
   *  <code>containerOf[Map,(T,U)](g)</code>. */
  def mapOf[T,U](g: => Gen[(T,U)]) = buildableOf[Map[T,U],(T,U)](g)

  /** Generates a non-empty map of random length. The maximum length depends
   *  on the size parameter. This method is equal to calling
   *  <code>nonEmptyContainerOf[Map,(T,U)](g)</code>. */
  def nonEmptyMap[T,U](g: => Gen[(T,U)]) = nonEmptyBuildableOf[Map[T,U],(T,U)](g)

  /** Generates a map with at most the given number of elements. This method
   *  is equal to calling <code>containerOfN[Map,(T,U)](n,g)</code>. */
  def mapOfN[T,U](n: Int, g: Gen[(T,U)]) = buildableOfN[Map[T,U],(T,U)](n,g)

  /** Generates an infinite stream. Each element is produced with pureApply,
   *  which retries and may throw Gen.RetrievalError when the stream is forced. */
  def infiniteStream[T](g: => Gen[T]): Gen[Stream[T]] = {
    def unfold[A, S](z: S)(f: S => Option[(A, S)]): Stream[A] = f(z) match {
      case Some((h, s)) => h #:: unfold(s)(f)
      case None => Stream.empty
    }
    gen { (p, seed0) =>
      new R[Stream[T]] {
        val result: Option[Stream[T]] = Some(unfold(seed0)(s => Some(g.pureApply(p, s) -> s.next)))
        val seed: Seed = seed0.next
      }
    }
  }
/** A generator that picks a random number of elements from a list */
def someOf[T](l: Iterable[T]) = choose(0,l.size).flatMap(pick(_,l))
/** A generator that picks a random number of elements from a list */
def someOf[T](g1: Gen[T], g2: Gen[T], gs: Gen[T]*) =
choose(0, gs.length+2).flatMap(pick(_, g1, g2, gs: _*))
/** A generator that picks at least one element from a list */
def atLeastOne[T](l: Iterable[T]) = {
require(l.size > 0, "There has to be at least one option to choose from")
choose(1,l.size).flatMap(pick(_,l))
}
/** A generator that picks at least one element from a list */
def atLeastOne[T](g1: Gen[T], g2: Gen[T], gs: Gen[T]*) =
choose(1, gs.length+2).flatMap(pick(_, g1, g2, gs: _*))
  /** A generator that randomly picks a given number of elements from a list
   *
   *  The elements are not guaranteed to be permuted in random order.
   *
   *  Implementation follows the reservoir-sampling pattern (Algorithm R):
   *  the first `n` elements fill the buffer, and each later element replaces
   *  a random slot with probability n/count.
   *
   *  @throws IllegalArgumentException if `n` is negative or larger than the
   *          list size
   */
  def pick[T](n: Int, l: Iterable[T]): Gen[collection.Seq[T]] = {
    if (n > l.size || n < 0) throw new IllegalArgumentException(s"invalid choice: $n")
    else if (n == 0) Gen.const(Nil)
    else gen { (p, seed0) =>
      val buf = ArrayBuffer.empty[T]
      val it = l.iterator
      var seed = seed0
      var count = 0
      while (it.hasNext) {
        val t = it.next
        count += 1
        if (count <= n) {
          buf += t
        } else {
          // Draw a non-negative value and map it into [0, count).
          // NOTE(review): `% count` has a slight modulo bias for counts that
          // don't divide 2^31 — presumably acceptable for test-data sampling.
          val (x, s) = seed.long
          val i = (x & 0x7fffffff).toInt % count
          if (i < n) buf(i) = t
          seed = s
        }
      }
      r(Some(buf), seed)
    }
  }
  /** A generator that randomly picks a given number of elements from a list
   *  of generators and runs them.
   *
   *  The elements are not guaranteed to be permuted in random order.
   */
  def pick[T](n: Int, g1: Gen[T], g2: Gen[T], gn: Gen[T]*): Gen[Seq[T]] = {
    val gs = g1 +: g2 +: gn
    // Pick n distinct indices, run the corresponding generators, then keep
    // only results that at least one generator could itself have produced
    // (sieveCopy re-applies any suchThat filters attached to the generators).
    pick(n, 0 until gs.size).flatMap(idxs =>
      sequence[List[T],T](idxs.toList.map(gs(_)))
    ).suchThat(_.forall(x => gs.exists(_.sieveCopy(x))))
  }
/** Takes a function and returns a generator that generates arbitrary
* results of that function by feeding it with arbitrarily generated input
* parameters. */
def resultOf[T,R0](f: T => R0)(implicit a: Arbitrary[T]): Gen[R0] =
arbitrary[T] map f
/** Creates a Function0 generator. */
def function0[A](g: Gen[A]): Gen[() => A] =
g.map(a => () => a)
//// Character Generators ////
/** Generates a numerical character */
def numChar: Gen[Char] = choose(48.toChar, 57.toChar)
/** Generates an upper-case alpha character */
def alphaUpperChar: Gen[Char] = choose(65.toChar, 90.toChar)
/** Generates a lower-case alpha character */
def alphaLowerChar: Gen[Char] = choose(97.toChar, 122.toChar)
/** Generates an alpha character */
def alphaChar = frequency((1,alphaUpperChar), (9,alphaLowerChar))
/** Generates an alphanumerical character */
def alphaNumChar = frequency((1,numChar), (9,alphaChar))
/** Generates a ASCII character, with extra weighting for printable characters */
def asciiChar: Gen[Char] = chooseNum(0, 127, 32 to 126:_*).map(_.toChar)
/** Generates a ASCII printable character */
def asciiPrintableChar: Gen[Char] = choose(32.toChar, 126.toChar)
/** Generates a character that can represent a valid hexadecimal digit. This
* includes both upper and lower case values.
*/
def hexChar: Gen[Char] =
Gen.oneOf(
Gen.oneOf("0123456789abcdef".toSeq),
Gen.oneOf("0123456789ABCDEF".toSeq)
)
//// String Generators ////
/** Generates a string that starts with a lower-case alpha character,
* and only contains alphanumerical characters */
def identifier: Gen[String] = (for {
c <- alphaLowerChar
cs <- listOf(alphaNumChar)
} yield (c::cs).mkString)
/** Generates a string of digits */
def numStr: Gen[String] =
listOf(numChar).map(_.mkString)
/** Generates a string of upper-case alpha characters */
def alphaUpperStr: Gen[String] =
listOf(alphaUpperChar).map(_.mkString)
/** Generates a string of lower-case alpha characters */
def alphaLowerStr: Gen[String] =
listOf(alphaLowerChar).map(_.mkString)
/** Generates a string of alpha characters */
def alphaStr: Gen[String] =
listOf(alphaChar).map(_.mkString)
/** Generates a string of alphanumerical characters */
def alphaNumStr: Gen[String] =
listOf(alphaNumChar).map(_.mkString)
/** Generates a string of ASCII characters, with extra weighting for printable characters */
def asciiStr: Gen[String] =
listOf(asciiChar).map(_.mkString)
/** Generates a string of ASCII printable characters */
def asciiPrintableStr: Gen[String] =
listOf(asciiPrintableChar).map(_.mkString)
/** Generates a string that can represent a valid hexadecimal digit. This
* includes both upper and lower case values.
*/
def hexStr: Gen[String] =
listOf(hexChar).map(_.mkString)
  //// Number Generators ////

  /** Generates positive numbers of uniform distribution, with an
   *  upper bound of the generation size parameter. */
  def posNum[T](implicit num: Numeric[T], c: Choose[T]): Gen[T] = {
    import num._
    // max(fromInt(n), one) guards against a generation size of 0; the
    // suchThat filter discards a generated zero so the result is strictly
    // positive.
    sized(n => c.choose(zero, max(fromInt(n), one)).suchThat(_ != zero))
  }

  /** Generates negative numbers of uniform distribution, with an
   *  lower bound of the negated generation size parameter. */
  def negNum[T](implicit num: Numeric[T], c: Choose[T]): Gen[T] = {
    import num._
    // Mirror image of posNum: range [-max(n,1), 0], excluding zero.
    sized(n => c.choose(min(-fromInt(n), -one), zero).suchThat(_ != zero))
  }
  /** Generates numbers within the given inclusive range, with
   *  extra weight on zero, +/- unity, both extremities, and any special
   *  numbers provided. The special numbers must lie within the given range,
   *  otherwise they won't be included. */
  def chooseNum[T](minT: T, maxT: T, specials: T*)(
    implicit num: Numeric[T], c: Choose[T]
  ): Gen[T] = {
    import num._
    val basics = List(minT, maxT, zero, one, -one)
    // Keep only candidates that actually fall inside [minT, maxT]; each gets
    // weight 1.
    val basicsAndSpecials = for {
      t <- specials ++ basics if t >= minT && t <= maxT
    } yield (1, const(t))
    // The uniform generator receives the same total weight as all the
    // weighted candidates combined, i.e. a 50/50 split between "special"
    // draws and uniform draws.
    val other = (basicsAndSpecials.length, c.choose(minT, maxT))
    val allGens = basicsAndSpecials :+ other
    frequency(allGens: _*)
  }
  //// Misc Generators ////

  /** Generates a version 4 (random) UUID. */
  lazy val uuid: Gen[UUID] = for {
    l1 <- Gen.choose(Long.MinValue, Long.MaxValue)
    l2 <- Gen.choose(Long.MinValue, Long.MaxValue)
    y <- Gen.oneOf('8', '9', 'a', 'b')
  } yield UUID.fromString(
    // Patch the string form: position 14 is the version nibble (forced to
    // '4') and position 19 is the variant nibble (forced to 8/9/a/b), as
    // required for a random UUID.
    new UUID(l1,l2).toString.updated(14, '4').updated(19, y)
  )
  /** Generates a `java.util.Calendar`, mostly uniform over a wide time range
   *  but with calendar edge cases (year/month boundaries, leap days, start
   *  and end of day) mixed in at extra weight. */
  lazy val calendar: Gen[Calendar] = {
    import Calendar._

    // Apply an in-place mutation to a calendar and return the same instance.
    def adjust(c: Calendar)(f: Calendar => Unit): Calendar = { f(c); c }

    // We want to be sure we always initialize the calendar's time. By
    // default, Calendar.getInstance uses the system time. We always
    // overwrite it with a deterministically-generated time to be sure
    // that calendar generation is also deterministic.
    //
    // We limit the time (in milliseconds) because extreme values will
    // cause Calendar.getTime calls to fail. This range is relatively
    // large but safe:
    //
    // -62135751600000 is 1 CE
    // 64087186649116 is 4000 CE
    val calendar: Gen[Calendar] =
      Gen.chooseNum(-62135751600000L, 64087186649116L).map { t =>
        adjust(Calendar.getInstance)(_.setTimeInMillis(t))
      }

    def yearGen(c: Calendar): Gen[Int] =
      Gen.chooseNum(c.getGreatestMinimum(YEAR), c.getLeastMaximum(YEAR))

    // Walk forward from `year` until a leap year (> 365 days) is found.
    // When `year` is within 4 years of the maximum, start 5 years earlier
    // instead so the loop cannot run past the last representable year.
    def moveToNearestLeapDate(c: Calendar, year: Int): Calendar = {
      @tailrec def loop(y: Int): Calendar = {
        c.set(YEAR, y)
        if (c.getActualMaximum(DAY_OF_YEAR) > 365) c else loop(y + 1)
      }
      loop(if (year + 4 > c.getLeastMaximum(YEAR)) year - 5 else year)
    }

    val beginningOfDayGen: Gen[Calendar] =
      calendar.map(c => adjust(c) { c =>
        c.set(HOUR_OF_DAY, 0)
        c.set(MINUTE, 0)
        c.set(SECOND, 0)
        c.set(MILLISECOND, 0)
      })

    // NOTE(review): MILLISECOND is set to 59, not 999, so this is not the
    // literal last millisecond of the day — preserved as-is; confirm intent.
    val endOfDayGen: Gen[Calendar] =
      calendar.map(c => adjust(c) { c =>
        c.set(HOUR_OF_DAY, 23)
        c.set(MINUTE, 59)
        c.set(SECOND, 59)
        c.set(MILLISECOND, 59)
      })

    val firstDayOfYearGen: Gen[Calendar] =
      for { c <- calendar; y <- yearGen(c) } yield adjust(c)(_.set(y, JANUARY, 1))
    val lastDayOfYearGen: Gen[Calendar] =
      for { c <- calendar; y <- yearGen(c) } yield adjust(c)(_.set(y, DECEMBER, 31))
    val closestLeapDateGen: Gen[Calendar] =
      for { c <- calendar; y <- yearGen(c) } yield moveToNearestLeapDate(c, y)
    val lastDayOfMonthGen: Gen[Calendar] =
      calendar.map(c => adjust(c)(_.set(DAY_OF_MONTH, c.getActualMaximum(DAY_OF_MONTH))))
    val firstDayOfMonthGen: Gen[Calendar] =
      calendar.map(c => adjust(c)(_.set(DAY_OF_MONTH, 1)))

    // 7 of 14 weight parts go to plain uniform calendars; the other half is
    // split evenly between the edge-case generators above.
    Gen.frequency(
      (1, firstDayOfYearGen),
      (1, lastDayOfYearGen),
      (1, closestLeapDateGen),
      (1, beginningOfDayGen),
      (1, endOfDayGen),
      (1, firstDayOfMonthGen),
      (1, lastDayOfMonthGen),
      (7, calendar))
  }
  /** Generates an arbitrary (possibly negative) `FiniteDuration`, uniform
   *  over the representable nanosecond range. */
  val finiteDuration: Gen[FiniteDuration] =
    // Duration.fromNanos doesn't allow Long.MinValue since it would create a
    // duration that cannot be negated.
    chooseNum(Long.MinValue + 1, Long.MaxValue).map(Duration.fromNanos)

  /**
   * Generates instance of Duration.
   *
   * In addition to `FiniteDuration` values, this can generate `Duration.Inf`,
   * `Duration.MinusInf`, and `Duration.Undefined`.
   */
  val duration: Gen[Duration] = frequency(
    1 -> const(Duration.Inf),
    1 -> const(Duration.MinusInf),
    1 -> const(Duration.Undefined),
    1 -> const(Duration.Zero),
    6 -> finiteDuration)
}
| xuwei-k/scalacheck | src/main/scala/org/scalacheck/Gen.scala | Scala | bsd-3-clause | 35,970 |
// Hierarchical Labeled LDA
//
// Trains a labeled LDA model with the Stanford Topic Modeling Toolbox (TMT).
// Configuration is taken from environment variables:
//   INPUT_HLLDA          - input CSV (col 1: id, col 2: text, col 3: labels)
//   OUTPUT_HLLDA         - output directory for the trained model
//   NUMBER_OF_ITERATIONS - maximum training iterations

// tells Scala where to find the TMT classes
import scalanlp.io._;
import scalanlp.stage._;
import scalanlp.stage.text._;
import scalanlp.text.tokenize._;
import scalanlp.pipes.Pipes.global._;

import edu.stanford.nlp.tmt.stage._;
import edu.stanford.nlp.tmt.model.lda._;
import edu.stanford.nlp.tmt.model.llda._;
import edu.stanford.nlp.tmt.learn._;

val input_path = System.getenv("INPUT_HLLDA");
val model_path = System.getenv("OUTPUT_HLLDA");
// NOTE: throws NumberFormatException if NUMBER_OF_ITERATIONS is unset or non-numeric
val n_iterations = Integer.parseInt(System.getenv("NUMBER_OF_ITERATIONS"));

println("[HLLDA] Input Path: " + input_path);
println("[HLLDA] Model Path: " + model_path);
println("[HLLDA] Number of Iterations: " + n_iterations);

// column 1 of the CSV is the document id
val source = CSVFile(input_path) ~> IDColumn(1);

val tokenizer = {
  SimpleEnglishTokenizer() ~> // tokenize on space and punctuation
  MinimumLengthFilter(0) // length filter disabled (0): keep terms of any length
}

val text = {
  source ~> // read from the source file
  Column(2) ~> // select column containing text
  TokenizeWith(tokenizer) ~> // tokenize with tokenizer above
  TermCounter() ~> // collect counts (needed below)
  DocumentMinimumLengthFilter(0) // length filter disabled (0): keep all documents
}

// display information about the loaded dataset
println("Description of the loaded text field:");
println(text.description);
println();
println("------------------------------------");
println();

// define fields from the dataset we are going to slice against
val labels = {
  source ~> // read from the source file
  Column(3) ~> // select column three, containing the document labels
  TokenizeWith(WhitespaceTokenizer()) ~> // turns label field into an array
  TermCounter() ~> // collect label counts
  TermMinimumDocumentCountFilter(0) // count filter disabled (0): keep all labels
}

println("Creating L-LDA dataset ...");
val dataset = LabeledLDADataset(text, labels);

// define the model parameters
println("Creating L-LDA parameters ...");
val modelParams = LabeledLDAModelParams(dataset);

// Name of the output model folder to generate
println("Creating model file ...");
val modelPath = file(model_path);

// Trains the model, writing to the given output path
println("Running model inference ...");
//TrainCVB0LabeledLDA(modelParams, dataset, output = modelPath, maxIterations = n_iterations);
// CVB0 training; second argument is presumably the worker-thread count — TODO confirm
val modeler = ThreadedModeler(CVB0LabeledLDA,20);
modeler.train(modelParams, dataset, modelPath, saveDataState = false, maxIterations = n_iterations);

// Export each document's sparse topic distribution next to the model.
val table : Iterable[(String,List[(Int,Double)])] = modeler.data.view.map(doc => (doc.id,doc.signature.activeIterator.toList));
CSVFile(modelPath, "document-topic-distributions.csv").write(table);

//return modeler.model.get;
//TrainCVB0LabeledLDA(modelParams, dataset, output = modelPath, maxIterations = 1000);
//TrainGibbsLabeledLDA(modelParams, dataset, output = modelPath, maxIterations = n_iterations);
| ypetinot/web-summarization | models/topic-models/hllda/src/hllda.scala | Scala | apache-2.0 | 3,010 |
/***********************************************************************
* Copyright (c) 2015-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.geomesa.nifi.processors.accumulo
import com.typesafe.scalalogging.LazyLogging
import org.apache.nifi.avro.AvroReader
import org.apache.nifi.json.JsonTreeReader
import org.apache.nifi.schema.access.SchemaAccessUtils
import org.apache.nifi.schema.inference.SchemaInferenceUtil
import org.apache.nifi.util.TestRunners
import org.geomesa.nifi.datastore.processor.mixins.{ConvertInputProcessor, DataStoreIngestProcessor, FeatureTypeProcessor}
import org.geomesa.nifi.datastore.processor.records.Properties
import org.geomesa.nifi.datastore.processor.{CompatibilityMode, RecordUpdateProcessor, Relationships}
import org.geotools.data.{DataStoreFinder, Transaction}
import org.junit.{Assert, Test}
import org.locationtech.geomesa.accumulo.MiniCluster
import org.locationtech.geomesa.accumulo.data.AccumuloDataStoreParams
import org.locationtech.geomesa.convert.ConverterConfigLoader
import org.locationtech.geomesa.convert2.SimpleFeatureConverter
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.features.avro.AvroDataFileWriter
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.{FeatureUtils, SimpleFeatureTypeLoader, SimpleFeatureTypes}
import org.locationtech.geomesa.utils.io.WithClose
import org.locationtech.geomesa.utils.text.WKTUtils
import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import java.nio.charset.StandardCharsets
import java.text.SimpleDateFormat
import java.util.{Collections, Date}
class PutGeoMesaAccumuloTest extends LazyLogging {
import scala.collection.JavaConverters._
// we use class name to prevent spillage between unit tests
  // we use class name to prevent spillage between unit tests
  lazy val root = s"${MiniCluster.namespace}.${getClass.getSimpleName}"

  // note the table needs to be different to prevent tests from conflicting with each other
  // (each test method appends its own suffix to `root` for a unique catalog)
  // Connection parameters for the shared in-process Accumulo mini-cluster.
  lazy val dsParams: Map[String, String] = Map(
    AccumuloDataStoreParams.InstanceIdParam.key -> MiniCluster.cluster.getInstanceName,
    AccumuloDataStoreParams.ZookeepersParam.key -> MiniCluster.cluster.getZooKeepers,
    AccumuloDataStoreParams.UserParam.key -> MiniCluster.Users.root.name,
    AccumuloDataStoreParams.PasswordParam.key -> MiniCluster.Users.root.password
  )
  /** Ingests example.csv via the named sft + bundled 'example-csv' converter
   *  and verifies all three features land in the 'example' schema. */
  @Test
  def testIngest(): Unit = {
    val catalog = s"${root}Ingest"
    val runner = TestRunners.newTestRunner(new PutGeoMesaAccumulo())
    try {
      dsParams.foreach { case (k, v) => runner.setProperty(k, v) }
      runner.setProperty(AccumuloDataStoreParams.CatalogParam.key, catalog)
      runner.setProperty(FeatureTypeProcessor.Properties.SftNameKey, "example")
      runner.setProperty(ConvertInputProcessor.Properties.ConverterNameKey, "example-csv")
      runner.enqueue(getClass.getClassLoader.getResourceAsStream("example.csv"))
      runner.run()
      runner.assertTransferCount(Relationships.SuccessRelationship, 1)
      runner.assertTransferCount(Relationships.FailureRelationship, 0)
    } finally {
      runner.shutdown()
    }
    // verify ingested data directly through a GeoTools data store
    val ds = DataStoreFinder.getDataStore((dsParams + (AccumuloDataStoreParams.CatalogParam.key -> catalog)).asJava)
    Assert.assertNotNull(ds)
    try {
      val sft = ds.getSchema("example")
      Assert.assertNotNull(sft)
      val features = SelfClosingIterator(ds.getFeatureSource("example").getFeatures.features()).toList
      logger.debug(features.mkString(";"))
      Assert.assertEquals(3, features.length)
    } finally {
      ds.dispose()
    }
  }
  /** Verifies the processor is invalid when given a raw sft spec without a
   *  type name, then becomes valid (and ingests) once a name override is set. */
  @Test
  def testSpecValidation(): Unit = {
    val catalog = s"${root}IngestSpec"
    val runner = TestRunners.newTestRunner(new PutGeoMesaAccumulo())
    try {
      dsParams.foreach { case (k, v) => runner.setProperty(k, v) }
      runner.setProperty(AccumuloDataStoreParams.CatalogParam.key, catalog)
      runner.setProperty(FeatureTypeProcessor.Properties.SftSpec,
        "fid:Int,name:String,age:Int,dtg:Date,geom:Point:srid=4326")
      runner.setProperty(ConvertInputProcessor.Properties.ConverterNameKey, "example-csv")
      // a bare spec has no type name, so the processor must not validate yet
      runner.assertNotValid()
      runner.setProperty(FeatureTypeProcessor.Properties.FeatureNameOverride, "example")
      runner.enqueue(getClass.getClassLoader.getResourceAsStream("example.csv"))
      runner.run()
      runner.assertTransferCount(Relationships.SuccessRelationship, 1)
      runner.assertTransferCount(Relationships.FailureRelationship, 0)
    } finally {
      runner.shutdown()
    }
    val ds = DataStoreFinder.getDataStore((dsParams + (AccumuloDataStoreParams.CatalogParam.key -> catalog)).asJava)
    Assert.assertNotNull(ds)
    try {
      val sft = ds.getSchema("example")
      Assert.assertNotNull(sft)
      val features = SelfClosingIterator(ds.getFeatureSource("example").getFeatures.features()).toList
      logger.debug(features.mkString(";"))
      Assert.assertEquals(3, features.length)
    } finally {
      ds.dispose()
    }
  }
  /** Ingests with converter config from flow-file attributes; the converter
   *  ('example-csv-attributes') reads 'my.flowfile.attribute' into the
   *  feature ids, so the resulting ids are prefixed with its value. */
  @Test
  def testIngestConvertAttributes(): Unit = {
    val catalog = s"${root}IngestConvertAttributes"
    val runner = TestRunners.newTestRunner(new PutGeoMesaAccumulo())
    try {
      dsParams.foreach { case (k, v) => runner.setProperty(k, v) }
      runner.setProperty(AccumuloDataStoreParams.CatalogParam.key, catalog)
      // allow the converter to reference flow-file attributes
      runner.setProperty(ConvertInputProcessor.Properties.ConvertFlowFileAttributes, "true")
      val attributes = new java.util.HashMap[String, String]()
      attributes.put(FeatureTypeProcessor.Attributes.SftSpecAttribute, "example")
      attributes.put(ConvertInputProcessor.Attributes.ConverterAttribute, "example-csv-attributes")
      attributes.put("my.flowfile.attribute", "foobar")
      runner.enqueue(getClass.getClassLoader.getResourceAsStream("example.csv"), attributes)
      runner.run()
      runner.assertTransferCount(Relationships.SuccessRelationship, 1)
      runner.assertTransferCount(Relationships.FailureRelationship, 0)
    } finally {
      runner.shutdown()
    }
    val ds = DataStoreFinder.getDataStore((dsParams + (AccumuloDataStoreParams.CatalogParam.key -> catalog)).asJava)
    Assert.assertNotNull(ds)
    try {
      val sft = ds.getSchema("example")
      Assert.assertNotNull(sft)
      val features = SelfClosingIterator(ds.getFeatureSource("example").getFeatures.features()).toList
      logger.debug(features.mkString(";"))
      Assert.assertEquals(3, features.length)
      // ids built from the flow-file attribute value plus a row suffix
      Assert.assertEquals(Seq("foobar2", "foobar3", "foobar4"), features.map(_.getID).sorted)
    } finally {
      ds.dispose()
    }
  }
  /** Ingests with the sft and converter named entirely via flow-file
   *  attributes (no processor-level configuration). */
  @Test
  def testIngestConfigureAttributes(): Unit = {
    val catalog = s"${root}IngestConfigureAttributes"
    val runner = TestRunners.newTestRunner(new PutGeoMesaAccumulo())
    try {
      dsParams.foreach { case (k, v) => runner.setProperty(k, v) }
      runner.setProperty(AccumuloDataStoreParams.CatalogParam.key, catalog)
      val attributes = new java.util.HashMap[String, String]()
      attributes.put(FeatureTypeProcessor.Attributes.SftSpecAttribute, "example")
      attributes.put(ConvertInputProcessor.Attributes.ConverterAttribute, "example-csv")
      runner.enqueue(getClass.getClassLoader.getResourceAsStream("example.csv"), attributes)
      runner.run()
      runner.assertTransferCount(Relationships.SuccessRelationship, 1)
      runner.assertTransferCount(Relationships.FailureRelationship, 0)
    } finally {
      runner.shutdown()
    }
    val ds = DataStoreFinder.getDataStore((dsParams + (AccumuloDataStoreParams.CatalogParam.key -> catalog)).asJava)
    Assert.assertNotNull(ds)
    try {
      val sft = ds.getSchema("example")
      Assert.assertNotNull(sft)
      val features = SelfClosingIterator(ds.getFeatureSource("example").getFeatures.features()).toList
      logger.debug(features.mkString(";"))
      Assert.assertEquals(3, features.length)
    } finally {
      ds.dispose()
    }
  }
  /** Verifies a flow-file name attribute overrides the processor-level sft
   *  name: data ingests under 'renamed' instead of 'example'. */
  @Test
  def testIngestConfigureAttributeOverride(): Unit = {
    val catalog = s"${root}IngestConfigureAttributeOverride"
    val runner = TestRunners.newTestRunner(new PutGeoMesaAccumulo())
    try {
      dsParams.foreach { case (k, v) => runner.setProperty(k, v) }
      runner.setProperty(AccumuloDataStoreParams.CatalogParam.key, catalog)
      runner.setProperty(FeatureTypeProcessor.Properties.SftNameKey, "example")
      runner.setProperty(ConvertInputProcessor.Properties.ConverterNameKey, "example-csv")
      // per-flow-file override of the configured type name
      val attributes = new java.util.HashMap[String, String]()
      attributes.put(FeatureTypeProcessor.Attributes.SftNameAttribute, "renamed")
      runner.enqueue(getClass.getClassLoader.getResourceAsStream("example.csv"), attributes)
      runner.run()
      runner.assertTransferCount(Relationships.SuccessRelationship, 1)
      runner.assertTransferCount(Relationships.FailureRelationship, 0)
    } finally {
      runner.shutdown()
    }
    val ds = DataStoreFinder.getDataStore((dsParams + (AccumuloDataStoreParams.CatalogParam.key -> catalog)).asJava)
    Assert.assertNotNull(ds)
    try {
      val sft = ds.getSchema("renamed")
      Assert.assertNotNull(sft)
      val features = SelfClosingIterator(ds.getFeatureSource("renamed").getFeatures.features()).toList
      logger.debug(features.mkString(";"))
      Assert.assertEquals(3, features.length)
    } finally {
      ds.dispose()
    }
  }
  /** Ingests a GeoMesa Avro file in Exact compatibility mode: the matching
   *  file succeeds, and a file with a mismatched schema is routed to failure. */
  @Test
  def testAvroIngest(): Unit = {
    val catalog = s"${root}AvroIngest"
    val runner = TestRunners.newTestRunner(new AvroToPutGeoMesaAccumulo())
    try {
      dsParams.foreach { case (k, v) => runner.setProperty(k, v) }
      runner.setProperty(AccumuloDataStoreParams.CatalogParam.key, catalog)
      runner.setProperty(FeatureTypeProcessor.Properties.FeatureNameOverride, "example")
      runner.setProperty(DataStoreIngestProcessor.Properties.SchemaCompatibilityMode, CompatibilityMode.Exact.toString)
      runner.enqueue(getClass.getClassLoader.getResourceAsStream("example-csv.avro"))
      runner.run()
      runner.assertTransferCount(Relationships.SuccessRelationship, 1)
      runner.assertTransferCount(Relationships.FailureRelationship, 0)
      // schema mismatch must be rejected under Exact mode
      runner.enqueue(getClass.getClassLoader.getResourceAsStream("bad-example-csv.avro"))
      runner.run()
      runner.assertTransferCount(Relationships.SuccessRelationship, 1)
      runner.assertTransferCount(Relationships.FailureRelationship, 1)
    } finally {
      runner.shutdown()
    }
    val ds = DataStoreFinder.getDataStore((dsParams + (AccumuloDataStoreParams.CatalogParam.key -> catalog)).asJava)
    Assert.assertNotNull(ds)
    try {
      val sft = ds.getSchema("example")
      Assert.assertNotNull(sft)
      val features = SelfClosingIterator(ds.getFeatureSource("example").getFeatures.features()).toList
      logger.debug(features.mkString(";"))
      Assert.assertEquals(3, features.length)
    } finally {
      ds.dispose()
    }
  }
  /** Ingests an Avro file whose schema is a subset of the existing 'example'
   *  sft; Existing mode maps attributes by name, leaving unmapped attributes
   *  null. */
  @Test
  def testAvroIngestByName(): Unit = {
    val catalog = s"${root}AvroIngestByName"
    // Let's make a new Avro file
    val sft2 = SimpleFeatureTypes.createType("test2", "name:String,*geom:Point:srid=4326,dtg:Date")
    val pt = WKTUtils.read("POINT(1.2 3.4)")
    val date = new Date()
    val baos = new ByteArrayOutputStream()
    // WithClose ensures the writer is closed even if append throws
    WithClose(new AvroDataFileWriter(baos, sft2)) { writer =>
      val sf = new ScalaSimpleFeature(sft2, "sf2-record", Array("Ray", pt, date))
      writer.append(sf)
      writer.flush()
    }
    val is = new ByteArrayInputStream(baos.toByteArray)
    val runner = TestRunners.newTestRunner(new AvroToPutGeoMesaAccumulo())
    try {
      dsParams.foreach { case (k, v) => runner.setProperty(k, v) }
      runner.setProperty(AccumuloDataStoreParams.CatalogParam.key, catalog)
      runner.setProperty(FeatureTypeProcessor.Properties.SftNameKey, "example")
      runner.setProperty(DataStoreIngestProcessor.Properties.SchemaCompatibilityMode, CompatibilityMode.Existing.toString)
      runner.enqueue(is)
      runner.run()
      runner.assertTransferCount(Relationships.SuccessRelationship, 1)
      runner.assertTransferCount(Relationships.FailureRelationship, 0)
    } finally {
      runner.shutdown()
    }
    val ds = DataStoreFinder.getDataStore(
      (dsParams + (AccumuloDataStoreParams.CatalogParam.key -> catalog)).asJava)
    Assert.assertNotNull(ds)
    try {
      val sft = ds.getSchema("example")
      Assert.assertNotNull(sft)
      Assert.assertEquals(5, sft.getAttributeCount)
      val features = SelfClosingIterator(ds.getFeatureSource("example").getFeatures.features()).toList
      Assert.assertEquals(1, features.length)
      logger.debug(features.mkString(";"))
      // attributes not present in sft2 (fid, age) come through as null
      Assert.assertEquals(Seq(null, "Ray", null, date, pt), features.head.getAttributes.asScala)
    } finally {
      ds.dispose()
    }
  }
@Test
def testAvroIngestWitNameOverride(): Unit = {
val catalog = s"${root}AvroIngestWithNameOverride"
// Let's make a new Avro file
val sft2 = SimpleFeatureTypes.createType("test2", "lastseen:Date,newField:Double,age:Int,name:String,*geom:Point:srid=4326")
val baos = new ByteArrayOutputStream()
val writer = new AvroDataFileWriter(baos, sft2)
val sf = new ScalaSimpleFeature(sft2, "sf2-record", Array(new Date(), new java.lang.Double(2.34), new Integer(34), "Ray", WKTUtils.read("POINT(1.2 3.4)")))
sf.getUserData().put("geomesa.feature.visibility", "admin")
writer.append(sf)
writer.flush()
writer.close()
val is = new ByteArrayInputStream(baos.toByteArray)
val runner = TestRunners.newTestRunner(new AvroToPutGeoMesaAccumulo())
try {
dsParams.foreach { case (k, v) => runner.setProperty(k, v) }
runner.setProperty(AccumuloDataStoreParams.CatalogParam.key, catalog)
runner.setProperty(FeatureTypeProcessor.Properties.FeatureNameOverride, "example")
runner.setProperty(DataStoreIngestProcessor.Properties.SchemaCompatibilityMode, CompatibilityMode.Existing.toString)
runner.enqueue(getClass.getClassLoader.getResourceAsStream("example-csv.avro"))
runner.run()
runner.assertTransferCount(Relationships.SuccessRelationship, 1)
runner.assertTransferCount(Relationships.FailureRelationship, 0)
runner.enqueue(getClass.getClassLoader.getResourceAsStream("bad-example-csv.avro"))
runner.run()
runner.assertTransferCount(Relationships.SuccessRelationship, 2)
runner.assertTransferCount(Relationships.FailureRelationship, 0)
runner.enqueue(is)
runner.run()
runner.assertTransferCount(Relationships.SuccessRelationship, 3)
runner.assertTransferCount(Relationships.FailureRelationship, 0)
} finally {
runner.shutdown()
}
val ds = DataStoreFinder.getDataStore(
(dsParams + (AccumuloDataStoreParams.CatalogParam.key -> catalog) +
(AccumuloDataStoreParams.AuthsParam.key -> "admin")).asJava
)
Assert.assertNotNull(ds)
try {
val sft = ds.getSchema("example")
Assert.assertNotNull(sft)
val features = SelfClosingIterator(ds.getFeatureSource("example").getFeatures.features()).toList
val ray = features.find(sf => sf.getAttribute("name") == "Ray")
Assert.assertTrue(ray.map(!_.getUserData.isEmpty).get)
logger.debug(features.mkString(";"))
Assert.assertEquals(7, features.length)
} finally {
ds.dispose()
}
}
  /** Ingests an Avro file against an explicitly-provided TypeSafe-config sft
   *  spec, verifying the declared attribute order wins over the Avro schema. */
  @Test
  def testAvroIngestWithSchema(): Unit = {
    val catalog = s"${root}AvroIngestWithSchema"
    // sft spec in TypeSafe config format, supplied directly to the processor
    val spec =
      """geomesa.sfts.example = {
        |  attributes = [
        |    { name = "name", type = "String", index = true }
        |    { name = "age", type = "Int" }
        |    { name = "dtg", type = "Date" }
        |    { name = "geom", type = "Point", srid = 4326 }
        |  ]
        |}
        |""".stripMargin
    val runner = TestRunners.newTestRunner(new AvroToPutGeoMesaAccumulo())
    try {
      dsParams.foreach { case (k, v) => runner.setProperty(k, v) }
      runner.setProperty(AccumuloDataStoreParams.CatalogParam.key, catalog)
      runner.setProperty(FeatureTypeProcessor.Properties.FeatureNameOverride, "example")
      runner.setProperty(DataStoreIngestProcessor.Properties.SchemaCompatibilityMode, CompatibilityMode.Existing.toString)
      runner.setProperty(FeatureTypeProcessor.Properties.SftSpec, spec)
      runner.enqueue(getClass.getClassLoader.getResourceAsStream("example-csv.avro"))
      runner.run()
      runner.assertTransferCount(Relationships.SuccessRelationship, 1)
      runner.assertTransferCount(Relationships.FailureRelationship, 0)
    } finally {
      runner.shutdown()
    }
    val ds = DataStoreFinder.getDataStore((dsParams + (AccumuloDataStoreParams.CatalogParam.key -> catalog)).asJava)
    Assert.assertNotNull(ds)
    try {
      val sft = ds.getSchema("example")
      Assert.assertNotNull(sft)
      val features = SelfClosingIterator(ds.getFeatureSource("example").getFeatures.features()).toList
      logger.debug(features.mkString(";"))
      Assert.assertEquals(3, features.length)
      // schema should follow the provided spec, not the Avro file layout
      val attributes = features.head.getFeatureType.getAttributeDescriptors.asScala.map(_.getLocalName)
      Assert.assertEquals(Seq("name", "age", "dtg", "geom"), attributes)
    } finally {
      ds.dispose()
    }
  }
  /** Ingests via the record-based processor with an Avro record reader,
   *  checking derived schema, feature ids, WKB geometries, and visibilities. */
  @Test
  def testRecordIngest(): Unit = {
    val catalog = s"${root}RecordIngest"
    val runner = TestRunners.newTestRunner(new PutGeoMesaAccumuloRecord())
    try {
      dsParams.foreach { case (k, v) => runner.setProperty(k, v) }
      runner.setProperty(AccumuloDataStoreParams.CatalogParam.key, catalog)
      // record reader controller service using the Avro-embedded schema
      val service = new AvroReader()
      runner.addControllerService("avro-record-reader", service)
      runner.setProperty(service, SchemaAccessUtils.SCHEMA_ACCESS_STRATEGY, "embedded-avro-schema")
      runner.enableControllerService(service)
      runner.setProperty(Properties.RecordReader, "avro-record-reader")
      runner.setProperty(Properties.FeatureIdCol, "__fid__")
      runner.setProperty(Properties.GeometryCols, "*geom:Point")
      runner.setProperty(Properties.GeometrySerializationDefaultWkt, "WKB")
      runner.setProperty(Properties.VisibilitiesCol, "Vis")
      runner.enqueue(getClass.getClassLoader.getResourceAsStream("example.avro"))
      runner.run()
      runner.assertTransferCount(Relationships.SuccessRelationship, 1)
      runner.assertTransferCount(Relationships.FailureRelationship, 0)
    } finally {
      runner.shutdown()
    }
    val ds = DataStoreFinder.getDataStore((dsParams + (AccumuloDataStoreParams.CatalogParam.key -> catalog)).asJava)
    Assert.assertNotNull(ds)
    try {
      val sft = ds.getSchema("example")
      Assert.assertNotNull(sft)
      Assert.assertEquals(10, sft.getAttributeCount)
      Assert.assertEquals(
        Seq("__version__", "Name", "Age", "LastSeen", "Friends", "Skills", "Lon", "Lat", "geom", "__userdata__"),
        sft.getAttributeDescriptors.asScala.map(_.getLocalName))
      val features = SelfClosingIterator(ds.getFeatureSource("example").getFeatures.features()).toList.sortBy(_.getID)
      logger.debug(features.mkString(";"))
      Assert.assertEquals(3, features.length)
      Assert.assertEquals(
        Seq("02c42fc5e8db91d9b8165c6014a23cb0", "6a6e2089854ec6e20015d1c4857b1d9b", "dbd0cf6fc8b5d3d4c54889a493bd5d12"),
        features.map(_.getID)
      )
      Assert.assertEquals(
        Seq(
          "POINT (-100.23650360107422 23)",
          "POINT (3 -62.22999954223633)",
          "POINT (40.231998443603516 -53.235599517822266)"
        ).map(WKTUtils.read),
        features.map(_.getAttribute("geom"))
      )
      // visibilities from the 'Vis' column must land in feature user data
      Assert.assertEquals(
        Seq(
          Collections.singletonMap("geomesa.feature.visibility", "user"),
          Collections.singletonMap("geomesa.feature.visibility", "user&admin"),
          Collections.singletonMap("geomesa.feature.visibility", "user")
        ),
        features.map(_.getUserData)
      )
    } finally {
      ds.dispose()
    }
  }
  /** Same as testRecordIngest, but every record property is supplied via
   *  NiFi expression language resolved from flow-file attributes. */
  @Test
  def testRecordIngestFlowFileAttributes(): Unit = {
    val catalog = s"${root}RecordIngestAttributes"
    val runner = TestRunners.newTestRunner(new PutGeoMesaAccumuloRecord())
    try {
      dsParams.foreach { case (k, v) => runner.setProperty(k, v) }
      runner.setProperty(AccumuloDataStoreParams.CatalogParam.key, catalog)
      val service = new AvroReader()
      runner.addControllerService("avro-record-reader", service)
      runner.setProperty(service, SchemaAccessUtils.SCHEMA_ACCESS_STRATEGY, "embedded-avro-schema")
      runner.enableControllerService(service)
      runner.setProperty(Properties.RecordReader, "avro-record-reader")
      // expression-language placeholders, resolved per flow file below
      runner.setProperty(Properties.TypeName, "${type-name}")
      runner.setProperty(Properties.FeatureIdCol, "${id-col}")
      runner.setProperty(Properties.GeometryCols, "${geom-cols}")
      runner.setProperty(Properties.GeometrySerializationDefaultWkt, "WKB")
      runner.setProperty(Properties.VisibilitiesCol, "${vis-col}")
      val attributes = new java.util.HashMap[String, String]()
      attributes.put("type-name", "attributes")
      attributes.put("id-col", "__fid__")
      attributes.put("geom-cols", "*geom:Point")
      attributes.put("vis-col", "Vis")
      runner.enqueue(getClass.getClassLoader.getResourceAsStream("example.avro"), attributes)
      runner.run()
      runner.assertTransferCount(Relationships.SuccessRelationship, 1)
      runner.assertTransferCount(Relationships.FailureRelationship, 0)
    } finally {
      runner.shutdown()
    }
    val ds = DataStoreFinder.getDataStore((dsParams + (AccumuloDataStoreParams.CatalogParam.key -> catalog)).asJava)
    Assert.assertNotNull(ds)
    try {
      val sft = ds.getSchema("attributes")
      Assert.assertNotNull(sft)
      Assert.assertEquals(10, sft.getAttributeCount)
      Assert.assertEquals(
        Seq("__version__", "Name", "Age", "LastSeen", "Friends", "Skills", "Lon", "Lat", "geom", "__userdata__"),
        sft.getAttributeDescriptors.asScala.map(_.getLocalName))
      val features = SelfClosingIterator(ds.getFeatureSource("attributes").getFeatures.features()).toList.sortBy(_.getID)
      logger.debug(features.mkString(";"))
      Assert.assertEquals(3, features.length)
      Assert.assertEquals(
        Seq("02c42fc5e8db91d9b8165c6014a23cb0", "6a6e2089854ec6e20015d1c4857b1d9b", "dbd0cf6fc8b5d3d4c54889a493bd5d12"),
        features.map(_.getID)
      )
      Assert.assertEquals(
        Seq(
          "POINT (-100.23650360107422 23)",
          "POINT (3 -62.22999954223633)",
          "POINT (40.231998443603516 -53.235599517822266)"
        ).map(WKTUtils.read),
        features.map(_.getAttribute("geom"))
      )
      Assert.assertEquals(
        Seq(
          Collections.singletonMap("geomesa.feature.visibility", "user"),
          Collections.singletonMap("geomesa.feature.visibility", "user&admin"),
          Collections.singletonMap("geomesa.feature.visibility", "user")
        ),
        features.map(_.getUserData)
      )
    } finally {
      ds.dispose()
    }
  }
// Ingests the example CSV, then re-runs the processor in modify mode to verify that
// existing features are updated in place: first matched by feature id, then matched
// by an arbitrary attribute ("age").
@Test
def testUpdateIngest(): Unit = {
val catalog = s"${root}UpdateIngest"
val runner = TestRunners.newTestRunner(new PutGeoMesaAccumulo())
// Asserts the store holds exactly three features (sorted by id) with the given
// ids, names, dates and WKT geometries.
def checkResults(ids: Seq[String], names: Seq[String], dates: Seq[Date], geoms: Seq[String]): Unit = {
val ds = DataStoreFinder.getDataStore((dsParams + (AccumuloDataStoreParams.CatalogParam.key -> catalog)).asJava)
Assert.assertNotNull(ds)
try {
val sft = ds.getSchema("example")
Assert.assertNotNull(sft)
val features = SelfClosingIterator(ds.getFeatureSource("example").getFeatures.features()).toList.sortBy(_.getID)
logger.debug(features.mkString(";"))
Assert.assertEquals(3, features.length)
Assert.assertEquals(ids, features.map(_.getID))
Assert.assertEquals(names, features.map(_.getAttribute("name")))
Assert.assertEquals(dates, features.map(_.getAttribute("dtg")))
Assert.assertEquals(geoms, features.map(_.getAttribute("geom").toString))
} finally {
ds.dispose()
}
}
val df = new SimpleDateFormat("yyyy-MM-dd")
try {
// configure the processor for the example feature type/converter and ingest the base data
dsParams.foreach { case (k, v) => runner.setProperty(k, v) }
runner.setProperty(AccumuloDataStoreParams.CatalogParam.key, catalog)
runner.setProperty(FeatureTypeProcessor.Properties.SftNameKey, "example")
runner.setProperty(ConvertInputProcessor.Properties.ConverterNameKey, "example-csv")
runner.enqueue(getClass.getClassLoader.getResourceAsStream("example.csv"))
runner.run()
runner.assertTransferCount(Relationships.SuccessRelationship, 1)
runner.assertTransferCount(Relationships.FailureRelationship, 0)
checkResults(
Seq("23623", "26236", "3233"),
Seq("Harry", "Hermione", "Severus"),
Seq("2015-05-06", "2015-06-07", "2015-10-23").map(df.parse),
Seq("POINT (-100.2365 23)", "POINT (40.232 -53.2356)", "POINT (3 -62.23)")
)
// switch to modify mode: the update CSV should overwrite the existing features (same ids)
runner.setProperty(DataStoreIngestProcessor.Properties.WriteMode, DataStoreIngestProcessor.ModifyMode)
runner.enqueue(getClass.getClassLoader.getResourceAsStream("example-update.csv"))
runner.run()
runner.assertTransferCount(Relationships.SuccessRelationship, 2)
runner.assertTransferCount(Relationships.FailureRelationship, 0)
checkResults(
Seq("23623", "26236", "3233"),
Seq("Harry Potter", "Hermione Granger", "Severus Snape"),
Seq("2016-05-06", "2016-06-07", "2016-10-23").map(df.parse),
Seq("POINT (-100.2365 33)", "POINT (40.232 -43.2356)", "POINT (3 -52.23)")
)
// verify update by attribute
runner.setProperty(DataStoreIngestProcessor.Properties.ModifyAttribute, "age")
runner.enqueue(getClass.getClassLoader.getResourceAsStream("example-update-2.csv"))
runner.run()
runner.assertTransferCount(Relationships.SuccessRelationship, 3)
runner.assertTransferCount(Relationships.FailureRelationship, 0)
// note the expected first id changes from 23623 to 23624 after matching on "age"
checkResults(
Seq("23624", "26236", "3233"),
Seq("Harry", "Hermione Granger", "Severus Snape"),
Seq("2016-05-06", "2016-06-07", "2016-10-23").map(df.parse),
Seq("POINT (-100.2365 33)", "POINT (40.232 -43.2356)", "POINT (3 -52.23)")
)
} finally {
runner.shutdown()
}
}
// Verifies that write mode and the modify-match attribute can be driven per-flowfile via
// expression language: the first flowfile appends (no attributes set), the second sets
// geomesa.write.mode=modify and geomesa.update.attribute=name to update in place.
@Test
def testAppendThenUpdateIngest(): Unit = {
val catalog = s"${root}AppendUpdateIngest"
val runner = TestRunners.newTestRunner(new PutGeoMesaAccumulo())
// Asserts the three expected features (fixed ids/names) carry the given ages, dates
// and WKT geometries.
def checkResults(ages: Seq[Int], dates: Seq[Date], geoms: Seq[String]): Unit = {
val ds = DataStoreFinder.getDataStore((dsParams + (AccumuloDataStoreParams.CatalogParam.key -> catalog)).asJava)
Assert.assertNotNull(ds)
try {
val sft = ds.getSchema("example")
Assert.assertNotNull(sft)
val features = SelfClosingIterator(ds.getFeatureSource("example").getFeatures.features()).toList.sortBy(_.getID)
logger.debug(features.mkString(";"))
Assert.assertEquals(3, features.length)
Assert.assertEquals(Seq("23623", "26236", "3233"), features.map(_.getID))
Assert.assertEquals(Seq("Harry", "Hermione", "Severus"), features.map(_.getAttribute("name")))
Assert.assertEquals(ages, features.map(_.getAttribute("age")))
Assert.assertEquals(dates, features.map(_.getAttribute("dtg")))
Assert.assertEquals(geoms, features.map(_.getAttribute("geom").toString))
} finally {
ds.dispose()
}
}
val df = new SimpleDateFormat("yyyy-MM-dd")
try {
dsParams.foreach { case (k, v) => runner.setProperty(k, v) }
runner.setProperty(AccumuloDataStoreParams.CatalogParam.key, catalog)
runner.setProperty(FeatureTypeProcessor.Properties.SftNameKey, "example")
runner.setProperty(ConvertInputProcessor.Properties.ConverterNameKey, "example-csv")
// both properties resolve from flowfile attributes at runtime
runner.setProperty(DataStoreIngestProcessor.Properties.WriteMode, "${geomesa.write.mode}")
runner.setProperty(DataStoreIngestProcessor.Properties.ModifyAttribute, "${geomesa.update.attribute}")
// first flowfile has no attributes set: plain append of the base data
runner.enqueue(getClass.getClassLoader.getResourceAsStream("example.csv"))
runner.run()
runner.assertTransferCount(Relationships.SuccessRelationship, 1)
runner.assertTransferCount(Relationships.FailureRelationship, 0)
checkResults(
Seq(20, 25, 30),
Seq("2015-05-06", "2015-06-07", "2015-10-23").map(df.parse),
Seq("POINT (-100.2365 23)", "POINT (40.232 -53.2356)", "POINT (3 -62.23)")
)
// second flowfile requests modify mode, matching existing features on "name"
runner.enqueue(
getClass.getClassLoader.getResourceAsStream("example-update-3.csv"),
Map("geomesa.write.mode" -> "modify", "geomesa.update.attribute" -> "name").asJava)
runner.run()
runner.assertTransferCount(Relationships.SuccessRelationship, 2)
runner.assertTransferCount(Relationships.FailureRelationship, 0)
checkResults(
Seq(21, 26, 31),
Seq("2016-05-06", "2016-06-07", "2016-10-23").map(df.parse),
Seq("POINT (-100.2365 24)", "POINT (40.232 -52.2356)", "POINT (3 -61.23)")
)
} finally {
runner.shutdown()
}
}
// Exercises UpdateGeoMesaAccumuloRecord: features are pre-ingested directly through the
// data store, then JSON records are pushed through the processor to update them, first
// looked up by feature id and then by an arbitrary attribute ("age").
@Test
def testUpdateRecord(): Unit = {
val catalog = s"${root}UpdateRecord"
val runner = TestRunners.newTestRunner(new UpdateGeoMesaAccumuloRecord())
val df = new SimpleDateFormat("yyyy-MM-dd")
try {
val params = dsParams + (AccumuloDataStoreParams.CatalogParam.key -> catalog)
WithClose(DataStoreFinder.getDataStore(params.asJava)) { ds =>
// create the sft and ingest 3 features
val sft = SimpleFeatureTypeLoader.sftForName("example").get
ds.createSchema(sft)
val converterConf = ConverterConfigLoader.configForName("example-csv")
val features = WithClose(SimpleFeatureConverter(sft, converterConf.get)) { converter =>
WithClose(converter.process(getClass.getClassLoader.getResourceAsStream("example.csv")))(_.toList)
}
WithClose(ds.getFeatureWriterAppend("example", Transaction.AUTO_COMMIT)) { writer =>
features.foreach(FeatureUtils.write(writer, _))
}
// Asserts the store holds exactly three features (sorted by id) with the given
// ids, names, dates and WKT geometries.
def checkResults(ids: Seq[String], names: Seq[String], dates: Seq[Date], geoms: Seq[String]): Unit = {
val results = SelfClosingIterator(ds.getFeatureSource("example").getFeatures.features()).toList.sortBy(_.getID)
logger.debug(results.mkString(";"))
Assert.assertEquals(3, results.length)
Assert.assertEquals(ids, results.map(_.getID))
Assert.assertEquals(names, results.map(_.getAttribute("name")))
Assert.assertEquals(dates, results.map(_.getAttribute("dtg")))
Assert.assertEquals(geoms, results.map(_.getAttribute("geom").toString))
}
// verify initial write
checkResults(
Seq("23623", "26236", "3233"),
Seq("Harry", "Hermione", "Severus"),
Seq("2015-05-06", "2015-06-07", "2015-10-23").map(df.parse),
Seq("POINT (-100.2365 23)", "POINT (40.232 -53.2356)", "POINT (3 -62.23)")
)
// configure the processor to use json
val service = new JsonTreeReader()
runner.addControllerService("json-record-reader", service)
runner.setProperty(service, SchemaAccessUtils.SCHEMA_ACCESS_STRATEGY, SchemaInferenceUtil.INFER_SCHEMA)
runner.enableControllerService(service)
params.foreach { case (k, v) => runner.setProperty(k, v) }
runner.setProperty(Properties.RecordReader, "json-record-reader")
// type name, id column and lookup column all resolve from flowfile attributes
runner.setProperty(Properties.TypeName, "${type-name}")
runner.setProperty(Properties.FeatureIdCol, "${id-col}")
runner.setProperty(RecordUpdateProcessor.Properties.LookupCol, "${id-col}")
// update one name
runner.enqueue(
new ByteArrayInputStream("""{"fid":"23623","name":"Harry Potter"}""".getBytes(StandardCharsets.UTF_8)),
Map("type-name"-> "example", "id-col" -> "fid").asJava)
runner.run()
runner.assertTransferCount(Relationships.SuccessRelationship, 1)
runner.assertTransferCount(Relationships.FailureRelationship, 0)
checkResults(
Seq("23623", "26236", "3233"),
Seq("Harry Potter", "Hermione", "Severus"),
Seq("2015-05-06", "2015-06-07", "2015-10-23").map(df.parse),
Seq("POINT (-100.2365 23)", "POINT (40.232 -53.2356)", "POINT (3 -62.23)")
)
// update the name and feature id based on matching on age
runner.setProperty(RecordUpdateProcessor.Properties.LookupCol, "${match-col}")
runner.enqueue(
new ByteArrayInputStream("""{"fid":"26237","name":"Hermione Granger","age":25}""".getBytes(StandardCharsets.UTF_8)),
Map("type-name"-> "example", "id-col" -> "fid", "match-col" -> "age").asJava)
runner.run()
runner.assertTransferCount(Relationships.SuccessRelationship, 2)
runner.assertTransferCount(Relationships.FailureRelationship, 0)
// note the second feature id changes from 26236 to 26237 via the update record
checkResults(
Seq("23623", "26237", "3233"),
Seq("Harry Potter", "Hermione Granger", "Severus"),
Seq("2015-05-06", "2015-06-07", "2015-10-23").map(df.parse),
Seq("POINT (-100.2365 23)", "POINT (40.232 -53.2356)", "POINT (3 -62.23)")
)
}
} finally {
runner.shutdown()
}
}
@Test
def testIngestCounts(): Unit = {
  // Ingest the same 3-record CSV several times and verify that each flowfile reports
  // per-flowfile success/failure counts, and that re-ingest does not duplicate features.
  val catalog = s"${root}Counts"
  val runner = TestRunners.newTestRunner(new PutGeoMesaAccumulo())
  val batches = 3
  try {
    dsParams.foreach { case (k, v) => runner.setProperty(k, v) }
    runner.setProperty(AccumuloDataStoreParams.CatalogParam.key, catalog)
    runner.setProperty(FeatureTypeProcessor.Properties.SftNameKey, "example")
    runner.setProperty(ConvertInputProcessor.Properties.ConverterNameKey, "example-csv")
    // queue up the same input several times before triggering the processor
    (0 until batches).foreach { _ =>
      runner.enqueue(getClass.getClassLoader.getResourceAsStream("example.csv"))
    }
    runner.run()
    runner.assertTransferCount(Relationships.SuccessRelationship, batches)
    runner.assertTransferCount(Relationships.FailureRelationship, 0)
    // every successful flowfile should report 3 ingested features and 0 failures
    (0 until batches).foreach { i =>
      val output = runner.getFlowFilesForRelationship(Relationships.SuccessRelationship).get(i)
      output.assertAttributeEquals(org.geomesa.nifi.datastore.processor.Attributes.IngestSuccessCount, "3")
      output.assertAttributeEquals(org.geomesa.nifi.datastore.processor.Attributes.IngestFailureCount, "0")
    }
  } finally {
    runner.shutdown()
  }
  // the repeated ingests overwrite by feature id, so only 3 features should exist
  val ds = DataStoreFinder.getDataStore((dsParams + (AccumuloDataStoreParams.CatalogParam.key -> catalog)).asJava)
  Assert.assertNotNull(ds)
  try {
    val sft = ds.getSchema("example")
    Assert.assertNotNull(sft)
    val features = SelfClosingIterator(ds.getFeatureSource("example").getFeatures.features()).toList
    logger.debug(features.mkString(";"))
    Assert.assertEquals(3, features.length)
  } finally {
    ds.dispose()
  }
}
}
| geomesa/geomesa-nifi | geomesa-accumulo-bundle/geomesa-accumulo-processors/src/test/scala/org/geomesa/nifi/processors/accumulo/PutGeoMesaAccumuloTest.scala | Scala | apache-2.0 | 35,243 |
/*
* Copyright 2017 Georgi Krastev
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink
package api.scala.derived.typeutils
import api.common.ExecutionConfig
import api.common.typeinfo.TypeInformation
import api.common.typeutils.TypeSerializer
import scala.reflect.ClassTag
/**
 * `TypeInformation` for type `A` based on an injection into type `B`.
 *
 * All structural metadata queries delegate to `underlying` (the `TypeInformation`
 * of `B`); serialization converts `A` values through the injection `inj`.
 *
 * NOTE(review): case-class equals/hashCode only cover the first parameter list
 * (`underlying`), so two instances wrapping the same underlying info but different
 * injections compare equal — confirm this is intended for type comparisons.
 */
case class InjectTypeInfo[A, B](underlying: TypeInformation[B])
(inj: Inject[A, B])(implicit tag: ClassTag[A]) extends TypeInformation[A] {
// the injected type shares all structural properties of the underlying type
def isBasicType: Boolean =
underlying.isBasicType
def isKeyType: Boolean =
underlying.isKeyType
def isTupleType: Boolean =
underlying.isTupleType
def getArity: Int =
underlying.getArity
def getTotalFields: Int =
underlying.getTotalFields
// runtime class of `A`, recovered from the implicit ClassTag
def getTypeClass: Class[A] =
tag.runtimeClass.asInstanceOf[Class[A]]
// serializer for `A`: wraps the underlying `B` serializer with the injection
def createSerializer(config: ExecutionConfig): TypeSerializer[A] =
InjectSerializer(underlying.createSerializer(config))(inj)
override def toString: String =
getTypeClass.getTypeName
}
| joroKr21/flink-shapeless | src/main/scala/org/apache/flink/api/scala/derived/typeutils/InjectTypeInfo.scala | Scala | apache-2.0 | 1,579 |
import com.kolor.docker.api._
import com.kolor.docker.api.entities._
import scala.concurrent.duration.Duration
import org.specs2.specification._
import java.util.concurrent.TimeUnit._
import scala.concurrent._
import org.specs2.Specification
import org.specs2.execute.AsResult
import play.api.libs.json._
import org.slf4j.LoggerFactory
import play.api.libs.iteratee.Iteratee
import scala.concurrent.ExecutionContext.Implicits.global
// Shared specs2 fixtures for the reactive-docker integration tests: AroundOutside
// environments that pull images and create/start containers against a local Docker
// daemon (localhost:2375), and tear them down after each example.
package object test {
// package-level logger for fixture setup/teardown messages
private val log = LoggerFactory.getLogger(getClass())
// Simple specs2 scope providing an implicit docker client and a 60s await timeout.
class DockerContext extends Scope {
implicit lazy val docker: DockerClient = Docker("localhost", 2375)
implicit val timeout = Duration.create(60, SECONDS)
}
// Image fixture: the command baked into test containers plus the repository tag.
case class Image(imageCmd: Seq[String], imageTag: RepositoryTag) {
def imageName = imageTag.repo
}
// Container fixture: id and name of the created container plus its image tag and command.
case class Container(containerId: ContainerId, containerName: String, image: RepositoryTag, imageCmd: Seq[String])
// AroundOutside environment carrying its own docker client and await timeout.
trait DockerEnv[T] extends AroundOutside[T] {
implicit val docker = Docker("localhost", 2375)
implicit val timeout = Duration.create(60, SECONDS)
}
/**
* provides a spec2 Around context with a basic busybox image
*/
def image:DockerEnv[Image] = new DockerEnv[Image] {
val cmd = Seq("/bin/sh", "-c", "while true; do echo hello world; sleep 1; done")
val env = new Image(cmd, RepositoryTag.create("busybox", Some("latest")))
// create a context
def around[T : AsResult](t: =>T) = {
try {
log.info(s"prepare image context - pulling busybox:latest ...")
// pull the image, draining the progress stream, and block until complete
Await.result(docker.imageCreateIteratee(env.imageTag)(Iteratee.ignore).flatMap(_.run), timeout)
AsResult(t)
} finally {
//Await.result(docker.imageRemove(env.imageName), timeout)
log.info(s"shutdown & cleaned up image context")
}
}
// prepare a valid ImageEnv
def outside: Image = env
}
/**
* provides a spec2 Around context with a created but nut running container
*/
def container:DockerEnv[Container] = new DockerEnv[Container] {
// eagerly pull the image and create (but do not start) the container
val env = {
val cmd = Seq("/bin/sh", "-c", "while true; do echo hello world; sleep 1; done")
val containerName = "reactive-docker"
val imageTag = RepositoryTag.create("busybox", Some("latest"))
val cfg = ContainerConfig("busybox", cmd)
log.info(s"prepare container context - pulling busybox:latest ...")
Await.result(docker.imageCreateIteratee(imageTag)(Iteratee.ignore).flatMap(_.run), timeout)
implicit val fmt:Format[ContainerConfiguration] = com.kolor.docker.api.json.Formats.containerConfigFmt
log.info(s"prepare container context - creating container $containerName (cmd: ${cmd.mkString})")
val containerId = Await.result(docker.containerCreate("busybox", cfg, Some(containerName)), timeout)._1
log.info(s"prepare container context - container ready with $containerId")
new Container(containerId, containerName, imageTag, cmd)
}
// create a context
def around[T : AsResult](t: =>T) = {
try {
AsResult(t)
} finally {
// best-effort cleanup: stop/remove failures are deliberately swallowed
try {
Await.result(docker.containerStop(env.containerId, 10), timeout)
Await.result(docker.containerRemove(env.containerId, true), timeout)
//Await.result(docker.imageRemove("busybox"), timeout)
} catch {
case t:Throwable => // ignore
} finally {
log.info(s"shutdown & cleaned up container context")
}
}
}
// prepare a valid Container env
def outside: Container = env
}
/**
* provides a spec2 Around context with a created but not running ubuntu container
*/
def ubuntu:DockerEnv[Container] = new DockerEnv[Container] {
// eagerly pull ubuntu:latest and create (but do not start) the container
val env = {
val cmd = Seq("/bin/bash")
val containerName = "reactive-docker"
val imageTag = RepositoryTag.create("ubuntu", Some("latest"))
val cfg = ContainerConfiguration(image=Some(imageTag.repo), cmd=Some(cmd), openStdin=Some(true) )
log.info(s"prepare container context - pulling ubuntu:latest ...")
Await.result(docker.imageCreateIteratee(imageTag)(Iteratee.ignore).flatMap(_.run), timeout)
implicit val fmt:Format[ContainerConfiguration] = com.kolor.docker.api.json.Formats.containerConfigFmt
log.info(s"prepare container context - creating container $containerName (cmd: ${cmd.mkString})")
val containerId = Await.result(docker.containerCreate("ubuntu", cfg, Some(containerName)), timeout)._1
log.info(s"prepare container context - container ready with $containerId")
new Container(containerId, containerName, imageTag, cmd)
}
// create a context
def around[T : AsResult](t: =>T) = {
try {
AsResult(t)
} finally {
// best-effort cleanup: stop/remove failures are deliberately swallowed
try {
Await.result(docker.containerStop(env.containerId, 10), timeout)
Await.result(docker.containerRemove(env.containerId, true), timeout)
// Await.result(docker.imageRemove("busybox"), timeout)
} catch {
case t:Throwable => // ignore
} finally {
log.info(s"shutdown & cleaned up container context")
}
}
}
// prepare a valid Container env
def outside: Container = env
}
/**
* provides a spec2 Around context with a running container
*/
def runningContainer:DockerEnv[Container] = new DockerEnv[Container] {
// eagerly pull the image, create the container and start it
val env = {
val cmd = Seq("/bin/sh", "-c", "while true; do echo hello world && echo hello stderr >&2; sleep 1; done")
val containerName = "reactive-docker"
val imageTag = RepositoryTag.create("busybox", Some("latest"))
val cfg = ContainerConfig("busybox", cmd)
log.info(s"prepare runningContainer context - pulling busybox:latest ...")
Await.result(docker.imageCreateIteratee(imageTag)(Iteratee.ignore).flatMap(_.run), timeout)
implicit val fmt:Format[ContainerConfiguration] = com.kolor.docker.api.json.Formats.containerConfigFmt
log.info(s"prepare runningContainer context - creating container $containerName (cmd: ${cmd.mkString})")
val containerId = Await.result(docker.containerCreate("busybox", cfg, Some(containerName)), timeout)._1
log.info(s"prepare runningContainer context - container ready with $containerId, starting ...")
implicit val hostFmt: Format[ContainerHostConfiguration] = com.kolor.docker.api.json.Formats.containerHostConfigFmt
Await.result(docker.containerStart(containerId), timeout)
log.info(s"prepare runningContainer context - container $containerId running")
new Container(containerId, containerName, imageTag, cmd)
}
// create a context
def around[T : AsResult](t: =>T) = {
try {
AsResult(t)
} finally {
// best-effort cleanup: stop/remove failures are deliberately swallowed
try {
Await.result(docker.containerStop(env.containerId, 10), timeout)
Await.result(docker.containerRemove(env.containerId, true), timeout)
//Await.result(docker.imageRemove("busybox"), timeout)
} catch {
case t:Throwable => // ignore
} finally {
log.info(s"shutdown & cleaned up runningContainer context")
}
}
}
// prepare a valid Container env
def outside: Container = env
}
/**
* provides a spec2 Around context with a complex / full blown running container
*/
// NOTE(review): currently identical to runningContainer apart from log messages;
// presumably intended to grow a richer host configuration.
def complexContainer:DockerEnv[Container] = new DockerEnv[Container] {
val env = {
val cmd = Seq("/bin/sh", "-c", "while true; do echo hello world && echo hello stderr >&2; sleep 1; done")
val containerName = "reactive-docker"
val imageTag = RepositoryTag.create("busybox", Some("latest"))
val cfg = ContainerConfig("busybox", cmd)
log.info(s"prepare runningComplexContainer context - pulling busybox:latest ...")
Await.result(docker.imageCreateIteratee(imageTag)(Iteratee.ignore).flatMap(_.run), timeout)
implicit val fmt:Format[ContainerConfiguration] = com.kolor.docker.api.json.Formats.containerConfigFmt
log.info(s"prepare runningComplexContainer context - creating container $containerName (cmd: ${cmd.mkString}) (cfg: ${cfg})")
val containerId = Await.result(docker.containerCreate("busybox", cfg, Some(containerName)), timeout)._1
log.info(s"prepare runningComplexContainer context - creating container $containerName (cmd: ${cmd.mkString})")
implicit val hostFmt: Format[ContainerHostConfiguration] = com.kolor.docker.api.json.Formats.containerHostConfigFmt
Await.result(docker.containerStart(containerId), timeout)
log.info(s"prepare runningComplexContainer context - container $containerId running")
new Container(containerId, containerName, imageTag, cmd)
}
// create a context
def around[T : AsResult](t: =>T) = {
try {
AsResult(t)
} finally {
// best-effort cleanup: stop/remove failures are deliberately swallowed
try {
Await.result(docker.containerStop(env.containerId, 10), timeout)
Await.result(docker.containerRemove(env.containerId, true), timeout)
//Await.result(docker.imageRemove("busybox"), timeout)
} catch {
case t:Throwable => // ignore
} finally {
log.info(s"shutdown & cleaned up runningComplexContainer context")
}
}
}
// prepare a valid Container env
def outside: Container = env
}
}
package geostat
import scala.math._
object MapPoint extends WGS84 {

  val MIN_LATITUDE = -90.0
  val MAX_LATITUDE = +90.0
  val MIN_LONGITUDE = -180
  val MAX_LONGITUDE = +180

  // NOTE(review): currently unused within this file; kept for source compatibility.
  private val MIN_DISTANCE = 1e-1

  /**
   * Convert a geodetic point to a 3D Cartesian tuple (x, y, z, value) on the sphere
   * of radius R, with x/y/z in meters.
   *
   * @param pt the map point (degrees latitude/longitude plus attached value)
   * @return (x, y, z, value)
   */
  def geodesic2cart(pt: MapPoint): (Double, Double, Double, Double) = {
    val lat = pt.latitude.toRadians
    val lon = pt.longitude.toRadians
    val slat = sin(lat)
    val clat = cos(lat)
    val slong = sin(lon)
    val clong = cos(lon)
    (R * clat * clong, R * clat * slong, R * slat, pt.value)
  }

  /**
   * Convert a 3D Cartesian tuple (x, y, z, value) back to a geodetic point.
   *
   * Fixes: latitude is computed with atan2(z, sqrt(x^2 + y^2)) instead of asin(z / R),
   * which is exact for vectors of any length (e.g. chord midpoints from `midpoint`) and
   * cannot yield NaN when rounding pushes |z / R| above 1; the lossy `.toFloat` cast on
   * the latitude is removed.
   *
   * @param cart (x, y, z, value) tuple
   * @return the geodetic point carrying the same value
   */
  def cart2geodesic(cart: (Double, Double, Double, Double)) =
    new MapPoint(atan2(cart._3, hypot(cart._1, cart._2)).toDegrees, atan2(cart._2, cart._1).toDegrees, cart._4)

  // degrees -> non-negative micro-degree integers (inputs to the Z-order interleave)
  private def latitude2int(latitude: Double) = floor((latitude - MapPoint.MIN_LATITUDE) * 1e6f).toInt
  private def longitude2int(longitude: Double) = floor((longitude - MapPoint.MIN_LONGITUDE) * 1e6f).toInt

  /** Interleave the low 31 bits of `a` (even positions) and `b` (odd positions) into a Z-order (Morton) key. */
  private def zorder(a: Int, b: Int): Long = {
    var x: Long = a & 0x7fffffffL
    x = (x ^ (x << 32)) & 0x00000000ffffffffL
    x = (x ^ (x << 16)) & 0x0000ffff0000ffffL
    x = (x ^ (x << 8)) & 0x00ff00ff00ff00ffL
    x = (x ^ (x << 4)) & 0x0f0f0f0f0f0f0f0fL
    x = (x ^ (x << 2)) & 0x3333333333333333L
    x = (x ^ (x << 1)) & 0x5555555555555555L
    var y: Long = b & 0x7fffffffL
    y = (y ^ (y << 32)) & 0x00000000ffffffffL
    y = (y ^ (y << 16)) & 0x0000ffff0000ffffL
    y = (y ^ (y << 8)) & 0x00ff00ff00ff00ffL
    y = (y ^ (y << 4)) & 0x0f0f0f0f0f0f0f0fL
    y = (y ^ (y << 2)) & 0x3333333333333333L
    y = (y ^ (y << 1)) & 0x5555555555555555L
    x | (y << 1)
  }
}
/**
 * A geodetic point (latitude/longitude in degrees on the WGS84-derived sphere of
 * radius R) carrying an optional scalar value. Points are totally ordered by their
 * Z-order (Morton) key, computed from the micro-degree coordinates.
 *
 * @param latitude latitude in degrees, in [-90, +90]
 * @param longitude longitude in degrees, in [-180, +180]
 * @param value scalar value attached to the point (NaN when absent)
 */
@SerialVersionUID(123L)
class MapPoint(val latitude: Double = 0.0, val longitude: Double = 0.0, var value: Double = Double.NaN)
  extends WGS84 with Serializable with Ordered[MapPoint] {

  import MapPoint._

  require(latitude >= MIN_LATITUDE && latitude <= MAX_LATITUDE)
  require(longitude >= MIN_LONGITUDE && longitude <= MAX_LONGITUDE)

  // Z-order (Morton) interleave of the micro-degree coordinates; defines the ordering.
  val key: Long = zorder(latitude2int(latitude), longitude2int(longitude)) // Z-order key

  // haversine: hav(theta) = sin^2(theta / 2)
  private def hav(theta: Double) = { val h = sin(0.5 * theta); h * h }

  // clamp to [-1, 1] before asin/acos so rounding noise cannot produce NaN
  private def clamp(x: Double) = max(-1.0, min(1.0, x))

  /**
   * MapPoint constructor from a Cartesian tuple (x, y, z, value).
   *
   * Fix: latitude is derived with atan2(z, sqrt(x^2 + y^2)), exact for a vector of any
   * length; the previous asin(z / 6371008.77141) duplicated the earth radius as a magic
   * constant and was wrong for vectors not lying exactly on that sphere.
   *
   * @param cart 3D Cartesian coordinate and value tuple
   */
  def this(cart: (Double, Double, Double, Double)) =
    this(atan2(cart._3, hypot(cart._1, cart._2)).toDegrees, atan2(cart._2, cart._1).toDegrees, cart._4)

  /**
   * Haversine distance in meters between this point and `pt`.
   *
   * Fix: applies the full formula 2 * R * asin(sqrt(h)); the previous 2 * R * sqrt(h)
   * omitted the asin and therefore underestimated large separations.
   *
   * @param pt the other point
   * @return distance in meters
   */
  def haversineDistance(pt: MapPoint) = {
    val lat1 = latitude.toRadians
    val lat2 = pt.latitude.toRadians
    val dlat = lat2 - lat1
    val dlong = pt.longitude.toRadians - longitude.toRadians
    val h = hav(dlat) + cos(lat1) * cos(lat2) * hav(dlong)
    2.0 * R * asin(sqrt(clamp(h)))
  }

  /**
   * Great-circle distance in meters (spherical law of cosines), with the acos argument
   * clamped to avoid NaN from floating-point rounding.
   *
   * @param pt the other point
   * @return distance in meters
   */
  def greatCircleDistance(pt: MapPoint) = {
    val lat1 = latitude.toRadians
    val lat2 = pt.latitude.toRadians
    val dlon = pt.longitude.toRadians - longitude.toRadians
    R * acos(clamp(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(dlon)))
  }

  /**
   * Destination point reached by travelling `distance` meters from this point along a
   * constant initial bearing.
   *
   * @param distance distance expressed in meters
   * @param bearing bearing expressed in degrees
   */
  def destination(distance: Double, bearing: Double): MapPoint = {
    val lat1 = latitude.toRadians
    val lon1 = longitude.toRadians
    val brng = bearing.toRadians
    val delta = distance / R // angular distance
    val lat2 = asin(sin(lat1) * cos(delta) + cos(lat1) * sin(delta) * cos(brng))
    val lon2 = lon1 + atan2(sin(brng) * sin(delta) * cos(lat1), cos(delta) - sin(lat1) * sin(lat2))
    new MapPoint(lat2.toDegrees, lon2.toDegrees)
  }

  /**
   * Initial bearing (forward azimuth) from this point to `pt` along the great circle,
   * in degrees normalized to [0, 360).
   *
   * @param pt end point
   * @return bearing in degrees
   */
  def bearing(pt: MapPoint) = {
    val lat1 = latitude.toRadians
    val lat2 = pt.latitude.toRadians
    val dlon = pt.longitude.toRadians - longitude.toRadians
    val y = sin(dlon) * cos(lat2)
    val x = cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(dlon)
    val ang = atan2(y, x).toDegrees
    if (ang < 0.0) 360.0 + ang else ang
  }

  /**
   * This point as a Cartesian tuple (x, y, z, value) on the sphere of radius R.
   */
  def geodetic2cart(): (Double, Double, Double, Double) = {
    val lat = latitude.toRadians
    val lon = longitude.toRadians
    (R * cos(lat) * cos(lon), R * cos(lat) * sin(lon), R * sin(lat), value)
  }

  /**
   * Half-way point between the two points: the chord midpoint projected back to the
   * sphere via MapPoint.cart2geodesic; the attached values are averaged.
   *
   * @param pt the other point
   * @return midpoint
   */
  def midpoint(pt: MapPoint): MapPoint = {
    val p1 = MapPoint.geodesic2cart(this)
    val p2 = MapPoint.geodesic2cart(pt)
    MapPoint.cart2geodesic((0.5 * (p1._1 + p2._1), 0.5 * (p1._2 + p2._2), 0.5 * (p1._3 + p2._3), 0.5 * (p1._4 + p2._4)))
  }

  /**
   * Cross-track distance in meters: the signed distance from `pt` to the great-circle
   * path from this point towards `ptdest`. (Method name keeps the historical spelling
   * for source compatibility.)
   *
   * Fix: uses the standard formula asin(sin(d13 / R) * sin(theta13 - theta12)) * R with
   * bearings converted to radians; the previous code fed meter distances and degree
   * bearings directly into sin and never rescaled by R.
   *
   * @param ptdest destination point defining the track
   * @param pt third point
   * @return cross-track distance in meters
   */
  def crossTrackDistence(ptdest: MapPoint, pt: MapPoint) = {
    val delta13 = greatCircleDistance(pt) / R // angular distance to the third point
    val dTheta = (bearing(pt) - bearing(ptdest)).toRadians
    R * asin(clamp(sin(delta13) * sin(dTheta)))
  }

  /**
   * Along-track distance in meters: distance from this (start) point to the closest
   * point on the great-circle path towards `ptdest`, for the third point `pt`.
   *
   * Fix: works on angular distances (d / R) and rescales by R, matching the corrected
   * cross-track computation above.
   *
   * @param ptdest destination point defining the track
   * @param pt third point
   * @return along-track distance in meters
   */
  def alongTrackDistance(ptdest: MapPoint, pt: MapPoint) = {
    val delta13 = greatCircleDistance(pt) / R
    val dTheta = (bearing(pt) - bearing(ptdest)).toRadians
    val xt = asin(clamp(sin(delta13) * sin(dTheta))) // angular cross-track distance
    R * acos(clamp(cos(delta13) / cos(xt)))
  }

  def compare(that: MapPoint) = this.key.compare(that.key)

  /** GeoJSON Feature representation: _id is the Z-order key; properties.value when present. */
  override def toString() = {
    val builder = StringBuilder.newBuilder
    builder.append("{\"type\":\"Feature\",\"_id\":")
    builder.append(key)
    builder.append(",\"geometry\":{\"type\":\"Point\",\"coordinates\":[")
    builder.append(longitude)
    builder.append(",")
    builder.append(latitude)
    builder.append("]},\"properties\":")
    if (!value.isNaN()) {
      builder.append("{\"value\":")
      builder.append(value)
      builder.append("}")
    } else builder.append("null")
    builder.append("}")
    builder.toString()
  }

  /**
   * Central angle in radians between this point and `that` (i.e. the distance on the
   * unit sphere).
   *
   * Fix: applies acos to the normalized dot product (with clamping); the previous
   * implementation returned the cosine of the angle instead of the angle itself, which
   * also broke `interpolate`.
   *
   * @param that the other map point
   * @return the angle in radians, in [0, pi]
   */
  def angleBetween(that: MapPoint): Double = {
    val p1 = MapPoint.geodesic2cart(this)
    val p2 = MapPoint.geodesic2cart(that)
    acos(clamp((p1._1 * p2._1 + p1._2 * p2._2 + p1._3 * p2._3) / (MapPoint.R * MapPoint.R)))
  }

  /**
   * Spherical linear interpolation (slerp) between this point and `that`; the attached
   * value is interpolated linearly.
   *
   * @param that the point toward which to travel
   * @param fraction the fraction of the distance to travel, in [0, 1]
   * @return the interpolated point
   */
  def interpolate(that: MapPoint, fraction: Double): MapPoint = {
    val angle = angleBetween(that)
    val int = (1 - fraction) * value + fraction * that.value
    if (angle < 1e-12) {
      // coincident points: nothing to interpolate spatially (also avoids sin(angle) = 0)
      new MapPoint(latitude, longitude, int)
    } else {
      val fromLat = latitude.toRadians
      val fromLng = longitude.toRadians
      val toLat = that.latitude.toRadians
      val toLng = that.longitude.toRadians
      val cosFromLat = cos(fromLat)
      val cosToLat = cos(toLat)
      // spherical interpolation coefficients
      val sinAngle = sin(angle)
      val a = sin((1 - fraction) * angle) / sinAngle
      val b = sin(fraction * angle) / sinAngle
      // convert from polar to vector, interpolate, then convert back
      val x = a * cosFromLat * cos(fromLng) + b * cosToLat * cos(toLng)
      val y = a * cosFromLat * sin(fromLng) + b * cosToLat * sin(toLng)
      val z = a * sin(fromLat) + b * sin(toLat)
      new MapPoint(atan2(z, sqrt(x * x + y * y)).toDegrees, atan2(y, x).toDegrees, int)
    }
  }
}
| alessandroadamo/geostat | src/main/scala/geostat/MapPoint.scala | Scala | lgpl-3.0 | 9,277 |
package scife.enumeration
package combinators
/**
 * Pair (product) combinator: an `Enum` of `(T, U)` pairs drawn from the two
 * component enumerations `left` and `right`.
 */
trait Product[T, U] extends Enum[(T, U)] {
// component enumerations supplied by the concrete combinator
val left: Enum[T]
val right: Enum[U]
// the number of pairs is the product of the component sizes
override def size = left.size * right.size
}
| kaptoxic/SciFe | src/main/scala/scife/enumeration/combinators/Product.scala | Scala | gpl-2.0 | 181 |
package com.giyeok.jparser.parsergen.deprecated.nocond.codegen
import com.giyeok.jparser.parsergen.deprecated.nocond.SimpleParser
// (Translated from the original Korean comments:)
// Instead of parsing with a stack (or list) the way SimpleParser does, the nodes produced
// while building the SimpleParser can be managed as a DAG, which makes it possible to parse
// in a way that matches the actual graph shape. A parser that parses this way is called a
// GeneralParser. Since a GeneralParser uses the SimpleParser's nodes as-is, it is
// constructed from a SimpleParser.
class GeneralParserJavaGen(val parser: SimpleParser) {
// NOTE(review): code generation for the DAG-based GeneralParser is not implemented yet.
}
| Joonsoo/moon-parser | fast/src/main/scala/com/giyeok/jparser/parsergen/deprecated/nocond/codegen/GeneralParserJavaGen.scala | Scala | mit | 611 |
package filters
import javax.inject.Inject
import akka.stream.Materializer
import play.api.mvc.{ Result, RequestHeader, Filter }
import scala.concurrent.Future
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import org.log4s._
/**
 * Play filter that logs each incoming request header at DEBUG level and then passes
 * the request down the filter chain unchanged; the response is not inspected.
 */
class AccessLogFilter @Inject() (implicit val mat: Materializer) extends Filter {
// log4s logger for this filter
private[this] val logger = getLogger
def apply(nextFilter: RequestHeader => Future[Result])(requestHeader: RequestHeader): Future[Result] = {
// log the raw request header (no-op unless DEBUG logging is enabled)
logger.debug(requestHeader.toString())
nextFilter(requestHeader)
}
}
| vetafi/vetafi-web | app/filters/AccessLogFilter.scala | Scala | apache-2.0 | 557 |
import scala.tools.nsc.Settings
import scala.tools.partest.ReplTest
// REPL regression test: macro definitions must work in the REPL without being expanded
// at definition time.
object Test extends ReplTest {
// force object-based REPL wrapping; see the inline note below
override def transformSettings(s: Settings) = {
s.Yreplclassbased.value = false // macros are object-based only
s
}
// the macro system requires the experimental macros language feature
override def extraSettings = "-language:experimental.macros"
// REPL transcript: blackbox and whitebox macro impls (unimplemented) bound to macro defs
def code = """
|def bar1(c: scala.reflect.macros.blackbox.Context) = ???
|def foo1: Nothing = macro bar1
|def bar2(c: scala.reflect.macros.whitebox.Context) = ???
|def foo2: Nothing = macro bar2
|""".stripMargin
}
| scala/scala | test/files/run/macro-repl-dontexpand.scala | Scala | apache-2.0 | 530 |
package dto
import java.util.{Date, UUID}
/**
 * Transport/view object describing a single process run and its tasks.
 *
 * @param id unique id of the process run
 * @param name process name
 * @param status display string for the run status
 * @param success whether the run completed successfully
 * @param tasks DTOs for the tasks belonging to this run
 * @param startedAt when the run started
 * @param endedAt when the run ended, if it has finished
 * @param durationStr the run duration rendered as a display string
 * @param graphUrl optional URL of a rendered graph for this run, if available
 */
case class ProcessDTO(id: UUID,
name: String,
status: String,
success: Boolean,
tasks: Seq[TaskDTO],
startedAt: Date,
endedAt: Option[Date],
durationStr: String,
graphUrl: Option[String])
| gilt/sundial | app/dto/ProcessDTO.scala | Scala | mit | 408 |
package utils.helpers
import play.api.Logger
import scala.concurrent.duration.DurationInt
import uk.gov.dvla.vehicles.presentation.{common => VPC}
import VPC.ConfigProperties.booleanProp
import VPC.ConfigProperties.getOptionalProperty
import VPC.ConfigProperties.getProperty
import VPC.ConfigProperties.getStringListProperty
import VPC.ConfigProperties.getIntListProperty
import VPC.ConfigProperties.intProp
import VPC.ConfigProperties.longProp
import VPC.ConfigProperties.stringProp
import VPC.services.SEND.EmailConfiguration
import VPC.webserviceclients.emailservice.From
/**
 * Property-file backed implementation of [[Config]]. Mandatory settings are read
 * with getProperty (which fails fast if absent); optional settings fall back to
 * the defaults declared on the companion object. Timeout properties are given in
 * seconds and converted to milliseconds here.
 */
final class ConfigImpl extends Config {
  val assetsUrl: Option[String] = getOptionalProperty[String]("assets.url")
  // Payment Service
  override val purchaseAmountInPence: String = getProperty[String]("retention.purchaseAmountInPence")
  // Cookie hardening flags (both default to enabled).
  override val secureCookies = getOptionalProperty[Boolean]("secureCookies").getOrElse(ConfigImpl.DefaultSecureCookies)
  override val encryptCookies = getOptionalProperty[Boolean]("encryptCookies").getOrElse(ConfigImpl.DefaultEncryptCookies)
  // Web header identifiers sent with downstream requests.
  override val applicationCode: String = getProperty[String]("webHeader.applicationCode")
  override val channelCode: String = getProperty[String]("webHeader.channelCode")
  override val contactId: Long = getProperty[Long]("webHeader.contactId")
  override val orgBusinessUnit: String = getProperty[String]("webHeader.orgBusinessUnit")
  override val vssServiceTypeCode: String = getProperty[String]("webHeader.vssServiceTypeCode")
  override val dmsServiceTypeCode: String = getProperty[String]("webHeader.dmsServiceTypeCode")
  // Micro-service base URLs and their request timeouts (seconds -> millis).
  override val vrmRetentionEligibilityMicroServiceUrlBase: String = getProperty[String]("vrmRetentionEligibilityMicroServiceUrlBase")
  override val vrmRetentionRetainMicroServiceUrlBase: String = getProperty[String]("vrmRetentionRetainMicroServiceUrlBase")
  override val vehicleAndKeeperLookupMicroServiceBaseUrl: String = getProperty[String]("vehicleAndKeeperLookupMicroServiceUrlBase")
  override val vrmRetentionEligibilityMsRequestTimeout: Int =
    getOptionalProperty[Int]("vrmRetentionEligibility.requesttimeout")
      .getOrElse(ConfigImpl.DefaultRequestTimeoutSecs.seconds.toMillis.toInt)
  override val vrmRetentionRetainMsRequestTimeout: Int =
    getOptionalProperty[Int]("vrmRetentionRetain.requesttimeout")
      .getOrElse(ConfigImpl.DefaultRequestTimeoutSecs.seconds.toMillis.toInt)
  override val paymentSolveMicroServiceUrlBase: String = getProperty[String]("paymentSolveMicroServiceUrlBase")
  override val paymentSolveMsRequestTimeout: Int =
    getOptionalProperty[Int]("paymentSolve.ms.requesttimeout")
      .getOrElse(ConfigImpl.DefaultSolveRequestTimeoutSecs.seconds.toMillis.toInt)
  override val vehicleAndKeeperLookupRequestTimeout: Int =
    getOptionalProperty[Int]("vehicleAndKeeperLookup.requesttimeout")
      .getOrElse(ConfigImpl.DefaultRequestTimeoutSecs.seconds.toMillis.toInt)
  override val isPrototypeBannerVisible: Boolean = getOptionalProperty[Boolean]("prototype.disclaimer")
    .getOrElse(ConfigImpl.DefaultPrototypeBannerEnabled)
  override val googleAnalyticsTrackingId: Option[String] = getOptionalProperty[String]("googleAnalytics.id.retention")
  // Email settings; whitelist restricts which recipient addresses may be mailed.
  override val emailWhitelist: Option[List[String]] = getStringListProperty("email.whitelist")
  override val emailSenderAddress: String = getOptionalProperty[String]("email.senderAddress")
    .getOrElse(ConfigImpl.DefaultSenderEmail)
  // Cookie lifetimes (minutes/days in the defaults, stored as seconds).
  override val cookieMaxAge = getOptionalProperty[Int]("application.cookieMaxAge")
    .getOrElse(ConfigImpl.DefaultCookieMaxAgeMins.minutes.toSeconds.toInt)
  override val storeBusinessDetailsMaxAge =
    getOptionalProperty[Int]("storeBusinessDetails.cookieMaxAge")
      .getOrElse(ConfigImpl.DefaultBusinessDetailsCookieMaxAgeDays.days.toSeconds.toInt)
  override val auditMicroServiceUrlBase: String = getProperty[String]("auditMicroServiceUrlBase")
  override val auditMsRequestTimeout: Int =
    getOptionalProperty[Int]("audit.requesttimeout")
      .getOrElse(ConfigImpl.DefaultAuditRequestTimeoutSecs.seconds.toMillis.toInt)
  // Email microservice
  override val emailServiceMicroServiceUrlBase: String = getProperty[String]("emailServiceMicroServiceUrlBase")
  override val emailServiceMsRequestTimeout: Int =
    getOptionalProperty[Int]("emailService.ms.requesttimeout")
      .getOrElse(ConfigImpl.DefaultRequestTimeoutSecs.seconds.toMillis.toInt)
  override val emailConfiguration: EmailConfiguration = EmailConfiguration(
    From(getProperty[String]("email.senderAddress"), ConfigImpl.EmailFromName),
    From(getProperty[String]("email.feedbackAddress"), ConfigImpl.EmailFeedbackFromName),
    getStringListProperty("email.whitelist")
  )
  // Service opening hours expressed as minute-of-day, plus days on which it is closed.
  override val openingTimeMinOfDay: Int = getProperty[Int]("openingTimeMinOfDay")
  override val closingTimeMinOfDay: Int = getProperty[Int]("closingTimeMinOfDay")
  override val closingWarnPeriodMins: Int = getOptionalProperty[Int]("closingWarnPeriodMins")
    .getOrElse(ConfigImpl.DefaultClosingWarnPeriodMins)
  override val closedDays: List[Int] = {
    getIntListProperty("closedDays").getOrElse(List())
  }
  // TODO make property survey.url mandatory
  override val surveyUrl: Option[String] = getOptionalProperty[String]("survey.url")
  // Webchat (LiveAgent) integration; enabled only when an environment id is configured.
  override val liveAgentEnvironmentId: Option[String] = {
    val liveAgentId: Option[String] = getOptionalProperty[String]("webchat.liveAgent.environmentId")
    liveAgentId.fold(Logger.info("Webchat functionality is not enabled"))
      {id => Logger.info("Webchat functionality is enabled")}
    liveAgentId
  }
  override val liveAgentButtonId: String = getProperty[String]("webchat.liveAgent.buttonId")
  override val liveAgentOrgId: String = getProperty[String]("webchat.liveAgent.orgId")
  override val liveAgentUrl: String = getProperty[String]("webchat.liveAgent.url")
  override val liveAgentjsUrl: String = getProperty[String]("webchat.liveAgent.jsUrl")
  override val failureCodeBlacklist: Option[List[String]] = getStringListProperty("webchat.failureCodes.blacklist")
}
/**
 * Fallback values and fixed strings used by [[ConfigImpl]] when the corresponding
 * optional property is absent. Duration-valued defaults carry their unit in the
 * name (Mins/Days/Secs) and are converted at the use site.
 */
object ConfigImpl {
  final val NotFound = "NOT FOUND"
  final val EmailFromName = "DO-NOT-REPLY"
  final val EmailFeedbackFromName = "Feedback"
  //defaults
  final val DefaultSenderEmail = ""
  final val DefaultSecureCookies = true
  final val DefaultPrototypeBannerEnabled = true
  final val DefaultEncryptCookies = true
  final val DefaultBusinessDetailsCookieMaxAgeDays = 7
  final val DefaultClosingWarnPeriodMins = 15
  final val DefaultCookieMaxAgeMins = 30
  final val DefaultAuditRequestTimeoutSecs = 30
  final val DefaultRequestTimeoutSecs = 30
  final val DefaultSolveRequestTimeoutSecs = 5
} | dvla/vrm-retention-online | app/utils/helpers/ConfigImpl.scala | Scala | mit | 6,641 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator.transaction
import kafka.common.{InterBrokerSendThread, RequestAndCompletionHandler}
import kafka.metrics.KafkaMetricsGroup
import kafka.server.{DelayedOperationPurgatory, KafkaConfig, MetadataCache}
import kafka.utils.{CoreUtils, Logging}
import org.apache.kafka.clients._
import org.apache.kafka.common.{Node, TopicPartition}
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network._
import org.apache.kafka.common.requests.{TransactionResult, WriteTxnMarkersRequest}
import org.apache.kafka.common.security.JaasContext
import org.apache.kafka.common.utils.{LogContext, Time}
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests.WriteTxnMarkersRequest.TxnMarkerEntry
import com.yammer.metrics.core.Gauge
import java.util
import java.util.concurrent.{BlockingQueue, ConcurrentHashMap, LinkedBlockingQueue}
import collection.JavaConverters._
import scala.collection.{concurrent, immutable}
/**
 * Factory for [[TransactionMarkerChannelManager]]: wires up the dedicated
 * inter-broker Selector and NetworkClient used by the marker sender thread.
 * Note: the NetworkClient arguments below are positional and order-sensitive.
 */
object TransactionMarkerChannelManager {
  def apply(config: KafkaConfig,
            metrics: Metrics,
            metadataCache: MetadataCache,
            txnStateManager: TransactionStateManager,
            txnMarkerPurgatory: DelayedOperationPurgatory[DelayedTxnMarker],
            time: Time,
            logContext: LogContext): TransactionMarkerChannelManager = {
    // Channel security follows the inter-broker listener configuration.
    val channelBuilder = ChannelBuilders.clientChannelBuilder(
      config.interBrokerSecurityProtocol,
      JaasContext.Type.SERVER,
      config,
      config.interBrokerListenerName,
      config.saslMechanismInterBrokerProtocol,
      config.saslInterBrokerHandshakeRequestEnable
    )
    val selector = new Selector(
      NetworkReceive.UNLIMITED,
      config.connectionsMaxIdleMs,
      metrics,
      time,
      "txn-marker-channel",
      Map.empty[String, String].asJava,
      false,
      channelBuilder,
      logContext
    )
    // Metadata is updated manually (ManualMetadataUpdater); leaders are resolved
    // from the broker's MetadataCache rather than via the client's own refresh.
    val networkClient = new NetworkClient(
      selector,
      new ManualMetadataUpdater(),
      s"broker-${config.brokerId}-txn-marker-sender",
      1,
      50,
      50,
      Selectable.USE_DEFAULT_BUFFER_SIZE,
      config.socketReceiveBufferBytes,
      config.requestTimeoutMs,
      time,
      false,
      new ApiVersions,
      logContext
    )
    new TransactionMarkerChannelManager(config,
      metadataCache,
      networkClient,
      txnStateManager,
      txnMarkerPurgatory,
      time
    )
  }
}
/**
 * Holder of pending transaction markers for one destination broker, bucketed by the
 * transaction-log topic partition they originate from, so that all markers belonging
 * to a partition can be removed at once during partition emigration.
 */
class TxnMarkerQueue(@volatile var destination: Node) {

  // One queue of pending markers per originating txn topic partition. Backed by a
  // concurrent map so producers and the sender thread need no external locking.
  private val queuesByPartition = new ConcurrentHashMap[Int, BlockingQueue[TxnIdAndMarkerEntry]]().asScala

  /** Detaches and returns the marker queue for the given txn topic partition, if any. */
  def removeMarkersForTxnTopicPartition(partition: Int): Option[BlockingQueue[TxnIdAndMarkerEntry]] =
    queuesByPartition.remove(partition)

  /** Enqueues a marker under its txn topic partition, creating the queue on first use. */
  def addMarkers(txnTopicPartition: Int, txnIdAndMarker: TxnIdAndMarkerEntry): Unit = {
    val pending = CoreUtils.atomicGetOrUpdate(queuesByPartition, txnTopicPartition,
      new LinkedBlockingQueue[TxnIdAndMarkerEntry]())
    pending.add(txnIdAndMarker)
  }

  /** Applies f to every (partition, queue) pair that currently has pending markers. */
  def forEachTxnTopicPartition[B](f: (Int, BlockingQueue[TxnIdAndMarkerEntry]) => B): Unit =
    for ((partition, pending) <- queuesByPartition if !pending.isEmpty)
      f(partition, pending)

  /** Total number of markers queued across all partitions. */
  def totalNumMarkers: Int = queuesByPartition.values.map(_.size).sum

  // visible for testing
  def totalNumMarkers(txnTopicPartition: Int): Int =
    queuesByPartition.get(txnTopicPartition).fold(0)(_.size)
}
/**
 * Sender thread that delivers transaction markers (WriteTxnMarkers requests) to the
 * leaders of the partitions touched by a completed transaction, and, once all markers
 * are acknowledged, appends the final transaction state to the transaction log.
 *
 * Markers are queued per destination broker; markers whose partition leader cannot be
 * resolved to a live node yet are parked in a dedicated "unknown broker" queue and
 * re-resolved on each drain pass. Failed transaction-log appends that are retriable
 * are parked in txnLogAppendRetryQueue and retried on each drain pass as well.
 */
class TransactionMarkerChannelManager(config: KafkaConfig,
                                      metadataCache: MetadataCache,
                                      networkClient: NetworkClient,
                                      txnStateManager: TransactionStateManager,
                                      txnMarkerPurgatory: DelayedOperationPurgatory[DelayedTxnMarker],
                                      time: Time) extends InterBrokerSendThread("TxnMarkerSenderThread-" + config.brokerId, networkClient, time) with Logging with KafkaMetricsGroup {

  this.logIdent = "[Transaction Marker Channel Manager " + config.brokerId + "]: "

  private val interBrokerListenerName: ListenerName = config.interBrokerListenerName

  // Pending markers keyed by destination broker id.
  private val markersQueuePerBroker: concurrent.Map[Int, TxnMarkerQueue] = new ConcurrentHashMap[Int, TxnMarkerQueue]().asScala

  // Markers whose destination leader is currently unknown or not alive.
  private val markersQueueForUnknownBroker = new TxnMarkerQueue(Node.noNode)

  // Transaction-log appends to retry after a retriable COORDINATOR_NOT_AVAILABLE failure.
  private val txnLogAppendRetryQueue = new LinkedBlockingQueue[TxnLogAppend]()

  newGauge(
    "UnknownDestinationQueueSize",
    new Gauge[Int] {
      def value: Int = markersQueueForUnknownBroker.totalNumMarkers
    }
  )

  newGauge(
    "LogAppendRetryQueueSize",
    new Gauge[Int] {
      def value: Int = txnLogAppendRetryQueue.size
    }
  )

  // Invoked by the InterBrokerSendThread loop to build the next batch of requests.
  override def generateRequests() = drainQueuedTransactionMarkers()

  override def shutdown(): Unit = {
    super.shutdown()
    txnMarkerPurgatory.shutdown()
    markersQueuePerBroker.clear()
  }

  // visible for testing
  private[transaction] def queueForBroker(brokerId: Int) = {
    markersQueuePerBroker.get(brokerId)
  }

  // visible for testing
  private[transaction] def queueForUnknownBroker = markersQueueForUnknownBroker

  private[transaction] def addMarkersForBroker(broker: Node, txnTopicPartition: Int, txnIdAndMarker: TxnIdAndMarkerEntry) {
    val brokerId = broker.id

    // we do not synchronize on the update of the broker node with the enqueuing,
    // since even if there is a race condition we will just retry
    val brokerRequestQueue = CoreUtils.atomicGetOrUpdate(markersQueuePerBroker, brokerId,
      new TxnMarkerQueue(broker))
    brokerRequestQueue.destination = broker
    brokerRequestQueue.addMarkers(txnTopicPartition, txnIdAndMarker)

    trace(s"Added marker ${txnIdAndMarker.txnMarkerEntry} for transactional id ${txnIdAndMarker.txnId} to destination broker $brokerId")
  }

  /** Drains the retry queue and re-attempts each failed transaction-log append. */
  def retryLogAppends(): Unit = {
    val txnLogAppendRetries: java.util.List[TxnLogAppend] = new util.ArrayList[TxnLogAppend]()
    txnLogAppendRetryQueue.drainTo(txnLogAppendRetries)
    txnLogAppendRetries.asScala.foreach { txnLogAppend =>
      debug(s"Retry appending $txnLogAppend transaction log")
      tryAppendToLog(txnLogAppend)
    }
  }

  /**
   * Builds the next batch of WriteTxnMarkers requests, one per destination broker.
   * Markers previously parked under the unknown-broker queue are re-resolved first
   * (their leader may be known by now) before the per-broker queues are drained.
   */
  private[transaction] def drainQueuedTransactionMarkers(): Iterable[RequestAndCompletionHandler] = {
    retryLogAppends()
    val txnIdAndMarkerEntries: java.util.List[TxnIdAndMarkerEntry] = new util.ArrayList[TxnIdAndMarkerEntry]()
    markersQueueForUnknownBroker.forEachTxnTopicPartition { case (_, queue) =>
      queue.drainTo(txnIdAndMarkerEntries)
    }

    // Re-route each previously-unresolved marker through the normal enqueue path.
    for (txnIdAndMarker: TxnIdAndMarkerEntry <- txnIdAndMarkerEntries.asScala) {
      val transactionalId = txnIdAndMarker.txnId
      val producerId = txnIdAndMarker.txnMarkerEntry.producerId
      val producerEpoch = txnIdAndMarker.txnMarkerEntry.producerEpoch
      val txnResult = txnIdAndMarker.txnMarkerEntry.transactionResult
      val coordinatorEpoch = txnIdAndMarker.txnMarkerEntry.coordinatorEpoch
      val topicPartitions = txnIdAndMarker.txnMarkerEntry.partitions.asScala.toSet

      addTxnMarkersToBrokerQueue(transactionalId, producerId, producerEpoch, txnResult, coordinatorEpoch, topicPartitions)
    }

    markersQueuePerBroker.values.map { brokerRequestQueue =>
      val txnIdAndMarkerEntries = new util.ArrayList[TxnIdAndMarkerEntry]()
      brokerRequestQueue.forEachTxnTopicPartition { case (_, queue) =>
        queue.drainTo(txnIdAndMarkerEntries)
      }
      (brokerRequestQueue.destination, txnIdAndMarkerEntries)
    }.filter { case (_, entries) => !entries.isEmpty }.map { case (node, entries) =>
      val markersToSend = entries.asScala.map(_.txnMarkerEntry).asJava
      val requestCompletionHandler = new TransactionMarkerRequestCompletionHandler(node.id, txnStateManager, this, entries)
      RequestAndCompletionHandler(node, new WriteTxnMarkersRequest.Builder(markersToSend), requestCompletionHandler)
    }
  }

  /**
   * Registers a delayed operation that fires once all markers for the transaction have
   * been acknowledged (then appending the completed state to the transaction log), and
   * enqueues the markers themselves for sending.
   */
  def addTxnMarkersToSend(transactionalId: String,
                          coordinatorEpoch: Int,
                          txnResult: TransactionResult,
                          txnMetadata: TransactionMetadata,
                          newMetadata: TxnTransitMetadata): Unit = {

    // Completion callback for the delayed marker operation: verify this broker still
    // owns the transaction with the same coordinator epoch before appending to the log.
    def appendToLogCallback(error: Errors): Unit = {
      error match {
        case Errors.NONE =>
          trace(s"Completed sending transaction markers for $transactionalId as $txnResult")

          txnStateManager.getTransactionState(transactionalId) match {
            case Left(Errors.NOT_COORDINATOR) =>
              info(s"No longer the coordinator for $transactionalId with coordinator epoch $coordinatorEpoch; cancel appending $newMetadata to transaction log")

            case Left(Errors.COORDINATOR_LOAD_IN_PROGRESS) =>
              info(s"Loading the transaction partition that contains $transactionalId while my current coordinator epoch is $coordinatorEpoch; " +
                s"so cancel appending $newMetadata to transaction log since the loading process will continue the remaining work")

            case Left(unexpectedError) =>
              throw new IllegalStateException(s"Unhandled error $unexpectedError when fetching current transaction state")

            case Right(Some(epochAndMetadata)) =>
              if (epochAndMetadata.coordinatorEpoch == coordinatorEpoch) {
                debug(s"Sending $transactionalId's transaction markers for $txnMetadata with coordinator epoch $coordinatorEpoch succeeded, trying to append complete transaction log now")

                tryAppendToLog(TxnLogAppend(transactionalId, coordinatorEpoch, txnMetadata, newMetadata))
              } else {
                info(s"The cached metadata $txnMetadata has changed to $epochAndMetadata after completed sending the markers with coordinator " +
                  s"epoch $coordinatorEpoch; abort transiting the metadata to $newMetadata as it may have been updated by another process")
              }

            case Right(None) =>
              val errorMsg = s"The coordinator still owns the transaction partition for $transactionalId, but there is " +
                s"no metadata in the cache; this is not expected"
              fatal(errorMsg)
              throw new IllegalStateException(errorMsg)
          }

        case other =>
          val errorMsg = s"Unexpected error ${other.exceptionName} before appending to txn log for $transactionalId"
          fatal(errorMsg)
          throw new IllegalStateException(errorMsg)
      }
    }

    val delayedTxnMarker = new DelayedTxnMarker(txnMetadata, appendToLogCallback, txnStateManager.stateReadLock)
    txnMarkerPurgatory.tryCompleteElseWatch(delayedTxnMarker, Seq(transactionalId))

    addTxnMarkersToBrokerQueue(transactionalId, txnMetadata.producerId, txnMetadata.producerEpoch, txnResult, coordinatorEpoch, txnMetadata.topicPartitions.toSet)
  }

  private def tryAppendToLog(txnLogAppend: TxnLogAppend) = {
    // try to append to the transaction log
    def appendCallback(error: Errors): Unit =
      error match {
        case Errors.NONE =>
          trace(s"Completed transaction for ${txnLogAppend.transactionalId} with coordinator epoch ${txnLogAppend.coordinatorEpoch}, final state after commit: ${txnLogAppend.txnMetadata.state}")

        case Errors.NOT_COORDINATOR =>
          info(s"No longer the coordinator for transactionalId: ${txnLogAppend.transactionalId} while trying to append to transaction log, skip writing to transaction log")

        case Errors.COORDINATOR_NOT_AVAILABLE =>
          info(s"Not available to append $txnLogAppend: possible causes include ${Errors.UNKNOWN_TOPIC_OR_PARTITION}, ${Errors.NOT_ENOUGH_REPLICAS}, " +
            s"${Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND} and ${Errors.REQUEST_TIMED_OUT}; retry appending")

          // enqueue for retry
          txnLogAppendRetryQueue.add(txnLogAppend)

        case Errors.COORDINATOR_LOAD_IN_PROGRESS =>
          info(s"Coordinator is loading the partition ${txnStateManager.partitionFor(txnLogAppend.transactionalId)} and hence cannot complete append of $txnLogAppend; " +
            s"skip writing to transaction log as the loading process should complete it")

        case other: Errors =>
          val errorMsg = s"Unexpected error ${other.exceptionName} while appending to transaction log for ${txnLogAppend.transactionalId}"
          fatal(errorMsg)
          throw new IllegalStateException(errorMsg)
      }

    txnStateManager.appendTransactionToLog(txnLogAppend.transactionalId, txnLogAppend.coordinatorEpoch, txnLogAppend.newMetadata, appendCallback,
      _ == Errors.COORDINATOR_NOT_AVAILABLE)
  }

  /**
   * Groups the transaction's partitions by current leader and enqueues one marker entry
   * per destination broker. Partitions with a known leader but a dead node go to the
   * unknown-broker queue; partitions with no leader at all are treated as deleted and
   * removed from the transaction metadata.
   */
  def addTxnMarkersToBrokerQueue(transactionalId: String, producerId: Long, producerEpoch: Short,
                                 result: TransactionResult, coordinatorEpoch: Int,
                                 topicPartitions: immutable.Set[TopicPartition]): Unit = {
    val txnTopicPartition = txnStateManager.partitionFor(transactionalId)
    val partitionsByDestination: immutable.Map[Option[Node], immutable.Set[TopicPartition]] = topicPartitions.groupBy { topicPartition: TopicPartition =>
      metadataCache.getPartitionLeaderEndpoint(topicPartition.topic, topicPartition.partition, interBrokerListenerName)
    }

    for ((broker: Option[Node], topicPartitions: immutable.Set[TopicPartition]) <- partitionsByDestination) {
      broker match {
        case Some(brokerNode) =>
          val marker = new TxnMarkerEntry(producerId, producerEpoch, coordinatorEpoch, result, topicPartitions.toList.asJava)
          val txnIdAndMarker = TxnIdAndMarkerEntry(transactionalId, marker)

          if (brokerNode == Node.noNode) {
            // if the leader of the partition is known but node not available, put it into an unknown broker queue
            // and let the sender thread to look for its broker and migrate them later
            markersQueueForUnknownBroker.addMarkers(txnTopicPartition, txnIdAndMarker)
          } else {
            addMarkersForBroker(brokerNode, txnTopicPartition, txnIdAndMarker)
          }

        case None =>
          txnStateManager.getTransactionState(transactionalId) match {
            case Left(error) =>
              info(s"Encountered $error trying to fetch transaction metadata for $transactionalId with coordinator epoch $coordinatorEpoch; cancel sending markers to its partition leaders")
              txnMarkerPurgatory.cancelForKey(transactionalId)

            case Right(Some(epochAndMetadata)) =>
              if (epochAndMetadata.coordinatorEpoch != coordinatorEpoch) {
                info(s"The cached metadata has changed to $epochAndMetadata (old coordinator epoch is $coordinatorEpoch) since preparing to send markers; cancel sending markers to its partition leaders")
                txnMarkerPurgatory.cancelForKey(transactionalId)
              } else {
                // if the leader of the partition is unknown, skip sending the txn marker since
                // the partition is likely to be deleted already
                info(s"Couldn't find leader endpoint for partitions $topicPartitions while trying to send transaction markers for " +
                  s"$transactionalId, these partitions are likely deleted already and hence can be skipped")

                val txnMetadata = epochAndMetadata.transactionMetadata

                txnMetadata.inLock {
                  topicPartitions.foreach(txnMetadata.removePartition)
                }

                txnMarkerPurgatory.checkAndComplete(transactionalId)
              }

            case Right(None) =>
              val errorMsg = s"The coordinator still owns the transaction partition for $transactionalId, but there is " +
                s"no metadata in the cache; this is not expected"
              fatal(errorMsg)
              throw new IllegalStateException(errorMsg)
          }
      }
    }

    // Nudge the sender thread so newly queued markers are picked up promptly.
    wakeup()
  }

  /** Drops all queued markers originating from the given txn topic partition (emigration). */
  def removeMarkersForTxnTopicPartition(txnTopicPartitionId: Int): Unit = {
    markersQueueForUnknownBroker.removeMarkersForTxnTopicPartition(txnTopicPartitionId).foreach { queue =>
      for (entry: TxnIdAndMarkerEntry <- queue.asScala)
        removeMarkersForTxnId(entry.txnId)
    }

    markersQueuePerBroker.foreach { case(_, brokerQueue) =>
      brokerQueue.removeMarkersForTxnTopicPartition(txnTopicPartitionId).foreach { queue =>
        for (entry: TxnIdAndMarkerEntry <- queue.asScala)
          removeMarkersForTxnId(entry.txnId)
      }
    }
  }

  def removeMarkersForTxnId(transactionalId: String): Unit = {
    // we do not need to clear the queue since it should have
    // already been drained by the sender thread
    txnMarkerPurgatory.cancelForKey(transactionalId)
  }

  def completeSendMarkersForTxnId(transactionalId: String): Unit = {
    txnMarkerPurgatory.checkAndComplete(transactionalId)
  }
}
// Pairs a transactional id with the marker entry to be written on its behalf.
case class TxnIdAndMarkerEntry(txnId: String, txnMarkerEntry: TxnMarkerEntry)
/** Pending append of a completed transaction's state transition to the transaction log. */
case class TxnLogAppend(transactionalId: String, coordinatorEpoch: Int, txnMetadata: TransactionMetadata, newMetadata: TxnTransitMetadata) {

  // Single-expression rendering; the output is character-for-character the same as the
  // previous piecewise concatenation.
  override def toString: String =
    s"TxnLogAppend(transactionalId=$transactionalId, coordinatorEpoch=$coordinatorEpoch, " +
      s"txnMetadata=$txnMetadata, newMetadata=$newMetadata)"
}
| themarkypantz/kafka | core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala | Scala | apache-2.0 | 18,247 |
/*
* Copyright (c) 2015 Robert Conrad - All Rights Reserved.
* Unauthorized copying of this file, via any medium is strictly prohibited.
* This file is proprietary and confidential.
* Last modified by rconrad, 1/3/15 7:18 PM
*/
package base.socket.message
import base.common.lib.Actors
import base.socket._
import base.socket.json.JsonFormats
import base.socket.message.Command.Cmd
import io.netty.channel.ChannelHandlerContext
import org.json4s.JsonAST.JString
import org.json4s.JsonDSL._
import org.json4s.{ CustomSerializer, JValue, MappingException }
import scala.reflect.runtime.universe._
// scalastyle:off line.size.limit
/**
 * Root of the sealed command hierarchy. A command pairs a wire-level string (`cmd`)
 * with the message type T it carries; the Manifest is retained so JSON payloads can
 * be extracted into T at runtime.
 */
sealed abstract class Command[T <: Message](implicit val man: Manifest[T]) {
  // Wire identifier for this command; also its toString and JSON representation.
  val cmd: String
  protected implicit val formats = JsonFormats.defaultWithCommands
  final override def toString = cmd
  final def toJValue: JValue = cmd
  // Deserialize a JSON payload into this command's message type.
  final protected def extract(json: JValue) = json.extract[T]
}
/**
 * A command the server can act upon: subclasses implement `process` for the typed
 * message, and the final overload extracts that message from raw JSON first.
 */
sealed abstract class ProcessableCommand[T <: Message](implicit override val man: Manifest[T]) extends Command[T] {
  protected def process(implicit ctx: ChannelHandlerContext, msg: T)
  final def process(ctx: ChannelHandlerContext, json: JValue) { process(ctx, extract(json)) }
}
// A command that is only ever emitted, never processed by this side.
sealed abstract class UnprocessableCommand[T <: Message](implicit override val man: Manifest[T]) extends Command[T]
// Processable command carrying a client-originated message (see trailing direction note).
sealed abstract class ClientCommand[T <: ClientMessage](implicit override val man: Manifest[T])
  extends ProcessableCommand[T]
// communications client->server
sealed abstract class ControlCommand[T <: ControlMessage](implicit override val man: Manifest[T])
  extends ProcessableCommand[T]
// communications server->server
sealed abstract class ServerCommand[T <: ServerMessage](implicit override val man: Manifest[T])
  extends UnprocessableCommand[T]
// communications server->client
// Concrete, non-sealed bases below supply the wire string via a constructor parameter.
abstract class UserClientCommand[T <: UserClientMessage](override val cmd: String)(implicit override val man: Manifest[T])
  extends ClientCommand[T]
abstract class UserServerCommand[T <: UserServerMessage](override val cmd: String)(implicit override val man: Manifest[T])
  extends ServerCommand[T]
abstract class TestControlCommand[T <: TestControlMessage](override val cmd: String)(implicit override val man: Manifest[T])
  extends ControlCommand[T]
/**
 * Base for objects that group related commands. Provides convenience implicits for
 * channel handling and an `init` helper that cross-checks, via runtime reflection,
 * that every Cmd-typed member of the concrete object was passed to `init`.
 */
abstract class CommandObject {
  implicit val dispatcher = Actors.actorSystem.dispatcher
  implicit def command2JValue(cmd: Cmd) = cmd.toJValue
  implicit def ctx2ch(ctx: ChannelHandlerContext) = ctx.channel
  implicit def ctx2UserId(ctx: ChannelHandlerContext) = ctx.channel.userId
  // All commands this object declares; populated by calling init in the subclass.
  val cmds: Set[Cmd]
  // `self` is only used to drive the TypeTag inference for T; its value is unused.
  protected def init[T: TypeTag](self: T, in: Cmd*) = {
    val cmds = in.toSet
    // Reflectively count the Cmd-typed members of T and compare with what was passed in,
    // so a newly added command member that was not registered fails fast at startup.
    val members = typeOf[T].members.filter(_.typeSignature match {
      case tpe if tpe <:< typeOf[Cmd] => true
      case _ => false
    })
    lazy val name = this.getClass.getSimpleName
    lazy val err = s"$name command members do not match initialized commands\n\nmembers: $members\n\ncommands: $cmds"
    assert(members.size == cmds.size, err)
    cmds
  }
}
/**
 * Registry of processable commands. `map` accumulates every registered command into
 * a module-level set (across all calls) and asserts global uniqueness of both the
 * wire strings and the message manifests before returning a lookup map.
 */
object Command {
  type Cmd = Command[_ <: Message]
  type ProcessableCmd = ProcessableCommand[_ <: Message]
  // NOTE(review): mutable accumulator updated on each map() call; assumed to be
  // populated only during single-threaded startup — confirm before concurrent use.
  private var processable = Set[ProcessableCmd]()
  def map[T <: ProcessableCmd](cmds: T*) = {
    processable ++= cmds
    assertNoDuplicates(processable)
    // Lookup table from wire string to command for the commands passed in this call.
    cmds.map(c => c.cmd -> c).toMap
  }
  private def assertNoDuplicates(cmds: Set[ProcessableCmd]) {
    cmds.foreach { cmd =>
      assert(cmds.count(_.cmd == cmd.cmd) == 1, s"Commands strings must be unique, duplicate found: $cmd")
    }
    cmds.foreach { cmd =>
      assert(cmds.count(_.man == cmd.man) == 1, s"Commands must have unique messages, duplicate found: $cmd")
    }
  }
}
// One-way json4s serializer: commands serialize to their wire string, but any attempt
// to deserialize a command string throws (the JString case always raises MappingException).
class CommandSerializer extends CustomSerializer[Command[_ <: Message]](format => (
  { case JString(cmd) => throw new MappingException(s"Unknown command $cmd") },
  { case cmd: Cmd => JString(cmd.toString) }))
| robconrad/base-api | project-socket/src/main/scala/base/socket/message/Command.scala | Scala | mit | 3,936 |
package kalmanb.akka.push
import akka.actor.ActorLogging
import akka.actor.Actor
/** Example worker actor: logs each message, simulates slow work, then replies to the sender. */
class Processor extends Actor with ActorLogging {
  def receive = {
    case toBeProcessed =>
      log.info(s"Processing $toBeProcessed")
      // Simulate a processing delay before replying.
      Thread.sleep(500)
      sender ! s"here's a processed result $toBeProcessed"
  }
}
| kalmanb/akka-examples | src/main/scala/kalmanb/akka/push/Processor.scala | Scala | apache-2.0 | 342 |
package org.jetbrains.plugins.scala.annotator
import com.intellij.codeInspection.util.InspectionMessage
import com.intellij.lang.annotation.{AnnotationBuilder, AnnotationSession, HighlightSeverity}
/**
* This is a clone of public API of [[com.intellij.lang.annotation.AnnotationHolder]]
*
* We need it for now to handle desugarings (see [[annotationHolder.DelegateAnnotationHolder]]),
* and because AnnotationHolder is not supposed to be overridden anymore.
*/
trait ScalaAnnotationHolder extends ScalaAnnotationHolderAPI {
  // Session shared by all annotations produced during one highlighting pass.
  def getCurrentAnnotationSession: AnnotationSession

  // Whether annotations are being collected in batch (e.g. code-analysis) mode.
  def isBatchMode: Boolean

  /**
   * Begin constructing a new annotation.
   * To finish construction and show the annotation on screen {@link AnnotationBuilder# create ( )} must be called.
   * For example: <p>{@code holder.newAnnotation(HighlightSeverity.WARNING, "My warning message").create();}</p>
   *
   * @param severity The severity of the annotation.
   * @param message  The message this annotation will show in the status bar and the tooltip.
   * @apiNote The builder created by this method is already initialized by the current element, i.e. the psiElement currently visited by inspection
   *          visitor. You'll need to call {@link AnnotationBuilder# range ( TextRange )} or similar method explicitly only if target element differs from current element.
   *          Please note, that the range in {@link AnnotationBuilder# range ( TextRange )} must be inside the range of the current element.
   */
  def newAnnotation(severity: HighlightSeverity, @InspectionMessage message: String): ScalaAnnotationBuilder

  /**
   * Begin constructing a new annotation with no message and no tooltip.
   * To finish construction and show the annotation on screen {@link AnnotationBuilder# create ( )} must be called.
   * For example: <p>{@code holder.newSilentAnnotation(HighlightSeverity.WARNING).textAttributes(MY_ATTRIBUTES_KEY).create();}</p>
   *
   * @param severity The severity of the annotation.
   * @apiNote The builder created by this method is already initialized by the current element, i.e. the psiElement currently visited by inspection
   *          visitor. You'll need to call {@link AnnotationBuilder# range ( TextRange )} or similar method explicitly only if target element differs from current element.
   *          Please note, that the range in {@link AnnotationBuilder# range ( TextRange )} must be inside the range of the current element.
   */
  def newSilentAnnotation(severity: HighlightSeverity): ScalaAnnotationBuilder
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/annotator/ScalaAnnotationHolder.scala | Scala | apache-2.0 | 2,552 |
package chess
import variant.Horde
/**
 * Insufficient-material and auto-draw rules for the Horde variant, exercised via FEN
 * positions. Note that the side-to-move field in each FEN determines whose opponent
 * `opponentHasInsufficientMaterial` evaluates — several tests below reuse the same
 * position with `b` vs `w` to check both perspectives.
 */
class HordeVariantTest extends ChessTest {

  "Horde chess" should {

    "Must not be insufficient winning material for horde with only 1 pawn left" in {
      val position = "k7/ppP5/brp5/8/8/8/8/8 b - -"
      val game = fenToGame(position, Horde)

      game must beValid.like {
        case game =>
          game.situation.opponentHasInsufficientMaterial must beFalse
      }
    }

    "Must recognise insufficient winning material for horde with only 1 pawn left" in {
      val position = "8/2k5/3q4/8/8/8/1P6/8 b - -"
      val game = fenToGame(position, Horde)

      game must beValid.like {
        case game =>
          game.situation.opponentHasInsufficientMaterial must beTrue
      }
    }

    "Must not be insufficient winning material for king with only 1 pawn left" in {
      val position = "8/2k5/3q4/8/8/8/1P6/8 w - -"
      val game = fenToGame(position, Horde)

      game must beValid.like {
        case game =>
          game.situation.opponentHasInsufficientMaterial must beFalse
      }
    }

    "Must recognise insufficient winning material for horde with only 1 bishop left" in {
      val position = "r7/2Bb4/q3k3/8/8/3q4/8/5qqr b - -"
      val game = fenToGame(position, Horde)

      game must beValid.like {
        case game =>
          // Not yet a draw or game end, but the horde side cannot win from here.
          game.situation.autoDraw must beFalse
          game.situation.end must beFalse
          game.situation.opponentHasInsufficientMaterial must beTrue
      }
    }

    "Must recognise insufficient winning material for horde with only 1 queen left" in {
      val position = "8/2k5/3q4/8/8/1Q6/8/8 b - -"
      val game = fenToGame(position, Horde)

      game must beValid.like {
        case game =>
          game.situation.opponentHasInsufficientMaterial must beTrue
      }
    }

    "Must not be insufficient winning material for king with only 1 queen left" in {
      val position = "8/2k5/3q4/8/8/1Q6/8/8 w - -"
      val game = fenToGame(position, Horde)

      game must beValid.like {
        case game =>
          game.situation.opponentHasInsufficientMaterial must beFalse
      }
    }

    "Must recognise insufficient winning material for horde with only 2 minor pieces left" in {
      val position = "8/2k5/3q4/8/8/1B2N3/8/8 b - -"
      val game = fenToGame(position, Horde)

      game must beValid.like {
        case game =>
          game.situation.opponentHasInsufficientMaterial must beTrue
      }
    }

    "Must not be insufficient winning material for king with only 2 minor pieces left" in {
      val position = "8/2k5/3q4/8/8/1B2N3/8/8 w - -"
      val game = fenToGame(position, Horde)

      game must beValid.like {
        case game =>
          game.situation.opponentHasInsufficientMaterial must beFalse
      }
    }

    "Must not be insufficient winning material for horde with 3 minor pieces left" in {
      val position = "8/2k5/3q4/8/8/3B4/4NB2/8 b - -"
      val game = fenToGame(position, Horde)

      game must beValid.like {
        case game =>
          game.situation.opponentHasInsufficientMaterial must beFalse
      }
    }

    "Must auto-draw in simple pawn fortress" in {
      val position = "8/p7/pk6/P7/P7/8/8/8 b - -"
      val game = fenToGame(position, Horde)

      game must beValid.like {
        case game =>
          game.situation.autoDraw must beTrue
          game.situation.opponentHasInsufficientMaterial must beTrue
      }
    }

    "Must auto-draw if horde is stalemated and only king can move" in {
      val position = "QNBRRBNQ/PPpPPpPP/P1P2PkP/8/8/8/8/8 b - -"
      val game = fenToGame(position, Horde)

      game must beValid.like {
        case game =>
          game.situation.autoDraw must beTrue
          game.situation.opponentHasInsufficientMaterial must beTrue
      }
    }

    "Must not auto-draw in B vs K endgame, king can win" in {
      val position = "7B/6k1/8/8/8/8/8/8 b - -"
      val game = fenToGame(position, Horde)

      game must beValid.like {
        case game =>
          game.situation.autoDraw must beFalse
          game.situation.opponentHasInsufficientMaterial must beTrue
      }
    }

  }
}
| niklasf/scalachess | src/test/scala/HordeVariantTest.scala | Scala | mit | 4,182 |
package org.dbpedia.lookup.inputformat
import java.io.InputStream
import org.dbpedia.extraction.util.WikiUtil
import org.dbpedia.lookup.lucene.LuceneConfig
import scala.io.Source
class WikiStatsExtractor(dataSet: InputStream, pSfGivenUriThreshold: Double) extends InputFormat {

    // Line iterator over the UTF-8 encoded wiki-statistics dump.
    private val it = Source.fromInputStream(dataSet, "utf-8").getLines()

    /**
     * Streams (uri, field, value) triples to `f`. For every well-formed line
     * (at least three TAB-separated columns: surface form, title, uri count)
     * two triples are emitted in order: the wiki-encoded URI with its surface
     * form, then the same URI with its reference count. Shorter lines are
     * silently skipped.
     */
    override def foreach[U](f: ((String,String,String)) => U) {
        for (line <- it) {
            val columns = line.split("\t")
            if (columns.length >= 3) {
                val encodedUri = WikiUtil.wikiEncode(columns(1))
                f((encodedUri, LuceneConfig.Fields.SURFACE_FORM_KEYWORD, columns(0)))
                f((encodedUri, LuceneConfig.Fields.REFCOUNT, columns(2)))
            }
        }
    }
}
| dbpedia/lookup | src/main/scala/org/dbpedia/lookup/inputformat/WikiStatsExtractor.scala | Scala | apache-2.0 | 777 |
package com.classcat.ccnsm2
import org.apache.spark.SparkContext
// import org.apache.spark._
import org.apache.spark.rdd.RDD // MapPartitionsRDD
import java.io.{PrintWriter, StringWriter}
// import com.classcat.ccnsm2.CCConfig
class DataCurrConn (proto : String) extends DataBasic { // sc : SparkContext) {

    import scala.util.control.NonFatal

    // Path of the current Bro conn log; `bro_logs` is inherited from DataBasic.
    private val log_file : String = "%s/current/conn.log".format(bro_logs)

    /** Absolute path of the conn.log file this instance reads. */
    def getLogFileName : String = log_file

    // NOTE(review): these remain null when loading fails; `is_error` is set
    // instead, so callers are expected to check it before using any getter.
    private var rdd_incoming : RDD[Array[String]] = _
    private var rdd_outgoing : RDD[Array[String]] = _
    private var rdd_others : RDD[Array[String]] = _
    private var rdd_incoming_group_by_orig_h : RDD[(String, Int)] = _
    private var rdd_outgoing_group_by_resp_h : RDD[(String, Int)] = _
    private var rdd_incoming_group_by_resp_p : RDD[(String, Int)] = _
    private var rdd_outgoing_group_by_resp_p : RDD[(String, Int)] = _

    try {
        val rdd_raw : RDD[String] = sc.textFile(log_file).cache()
        println ("cc-info >> log file %s loaded".format(log_file))

        // Drop Bro header lines (starting with '#') and split records on TAB.
        // conn.log field layout: ts(0) uid(1) id.orig_h(2) id.orig_p(3)
        // id.resp_h(4) id.resp_p(5) proto(6) ...
        val rdd_all : RDD[Array[String]] = rdd_raw.filter(! _.startsWith("#")).map(_.split("\\t"))
        println("cc-info >> tsv processed to get tokens.")

        // Keep only records of the requested protocol, newest first (sorted
        // by ts, descending). A proto other than tcp/udp raises MatchError,
        // which the catch block below records (the original left `rdd` null
        // in that case and failed later with an NPE; failing here is clearer).
        val rdd : RDD[Array[String]] = proto match {
            case "tcp" => rdd_all.filter(_(6) == "tcp").sortBy( { x => x(0) }, false)
            case "udp" => rdd_all.filter(_(6) == "udp").sortBy( { x => x(0) }, false)
        }
        println("cc-info >> filter applied for protocol specific")

        // Split traffic relative to this host (MyConfig.myip):
        // incoming = we are the responder, outgoing = we are the originator.
        rdd_incoming = rdd.filter( { x => x(4) == MyConfig.myip } )
        rdd_outgoing = rdd.filter( { x => x(2) == MyConfig.myip } )
        rdd_others = rdd.filter( { x => (x(2) != MyConfig.myip) && (x(4) != MyConfig.myip) } )

        // Per-peer / per-port connection counts, largest first.
        rdd_incoming_group_by_orig_h = rdd_incoming.groupBy({ x => x(2)}).map( x => {(x._1, x._2.size)}).sortBy( { x => x._2 }, false)
        rdd_outgoing_group_by_resp_h = rdd_outgoing.groupBy({ x => x(4)}).map( x => {(x._1, x._2.size)}).sortBy( { x => x._2 }, false)
        rdd_incoming_group_by_resp_p = rdd_incoming.groupBy({ x => x(5)}).map( x => {(x._1, x._2.size)}).sortBy( { x => x._2 }, false)
        rdd_outgoing_group_by_resp_p = rdd_outgoing.groupBy({ x => x(5)}).map( x => {(x._1, x._2.size)}).sortBy( { x => x._2 }, false)
    } catch {
        // NonFatal instead of `case ex: Exception`: no longer swallows
        // InterruptedException, and also records non-fatal Errors instead of
        // letting them escape the constructor half-initialized.
        case NonFatal(ex) =>
            is_error = true
            msg_error = ex.toString
            println ("Unexpected Error >> %s".format(ex.toString))
            val sw = new StringWriter
            ex.printStackTrace(new PrintWriter(sw))
            println(sw.toString)
    }

    /** Connections whose responder (id.resp_h) is this host. */
    def getRddIncoming () : RDD[Array[String]] = rdd_incoming

    /** Connections whose originator (id.orig_h) is this host. */
    def getRddOutgoing () : RDD[Array[String]] = rdd_outgoing

    /** Connections not involving this host on either side. */
    def getRddOthers () : RDD[Array[String]] = rdd_others

    /** Incoming connection counts grouped by originating host, descending. */
    def getRddIncomingGroupByOrigH () : RDD[(String, Int)] = rdd_incoming_group_by_orig_h

    /** Outgoing connection counts grouped by responding host, descending. */
    def getRddOutgoingGroupByRespH () : RDD[(String, Int)] = rdd_outgoing_group_by_resp_h

    /** Incoming connection counts grouped by responder port, descending. */
    def getRddIncomingGroupByRespP () : RDD[(String, Int)] = rdd_incoming_group_by_resp_p

    /** Outgoing connection counts grouped by responder port, descending. */
    def getRddOutgoingGroupByRespP () : RDD[(String, Int)] = rdd_outgoing_group_by_resp_p
}
| classcat/cc-nsm2-ui | src/main/scala/com/classcat/ccnsm2/DataCurrConn.scala | Scala | gpl-3.0 | 4,145 |
package iot.pood.management.base
import com.typesafe.config.ConfigFactory
import iot.pood.management.security.SecurityConfig
/**
* Created by rafik on 12.10.2017.
*/
trait TestSecurityConfig {

  // Inline HOCON mirroring the application's `security` section; the keys
  // must match what SecurityConfig.securityConfig expects to parse.
  private def config =
    ConfigFactory.parseString(
      """
        |security {
        | expiration = 2 minutes
        | secret_key = "thisjusasodifsodifj"
        | header = "HS256"
        |}
      """.stripMargin)

  /** Parsed security settings built from the inline test configuration. */
  def securityConfig = SecurityConfig.securityConfig(config)
}
| rafajpet/iot-pood | iot-pood-management/src/test/scala/iot/pood/management/base/TestSecurityConfig.scala | Scala | mit | 505 |
package akka.io
import com.typesafe.config.Config
import java.util.concurrent._
import akka.dispatch.{MonitorableThreadFactory, ThreadPoolConfig}
private [io] class ThreadPoolConfigurator(config: Config) {

  /** Executor selected by the `type` key: fork-join or classic thread pool. */
  val pool: ExecutorService =
    config.getString("type") match {
      case "fork-join-executor"   => forkJoinPool(config.getConfig("fork-join-executor"))
      case "thread-pool-executor" => threadPool(config.getConfig("thread-pool-executor"))
    }

  // Builds a ForkJoinPool sized via min/factor/max parallelism settings,
  // with async mode enabled.
  private def forkJoinPool(cfg: Config): ForkJoinPool =
    new ForkJoinPool(
      ThreadPoolConfig.scaledPoolSize(
        cfg.getInt("parallelism-min"),
        cfg.getDouble("parallelism-factor"),
        cfg.getInt("parallelism-max")),
      ForkJoinPool.defaultForkJoinWorkerThreadFactory,
      MonitorableThreadFactory.doNothing, true)

  // Creates the backing work queue; an empty type string means "linked".
  private def taskQueue(tpe: String, capacity: Int): BlockingQueue[Runnable] =
    tpe match {
      case "array"       => new ArrayBlockingQueue[Runnable](capacity, false)
      case "" | "linked" => new LinkedBlockingQueue[Runnable](capacity)
      case other         => throw new IllegalArgumentException("[%s] is not a valid task-queue-type [array|linked]!" format other)
    }

  // Builds a classic ThreadPoolExecutor with core/max sizes scaled from the
  // configured min/factor/max triples and the configured keep-alive time.
  private def threadPool(cfg: Config): ThreadPoolExecutor = {
    val core = ThreadPoolConfig.scaledPoolSize(cfg.getInt("core-pool-size-min"), cfg.getDouble("core-pool-size-factor"), cfg.getInt("core-pool-size-max"))
    val max = ThreadPoolConfig.scaledPoolSize(cfg.getInt("max-pool-size-min"), cfg.getDouble("max-pool-size-factor"), cfg.getInt("max-pool-size-max"))
    new ThreadPoolExecutor(
      core,
      max,
      cfg.getDuration("keep-alive-time", TimeUnit.MILLISECONDS),
      TimeUnit.MILLISECONDS,
      taskQueue(cfg.getString("task-queue-type"), cfg.getInt("task-queue-size"))
    )
  }
}
| drexin/akka-io-file | src/main/scala/akka/io/ThreadPoolConfigurator.scala | Scala | apache-2.0 | 1,855 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.testsuite.javalib.io
import java.io._
import scala.language.implicitConversions
import org.junit.Test
import org.junit.Assert._
class ByteArrayOutputStreamTest extends CommonStreamsTests {

  @Test def should_support_simple_write_int(): Unit = {
    val stream = new ByteArrayOutputStream()
    (0 to 9).foreach(i => stream.write(i))
    assertArrayEquals(Array[Byte](0, 1, 2, 3, 4, 5, 6, 7, 8, 9), stream.toByteArray)
  }

  @Test def should_support_simple_write_byte_array(): Unit = {
    val stream = new ByteArrayOutputStream()
    val bytes = Array[Byte](0, 1, 2, 3, 4, 5)
    // First a slice (offset 1, length 4), then the full array.
    stream.write(bytes, 1, 4)
    stream.write(bytes)
    assertArrayEquals(Array[Byte](1, 2, 3, 4, 0, 1, 2, 3, 4, 5), stream.toByteArray)
  }

  @Test def should_support_write_byte_array_with_buffer_resize(): Unit = {
    // Initial capacity of 16 forces the internal buffer to grow at 20 bytes.
    val stream = new ByteArrayOutputStream(16)
    val bytes = Array[Byte](0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
    stream.write(bytes)
    stream.write(bytes)
    assertArrayEquals(bytes ++ bytes, stream.toByteArray)
  }

  @Test def should_support_toString_with_UTF8(): Unit = {
    // UTF-8 encoding of the expected mixed ASCII/Japanese string below.
    val encoded = Array[Byte](72, 101, 108, 108, 111, 32, 87, 111, 114, 108, 100,
        46, -29, -127, -109, -29, -126, -109, -29, -127, -85, -29, -127, -95,
        -29, -127, -81, -26, -105, -91, -26, -100, -84, -24, -86, -98, -29,
        -126, -110, -24, -86, -83, -29, -126, -127, -29, -127, -66, -29, -127,
        -103, -29, -127, -117, -29, -128, -126)
    val stream = new ByteArrayOutputStream()
    stream.write(encoded)
    assertEquals("Hello World.こんにちは日本語を読めますか。", stream.toString)
  }

  @Test def should_support_reset(): Unit = {
    val stream = new ByteArrayOutputStream()
    (0 to 9).foreach(i => stream.write(i))
    // reset discards everything written so far...
    stream.reset()
    (0 to 9).foreach(i => stream.write(i))
    // ...so only the second round of writes is visible.
    assertArrayEquals(Array[Byte](0, 1, 2, 3, 4, 5, 6, 7, 8, 9), stream.toByteArray)
  }
}
| ummels/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/javalib/io/ByteArrayOutputStreamTest.scala | Scala | bsd-3-clause | 2,351 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.connector
import scala.collection.JavaConverters._
import org.apache.spark.SparkException
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.analysis.{CannotReplaceMissingTableException, NamespaceAlreadyExistsException, NoSuchDatabaseException, NoSuchNamespaceException, NoSuchTableException, TableAlreadyExistsException}
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.connector.catalog._
import org.apache.spark.sql.connector.catalog.CatalogManager.SESSION_CATALOG_NAME
import org.apache.spark.sql.internal.{SQLConf, StaticSQLConf}
import org.apache.spark.sql.internal.SQLConf.V2_SESSION_CATALOG_IMPLEMENTATION
import org.apache.spark.sql.sources.SimpleScanSource
import org.apache.spark.sql.types.{BooleanType, LongType, StringType, StructField, StructType}
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.Utils
class DataSourceV2SQLSuite
extends InsertIntoTests(supportsDynamicOverwrite = true, includeSQLOnlyTests = true)
with AlterTableTests {
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
private val v2Source = classOf[FakeV2Provider].getName
override protected val v2Format = v2Source
override protected val catalogAndNamespace = "testcat.ns1.ns2."
  /** Looks up a registered catalog plugin by name from the session's catalog manager. */
  private def catalog(name: String): CatalogPlugin = {
    spark.sessionState.catalogManager.catalog(name)
  }
  /**
   * Performs an insert through SQL: registers `insert` as a temporary view
   * and issues `INSERT INTO`/`INSERT OVERWRITE` against `tableName`.
   */
  protected def doInsert(tableName: String, insert: DataFrame, mode: SaveMode): Unit = {
    val tmpView = "tmp_view"
    withTempView(tmpView) {
      insert.createOrReplaceTempView(tmpView)
      // Only SaveMode.Overwrite maps to INSERT OVERWRITE; all other modes append.
      val overwrite = if (mode == SaveMode.Overwrite) "OVERWRITE" else "INTO"
      sql(s"INSERT $overwrite TABLE $tableName SELECT * FROM $tmpView")
    }
  }
  /** Asserts that the table's current contents equal `expected`. */
  override def verifyTable(tableName: String, expected: DataFrame): Unit = {
    checkAnswer(spark.table(tableName), expected)
  }
  /**
   * Loads table metadata through the v2 catalog API. The multi-part name is
   * split as: head = catalog name, middle parts = namespace, last = table name.
   */
  override def getTableMetadata(tableName: String): Table = {
    val nameParts = spark.sessionState.sqlParser.parseMultipartIdentifier(tableName)
    val v2Catalog = catalog(nameParts.head).asTableCatalog
    val namespace = nameParts.drop(1).init.toArray
    v2Catalog.loadTable(Identifier.of(namespace, nameParts.last))
  }
  before {
    // Register in-memory test catalogs: two plain ones, one with staged
    // (atomic) table operations, and an in-memory session-catalog override.
    spark.conf.set("spark.sql.catalog.testcat", classOf[InMemoryTableCatalog].getName)
    spark.conf.set(
      "spark.sql.catalog.testcat_atomic", classOf[StagingInMemoryTableCatalog].getName)
    spark.conf.set("spark.sql.catalog.testcat2", classOf[InMemoryTableCatalog].getName)
    spark.conf.set(
      V2_SESSION_CATALOG_IMPLEMENTATION.key, classOf[InMemoryTableSessionCatalog].getName)

    // Two small temp views used as CTAS/INSERT sources throughout the suite.
    val df = spark.createDataFrame(Seq((1L, "a"), (2L, "b"), (3L, "c"))).toDF("id", "data")
    df.createOrReplaceTempView("source")
    val df2 = spark.createDataFrame(Seq((4L, "d"), (5L, "e"), (6L, "f"))).toDF("id", "data")
    df2.createOrReplaceTempView("source2")
  }
  after {
    // Reset catalog state and clear all SQL configs so tests stay independent.
    spark.sessionState.catalog.reset()
    spark.sessionState.catalogManager.reset()
    spark.sessionState.conf.clear()
  }
test("CreateTable: use v2 plan because catalog is set") {
spark.sql("CREATE TABLE testcat.table_name (id bigint NOT NULL, data string) USING foo")
val testCatalog = catalog("testcat").asTableCatalog
val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table.name == "testcat.table_name")
assert(table.partitioning.isEmpty)
assert(table.properties == Map("provider" -> "foo").asJava)
assert(table.schema == new StructType()
.add("id", LongType, nullable = false)
.add("data", StringType))
val rdd = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
checkAnswer(spark.internalCreateDataFrame(rdd, table.schema), Seq.empty)
}
test("DescribeTable using v2 catalog") {
spark.sql("CREATE TABLE testcat.table_name (id bigint, data string)" +
" USING foo" +
" PARTITIONED BY (id)")
val descriptionDf = spark.sql("DESCRIBE TABLE testcat.table_name")
assert(descriptionDf.schema.map(field => (field.name, field.dataType)) ===
Seq(
("col_name", StringType),
("data_type", StringType),
("comment", StringType)))
val description = descriptionDf.collect()
assert(description === Seq(
Row("id", "bigint", ""),
Row("data", "string", ""),
Row("", "", ""),
Row("# Partitioning", "", ""),
Row("Part 0", "id", "")))
val e = intercept[AnalysisException] {
sql("DESCRIBE TABLE testcat.table_name PARTITION (id = 1)")
}
assert(e.message.contains("DESCRIBE does not support partition for v2 tables"))
}
test("DescribeTable with v2 catalog when table does not exist.") {
intercept[AnalysisException] {
spark.sql("DESCRIBE TABLE testcat.table_name")
}
}
test("DescribeTable extended using v2 catalog") {
spark.sql("CREATE TABLE testcat.table_name (id bigint, data string)" +
" USING foo" +
" PARTITIONED BY (id)" +
" TBLPROPERTIES ('bar'='baz')" +
" COMMENT 'this is a test table'" +
" LOCATION '/tmp/testcat/table_name'")
val descriptionDf = spark.sql("DESCRIBE TABLE EXTENDED testcat.table_name")
assert(descriptionDf.schema.map(field => (field.name, field.dataType))
=== Seq(
("col_name", StringType),
("data_type", StringType),
("comment", StringType)))
assert(descriptionDf.collect()
.map(_.toSeq)
.map(_.toArray.map(_.toString.trim)) === Array(
Array("id", "bigint", ""),
Array("data", "string", ""),
Array("", "", ""),
Array("# Partitioning", "", ""),
Array("Part 0", "id", ""),
Array("", "", ""),
Array("# Detailed Table Information", "", ""),
Array("Name", "testcat.table_name", ""),
Array("Comment", "this is a test table", ""),
Array("Location", "/tmp/testcat/table_name", ""),
Array("Provider", "foo", ""),
Array("Table Properties", "[bar=baz]", "")))
}
test("CreateTable: use v2 plan and session catalog when provider is v2") {
spark.sql(s"CREATE TABLE table_name (id bigint, data string) USING $v2Source")
val testCatalog = catalog(SESSION_CATALOG_NAME).asTableCatalog
val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table.name == "default.table_name")
assert(table.partitioning.isEmpty)
assert(table.properties == Map("provider" -> v2Source).asJava)
assert(table.schema == new StructType().add("id", LongType).add("data", StringType))
val rdd = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
checkAnswer(spark.internalCreateDataFrame(rdd, table.schema), Seq.empty)
}
test("CreateTable: fail if table exists") {
spark.sql("CREATE TABLE testcat.table_name (id bigint, data string) USING foo")
val testCatalog = catalog("testcat").asTableCatalog
val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table.name == "testcat.table_name")
assert(table.partitioning.isEmpty)
assert(table.properties == Map("provider" -> "foo").asJava)
assert(table.schema == new StructType().add("id", LongType).add("data", StringType))
// run a second create query that should fail
val exc = intercept[TableAlreadyExistsException] {
spark.sql("CREATE TABLE testcat.table_name (id bigint, data string, id2 bigint) USING bar")
}
assert(exc.getMessage.contains("table_name"))
// table should not have changed
val table2 = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table2.name == "testcat.table_name")
assert(table2.partitioning.isEmpty)
assert(table2.properties == Map("provider" -> "foo").asJava)
assert(table2.schema == new StructType().add("id", LongType).add("data", StringType))
// check that the table is still empty
val rdd = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
checkAnswer(spark.internalCreateDataFrame(rdd, table.schema), Seq.empty)
}
test("CreateTable: if not exists") {
spark.sql(
"CREATE TABLE IF NOT EXISTS testcat.table_name (id bigint, data string) USING foo")
val testCatalog = catalog("testcat").asTableCatalog
val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table.name == "testcat.table_name")
assert(table.partitioning.isEmpty)
assert(table.properties == Map("provider" -> "foo").asJava)
assert(table.schema == new StructType().add("id", LongType).add("data", StringType))
spark.sql("CREATE TABLE IF NOT EXISTS testcat.table_name (id bigint, data string) USING bar")
// table should not have changed
val table2 = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table2.name == "testcat.table_name")
assert(table2.partitioning.isEmpty)
assert(table2.properties == Map("provider" -> "foo").asJava)
assert(table2.schema == new StructType().add("id", LongType).add("data", StringType))
// check that the table is still empty
val rdd2 = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
checkAnswer(spark.internalCreateDataFrame(rdd2, table.schema), Seq.empty)
}
test("CreateTable: use default catalog for v2 sources when default catalog is set") {
spark.conf.set(SQLConf.DEFAULT_CATALOG.key, "testcat")
spark.sql(s"CREATE TABLE table_name (id bigint, data string) USING foo")
val testCatalog = catalog("testcat").asTableCatalog
val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table.name == "testcat.table_name")
assert(table.partitioning.isEmpty)
assert(table.properties == Map("provider" -> "foo").asJava)
assert(table.schema == new StructType().add("id", LongType).add("data", StringType))
// check that the table is empty
val rdd = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
checkAnswer(spark.internalCreateDataFrame(rdd, table.schema), Seq.empty)
}
test("CreateTableAsSelect: use v2 plan because catalog is set") {
val basicCatalog = catalog("testcat").asTableCatalog
val atomicCatalog = catalog("testcat_atomic").asTableCatalog
val basicIdentifier = "testcat.table_name"
val atomicIdentifier = "testcat_atomic.table_name"
Seq((basicCatalog, basicIdentifier), (atomicCatalog, atomicIdentifier)).foreach {
case (catalog, identifier) =>
spark.sql(s"CREATE TABLE $identifier USING foo AS SELECT id, data FROM source")
val table = catalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table.name == identifier)
assert(table.partitioning.isEmpty)
assert(table.properties == Map("provider" -> "foo").asJava)
assert(table.schema == new StructType()
.add("id", LongType)
.add("data", StringType))
val rdd = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
checkAnswer(spark.internalCreateDataFrame(rdd, table.schema), spark.table("source"))
}
}
test("ReplaceTableAsSelect: basic v2 implementation.") {
val basicCatalog = catalog("testcat").asTableCatalog
val atomicCatalog = catalog("testcat_atomic").asTableCatalog
val basicIdentifier = "testcat.table_name"
val atomicIdentifier = "testcat_atomic.table_name"
Seq((basicCatalog, basicIdentifier), (atomicCatalog, atomicIdentifier)).foreach {
case (catalog, identifier) =>
spark.sql(s"CREATE TABLE $identifier USING foo AS SELECT id, data FROM source")
val originalTable = catalog.loadTable(Identifier.of(Array(), "table_name"))
spark.sql(s"REPLACE TABLE $identifier USING foo AS SELECT id FROM source")
val replacedTable = catalog.loadTable(Identifier.of(Array(), "table_name"))
assert(replacedTable != originalTable, "Table should have been replaced.")
assert(replacedTable.name == identifier)
assert(replacedTable.partitioning.isEmpty)
assert(replacedTable.properties == Map("provider" -> "foo").asJava)
assert(replacedTable.schema == new StructType().add("id", LongType))
val rdd = spark.sparkContext.parallelize(replacedTable.asInstanceOf[InMemoryTable].rows)
checkAnswer(
spark.internalCreateDataFrame(rdd, replacedTable.schema),
spark.table("source").select("id"))
}
}
test("ReplaceTableAsSelect: Non-atomic catalog drops the table if the write fails.") {
spark.sql("CREATE TABLE testcat.table_name USING foo AS SELECT id, data FROM source")
val testCatalog = catalog("testcat").asTableCatalog
val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table.asInstanceOf[InMemoryTable].rows.nonEmpty)
intercept[Exception] {
spark.sql("REPLACE TABLE testcat.table_name" +
s" USING foo OPTIONS (`${InMemoryTable.SIMULATE_FAILED_WRITE_OPTION}`=true)" +
s" AS SELECT id FROM source")
}
assert(!testCatalog.tableExists(Identifier.of(Array(), "table_name")),
"Table should have been dropped as a result of the replace.")
}
test("ReplaceTableAsSelect: Non-atomic catalog drops the table permanently if the" +
" subsequent table creation fails.") {
spark.sql("CREATE TABLE testcat.table_name USING foo AS SELECT id, data FROM source")
val testCatalog = catalog("testcat").asTableCatalog
val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table.asInstanceOf[InMemoryTable].rows.nonEmpty)
intercept[Exception] {
spark.sql("REPLACE TABLE testcat.table_name" +
s" USING foo" +
s" TBLPROPERTIES (`${InMemoryTableCatalog.SIMULATE_FAILED_CREATE_PROPERTY}`=true)" +
s" AS SELECT id FROM source")
}
assert(!testCatalog.tableExists(Identifier.of(Array(), "table_name")),
"Table should have been dropped and failed to be created.")
}
test("ReplaceTableAsSelect: Atomic catalog does not drop the table when replace fails.") {
spark.sql("CREATE TABLE testcat_atomic.table_name USING foo AS SELECT id, data FROM source")
val testCatalog = catalog("testcat_atomic").asTableCatalog
val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
intercept[Exception] {
spark.sql("REPLACE TABLE testcat_atomic.table_name" +
s" USING foo OPTIONS (`${InMemoryTable.SIMULATE_FAILED_WRITE_OPTION}=true)" +
s" AS SELECT id FROM source")
}
var maybeReplacedTable = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(maybeReplacedTable === table, "Table should not have changed.")
intercept[Exception] {
spark.sql("REPLACE TABLE testcat_atomic.table_name" +
s" USING foo" +
s" TBLPROPERTIES (`${InMemoryTableCatalog.SIMULATE_FAILED_CREATE_PROPERTY}`=true)" +
s" AS SELECT id FROM source")
}
maybeReplacedTable = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(maybeReplacedTable === table, "Table should not have changed.")
}
test("ReplaceTable: Erases the table contents and changes the metadata.") {
spark.sql(s"CREATE TABLE testcat.table_name USING $v2Source AS SELECT id, data FROM source")
val testCatalog = catalog("testcat").asTableCatalog
val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table.asInstanceOf[InMemoryTable].rows.nonEmpty)
spark.sql("REPLACE TABLE testcat.table_name (id bigint NOT NULL) USING foo")
val replaced = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(replaced.asInstanceOf[InMemoryTable].rows.isEmpty,
"Replaced table should have no rows after committing.")
assert(replaced.schema().fields.length === 1,
"Replaced table should have new schema.")
assert(replaced.schema().fields(0) === StructField("id", LongType, nullable = false),
"Replaced table should have new schema.")
}
test("ReplaceTableAsSelect: CREATE OR REPLACE new table has same behavior as CTAS.") {
Seq("testcat", "testcat_atomic").foreach { catalogName =>
spark.sql(
s"""
|CREATE TABLE $catalogName.created USING $v2Source
|AS SELECT id, data FROM source
""".stripMargin)
spark.sql(
s"""
|CREATE OR REPLACE TABLE $catalogName.replaced USING $v2Source
|AS SELECT id, data FROM source
""".stripMargin)
val testCatalog = catalog(catalogName).asTableCatalog
val createdTable = testCatalog.loadTable(Identifier.of(Array(), "created"))
val replacedTable = testCatalog.loadTable(Identifier.of(Array(), "replaced"))
assert(createdTable.asInstanceOf[InMemoryTable].rows ===
replacedTable.asInstanceOf[InMemoryTable].rows)
assert(createdTable.schema === replacedTable.schema)
}
}
test("ReplaceTableAsSelect: REPLACE TABLE throws exception if table does not exist.") {
Seq("testcat", "testcat_atomic").foreach { catalog =>
spark.sql(s"CREATE TABLE $catalog.created USING $v2Source AS SELECT id, data FROM source")
intercept[CannotReplaceMissingTableException] {
spark.sql(s"REPLACE TABLE $catalog.replaced USING $v2Source AS SELECT id, data FROM source")
}
}
}
test("ReplaceTableAsSelect: REPLACE TABLE throws exception if table is dropped before commit.") {
import InMemoryTableCatalog._
spark.sql(s"CREATE TABLE testcat_atomic.created USING $v2Source AS SELECT id, data FROM source")
intercept[CannotReplaceMissingTableException] {
spark.sql(s"REPLACE TABLE testcat_atomic.replaced" +
s" USING $v2Source" +
s" TBLPROPERTIES (`$SIMULATE_DROP_BEFORE_REPLACE_PROPERTY`=true)" +
s" AS SELECT id, data FROM source")
}
}
test("CreateTableAsSelect: use v2 plan and session catalog when provider is v2") {
spark.sql(s"CREATE TABLE table_name USING $v2Source AS SELECT id, data FROM source")
val testCatalog = catalog(SESSION_CATALOG_NAME).asTableCatalog
val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table.name == "default.table_name")
assert(table.partitioning.isEmpty)
assert(table.properties == Map("provider" -> v2Source).asJava)
assert(table.schema == new StructType()
.add("id", LongType)
.add("data", StringType))
val rdd = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
checkAnswer(spark.internalCreateDataFrame(rdd, table.schema), spark.table("source"))
}
test("CreateTableAsSelect: fail if table exists") {
spark.sql("CREATE TABLE testcat.table_name USING foo AS SELECT id, data FROM source")
val testCatalog = catalog("testcat").asTableCatalog
val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table.name == "testcat.table_name")
assert(table.partitioning.isEmpty)
assert(table.properties == Map("provider" -> "foo").asJava)
assert(table.schema == new StructType()
.add("id", LongType)
.add("data", StringType))
val rdd = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
checkAnswer(spark.internalCreateDataFrame(rdd, table.schema), spark.table("source"))
// run a second CTAS query that should fail
val exc = intercept[TableAlreadyExistsException] {
spark.sql(
"CREATE TABLE testcat.table_name USING bar AS SELECT id, data, id as id2 FROM source2")
}
assert(exc.getMessage.contains("table_name"))
// table should not have changed
val table2 = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table2.name == "testcat.table_name")
assert(table2.partitioning.isEmpty)
assert(table2.properties == Map("provider" -> "foo").asJava)
assert(table2.schema == new StructType()
.add("id", LongType)
.add("data", StringType))
val rdd2 = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
checkAnswer(spark.internalCreateDataFrame(rdd2, table.schema), spark.table("source"))
}
test("CreateTableAsSelect: if not exists") {
spark.sql(
"CREATE TABLE IF NOT EXISTS testcat.table_name USING foo AS SELECT id, data FROM source")
val testCatalog = catalog("testcat").asTableCatalog
val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
assert(table.name == "testcat.table_name")
assert(table.partitioning.isEmpty)
assert(table.properties == Map("provider" -> "foo").asJava)
assert(table.schema == new StructType()
.add("id", LongType)
.add("data", StringType))
val rdd = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
checkAnswer(spark.internalCreateDataFrame(rdd, table.schema), spark.table("source"))
spark.sql(
"CREATE TABLE IF NOT EXISTS testcat.table_name USING foo AS SELECT id, data FROM source2")
// check that the table contains data from just the first CTAS
val rdd2 = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
checkAnswer(spark.internalCreateDataFrame(rdd2, table.schema), spark.table("source"))
}
  test("CreateTableAsSelect: use default catalog for v2 sources when default catalog is set") {
    spark.conf.set(SQLConf.DEFAULT_CATALOG.key, "testcat")
    val df = spark.createDataFrame(Seq((1L, "a"), (2L, "b"), (3L, "c"))).toDF("id", "data")
    df.createOrReplaceTempView("source")
    // setting the default catalog breaks the reference to source because the default catalog is
    // used and AsTableIdentifier no longer matches
    spark.sql(s"CREATE TABLE table_name USING foo AS SELECT id, data FROM source")
    val testCatalog = catalog("testcat").asTableCatalog
    val table = testCatalog.loadTable(Identifier.of(Array(), "table_name"))
    // The unqualified identifier must have been resolved against the default catalog 'testcat'.
    assert(table.name == "testcat.table_name")
    assert(table.partitioning.isEmpty)
    assert(table.properties == Map("provider" -> "foo").asJava)
    assert(table.schema == new StructType()
      .add("id", LongType)
      .add("data", StringType))
    // Read the rows straight out of the in-memory table to verify the CTAS wrote them.
    val rdd = sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
    checkAnswer(spark.internalCreateDataFrame(rdd, table.schema), spark.table("source"))
  }
  test("CreateTableAsSelect: v2 session catalog can load v1 source table") {
    // unset this config to use the default v2 session catalog.
    spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
    val df = spark.createDataFrame(Seq((1L, "a"), (2L, "b"), (3L, "c"))).toDF("id", "data")
    df.createOrReplaceTempView("source")
    sql(s"CREATE TABLE table_name USING parquet AS SELECT id, data FROM source")
    checkAnswer(sql(s"TABLE default.table_name"), spark.table("source"))
    // The fact that the following line doesn't throw an exception means, the session catalog
    // can load the table.
    val t = catalog(SESSION_CATALOG_NAME).asTableCatalog
      .loadTable(Identifier.of(Array.empty, "table_name"))
    assert(t.isInstanceOf[V1Table], "V1 table wasn't returned as an unresolved table")
  }
  test("CreateTableAsSelect: nullable schema") {
    val basicCatalog = catalog("testcat").asTableCatalog
    val atomicCatalog = catalog("testcat_atomic").asTableCatalog
    val basicIdentifier = "testcat.table_name"
    val atomicIdentifier = "testcat_atomic.table_name"
    // Run the same scenario against both the basic and the atomic (staging) catalog.
    Seq((basicCatalog, basicIdentifier), (atomicCatalog, atomicIdentifier)).foreach {
      case (catalog, identifier) =>
        spark.sql(s"CREATE TABLE $identifier USING foo AS SELECT 1 i")
        val table = catalog.loadTable(Identifier.of(Array(), "table_name"))
        assert(table.name == identifier)
        assert(table.partitioning.isEmpty)
        assert(table.properties == Map("provider" -> "foo").asJava)
        assert(table.schema == new StructType().add("i", "int"))
        val rdd = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
        checkAnswer(spark.internalCreateDataFrame(rdd, table.schema), Row(1))
        // The CTAS schema must be nullable even though the literal 1 is not, so a null insert
        // has to succeed.
        sql(s"INSERT INTO $identifier SELECT CAST(null AS INT)")
        val rdd2 = spark.sparkContext.parallelize(table.asInstanceOf[InMemoryTable].rows)
        checkAnswer(spark.internalCreateDataFrame(rdd2, table.schema), Seq(Row(1), Row(null)))
    }
  }
test("DropTable: basic") {
val tableName = "testcat.ns1.ns2.tbl"
val ident = Identifier.of(Array("ns1", "ns2"), "tbl")
sql(s"CREATE TABLE $tableName USING foo AS SELECT id, data FROM source")
assert(catalog("testcat").asTableCatalog.tableExists(ident) === true)
sql(s"DROP TABLE $tableName")
assert(catalog("testcat").asTableCatalog.tableExists(ident) === false)
}
  test("DropTable: table qualified with the session catalog name") {
    val ident = Identifier.of(Array(), "tbl")
    sql("CREATE TABLE tbl USING json AS SELECT 1 AS i")
    assert(catalog("spark_catalog").asTableCatalog.tableExists(ident) === true)
    // An unqualified CREATE followed by a DROP qualified with "spark_catalog" must resolve to
    // the same (session) catalog.
    sql("DROP TABLE spark_catalog.tbl")
    assert(catalog("spark_catalog").asTableCatalog.tableExists(ident) === false)
  }
test("DropTable: if exists") {
intercept[NoSuchTableException] {
sql(s"DROP TABLE testcat.db.notbl")
}
sql(s"DROP TABLE IF EXISTS testcat.db.notbl")
}
  test("Relation: basic") {
    val t1 = "testcat.ns1.ns2.tbl"
    withTable(t1) {
      sql(s"CREATE TABLE $t1 USING foo AS SELECT id, data FROM source")
      // Both the TABLE statement and a SELECT * must resolve the multi-part v2 identifier.
      checkAnswer(sql(s"TABLE $t1"), spark.table("source"))
      checkAnswer(sql(s"SELECT * FROM $t1"), spark.table("source"))
    }
  }
  test("Relation: SparkSession.table()") {
    val t1 = "testcat.ns1.ns2.tbl"
    withTable(t1) {
      sql(s"CREATE TABLE $t1 USING foo AS SELECT id, data FROM source")
      // The programmatic table() API must resolve v2 catalog identifiers like SQL does.
      checkAnswer(spark.table(s"$t1"), spark.table("source"))
    }
  }
  test("Relation: CTE") {
    val t1 = "testcat.ns1.ns2.tbl"
    withTable(t1) {
      sql(s"CREATE TABLE $t1 USING foo AS SELECT id, data FROM source")
      // A v2 table referenced inside a WITH clause must resolve the same way.
      checkAnswer(
        sql(s"""
          |WITH cte AS (SELECT * FROM $t1)
          |SELECT * FROM cte
        """.stripMargin),
        spark.table("source"))
    }
  }
  test("Relation: view text") {
    val t1 = "testcat.ns1.ns2.tbl"
    withTable(t1) {
      withView("view1") { v1: String =>
        sql(s"CREATE TABLE $t1 USING foo AS SELECT id, data FROM source")
        // The view text stores the multi-part name; reading the view must re-resolve it.
        sql(s"CREATE VIEW $v1 AS SELECT * from $t1")
        checkAnswer(sql(s"TABLE $v1"), spark.table("source"))
      }
    }
  }
  test("Relation: join tables in 2 catalogs") {
    val t1 = "testcat.ns1.ns2.tbl"
    val t2 = "testcat2.v2tbl"
    withTable(t1, t2) {
      sql(s"CREATE TABLE $t1 USING foo AS SELECT id, data FROM source")
      sql(s"CREATE TABLE $t2 USING foo AS SELECT id, data FROM source2")
      val df1 = spark.table("source")
      val df2 = spark.table("source2")
      val df_joined = df1.join(df2).where(df1("id") + 1 === df2("id"))
      // A single query may join tables living in two different v2 catalogs.
      checkAnswer(
        sql(s"""
          |SELECT *
          |FROM $t1 t1, $t2 t2
          |WHERE t1.id + 1 = t2.id
        """.stripMargin),
        df_joined)
    }
  }
  test("InsertInto: append - across catalog") {
    val t1 = "testcat.ns1.ns2.tbl"
    val t2 = "testcat2.db.tbl"
    withTable(t1, t2) {
      sql(s"CREATE TABLE $t1 USING foo AS SELECT * FROM source")
      sql(s"CREATE TABLE $t2 (id bigint, data string) USING foo")
      // INSERT may read from one catalog and write into another.
      sql(s"INSERT INTO $t2 SELECT * FROM $t1")
      checkAnswer(spark.table(t2), spark.table("source"))
    }
  }
  test("ShowTables: using v2 catalog") {
    spark.sql("CREATE TABLE testcat.db.table_name (id bigint, data string) USING foo")
    spark.sql("CREATE TABLE testcat.n1.n2.db.table_name (id bigint, data string) USING foo")
    runShowTablesSql("SHOW TABLES FROM testcat.db", Seq(Row("db", "table_name")))
    // Multi-level namespaces are reported as a single dotted namespace string.
    runShowTablesSql(
      "SHOW TABLES FROM testcat.n1.n2.db",
      Seq(Row("n1.n2.db", "table_name")))
  }
  test("ShowTables: using v2 catalog with a pattern") {
    spark.sql("CREATE TABLE testcat.db.table (id bigint, data string) USING foo")
    spark.sql("CREATE TABLE testcat.db.table_name_1 (id bigint, data string) USING foo")
    spark.sql("CREATE TABLE testcat.db.table_name_2 (id bigint, data string) USING foo")
    spark.sql("CREATE TABLE testcat.db2.table_name_2 (id bigint, data string) USING foo")
    // No pattern: all tables of the namespace (tables in db2 are excluded).
    runShowTablesSql(
      "SHOW TABLES FROM testcat.db",
      Seq(
        Row("db", "table"),
        Row("db", "table_name_1"),
        Row("db", "table_name_2")))
    // LIKE patterns use '*' as the wildcard.
    runShowTablesSql(
      "SHOW TABLES FROM testcat.db LIKE '*name*'",
      Seq(Row("db", "table_name_1"), Row("db", "table_name_2")))
    runShowTablesSql(
      "SHOW TABLES FROM testcat.db LIKE '*2'",
      Seq(Row("db", "table_name_2")))
  }
  test("ShowTables: using v2 catalog, namespace doesn't exist") {
    // For v2 catalogs an unknown namespace yields an empty result rather than an error.
    runShowTablesSql("SHOW TABLES FROM testcat.unknown", Seq())
  }
  test("ShowTables: using v1 catalog") {
    runShowTablesSql(
      "SHOW TABLES FROM default",
      Seq(Row("", "source", true), Row("", "source2", true)),
      expectV2Catalog = false)
  }
  test("ShowTables: using v1 catalog, db doesn't exist ") {
    // 'db' below resolves to a database name for v1 catalog because there is no catalog named
    // 'db' and there is no default catalog set.
    val exception = intercept[NoSuchDatabaseException] {
      runShowTablesSql("SHOW TABLES FROM db", Seq(), expectV2Catalog = false)
    }
    assert(exception.getMessage.contains("Database 'db' not found"))
  }
  test("ShowTables: using v1 catalog, db name with multipartIdentifier ('a.b') is not allowed.") {
    val exception = intercept[AnalysisException] {
      runShowTablesSql("SHOW TABLES FROM a.b", Seq(), expectV2Catalog = false)
    }
    assert(exception.getMessage.contains("The database name is not valid: a.b"))
  }
  test("ShowTables: using v2 catalog with empty namespace") {
    spark.sql("CREATE TABLE testcat.table (id bigint, data string) USING foo")
    // A table created directly under the catalog reports an empty namespace.
    runShowTablesSql("SHOW TABLES FROM testcat", Seq(Row("", "table")))
  }
  test("ShowTables: namespace is not specified and default v2 catalog is set") {
    spark.conf.set(SQLConf.DEFAULT_CATALOG.key, "testcat")
    spark.sql("CREATE TABLE testcat.table (id bigint, data string) USING foo")
    // v2 catalog is used where default namespace is empty for TestInMemoryTableCatalog.
    runShowTablesSql("SHOW TABLES", Seq(Row("", "table")))
  }
  test("ShowTables: namespace not specified and default v2 catalog not set - fallback to v1") {
    runShowTablesSql(
      "SHOW TABLES",
      Seq(Row("", "source", true), Row("", "source2", true)),
      expectV2Catalog = false)
    runShowTablesSql(
      "SHOW TABLES LIKE '*2'",
      Seq(Row("", "source2", true)),
      expectV2Catalog = false)
  }
  test("ShowTables: change current catalog and namespace with USE statements") {
    sql("CREATE TABLE testcat.ns1.ns2.table (id bigint) USING foo")
    // Initially, the v2 session catalog (current catalog) is used.
    runShowTablesSql(
      "SHOW TABLES", Seq(Row("", "source", true), Row("", "source2", true)),
      expectV2Catalog = false)
    // Update the current catalog, and no table is matched since the current namespace is Array().
    sql("USE testcat")
    runShowTablesSql("SHOW TABLES", Seq())
    // Update the current namespace to match ns1.ns2.table.
    sql("USE testcat.ns1.ns2")
    runShowTablesSql("SHOW TABLES", Seq(Row("ns1.ns2", "table")))
  }
private def runShowTablesSql(
sqlText: String,
expected: Seq[Row],
expectV2Catalog: Boolean = true): Unit = {
val schema = if (expectV2Catalog) {
new StructType()
.add("namespace", StringType, nullable = false)
.add("tableName", StringType, nullable = false)
} else {
new StructType()
.add("database", StringType, nullable = false)
.add("tableName", StringType, nullable = false)
.add("isTemporary", BooleanType, nullable = false)
}
val df = spark.sql(sqlText)
assert(df.schema === schema)
assert(expected === df.collect())
}
  test("SHOW TABLE EXTENDED not valid v1 database") {
    // SHOW TABLE EXTENDED is a v1-only command; a multi-part v2 namespace must be rejected.
    def testV1CommandNamespace(sqlCommand: String, namespace: String): Unit = {
      val e = intercept[AnalysisException] {
        sql(sqlCommand)
      }
      assert(e.message.contains(s"The database name is not valid: ${namespace}"))
    }
    val namespace = "testcat.ns1.ns2"
    val table = "tbl"
    withTable(s"$namespace.$table") {
      sql(s"CREATE TABLE $namespace.$table (id bigint, data string) " +
        s"USING foo PARTITIONED BY (id)")
      // All four FROM/IN variants, with and without a PARTITION clause, must fail the same way.
      testV1CommandNamespace(s"SHOW TABLE EXTENDED FROM $namespace LIKE 'tb*'",
        namespace)
      testV1CommandNamespace(s"SHOW TABLE EXTENDED IN $namespace LIKE 'tb*'",
        namespace)
      testV1CommandNamespace("SHOW TABLE EXTENDED " +
        s"FROM $namespace LIKE 'tb*' PARTITION(id=1)",
        namespace)
      testV1CommandNamespace("SHOW TABLE EXTENDED " +
        s"IN $namespace LIKE 'tb*' PARTITION(id=1)",
        namespace)
    }
  }
  test("SHOW TABLE EXTENDED valid v1") {
    val expected = Seq(Row("", "source", true), Row("", "source2", true))
    val schema = new StructType()
      .add("database", StringType, nullable = false)
      .add("tableName", StringType, nullable = false)
      .add("isTemporary", BooleanType, nullable = false)
      .add("information", StringType, nullable = false)
    val df = sql("SHOW TABLE EXTENDED FROM default LIKE '*source*'")
    val result = df.collect()
    // The "information" column content is environment dependent, so it is checked only for
    // non-emptiness below and stripped before comparing the remaining columns.
    val resultWithoutInfo = result.map{ case Row(db, table, temp, _) => Row(db, table, temp)}
    assert(df.schema === schema)
    assert(resultWithoutInfo === expected)
    result.foreach{ case Row(_, _, _, info: String) => assert(info.nonEmpty)}
  }
test("CreateNameSpace: basic tests") {
// Session catalog is used.
withNamespace("ns") {
sql("CREATE NAMESPACE ns")
testShowNamespaces("SHOW NAMESPACES", Seq("default", "ns"))
}
// V2 non-session catalog is used.
withNamespace("testcat.ns1.ns2") {
sql("CREATE NAMESPACE testcat.ns1.ns2")
testShowNamespaces("SHOW NAMESPACES IN testcat", Seq("ns1"))
testShowNamespaces("SHOW NAMESPACES IN testcat.ns1", Seq("ns1.ns2"))
}
withNamespace("testcat.test") {
withTempDir { tmpDir =>
val path = tmpDir.getCanonicalPath
sql(s"CREATE NAMESPACE testcat.test LOCATION '$path'")
val metadata =
catalog("testcat").asNamespaceCatalog.loadNamespaceMetadata(Array("test")).asScala
val catalogPath = metadata(SupportsNamespaces.PROP_LOCATION)
assert(catalogPath.equals(catalogPath))
}
}
}
  test("CreateNameSpace: test handling of 'IF NOT EXIST'") {
    withNamespace("testcat.ns1") {
      sql("CREATE NAMESPACE IF NOT EXISTS testcat.ns1")
      // The 'ns1' namespace already exists, so this should fail.
      val exception = intercept[NamespaceAlreadyExistsException] {
        sql("CREATE NAMESPACE testcat.ns1")
      }
      assert(exception.getMessage.contains("Namespace 'ns1' already exists"))
      // The following will be no-op since the namespace already exists.
      sql("CREATE NAMESPACE IF NOT EXISTS testcat.ns1")
    }
  }
  test("CreateNameSpace: reserved properties") {
    import SupportsNamespaces._
    // Non-legacy mode: using any reserved property key in DBPROPERTIES is a parse error.
    withSQLConf((SQLConf.LEGACY_PROPERTY_NON_RESERVED.key, "false")) {
      RESERVED_PROPERTIES.asScala.filterNot(_ == PROP_COMMENT).foreach { key =>
        val exception = intercept[ParseException] {
          sql(s"CREATE NAMESPACE testcat.reservedTest WITH DBPROPERTIES('$key'='dummyVal')")
        }
        assert(exception.getMessage.contains(s"$key is a reserved namespace property"))
      }
    }
    // Legacy mode: reserved keys are accepted but silently ignored (no side effects).
    withSQLConf((SQLConf.LEGACY_PROPERTY_NON_RESERVED.key, "true")) {
      RESERVED_PROPERTIES.asScala.filterNot(_ == PROP_COMMENT).foreach { key =>
        withNamespace("testcat.reservedTest") {
          sql(s"CREATE NAMESPACE testcat.reservedTest WITH DBPROPERTIES('$key'='foo')")
          assert(sql("DESC NAMESPACE EXTENDED testcat.reservedTest")
            .toDF("k", "v")
            .where("k='Properties'")
            .isEmpty, s"$key is a reserved namespace property and ignored")
          val meta =
            catalog("testcat").asNamespaceCatalog.loadNamespaceMetadata(Array("reservedTest"))
          assert(meta.get(key) == null || !meta.get(key).contains("foo"),
            "reserved properties should not have side effects")
        }
      }
    }
  }
  test("create/replace/alter table - reserved properties") {
    import TableCatalog._
    // Non-legacy mode: reserved keys are rejected at parse time for every statement form.
    withSQLConf((SQLConf.LEGACY_PROPERTY_NON_RESERVED.key, "false")) {
      RESERVED_PROPERTIES.asScala.filterNot(_ == PROP_COMMENT).foreach { key =>
        Seq("OPTIONS", "TBLPROPERTIES").foreach { clause =>
          Seq("CREATE", "REPLACE").foreach { action =>
            val e = intercept[ParseException] {
              sql(s"$action TABLE testcat.reservedTest (key int) USING foo $clause ('$key'='bar')")
            }
            assert(e.getMessage.contains(s"$key is a reserved table property"))
          }
        }
        val e1 = intercept[ParseException] {
          sql(s"ALTER TABLE testcat.reservedTest SET TBLPROPERTIES ('$key'='bar')")
        }
        assert(e1.getMessage.contains(s"$key is a reserved table property"))
        val e2 = intercept[ParseException] {
          sql(s"ALTER TABLE testcat.reservedTest UNSET TBLPROPERTIES ('$key')")
        }
        assert(e2.getMessage.contains(s"$key is a reserved table property"))
      }
    }
    // Legacy mode: reserved keys parse but must never change the actual table property value.
    withSQLConf((SQLConf.LEGACY_PROPERTY_NON_RESERVED.key, "true")) {
      RESERVED_PROPERTIES.asScala.filterNot(_ == PROP_COMMENT).foreach { key =>
        Seq("OPTIONS", "TBLPROPERTIES").foreach { clause =>
          withTable("testcat.reservedTest") {
            Seq("CREATE", "REPLACE").foreach { action =>
              sql(s"$action TABLE testcat.reservedTest (key int) USING foo $clause ('$key'='bar')")
              val tableCatalog = catalog("testcat").asTableCatalog
              val identifier = Identifier.of(Array(), "reservedTest")
              val originValue = tableCatalog.loadTable(identifier).properties().get(key)
              assert(originValue != "bar", "reserved properties should not have side effects")
              sql(s"ALTER TABLE testcat.reservedTest SET TBLPROPERTIES ('$key'='newValue')")
              assert(tableCatalog.loadTable(identifier).properties().get(key) == originValue,
                "reserved properties should not have side effects")
              sql(s"ALTER TABLE testcat.reservedTest UNSET TBLPROPERTIES ('$key')")
              assert(tableCatalog.loadTable(identifier).properties().get(key) == originValue,
                "reserved properties should not have side effects")
            }
          }
        }
      }
    }
  }
  test("create/replace - path property") {
    // The behavior below must hold regardless of the legacy reserved-property setting.
    Seq("true", "false").foreach { conf =>
      withSQLConf((SQLConf.LEGACY_PROPERTY_NON_RESERVED.key, conf)) {
        withTable("testcat.reservedTest") {
          Seq("CREATE", "REPLACE").foreach { action =>
            // LOCATION and OPTIONS('path') together are ambiguous and rejected.
            val e1 = intercept[ParseException] {
              sql(s"$action TABLE testcat.reservedTest USING foo LOCATION 'foo' OPTIONS" +
                s" ('path'='bar')")
            }
            assert(e1.getMessage.contains(s"Duplicated table paths found: 'foo' and 'bar'"))
            // Option keys are case-insensitive, so 'path' and 'PaTh' also conflict.
            val e2 = intercept[ParseException] {
              sql(s"$action TABLE testcat.reservedTest USING foo OPTIONS" +
                s" ('path'='foo', 'PaTh'='bar')")
            }
            assert(e2.getMessage.contains(s"Duplicated table paths found: 'foo' and 'bar'"))
            // TBLPROPERTIES keys are case-sensitive plain properties and must not interfere
            // with the LOCATION clause.
            sql(s"$action TABLE testcat.reservedTest USING foo LOCATION 'foo' TBLPROPERTIES" +
              s" ('path'='bar', 'Path'='noop')")
            val tableCatalog = catalog("testcat").asTableCatalog
            val identifier = Identifier.of(Array(), "reservedTest")
            assert(tableCatalog.loadTable(identifier).properties()
              .get(TableCatalog.PROP_LOCATION) == "foo",
              "path as a table property should not have side effects")
            assert(tableCatalog.loadTable(identifier).properties().get("path") == "bar",
              "path as a table property should not have side effects")
            assert(tableCatalog.loadTable(identifier).properties().get("Path") == "noop",
              "path as a table property should not have side effects")
          }
        }
      }
    }
  }
  test("DropNamespace: basic tests") {
    // Session catalog is used.
    sql("CREATE NAMESPACE ns")
    testShowNamespaces("SHOW NAMESPACES", Seq("default", "ns"))
    sql("DROP NAMESPACE ns")
    testShowNamespaces("SHOW NAMESPACES", Seq("default"))
    // V2 non-session catalog is used.
    sql("CREATE NAMESPACE testcat.ns1")
    testShowNamespaces("SHOW NAMESPACES IN testcat", Seq("ns1"))
    sql("DROP NAMESPACE testcat.ns1")
    testShowNamespaces("SHOW NAMESPACES IN testcat", Seq())
  }
  test("DropNamespace: drop non-empty namespace with a non-cascading mode") {
    sql("CREATE TABLE testcat.ns1.table (id bigint) USING foo")
    sql("CREATE TABLE testcat.ns1.ns2.table (id bigint) USING foo")
    testShowNamespaces("SHOW NAMESPACES IN testcat", Seq("ns1"))
    testShowNamespaces("SHOW NAMESPACES IN testcat.ns1", Seq("ns1.ns2"))
    def assertDropFails(): Unit = {
      val e = intercept[SparkException] {
        sql("DROP NAMESPACE testcat.ns1")
      }
      assert(e.getMessage.contains("Cannot drop a non-empty namespace: ns1"))
    }
    // Each content item (direct table, nested table, nested namespace) independently blocks a
    // non-cascading drop and must be removed one by one.
    // testcat.ns1.table is present, thus testcat.ns1 cannot be dropped.
    assertDropFails()
    sql("DROP TABLE testcat.ns1.table")
    // testcat.ns1.ns2.table is present, thus testcat.ns1 cannot be dropped.
    assertDropFails()
    sql("DROP TABLE testcat.ns1.ns2.table")
    // testcat.ns1.ns2 namespace is present, thus testcat.ns1 cannot be dropped.
    assertDropFails()
    sql("DROP NAMESPACE testcat.ns1.ns2")
    // Now that testcat.ns1 is empty, it can be dropped.
    sql("DROP NAMESPACE testcat.ns1")
    testShowNamespaces("SHOW NAMESPACES IN testcat", Seq())
  }
  test("DropNamespace: drop non-empty namespace with a cascade mode") {
    sql("CREATE TABLE testcat.ns1.table (id bigint) USING foo")
    sql("CREATE TABLE testcat.ns1.ns2.table (id bigint) USING foo")
    testShowNamespaces("SHOW NAMESPACES IN testcat", Seq("ns1"))
    testShowNamespaces("SHOW NAMESPACES IN testcat.ns1", Seq("ns1.ns2"))
    // CASCADE removes the namespace together with all of its tables and sub-namespaces.
    sql("DROP NAMESPACE testcat.ns1 CASCADE")
    testShowNamespaces("SHOW NAMESPACES IN testcat", Seq())
  }
  test("DropNamespace: test handling of 'IF EXISTS'") {
    // IF EXISTS makes dropping a missing namespace a no-op; without it the drop must fail.
    sql("DROP NAMESPACE IF EXISTS testcat.unknown")
    val exception = intercept[NoSuchNamespaceException] {
      sql("DROP NAMESPACE testcat.ns1")
    }
    assert(exception.getMessage.contains("Namespace 'ns1' not found"))
  }
  test("DescribeNamespace using v2 catalog") {
    withNamespace("testcat.ns1.ns2") {
      sql("CREATE NAMESPACE IF NOT EXISTS testcat.ns1.ns2 COMMENT " +
        "'test namespace' LOCATION '/tmp/ns_test'")
      val descriptionDf = sql("DESCRIBE NAMESPACE testcat.ns1.ns2")
      assert(descriptionDf.schema.map(field => (field.name, field.dataType)) ===
        Seq(
          ("name", StringType),
          ("value", StringType)
        ))
      val description = descriptionDf.collect()
      assert(description === Seq(
        Row("Namespace Name", "ns2"),
        Row("Description", "test namespace"),
        Row("Location", "/tmp/ns_test"),
        Row("Owner Name", Utils.getCurrentUserName()),
        Row("Owner Type", "USER")
      ))
    }
  }
  test("AlterNamespaceSetProperties using v2 catalog") {
    withNamespace("testcat.ns1.ns2") {
      sql("CREATE NAMESPACE IF NOT EXISTS testcat.ns1.ns2 COMMENT " +
        "'test namespace' LOCATION '/tmp/ns_test' WITH PROPERTIES ('a'='a','b'='b','c'='c')")
      // SET PROPERTIES overwrites 'a' and 'b' while leaving 'c' untouched.
      sql("ALTER NAMESPACE testcat.ns1.ns2 SET PROPERTIES ('a'='b','b'='a')")
      val descriptionDf = sql("DESCRIBE NAMESPACE EXTENDED testcat.ns1.ns2")
      assert(descriptionDf.collect() === Seq(
        Row("Namespace Name", "ns2"),
        Row("Description", "test namespace"),
        Row("Location", "/tmp/ns_test"),
        Row("Owner Name", Utils.getCurrentUserName()),
        Row("Owner Type", "USER"),
        Row("Properties", "((a,b),(b,a),(c,c))")
      ))
    }
  }
  test("AlterNamespaceSetProperties: reserved properties") {
    import SupportsNamespaces._
    // Non-legacy mode: setting a reserved key via ALTER NAMESPACE is a parse error.
    withSQLConf((SQLConf.LEGACY_PROPERTY_NON_RESERVED.key, "false")) {
      RESERVED_PROPERTIES.asScala.filterNot(_ == PROP_COMMENT).foreach { key =>
        withNamespace("testcat.reservedTest") {
          sql("CREATE NAMESPACE testcat.reservedTest")
          val exception = intercept[ParseException] {
            sql(s"ALTER NAMESPACE testcat.reservedTest SET PROPERTIES ('$key'='dummyVal')")
          }
          assert(exception.getMessage.contains(s"$key is a reserved namespace property"))
        }
      }
    }
    // Legacy mode: reserved keys are accepted but silently ignored (no side effects).
    withSQLConf((SQLConf.LEGACY_PROPERTY_NON_RESERVED.key, "true")) {
      RESERVED_PROPERTIES.asScala.filterNot(_ == PROP_COMMENT).foreach { key =>
        withNamespace("testcat.reservedTest") {
          sql(s"CREATE NAMESPACE testcat.reservedTest")
          sql(s"ALTER NAMESPACE testcat.reservedTest SET PROPERTIES ('$key'='foo')")
          assert(sql("DESC NAMESPACE EXTENDED testcat.reservedTest")
            .toDF("k", "v")
            .where("k='Properties'")
            .isEmpty, s"$key is a reserved namespace property and ignored")
          val meta =
            catalog("testcat").asNamespaceCatalog.loadNamespaceMetadata(Array("reservedTest"))
          assert(meta.get(key) == null || !meta.get(key).contains("foo"),
            "reserved properties should not have side effects")
        }
      }
    }
  }
  test("AlterNamespaceSetLocation using v2 catalog") {
    withNamespace("testcat.ns1.ns2") {
      sql("CREATE NAMESPACE IF NOT EXISTS testcat.ns1.ns2 COMMENT " +
        "'test namespace' LOCATION '/tmp/ns_test_1'")
      // SET LOCATION must replace the location while preserving all other metadata.
      sql("ALTER NAMESPACE testcat.ns1.ns2 SET LOCATION '/tmp/ns_test_2'")
      val descriptionDf = sql("DESCRIBE NAMESPACE EXTENDED testcat.ns1.ns2")
      assert(descriptionDf.collect() === Seq(
        Row("Namespace Name", "ns2"),
        Row("Description", "test namespace"),
        Row("Location", "/tmp/ns_test_2"),
        Row("Owner Name", Utils.getCurrentUserName()),
        Row("Owner Type", "USER")
      ))
    }
  }
  test("AlterNamespaceSetOwner using v2 catalog") {
    withNamespace("testcat.ns1.ns2") {
      sql("CREATE NAMESPACE IF NOT EXISTS testcat.ns1.ns2 COMMENT " +
        "'test namespace' LOCATION '/tmp/ns_test_3'")
      // SET OWNER ROLE must update both the owner name and the owner type.
      sql("ALTER NAMESPACE testcat.ns1.ns2 SET OWNER ROLE adminRole")
      val descriptionDf = sql("DESCRIBE NAMESPACE EXTENDED testcat.ns1.ns2")
      assert(descriptionDf.collect() === Seq(
        Row("Namespace Name", "ns2"),
        Row("Description", "test namespace"),
        Row("Location", "/tmp/ns_test_3"),
        Row("Owner Name", "adminRole"),
        Row("Owner Type", "ROLE")
      ))
    }
  }
  test("ShowNamespaces: show root namespaces with default v2 catalog") {
    spark.conf.set(SQLConf.DEFAULT_CATALOG.key, "testcat")
    testShowNamespaces("SHOW NAMESPACES", Seq())
    // Creating tables implicitly creates the enclosing namespaces in the test catalog.
    spark.sql("CREATE TABLE testcat.ns1.table (id bigint) USING foo")
    spark.sql("CREATE TABLE testcat.ns1.ns1_1.table (id bigint) USING foo")
    spark.sql("CREATE TABLE testcat.ns2.table (id bigint) USING foo")
    testShowNamespaces("SHOW NAMESPACES", Seq("ns1", "ns2"))
    testShowNamespaces("SHOW NAMESPACES LIKE '*1*'", Seq("ns1"))
  }
  test("ShowNamespaces: show namespaces with v2 catalog") {
    spark.sql("CREATE TABLE testcat.ns1.table (id bigint) USING foo")
    spark.sql("CREATE TABLE testcat.ns1.ns1_1.table (id bigint) USING foo")
    spark.sql("CREATE TABLE testcat.ns1.ns1_2.table (id bigint) USING foo")
    spark.sql("CREATE TABLE testcat.ns2.table (id bigint) USING foo")
    spark.sql("CREATE TABLE testcat.ns2.ns2_1.table (id bigint) USING foo")
    // Look up only with catalog name, which should list root namespaces.
    testShowNamespaces("SHOW NAMESPACES IN testcat", Seq("ns1", "ns2"))
    // Look up sub-namespaces.
    testShowNamespaces("SHOW NAMESPACES IN testcat.ns1", Seq("ns1.ns1_1", "ns1.ns1_2"))
    testShowNamespaces("SHOW NAMESPACES IN testcat.ns1 LIKE '*2*'", Seq("ns1.ns1_2"))
    testShowNamespaces("SHOW NAMESPACES IN testcat.ns2", Seq("ns2.ns2_1"))
    // Try to look up namespaces that do not exist.
    testShowNamespaces("SHOW NAMESPACES IN testcat.ns3", Seq())
    testShowNamespaces("SHOW NAMESPACES IN testcat.ns1.ns3", Seq())
  }
  test("ShowNamespaces: default v2 catalog is not set") {
    spark.sql("CREATE TABLE testcat.ns.table (id bigint) USING foo")
    // The current catalog is resolved to a v2 session catalog.
    testShowNamespaces("SHOW NAMESPACES", Seq("default"))
  }
  test("ShowNamespaces: default v2 catalog doesn't support namespace") {
    // BasicInMemoryTableCatalog does not implement SupportsNamespaces.
    spark.conf.set(
      "spark.sql.catalog.testcat_no_namspace",
      classOf[BasicInMemoryTableCatalog].getName)
    spark.conf.set(SQLConf.DEFAULT_CATALOG.key, "testcat_no_namspace")
    val exception = intercept[AnalysisException] {
      sql("SHOW NAMESPACES")
    }
    assert(exception.getMessage.contains("does not support namespaces"))
  }
  test("ShowNamespaces: v2 catalog doesn't support namespace") {
    spark.conf.set(
      "spark.sql.catalog.testcat_no_namspace",
      classOf[BasicInMemoryTableCatalog].getName)
    val exception = intercept[AnalysisException] {
      sql("SHOW NAMESPACES in testcat_no_namspace")
    }
    assert(exception.getMessage.contains("does not support namespaces"))
  }
  test("ShowNamespaces: session catalog is used and namespace doesn't exist") {
    val exception = intercept[AnalysisException] {
      sql("SHOW NAMESPACES in dummy")
    }
    assert(exception.getMessage.contains("Namespace 'dummy' not found"))
  }
  test("ShowNamespaces: change catalog and namespace with USE statements") {
    sql("CREATE TABLE testcat.ns1.ns2.table (id bigint) USING foo")
    // Initially, the current catalog is a v2 session catalog.
    testShowNamespaces("SHOW NAMESPACES", Seq("default"))
    // Update the current catalog to 'testcat'.
    sql("USE testcat")
    testShowNamespaces("SHOW NAMESPACES", Seq("ns1"))
    // Update the current namespace to 'ns1'.
    sql("USE ns1")
    // 'SHOW NAMESPACES' is not affected by the current namespace and lists root namespaces.
    testShowNamespaces("SHOW NAMESPACES", Seq("ns1"))
  }
private def testShowNamespaces(
sqlText: String,
expected: Seq[String]): Unit = {
val schema = new StructType().add("namespace", StringType, nullable = false)
val df = spark.sql(sqlText)
assert(df.schema === schema)
assert(df.collect().map(_.getAs[String](0)).sorted === expected.sorted)
}
  test("Use: basic tests with USE statements") {
    val catalogManager = spark.sessionState.catalogManager
    // Validate the initial current catalog and namespace.
    assert(catalogManager.currentCatalog.name() == SESSION_CATALOG_NAME)
    assert(catalogManager.currentNamespace === Array("default"))
    // The following implicitly creates namespaces.
    sql("CREATE TABLE testcat.ns1.ns1_1.table (id bigint) USING foo")
    sql("CREATE TABLE testcat2.ns2.ns2_2.table (id bigint) USING foo")
    sql("CREATE TABLE testcat2.ns3.ns3_3.table (id bigint) USING foo")
    sql("CREATE TABLE testcat2.testcat.table (id bigint) USING foo")
    // Catalog is resolved to 'testcat'.
    sql("USE testcat.ns1.ns1_1")
    assert(catalogManager.currentCatalog.name() == "testcat")
    assert(catalogManager.currentNamespace === Array("ns1", "ns1_1"))
    // Catalog is resolved to 'testcat2'.
    sql("USE testcat2.ns2.ns2_2")
    assert(catalogManager.currentCatalog.name() == "testcat2")
    assert(catalogManager.currentNamespace === Array("ns2", "ns2_2"))
    // Only the namespace is changed.
    sql("USE ns3.ns3_3")
    assert(catalogManager.currentCatalog.name() == "testcat2")
    assert(catalogManager.currentNamespace === Array("ns3", "ns3_3"))
    // Only the namespace is changed (explicit). Note: 'testcat' here is a namespace inside
    // testcat2, not the catalog of the same name.
    sql("USE NAMESPACE testcat")
    assert(catalogManager.currentCatalog.name() == "testcat2")
    assert(catalogManager.currentNamespace === Array("testcat"))
    // Catalog is resolved to `testcat`.
    sql("USE testcat")
    assert(catalogManager.currentCatalog.name() == "testcat")
    assert(catalogManager.currentNamespace === Array())
  }
  test("Use: set v2 catalog as a current catalog") {
    val catalogManager = spark.sessionState.catalogManager
    assert(catalogManager.currentCatalog.name() == SESSION_CATALOG_NAME)
    sql("USE testcat")
    assert(catalogManager.currentCatalog.name() == "testcat")
  }
  test("Use: v2 session catalog is used and namespace does not exist") {
    // The v1-backed session catalog requires the database to exist.
    val exception = intercept[NoSuchDatabaseException] {
      sql("USE ns1")
    }
    assert(exception.getMessage.contains("Database 'ns1' not found"))
  }
  test("Use: v2 catalog is used and namespace does not exist") {
    // Namespaces are not required to exist for v2 catalogs.
    sql("USE testcat.ns1.ns2")
    val catalogManager = spark.sessionState.catalogManager
    assert(catalogManager.currentNamespace === Array("ns1", "ns2"))
  }
  test("ShowCurrentNamespace: basic tests") {
    // Asserts both the two-column schema and the single returned row.
    def testShowCurrentNamespace(expectedCatalogName: String, expectedNamespace: String): Unit = {
      val schema = new StructType()
        .add("catalog", StringType, nullable = false)
        .add("namespace", StringType, nullable = false)
      val df = sql("SHOW CURRENT NAMESPACE")
      val rows = df.collect
      assert(df.schema === schema)
      assert(rows.length == 1)
      assert(rows(0).getAs[String](0) === expectedCatalogName)
      assert(rows(0).getAs[String](1) === expectedNamespace)
    }
    // Initially, the v2 session catalog is set as a current catalog.
    testShowCurrentNamespace("spark_catalog", "default")
    sql("USE testcat")
    testShowCurrentNamespace("testcat", "")
    sql("USE testcat.ns1.ns2")
    testShowCurrentNamespace("testcat", "ns1.ns2")
  }
  test("tableCreation: partition column case insensitive resolution") {
    val testCatalog = catalog("testcat").asTableCatalog
    val sessionCatalog = catalog(SESSION_CATALOG_NAME).asTableCatalog
    // Checks the table has exactly one partition transform referencing `partition`.
    def checkPartitioning(cat: TableCatalog, partition: String): Unit = {
      val table = cat.loadTable(Identifier.of(Array.empty, "tbl"))
      val partitions = table.partitioning().map(_.references())
      assert(partitions.length === 1)
      val fieldNames = partitions.flatMap(_.map(_.fieldNames()))
      assert(fieldNames === Array(Array(partition)))
    }
    // With case-insensitive resolution (the default), PARTITIONED BY (A) must bind to
    // column "a" and keep the column's original casing.
    sql(s"CREATE TABLE tbl (a int, b string) USING $v2Source PARTITIONED BY (A)")
    checkPartitioning(sessionCatalog, "a")
    sql(s"CREATE TABLE testcat.tbl (a int, b string) USING $v2Source PARTITIONED BY (A)")
    checkPartitioning(testCatalog, "a")
    sql(s"CREATE OR REPLACE TABLE tbl (a int, b string) USING $v2Source PARTITIONED BY (B)")
    checkPartitioning(sessionCatalog, "b")
    sql(s"CREATE OR REPLACE TABLE testcat.tbl (a int, b string) USING $v2Source PARTITIONED BY (B)")
    checkPartitioning(testCatalog, "b")
  }
  test("tableCreation: partition column case sensitive resolution") {
    // With case-sensitive resolution, PARTITIONED BY (A) must not match column "a".
    def checkFailure(statement: String): Unit = {
      withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
        val e = intercept[AnalysisException] {
          sql(statement)
        }
        assert(e.getMessage.contains("Couldn't find column"))
      }
    }
    checkFailure(s"CREATE TABLE tbl (a int, b string) USING $v2Source PARTITIONED BY (A)")
    checkFailure(s"CREATE TABLE testcat.tbl (a int, b string) USING $v2Source PARTITIONED BY (A)")
    checkFailure(
      s"CREATE OR REPLACE TABLE tbl (a int, b string) USING $v2Source PARTITIONED BY (B)")
    checkFailure(
      s"CREATE OR REPLACE TABLE testcat.tbl (a int, b string) USING $v2Source PARTITIONED BY (B)")
  }
  test("tableCreation: duplicate column names in the table definition") {
    val errorMsg = "Found duplicate column(s) in the table definition of t"
    // Case-sensitive mode rejects exact duplicates; case-insensitive mode also rejects
    // names differing only in case.
    Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
      withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
        assertAnalysisError(
          s"CREATE TABLE t ($c0 INT, $c1 INT) USING $v2Source",
          errorMsg
        )
        assertAnalysisError(
          s"CREATE TABLE testcat.t ($c0 INT, $c1 INT) USING $v2Source",
          errorMsg
        )
        assertAnalysisError(
          s"CREATE OR REPLACE TABLE t ($c0 INT, $c1 INT) USING $v2Source",
          errorMsg
        )
        assertAnalysisError(
          s"CREATE OR REPLACE TABLE testcat.t ($c0 INT, $c1 INT) USING $v2Source",
          errorMsg
        )
      }
    }
  }
test("tableCreation: duplicate nested column names in the table definition") {
val errorMsg = "Found duplicate column(s) in the table definition of t"
Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
assertAnalysisError(
s"CREATE TABLE t (d struct<$c0: INT, $c1: INT>) USING $v2Source",
errorMsg
)
assertAnalysisError(
s"CREATE TABLE testcat.t (d struct<$c0: INT, $c1: INT>) USING $v2Source",
errorMsg
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE t (d struct<$c0: INT, $c1: INT>) USING $v2Source",
errorMsg
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE testcat.t (d struct<$c0: INT, $c1: INT>) USING $v2Source",
errorMsg
)
}
}
}
test("tableCreation: bucket column names not in table definition") {
val errorMsg = "Couldn't find column c in"
assertAnalysisError(
s"CREATE TABLE tbl (a int, b string) USING $v2Source CLUSTERED BY (c) INTO 4 BUCKETS",
errorMsg
)
assertAnalysisError(
s"CREATE TABLE testcat.tbl (a int, b string) USING $v2Source CLUSTERED BY (c) INTO 4 BUCKETS",
errorMsg
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE tbl (a int, b string) USING $v2Source " +
"CLUSTERED BY (c) INTO 4 BUCKETS",
errorMsg
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE testcat.tbl (a int, b string) USING $v2Source " +
"CLUSTERED BY (c) INTO 4 BUCKETS",
errorMsg
)
}
test("tableCreation: bucket column name containing dot") {
withTable("t") {
sql(
"""
|CREATE TABLE testcat.t (id int, `a.b` string) USING foo
|CLUSTERED BY (`a.b`) INTO 4 BUCKETS
|OPTIONS ('allow-unsupported-transforms'=true)
""".stripMargin)
val testCatalog = catalog("testcat").asTableCatalog.asInstanceOf[InMemoryTableCatalog]
val table = testCatalog.loadTable(Identifier.of(Array.empty, "t"))
val partitioning = table.partitioning()
assert(partitioning.length == 1 && partitioning.head.name() == "bucket")
val references = partitioning.head.references()
assert(references.length == 1)
assert(references.head.fieldNames().toSeq == Seq("a.b"))
}
}
test("tableCreation: column repeated in partition columns") {
val errorMsg = "Found duplicate column(s) in the partitioning"
Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
assertAnalysisError(
s"CREATE TABLE t ($c0 INT) USING $v2Source PARTITIONED BY ($c0, $c1)",
errorMsg
)
assertAnalysisError(
s"CREATE TABLE testcat.t ($c0 INT) USING $v2Source PARTITIONED BY ($c0, $c1)",
errorMsg
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE t ($c0 INT) USING $v2Source PARTITIONED BY ($c0, $c1)",
errorMsg
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE testcat.t ($c0 INT) USING $v2Source PARTITIONED BY ($c0, $c1)",
errorMsg
)
}
}
}
test("tableCreation: column repeated in bucket columns") {
val errorMsg = "Found duplicate column(s) in the bucket definition"
Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
assertAnalysisError(
s"CREATE TABLE t ($c0 INT) USING $v2Source " +
s"CLUSTERED BY ($c0, $c1) INTO 2 BUCKETS",
errorMsg
)
assertAnalysisError(
s"CREATE TABLE testcat.t ($c0 INT) USING $v2Source " +
s"CLUSTERED BY ($c0, $c1) INTO 2 BUCKETS",
errorMsg
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE t ($c0 INT) USING $v2Source " +
s"CLUSTERED BY ($c0, $c1) INTO 2 BUCKETS",
errorMsg
)
assertAnalysisError(
s"CREATE OR REPLACE TABLE testcat.t ($c0 INT) USING $v2Source " +
s"CLUSTERED BY ($c0, $c1) INTO 2 BUCKETS",
errorMsg
)
}
}
}
test("REFRESH TABLE: v2 table") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
val testCatalog = catalog("testcat").asTableCatalog.asInstanceOf[InMemoryTableCatalog]
val identifier = Identifier.of(Array("ns1", "ns2"), "tbl")
assert(!testCatalog.isTableInvalidated(identifier))
sql(s"REFRESH TABLE $t")
assert(testCatalog.isTableInvalidated(identifier))
}
}
test("REPLACE TABLE: v1 table") {
val e = intercept[AnalysisException] {
sql(s"CREATE OR REPLACE TABLE tbl (a int) USING ${classOf[SimpleScanSource].getName}")
}
assert(e.message.contains("REPLACE TABLE is only supported with v2 tables"))
}
test("DeleteFrom: basic - delete all") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(s"CREATE TABLE $t (id bigint, data string, p int) USING foo PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t VALUES (2L, 'a', 2), (2L, 'b', 3), (3L, 'c', 3)")
sql(s"DELETE FROM $t")
checkAnswer(spark.table(t), Seq())
}
}
test("DeleteFrom: basic - delete with where clause") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(s"CREATE TABLE $t (id bigint, data string, p int) USING foo PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t VALUES (2L, 'a', 2), (2L, 'b', 3), (3L, 'c', 3)")
sql(s"DELETE FROM $t WHERE id = 2")
checkAnswer(spark.table(t), Seq(
Row(3, "c", 3)))
}
}
test("DeleteFrom: delete from aliased target table") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(s"CREATE TABLE $t (id bigint, data string, p int) USING foo PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t VALUES (2L, 'a', 2), (2L, 'b', 3), (3L, 'c', 3)")
sql(s"DELETE FROM $t AS tbl WHERE tbl.id = 2")
checkAnswer(spark.table(t), Seq(
Row(3, "c", 3)))
}
}
test("DeleteFrom: normalize attribute names") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(s"CREATE TABLE $t (id bigint, data string, p int) USING foo PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t VALUES (2L, 'a', 2), (2L, 'b', 3), (3L, 'c', 3)")
sql(s"DELETE FROM $t AS tbl WHERE tbl.ID = 2")
checkAnswer(spark.table(t), Seq(
Row(3, "c", 3)))
}
}
// DELETE with a subquery in its condition is unsupported; the statement must
// fail analysis and leave the table contents untouched.
test("DeleteFrom: fail if has subquery") {
  val t = "testcat.ns1.ns2.tbl"
  withTable(t) {
    sql(s"CREATE TABLE $t (id bigint, data string, p int) USING foo PARTITIONED BY (id, p)")
    sql(s"INSERT INTO $t VALUES (2L, 'a', 2), (2L, 'b', 3), (3L, 'c', 3)")
    val exc = intercept[AnalysisException] {
      sql(s"DELETE FROM $t WHERE id IN (SELECT id FROM $t)")
    }
    // The failed statement must not have deleted any rows.
    assert(spark.table(t).count === 3)
    assert(exc.getMessage.contains("Delete by condition with subquery is not supported"))
  }
}
test("DeleteFrom: DELETE is only supported with v2 tables") {
// unset this config to use the default v2 session catalog.
spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
val v1Table = "tbl"
withTable(v1Table) {
sql(s"CREATE TABLE $v1Table" +
s" USING ${classOf[SimpleScanSource].getName} OPTIONS (from=0,to=1)")
val exc = intercept[AnalysisException] {
sql(s"DELETE FROM $v1Table WHERE i = 2")
}
assert(exc.getMessage.contains("DELETE is only supported with v2 tables"))
}
}
test("UPDATE TABLE") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(
s"""
|CREATE TABLE $t (id bigint, name string, age int, p int)
|USING foo
|PARTITIONED BY (id, p)
""".stripMargin)
// UPDATE non-existing table
assertAnalysisError(
"UPDATE dummy SET name='abc'",
"Table or view not found")
// UPDATE non-existing column
assertAnalysisError(
s"UPDATE $t SET dummy='abc'",
"cannot resolve")
assertAnalysisError(
s"UPDATE $t SET name='abc' WHERE dummy=1",
"cannot resolve")
// UPDATE is not implemented yet.
val e = intercept[UnsupportedOperationException] {
sql(s"UPDATE $t SET name='Robert', age=32 WHERE p=1")
}
assert(e.getMessage.contains("UPDATE TABLE is not supported temporarily"))
}
}
test("MERGE INTO TABLE") {
val target = "testcat.ns1.ns2.target"
val source = "testcat.ns1.ns2.source"
withTable(target, source) {
sql(
s"""
|CREATE TABLE $target (id bigint, name string, age int, p int)
|USING foo
|PARTITIONED BY (id, p)
""".stripMargin)
sql(
s"""
|CREATE TABLE $source (id bigint, name string, age int, p int)
|USING foo
|PARTITIONED BY (id, p)
""".stripMargin)
// MERGE INTO non-existing table
assertAnalysisError(
s"""
|MERGE INTO testcat.ns1.ns2.dummy AS target
|USING testcat.ns1.ns2.source AS source
|ON target.id = source.id
|WHEN MATCHED AND (target.age < 10) THEN DELETE
|WHEN MATCHED AND (target.age > 10) THEN UPDATE SET *
|WHEN NOT MATCHED AND (target.col2='insert')
|THEN INSERT *
""".stripMargin,
"Table or view not found")
// USING non-existing table
assertAnalysisError(
s"""
|MERGE INTO testcat.ns1.ns2.target AS target
|USING testcat.ns1.ns2.dummy AS source
|ON target.id = source.id
|WHEN MATCHED AND (target.age < 10) THEN DELETE
|WHEN MATCHED AND (target.age > 10) THEN UPDATE SET *
|WHEN NOT MATCHED AND (target.col2='insert')
|THEN INSERT *
""".stripMargin,
"Table or view not found")
// UPDATE non-existing column
assertAnalysisError(
s"""
|MERGE INTO testcat.ns1.ns2.target AS target
|USING testcat.ns1.ns2.source AS source
|ON target.id = source.id
|WHEN MATCHED AND (target.age < 10) THEN DELETE
|WHEN MATCHED AND (target.age > 10) THEN UPDATE SET target.dummy = source.age
|WHEN NOT MATCHED AND (target.col2='insert')
|THEN INSERT *
""".stripMargin,
"cannot resolve")
// UPDATE using non-existing column
assertAnalysisError(
s"""
|MERGE INTO testcat.ns1.ns2.target AS target
|USING testcat.ns1.ns2.source AS source
|ON target.id = source.id
|WHEN MATCHED AND (target.age < 10) THEN DELETE
|WHEN MATCHED AND (target.age > 10) THEN UPDATE SET target.age = source.dummy
|WHEN NOT MATCHED AND (target.col2='insert')
|THEN INSERT *
""".stripMargin,
"cannot resolve")
// MERGE INTO is not implemented yet.
val e = intercept[UnsupportedOperationException] {
sql(
s"""
|MERGE INTO testcat.ns1.ns2.target AS target
|USING testcat.ns1.ns2.source AS source
|ON target.id = source.id
|WHEN MATCHED AND (target.p < 0) THEN DELETE
|WHEN MATCHED AND (target.p > 0) THEN UPDATE SET *
|WHEN NOT MATCHED THEN INSERT *
""".stripMargin)
}
assert(e.getMessage.contains("MERGE INTO TABLE is not supported temporarily"))
}
}
test("AlterTable: rename table basic test") {
withTable("testcat.ns1.new") {
sql(s"CREATE TABLE testcat.ns1.ns2.old USING foo AS SELECT id, data FROM source")
checkAnswer(sql("SHOW TABLES FROM testcat.ns1.ns2"), Seq(Row("ns1.ns2", "old")))
sql(s"ALTER TABLE testcat.ns1.ns2.old RENAME TO ns1.new")
checkAnswer(sql("SHOW TABLES FROM testcat.ns1.ns2"), Seq.empty)
checkAnswer(sql("SHOW TABLES FROM testcat.ns1"), Seq(Row("ns1", "new")))
}
}
test("AlterTable: renaming views are not supported") {
val e = intercept[AnalysisException] {
sql(s"ALTER VIEW testcat.ns.tbl RENAME TO ns.view")
}
assert(e.getMessage.contains("Renaming view is not supported in v2 catalogs"))
}
test("ANALYZE TABLE") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
testV1Command("ANALYZE TABLE", s"$t COMPUTE STATISTICS")
testV1Command("ANALYZE TABLE", s"$t COMPUTE STATISTICS FOR ALL COLUMNS")
}
}
test("MSCK REPAIR TABLE") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
testV1Command("MSCK REPAIR TABLE", t)
}
}
test("TRUNCATE TABLE") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(
s"""
|CREATE TABLE $t (id bigint, data string)
|USING foo
|PARTITIONED BY (id)
""".stripMargin)
testV1Command("TRUNCATE TABLE", t)
testV1Command("TRUNCATE TABLE", s"$t PARTITION(id='1')")
}
}
test("SHOW PARTITIONS") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(
s"""
|CREATE TABLE $t (id bigint, data string)
|USING foo
|PARTITIONED BY (id)
""".stripMargin)
testV1Command("SHOW PARTITIONS", t)
testV1Command("SHOW PARTITIONS", s"$t PARTITION(id='1')")
}
}
test("LOAD DATA INTO TABLE") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(
s"""
|CREATE TABLE $t (id bigint, data string)
|USING foo
|PARTITIONED BY (id)
""".stripMargin)
testV1Command("LOAD DATA", s"INPATH 'filepath' INTO TABLE $t")
testV1Command("LOAD DATA", s"LOCAL INPATH 'filepath' INTO TABLE $t")
testV1Command("LOAD DATA", s"LOCAL INPATH 'filepath' OVERWRITE INTO TABLE $t")
testV1Command("LOAD DATA",
s"LOCAL INPATH 'filepath' OVERWRITE INTO TABLE $t PARTITION(id=1)")
}
}
test("SHOW CREATE TABLE") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
testV1Command("SHOW CREATE TABLE", t)
}
}
test("CACHE TABLE") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
testV1Command("CACHE TABLE", t)
val e = intercept[AnalysisException] {
sql(s"CACHE LAZY TABLE $t")
}
assert(e.message.contains("CACHE TABLE is only supported with v1 tables"))
}
}
test("UNCACHE TABLE") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
testV1Command("UNCACHE TABLE", t)
testV1Command("UNCACHE TABLE", s"IF EXISTS $t")
}
}
test("SHOW COLUMNS") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
testV1Command("SHOW COLUMNS", s"FROM $t")
testV1Command("SHOW COLUMNS", s"IN $t")
val e3 = intercept[AnalysisException] {
sql(s"SHOW COLUMNS FROM tbl IN testcat.ns1.ns2")
}
assert(e3.message.contains("Namespace name should have " +
"only one part if specified: testcat.ns1.ns2"))
}
}
test("ALTER TABLE RECOVER PARTITIONS") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
val e = intercept[AnalysisException] {
sql(s"ALTER TABLE $t RECOVER PARTITIONS")
}
assert(e.message.contains("ALTER TABLE RECOVER PARTITIONS is only supported with v1 tables"))
}
}
test("ALTER TABLE ADD PARTITION") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)")
val e = intercept[AnalysisException] {
sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'")
}
assert(e.message.contains("ALTER TABLE ADD PARTITION is only supported with v1 tables"))
}
}
test("ALTER TABLE RENAME PARTITION") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)")
val e = intercept[AnalysisException] {
sql(s"ALTER TABLE $t PARTITION (id=1) RENAME TO PARTITION (id=2)")
}
assert(e.message.contains("ALTER TABLE RENAME PARTITION is only supported with v1 tables"))
}
}
test("ALTER TABLE DROP PARTITIONS") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)")
val e = intercept[AnalysisException] {
sql(s"ALTER TABLE $t DROP PARTITION (id=1)")
}
assert(e.message.contains("ALTER TABLE DROP PARTITION is only supported with v1 tables"))
}
}
test("ALTER TABLE SerDe properties") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)")
val e = intercept[AnalysisException] {
sql(s"ALTER TABLE $t SET SERDEPROPERTIES ('columns'='foo,bar', 'field.delim' = ',')")
}
assert(e.message.contains("ALTER TABLE SerDe Properties is only supported with v1 tables"))
}
}
test("ALTER VIEW AS QUERY") {
val v = "testcat.ns1.ns2.v"
val e = intercept[AnalysisException] {
sql(s"ALTER VIEW $v AS SELECT 1")
}
assert(e.message.contains("ALTER VIEW QUERY is only supported with v1 tables"))
}
test("CREATE VIEW") {
val v = "testcat.ns1.ns2.v"
val e = intercept[AnalysisException] {
sql(s"CREATE VIEW $v AS SELECT * FROM tab1")
}
assert(e.message.contains("CREATE VIEW is only supported with v1 tables"))
}
test("SHOW TBLPROPERTIES: v2 table") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
val owner = "andrew"
val status = "new"
val provider = "foo"
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING $provider " +
s"TBLPROPERTIES ('owner'='$owner', 'status'='$status')")
val properties = sql(s"SHOW TBLPROPERTIES $t")
val schema = new StructType()
.add("key", StringType, nullable = false)
.add("value", StringType, nullable = false)
val expected = Seq(
Row("owner", owner),
Row("status", status),
Row("provider", provider))
assert(properties.schema === schema)
assert(expected === properties.collect())
}
}
test("SHOW TBLPROPERTIES(key): v2 table") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
val owner = "andrew"
val status = "new"
val provider = "foo"
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING $provider " +
s"TBLPROPERTIES ('owner'='$owner', 'status'='$status')")
val properties = sql(s"SHOW TBLPROPERTIES $t ('status')")
val expected = Seq(Row("status", status))
assert(expected === properties.collect())
}
}
test("SHOW TBLPROPERTIES(key): v2 table, key not found") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
val nonExistingKey = "nonExistingKey"
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo " +
s"TBLPROPERTIES ('owner'='andrew', 'status'='new')")
val properties = sql(s"SHOW TBLPROPERTIES $t ('$nonExistingKey')")
val expected = Seq(Row(nonExistingKey, s"Table $t does not have property: $nonExistingKey"))
assert(expected === properties.collect())
}
}
test("DESCRIBE FUNCTION: only support session catalog") {
val e = intercept[AnalysisException] {
sql("DESCRIBE FUNCTION testcat.ns1.ns2.fun")
}
assert(e.message.contains("DESCRIBE FUNCTION is only supported in v1 catalog"))
val e1 = intercept[AnalysisException] {
sql("DESCRIBE FUNCTION default.ns1.ns2.fun")
}
assert(e1.message.contains("Unsupported function name 'default.ns1.ns2.fun'"))
}
test("SHOW FUNCTIONS not valid v1 namespace") {
val function = "testcat.ns1.ns2.fun"
val e = intercept[AnalysisException] {
sql(s"SHOW FUNCTIONS LIKE $function")
}
assert(e.message.contains("SHOW FUNCTIONS is only supported in v1 catalog"))
}
test("DROP FUNCTION: only support session catalog") {
val e = intercept[AnalysisException] {
sql("DROP FUNCTION testcat.ns1.ns2.fun")
}
assert(e.message.contains("DROP FUNCTION is only supported in v1 catalog"))
val e1 = intercept[AnalysisException] {
sql("DESCRIBE FUNCTION default.ns1.ns2.fun")
}
assert(e1.message.contains("Unsupported function name 'default.ns1.ns2.fun'"))
}
test("CREATE FUNCTION: only support session catalog") {
val e = intercept[AnalysisException] {
sql("CREATE FUNCTION testcat.ns1.ns2.fun as 'f'")
}
assert(e.message.contains("CREATE FUNCTION is only supported in v1 catalog"))
val e1 = intercept[AnalysisException] {
sql("CREATE FUNCTION default.ns1.ns2.fun as 'f'")
}
assert(e1.message.contains("Unsupported function name 'default.ns1.ns2.fun'"))
}
// A global temp view must win name resolution over a v2 catalog registered
// under the same name as the global temp database.
test("global temp view should not be masked by v2 catalog") {
  val globalTempDB = spark.sessionState.conf.getConf(StaticSQLConf.GLOBAL_TEMP_DATABASE)
  // Register a v2 catalog whose name collides with the global temp database.
  spark.conf.set(s"spark.sql.catalog.$globalTempDB", classOf[InMemoryTableCatalog].getName)
  try {
    sql("create global temp view v as select 1")
    sql(s"alter view $globalTempDB.v rename to v2")
    checkAnswer(spark.table(s"$globalTempDB.v2"), Row(1))
    sql(s"drop view $globalTempDB.v2")
  } finally {
    // Always clean up so a leaked view cannot affect later tests.
    spark.sharedState.globalTempViewManager.clear()
  }
}
test("SPARK-30104: global temp db is used as a table name under v2 catalog") {
val globalTempDB = spark.sessionState.conf.getConf(StaticSQLConf.GLOBAL_TEMP_DATABASE)
val t = s"testcat.$globalTempDB"
withTable(t) {
sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
sql("USE testcat")
// The following should not throw AnalysisException, but should use `testcat.$globalTempDB`.
sql(s"DESCRIBE TABLE $globalTempDB")
}
}
// A v2 catalog registered under the global temp database name is masked:
// multi-part names starting with that name resolve to the session catalog.
test("SPARK-30104: v2 catalog named global_temp will be masked") {
  val globalTempDB = spark.sessionState.conf.getConf(StaticSQLConf.GLOBAL_TEMP_DATABASE)
  spark.conf.set(s"spark.sql.catalog.$globalTempDB", classOf[InMemoryTableCatalog].getName)
  val e = intercept[AnalysisException] {
    // Since the following multi-part name starts with `globalTempDB`, it is resolved to
    // the session catalog, not the `global_temp` v2 catalog.
    sql(s"CREATE TABLE $globalTempDB.ns1.ns2.tbl (id bigint, data string) USING json")
  }
  assert(e.message.contains("global_temp.ns1.ns2.tbl is not a valid TableIdentifier"))
}
test("table name same as catalog can be used") {
withTable("testcat.testcat") {
sql(s"CREATE TABLE testcat.testcat (id bigint, data string) USING foo")
sql("USE testcat")
// The following should not throw AnalysisException.
sql(s"DESCRIBE TABLE testcat")
}
}
test("SPARK-30001: session catalog name can be specified in SQL statements") {
// unset this config to use the default v2 session catalog.
spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
withTable("t") {
sql("CREATE TABLE t USING json AS SELECT 1 AS i")
checkAnswer(sql("select * from t"), Row(1))
checkAnswer(sql("select * from spark_catalog.t"), Row(1))
checkAnswer(sql("select * from spark_catalog.default.t"), Row(1))
}
}
test("SPARK-30259: session catalog can be specified in CREATE TABLE AS SELECT command") {
withTable("tbl") {
val ident = Identifier.of(Array(), "tbl")
sql("CREATE TABLE spark_catalog.tbl USING json AS SELECT 1 AS i")
assert(catalog("spark_catalog").asTableCatalog.tableExists(ident) === true)
}
}
test("SPARK-30259: session catalog can be specified in CREATE TABLE command") {
withTable("tbl") {
val ident = Identifier.of(Array(), "tbl")
sql("CREATE TABLE spark_catalog.tbl (col string) USING json")
assert(catalog("spark_catalog").asTableCatalog.tableExists(ident) === true)
}
}
test("SPARK-30094: current namespace is used during table resolution") {
// unset this config to use the default v2 session catalog.
spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
withTable("spark_catalog.t", "testcat.ns.t") {
sql("CREATE TABLE t USING parquet AS SELECT 1")
sql("CREATE TABLE testcat.ns.t USING parquet AS SELECT 2")
checkAnswer(sql("SELECT * FROM t"), Row(1))
sql("USE testcat.ns")
checkAnswer(sql("SELECT * FROM t"), Row(2))
}
}
test("SPARK-30284: CREATE VIEW should track the current catalog and namespace") {
// unset this config to use the default v2 session catalog.
spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
val sessionCatalogName = CatalogManager.SESSION_CATALOG_NAME
sql("USE testcat.ns1.ns2")
sql("CREATE TABLE t USING foo AS SELECT 1 col")
checkAnswer(spark.table("t"), Row(1))
withTempView("t") {
spark.range(10).createTempView("t")
withView(s"$sessionCatalogName.v") {
val e = intercept[AnalysisException] {
sql(s"CREATE VIEW $sessionCatalogName.v AS SELECT * FROM t")
}
assert(e.message.contains("referencing a temporary view"))
}
}
withTempView("t") {
withView(s"$sessionCatalogName.v") {
sql(s"CREATE VIEW $sessionCatalogName.v AS SELECT t1.col FROM t t1 JOIN ns1.ns2.t t2")
sql(s"USE $sessionCatalogName")
// The view should read data from table `testcat.ns1.ns2.t` not the temp view.
spark.range(10).createTempView("t")
checkAnswer(spark.table("v"), Row(1))
}
}
}
test("COMMENT ON NAMESPACE") {
// unset this config to use the default v2 session catalog.
spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
// Session catalog is used.
sql("CREATE NAMESPACE ns")
checkNamespaceComment("ns", "minor revision")
checkNamespaceComment("ns", null)
checkNamespaceComment("ns", "NULL")
intercept[AnalysisException](sql("COMMENT ON NAMESPACE abc IS NULL"))
// V2 non-session catalog is used.
sql("CREATE NAMESPACE testcat.ns1")
checkNamespaceComment("testcat.ns1", "minor revision")
checkNamespaceComment("testcat.ns1", null)
checkNamespaceComment("testcat.ns1", "NULL")
intercept[AnalysisException](sql("COMMENT ON NAMESPACE testcat.abc IS NULL"))
}
/**
 * Runs COMMENT ON NAMESPACE for `namespace` and verifies the result via
 * DESC NAMESPACE EXTENDED. A null `comment` issues the `IS NULL` form,
 * which is expected to clear the description (shown as an empty string).
 */
private def checkNamespaceComment(namespace: String, comment: String): Unit = {
  sql(s"COMMENT ON NAMESPACE $namespace IS " +
    Option(comment).map("'" + _ + "'").getOrElse("NULL"))
  // A cleared comment surfaces as an empty Description value.
  val expectedComment = Option(comment).getOrElse("")
  assert(sql(s"DESC NAMESPACE extended $namespace").toDF("k", "v")
    .where("k='Description'")
    .head().getString(1) === expectedComment)
}
test("COMMENT ON TABLE") {
// unset this config to use the default v2 session catalog.
spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
// Session catalog is used.
withTable("t") {
sql("CREATE TABLE t(k int) USING json")
checkTableComment("t", "minor revision")
checkTableComment("t", null)
checkTableComment("t", "NULL")
}
intercept[AnalysisException](sql("COMMENT ON TABLE abc IS NULL"))
// V2 non-session catalog is used.
withTable("testcat.ns1.ns2.t") {
sql("CREATE TABLE testcat.ns1.ns2.t(k int) USING foo")
checkTableComment("testcat.ns1.ns2.t", "minor revision")
checkTableComment("testcat.ns1.ns2.t", null)
checkTableComment("testcat.ns1.ns2.t", "NULL")
}
intercept[AnalysisException](sql("COMMENT ON TABLE testcat.abc IS NULL"))
val globalTempDB = spark.sessionState.conf.getConf(StaticSQLConf.GLOBAL_TEMP_DATABASE)
spark.conf.set(s"spark.sql.catalog.$globalTempDB", classOf[InMemoryTableCatalog].getName)
withTempView("v") {
sql("create global temp view v as select 1")
val e = intercept[AnalysisException](sql("COMMENT ON TABLE global_temp.v IS NULL"))
assert(e.getMessage.contains("global_temp.v is a temp view not table."))
}
}
/**
 * Runs COMMENT ON TABLE for `tableName` and verifies the result via
 * DESC EXTENDED. A null `comment` issues the `IS NULL` form, which is
 * expected to clear the comment (shown as an empty string).
 */
private def checkTableComment(tableName: String, comment: String): Unit = {
  sql(s"COMMENT ON TABLE $tableName IS " + Option(comment).map("'" + _ + "'").getOrElse("NULL"))
  // A cleared comment surfaces as an empty Comment value.
  val expectedComment = Option(comment).getOrElse("")
  assert(sql(s"DESC extended $tableName").toDF("k", "v", "c")
    .where("k='Comment'")
    .head().getString(1) === expectedComment)
}
/**
 * Executes "`sqlCommand` `sqlParams`" and asserts it fails analysis with the
 * standard "only supported with v1 tables" error for that command.
 */
private def testV1Command(sqlCommand: String, sqlParams: String): Unit = {
  val statement = s"$sqlCommand $sqlParams"
  val thrown = intercept[AnalysisException] {
    sql(statement)
  }
  assert(thrown.message.contains(s"$sqlCommand is only supported with v1 tables"))
}
/**
 * Runs `sqlStatement` and asserts that it raises an [[AnalysisException]]
 * whose message contains `expectedError`.
 */
private def assertAnalysisError(sqlStatement: String, expectedError: String): Unit = {
  val caught = intercept[AnalysisException] {
    sql(sqlStatement)
  }
  assert(caught.getMessage.contains(expectedError))
}
}
/** Used as a V2 DataSource for V2SessionCatalog DDL */
class FakeV2Provider extends TableProvider {
  // DDL tests only need the provider class to exist and be loadable;
  // actually materializing a table is never exercised.
  override def getTable(options: CaseInsensitiveStringMap): Table = {
    throw new UnsupportedOperationException("Unnecessary for DDL tests")
  }
}
| ptkool/spark | sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala | Scala | apache-2.0 | 87,361 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.raster.data
import org.junit.runner.RunWith
import org.locationtech.geomesa.raster.RasterTestsUtils._
import org.locationtech.geomesa.raster._
import org.locationtech.geomesa.utils.geohash.{BoundingBox, GeoHash}
import org.locationtech.geomesa.utils.stats.{NoOpTimings, Timings}
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class AccumuloRasterStoreQueryIntegratedTest extends Specification {
sequential
// Counter used to derive a unique Accumulo table name per test case; the
// spec is `sequential`, so unsynchronized mutation is safe here.
var testIteration = 0

/** Increments the iteration counter and returns a fresh, unique table name. */
def getNewIteration(): String = {
  testIteration += 1
  s"testRSQIT_Table_$testIteration"
}
def correctRes(r: Double): Double = BigDecimal(r).round(mc).toDouble
"RasterStore" should {
// Creating a raster store creates the data table, its companion "_queries"
// table, and the shared bounds table; deleting the store drops the first two
// but leaves the shared bounds table in place.
"create an empty table and be able delete itself" in {
  val tableName = getNewIteration()
  val rasterStore = createMockRasterStore(tableName)
  val tableOps = rasterStore.connector.tableOperations()
  tableOps.exists(tableName) must beTrue
  tableOps.exists(s"${tableName}_queries") must beTrue
  tableOps.exists(GEOMESA_RASTER_BOUNDS_TABLE) must beTrue
  rasterStore.deleteRasterTable()
  tableOps.exists(tableName) must beFalse
  tableOps.exists(s"${tableName}_queries") must beFalse
  // the deleteRasterTable does not delete the bounds table, just metadata rows contained in the table
  tableOps.exists(GEOMESA_RASTER_BOUNDS_TABLE) must beTrue
}
"create a table, write to it, and be able delete itself" in {
val tableName = getNewIteration()
val theStore = createMockRasterStore(tableName)
val tableOps = theStore.connector.tableOperations()
tableOps.exists(tableName) must beTrue
tableOps.exists(s"${tableName}_queries") must beTrue
tableOps.exists(GEOMESA_RASTER_BOUNDS_TABLE) must beTrue
// populate store
val testRaster = generateTestRaster(0, 50, 0, 50)
theStore.putRaster(testRaster)
//test the query
val query = generateQuery(0, 50, 0, 50)
val rasters = theStore.getRasters(query).toList
rasters.length must beEqualTo(1)
// test the bounds
theStore.getResToGeoHashLenMap.isEmpty must beFalse
val theBounds = theStore.getBounds
theBounds must beAnInstanceOf[BoundingBox]
theBounds.maxLon must beEqualTo(50.0)
theBounds.maxLat must beEqualTo(50.0)
theBounds.minLon must beEqualTo(0.0)
theBounds.minLat must beEqualTo(0.0)
// Now delete the table and test if things worked
theStore.deleteRasterTable()
val nullBounds = theStore.getBounds
nullBounds.maxLon must beEqualTo(180.0)
nullBounds.maxLat must beEqualTo(90.0)
nullBounds.minLon must beEqualTo(-180.0)
nullBounds.minLat must beEqualTo(-90.0)
tableOps.exists(tableName) must beFalse
tableOps.exists(s"${tableName}_queries") must beFalse
// the deleteRasterTable does not delete the bounds table, just metadata rows contained in the table
tableOps.exists(GEOMESA_RASTER_BOUNDS_TABLE) must beTrue
theStore.getResToGeoHashLenMap.isEmpty must beTrue
}
"create an empty RasterStore and return nothing" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
//generate query
val query = generateQuery(0, 50, 0, 50)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query)
theResults.toList.length must beEqualTo(0)
}
"create a Raster Store, populate it and run a query" in {
val tableName = getNewIteration()
val theStore = createMockRasterStore(tableName)
// populate store
val testRaster = generateTestRaster(0, 50, 0, 50)
theStore.putRaster(testRaster)
//generate query
val query = generateQuery(0, 50, 0, 50)
theStore must beAnInstanceOf[AccumuloRasterStore]
val theIterator = theStore.getRasters(query)
val theRaster = theIterator.next()
theRaster must beAnInstanceOf[Raster]
}
"Properly filter in a raster via a query bbox" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val testRaster = generateTestRasterFromGeoHash(GeoHash("s"))
rasterStore.putRaster(testRaster)
//generate query
val query = generateQuery(0, 50, 0, 50)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theIterator = rasterStore.getRasters(query)
val theRaster = theIterator.next()
theRaster must beAnInstanceOf[Raster]
}
"Properly filter out a raster via a query bbox" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val testRaster = generateTestRasterFromGeoHash(GeoHash("d"))
rasterStore.putRaster(testRaster)
//generate query
val query = generateQuery(0, 45, 0, 45)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theIterator = rasterStore.getRasters(query)
theIterator.isEmpty must beTrue
}
"Properly filter out a raster via a query bbox and maintain a valid raster in the results" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val testRaster1 = generateTestRasterFromGeoHash(GeoHash("s"))
rasterStore.putRaster(testRaster1)
val testRaster2 = generateTestRasterFromGeoHash(GeoHash("d"))
rasterStore.putRaster(testRaster2)
//generate query
val query = generateQuery(0, 50, 0, 50)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly filter in a raster conforming to GeoHashes via a query bbox and resolution" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val testRaster = generateTestRasterFromGeoHash(GeoHash("s"), res = 5.0)
rasterStore.putRaster(testRaster)
//generate query
val query = generateQuery(0, 50, 0, 50, res = 5.0)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return a raster slightly smaller than a GeoHash via a query bbox" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val gh = GeoHash("dqcjr")
val env = gh.getEnvelopeInternal
val inG = gh.geom.buffer(-0.0001).getEnvelopeInternal
val testRaster = generateTestRaster(inG.getMinX, inG.getMaxX, inG.getMinY, inG.getMaxY)
rasterStore.putRaster(testRaster)
//generate query
val query = generateQuery(env.getMinX-0.0001, env.getMaxX+0.0001, env.getMinY-0.0001, env.getMaxY+0.0001)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return a raster slightly larger than a GeoHash via a query bbox" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val gh = GeoHash("dqcjr")
val env = gh.getEnvelopeInternal
val inG = gh.geom.buffer(0.0001).getEnvelopeInternal
val testRaster = generateTestRaster(inG.getMinX, inG.getMaxX, inG.getMinY, inG.getMaxY)
rasterStore.putRaster(testRaster)
//generate query
val query = generateQuery(env.getMinX-0.0001, env.getMaxX+0.0001, env.getMinY-0.0001, env.getMaxY+0.0001)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return a group of four Rasters Conforming to GeoHashes Near (0, 0)" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val bbox1 = GeoHash("7").bbox
val testRaster1 = generateTestRasterFromBoundingBox(bbox1)
rasterStore.putRaster(testRaster1)
val bbox2 = GeoHash("k").bbox
val testRaster2 = generateTestRasterFromBoundingBox(bbox2)
rasterStore.putRaster(testRaster2)
val bbox3 = GeoHash("s").bbox
val testRaster3 = generateTestRasterFromBoundingBox(bbox3)
rasterStore.putRaster(testRaster3)
val bbox4 = GeoHash("e").bbox
val testRaster4 = generateTestRasterFromBoundingBox(bbox4)
rasterStore.putRaster(testRaster4)
//generate query
val query = generateQuery(bbox1.minLon, bbox3.maxLon, bbox1.minLat, bbox3.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(4)
}
"Properly return a group of four Small Rasters Conforming to GeoHashes" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val bbox1 = GeoHash("dqb0m").bbox
val testRaster1 = generateTestRasterFromBoundingBox(bbox1)
rasterStore.putRaster(testRaster1)
val bbox2 = GeoHash("dqb0q").bbox
val testRaster2 = generateTestRasterFromBoundingBox(bbox2)
rasterStore.putRaster(testRaster2)
val bbox3 = GeoHash("dqb0w").bbox
val testRaster3 = generateTestRasterFromBoundingBox(bbox3)
rasterStore.putRaster(testRaster3)
val bbox4 = GeoHash("dqb0t").bbox
val testRaster4 = generateTestRasterFromBoundingBox(bbox4)
rasterStore.putRaster(testRaster4)
//generate query
val query = generateQuery(bbox1.minLon, bbox3.maxLon, bbox1.minLat, bbox3.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(4)
}
"Do the correct thing when querying the whole world" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val wholeWorld = BoundingBox(-180.0, 180, -90.0, 90.0)
val allFiveCharacterHashes = BoundingBox.getGeoHashesFromBoundingBox(wholeWorld)
val testRasters = allFiveCharacterHashes.map{ hash => generateTestRasterFromBoundingBox(GeoHash(hash).bbox) }
testRasters.foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(-180.0, 180.0, -90.0, 90.0)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(32)
}
"Properly return one raster in a QLevel 1 bounding box" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val qbbox = RasterTestsUtils.quadrant1
RasterTestsUtils.generateQuadTreeLevelRasters(1).foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(qbbox.minLon, qbbox.maxLon, qbbox.minLat, qbbox.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return one raster in a QLevel 2 bounding box" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val qbbox = RasterTestsUtils.generateSubQuadrant(2, RasterTestsUtils.quadrant1, 1)
RasterTestsUtils.generateQuadTreeLevelRasters(2).foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(qbbox.minLon, qbbox.maxLon, qbbox.minLat, qbbox.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return one raster in a QLevel 3 bounding box" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val qbbox = RasterTestsUtils.generateSubQuadrant(3, RasterTestsUtils.quadrant1, 1)
RasterTestsUtils.generateQuadTreeLevelRasters(3).foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(qbbox.minLon, qbbox.maxLon, qbbox.minLat, qbbox.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return one raster in a QLevel 4 bounding box" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val qbbox = RasterTestsUtils.generateSubQuadrant(4, RasterTestsUtils.quadrant1, 1)
RasterTestsUtils.generateQuadTreeLevelRasters(4).foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(qbbox.minLon, qbbox.maxLon, qbbox.minLat, qbbox.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return one raster in a QLevel 5 bounding box" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val qbbox = RasterTestsUtils.generateSubQuadrant(5, RasterTestsUtils.quadrant1, 1)
RasterTestsUtils.generateQuadTreeLevelRasters(5).foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(qbbox.minLon, qbbox.maxLon, qbbox.minLat, qbbox.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return one raster in a QLevel 6 bounding box" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val qbbox = RasterTestsUtils.generateSubQuadrant(6, RasterTestsUtils.quadrant1, 1)
RasterTestsUtils.generateQuadTreeLevelRasters(6).foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(qbbox.minLon, qbbox.maxLon, qbbox.minLat, qbbox.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return one raster in a QLevel 7 bounding box" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val qbbox = RasterTestsUtils.generateSubQuadrant(7, RasterTestsUtils.quadrant1, 1)
RasterTestsUtils.generateQuadTreeLevelRasters(7).foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(qbbox.minLon, qbbox.maxLon, qbbox.minLat, qbbox.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return one raster in a QLevel 8 bounding box" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val qbbox = RasterTestsUtils.generateSubQuadrant(8, RasterTestsUtils.quadrant1, 1)
RasterTestsUtils.generateQuadTreeLevelRasters(8).foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(qbbox.minLon, qbbox.maxLon, qbbox.minLat, qbbox.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return one raster in a QLevel 9 bounding box" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val qbbox = RasterTestsUtils.generateSubQuadrant(9, RasterTestsUtils.quadrant1, 1)
RasterTestsUtils.generateQuadTreeLevelRasters(9).foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(qbbox.minLon, qbbox.maxLon, qbbox.minLat, qbbox.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return one raster in a QLevel 10 bounding box" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val qbbox = RasterTestsUtils.generateSubQuadrant(10, RasterTestsUtils.quadrant1, 1)
RasterTestsUtils.generateQuadTreeLevelRasters(10).foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(qbbox.minLon, qbbox.maxLon, qbbox.minLat, qbbox.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return one raster in a QLevel 11 bounding box" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val qbbox = RasterTestsUtils.generateSubQuadrant(11, RasterTestsUtils.quadrant1, 1)
RasterTestsUtils.generateQuadTreeLevelRasters(11).foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(qbbox.minLon, qbbox.maxLon, qbbox.minLat, qbbox.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return one raster in a QLevel 12 bounding box" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val qbbox = RasterTestsUtils.generateSubQuadrant(12, RasterTestsUtils.quadrant1, 1)
RasterTestsUtils.generateQuadTreeLevelRasters(12).foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(qbbox.minLon, qbbox.maxLon, qbbox.minLat, qbbox.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return one raster in a QLevel 13 bounding box" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val qbbox = RasterTestsUtils.generateSubQuadrant(13, RasterTestsUtils.quadrant1, 1)
RasterTestsUtils.generateQuadTreeLevelRasters(13).foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(qbbox.minLon, qbbox.maxLon, qbbox.minLat, qbbox.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return one raster in a QLevel 14 bounding box" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val qbbox = RasterTestsUtils.generateSubQuadrant(14, RasterTestsUtils.quadrant1, 1)
RasterTestsUtils.generateQuadTreeLevelRasters(14).foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(qbbox.minLon, qbbox.maxLon, qbbox.minLat, qbbox.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Properly return one raster in a QLevel 15 bounding box" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val qbbox = RasterTestsUtils.generateSubQuadrant(15, RasterTestsUtils.quadrant1, 1)
RasterTestsUtils.generateQuadTreeLevelRasters(15).foreach(rasterStore.putRaster)
//generate query
val query = generateQuery(qbbox.minLon, qbbox.maxLon, qbbox.minLat, qbbox.maxLat)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
}
"Given a simple pyramid, select the top level when doing a whole world query" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// expected res
val expectedResolution = correctRes(50.0 / 256)
// general setup
val testRaster1 = generateTestRaster(0, 45.0, 0, 45.0, res = 50.0 / 256)
rasterStore.putRaster(testRaster1)
val testRaster2 = generateTestRaster(0, 45.0/2, 0, 45.0/2, res = 40.0 / 256)
rasterStore.putRaster(testRaster2)
val testRaster3 = generateTestRaster(0, 45.0/4, 0, 45.0/4, res = 30.0 / 256)
rasterStore.putRaster(testRaster3)
val testRaster4 = generateTestRaster(0, 45.0/8, 0, 45.0/8, res = 20.0 / 256)
rasterStore.putRaster(testRaster4)
val testRaster5 = generateTestRaster(0, 45.0/16, 0, 45.0/16, res = 10.0 / 256)
rasterStore.putRaster(testRaster5)
//generate query
val query = generateQuery(-180.0, 180.0, -90.0, 90.0)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theResults = rasterStore.getRasters(query).toList
theResults.length must beEqualTo(1)
theResults.head.resolution must beEqualTo(expectedResolution)
}
"Given a odd pyramid (equal resolutions at varying GeoHash precision), return the correct Availability Map" in {
val tableName = getNewIteration()
val rasterStore = createMockRasterStore(tableName)
// general setup
val testRaster1 = generateTestRaster(0, 45.0, 0, 45.0, res = 50.0 / 256)
rasterStore.putRaster(testRaster1)
val testRaster2 = generateTestRaster(0, 45.0/2, 0, 45.0/2, res = 40.0 / 256)
rasterStore.putRaster(testRaster2)
val testRaster3 = generateTestRaster(0, 45.0/4, 0, 45.0/4, res = 50.0 / 256)
rasterStore.putRaster(testRaster3)
val testRaster4 = generateTestRaster(0, 45.0/8, 0, 45.0/8, res = 50.0 / 256)
rasterStore.putRaster(testRaster4)
rasterStore must beAnInstanceOf[AccumuloRasterStore]
val theAvailability = rasterStore.getResToGeoHashLenMap
theAvailability.keys().size() must beEqualTo(3)
theAvailability.keySet().size() must beEqualTo(2)
theAvailability.values().size() must beEqualTo(3)
}
}
}
| elahrvivaz/geomesa | geomesa-accumulo/geomesa-accumulo-raster/src/test/scala/org/locationtech/geomesa/raster/data/AccumuloRasterStoreQueryIntegratedTest.scala | Scala | apache-2.0 | 23,132 |
// NOTE(review): type-inference test fixture. The /*start*/ ... /*end*/ markers
// delimit the expression under test and the final comment line holds the
// expected inferred type, so those markers must not be altered or moved.
object SCL7544A {
  trait Relationsz {
    // note: the type parameter here is literally *named* "Int", shadowing
    // scala.Int inside this abstract type's definition
    type ZeroOne[Int]
    implicit def zeroOneOps[T]: ZeroOneOps[T]
    trait ZeroOneOps[T] {
      def seq(zo: ZeroOne[T]): Seq[T]
    }
    implicit class ZeroOneSyntax[T](val _zo: ZeroOne[T])(implicit ops: ZeroOneOps[T]) {
      def seq = ops.seq(_zo)
    }
  }
  class Implicits[R1 <: Relationsz, R2 <: Relationsz](r1: R1, val r2: R2) {
    import r1._
    implicit def zeroOne[T](zo: r1.ZeroOne[Int]) = /*start*/zo.seq/*end*/
  }
}
//Seq[Int]
//Seq[Int] | LPTK/intellij-scala | testdata/typeInference/bugs5/SCL7544A.scala | Scala | apache-2.0 | 490 |
/*
* Copyright 2011 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.scrooge.backend
import com.twitter.scrooge.ast._
import com.twitter.scrooge.frontend.{ScroogeInternalException, ResolvedDocument}
import com.twitter.scrooge.mustache.Dictionary._
import com.twitter.scrooge.mustache.HandlebarLoader
import java.io.File
/**
 * Factory for [[ScalaGenerator]] instances, registered under the language
 * key "scala". Holds the shared template loader for the scala templates.
 */
object ScalaGeneratorFactory extends GeneratorFactory {
  val lang = "scala"
  val handlebarLoader = new HandlebarLoader("/scalagen/", ".scala")

  def apply(
    includeMap: Map[String, ResolvedDocument],
    defaultNamespace: String,
    experimentFlags: Seq[String]
  ): ThriftGenerator =
    new ScalaGenerator(
      includeMap = includeMap,
      defaultNamespace = defaultNamespace,
      experimentFlags = experimentFlags,
      templatesLoader = handlebarLoader
    )
}
/**
 * Thrift-to-Scala code generator. Supplies the Scala-specific rendering rules
 * (keyword quoting, identifier case normalization, namespace resolution, and
 * the mapping from thrift types/constants to Scala syntax fragments) used by
 * the template engine.
 *
 * @param includeMap resolved documents for thrift files included by the one
 *        being generated, keyed by include file name
 * @param defaultNamespace namespace used when a document declares neither a
 *        scala nor a java namespace
 * @param experimentFlags experimental feature flags passed through from the caller
 * @param templatesLoader loader for the scala handlebar templates
 */
class ScalaGenerator(
  val includeMap: Map[String, ResolvedDocument],
  val defaultNamespace: String,
  val experimentFlags: Seq[String],
  val templatesLoader: HandlebarLoader
) extends TemplateGenerator {
  def templates: HandlebarLoader = templatesLoader
  val fileExtension = ".scala"

  // when true, print a warning whenever the java namespace is used as a
  // fallback because no scala namespace was declared
  var warnOnJavaNamespaceFallback: Boolean = false

  // Scala reserved words; identifiers that collide with these must be backquoted.
  private object ScalaKeywords {
    private[this] val set = Set[String](
      "abstract", "case", "catch", "class", "def", "do", "else", "extends",
      "false", "final", "finally", "for", "forSome", "if", "implicit", "import",
      "lazy", "match", "new", "null", "object", "override", "package", "private",
      "protected", "return", "sealed", "super", "this", "throw", "trait", "try",
      "true", "type", "val", "var", "while", "with", "yield")
    def contains(str: String): Boolean = set.contains(str)
  }

  // Quote Scala reserved words in ``
  def quoteKeyword(str: String): String =
    if (ScalaKeywords.contains(str))
      "`" + str + "`"
    else
      str

  // Recursively rewrites identifiers in the AST to Scala casing conventions:
  // type-like names (enums, enum fields, identifiers) become TitleCase, while
  // field/argument names become camelCase. Nodes with no case rule pass through.
  def normalizeCase[N <: Node](node: N) = {
    (node match {
      case d: Document =>
        d.copy(defs = d.defs.map(normalizeCase(_)))
      case id: Identifier => id.toTitleCase
      case e: EnumRHS =>
        e.copy(normalizeCase(e.enum), normalizeCase(e.value))
      case f: Field =>
        f.copy(
          sid = f.sid.toCamelCase,
          default = f.default.map(normalizeCase(_)))
      case f: Function =>
        f.copy(
          args = f.args.map(normalizeCase(_)),
          throws = f.throws.map(normalizeCase(_)))
      case c: ConstDefinition =>
        c.copy(value = normalizeCase(c.value))
      case e: Enum =>
        e.copy(values = e.values.map(normalizeCase(_)))
      case e: EnumField =>
        e.copy(sid = e.sid.toTitleCase)
      case s: Struct =>
        s.copy(fields = s.fields.map(normalizeCase(_)))
      case f: FunctionArgs =>
        f.copy(fields = f.fields.map(normalizeCase(_)))
      case f: FunctionResult =>
        f.copy(fields = f.fields.map(normalizeCase(_)))
      case e: Exception_ =>
        e.copy(fields = e.fields.map(normalizeCase(_)))
      case s: Service =>
        s.copy(functions = s.functions.map(normalizeCase(_)))
      case n => n
    }).asInstanceOf[N]
  }

  // Resolves the document's scala namespace, falling back to its java
  // namespace (optionally warning) when no scala namespace is declared.
  private[this] def getNamespaceWithWarning(doc: Document): Option[Identifier] =
    doc.namespace("scala") orElse {
      val ns = doc.namespace("java")
      if (ns.isDefined && warnOnJavaNamespaceFallback)
        println("falling back to the java namespace. this will soon be deprecated")
      ns
    }

  // Namespace used for types defined in an included thrift file; falls back to
  // the default namespace when the include is unknown or has no namespace.
  override protected def getIncludeNamespace(includeFileName: String): Identifier = {
    val javaNamespace = includeMap.get(includeFileName).flatMap {
      doc: ResolvedDocument => getNamespaceWithWarning(doc.document)
    }
    javaNamespace.getOrElse(SimpleID(defaultNamespace))
  }

  override def getNamespace(doc: Document): Identifier =
    getNamespaceWithWarning(doc) getOrElse (SimpleID(defaultNamespace))

  // Renders a thrift list constant as a Seq(...) (or mutable.Buffer(...)) literal.
  def genList(list: ListRHS, mutable: Boolean = false): CodeFragment = {
    val code = (if (mutable) "mutable.Buffer(" else "Seq(") +
      list.elems.map(genConstant(_).toData).mkString(", ") + ")"
    codify(code)
  }

  // Renders a thrift set constant as a Set(...) (or mutable.Set(...)) literal.
  def genSet(set: SetRHS, mutable: Boolean = false): CodeFragment = {
    val code = (if (mutable) "mutable.Set(" else "Set(") +
      set.elems.map(genConstant(_).toData).mkString(", ") + ")"
    codify(code)
  }

  // Renders a thrift map constant as a Map(k -> v, ...) (or mutable.Map) literal.
  def genMap(map: MapRHS, mutable: Boolean = false): CodeFragment = {
    val code = (if (mutable) "mutable.Map(" else "Map(") + (map.elems.map {
      case (k, v) =>
        genConstant(k).toData + " -> " + genConstant(v).toData
    } mkString (", ")) + ")"
    codify(code)
  }

  // Renders a reference to an enum value, scoped by its enum type name; the
  // optional fieldType allows qualifying enums that come from an include.
  def genEnum(enum: EnumRHS, fieldType: Option[FieldType] = None): CodeFragment = {
    def getTypeId: Identifier = fieldType.getOrElse(Void) match {
      case n: NamedType => qualifyNamedType(n)
      case _ => enum.enum.sid
    }
    genID(enum.value.sid.toTitleCase.addScope(getTypeId.toTitleCase))
  }

  // Renders a struct constant as a constructor invocation with named
  // arguments; optional fields are wrapped in Some(...).
  def genStruct(struct: StructRHS): CodeFragment = {
    val values = struct.elems
    val fields = values map { case (f, value) =>
      val v = genConstant(value)
      genID(f.sid.toCamelCase) + "=" + (if (f.requiredness.isOptional) "Some(" + v + ")" else v)
    }
    codify(genID(struct.sid) + "(" + fields.mkString(", ") + ")")
  }

  // Default values: i64 defaults to 0L and collections default to an empty
  // collection of the rendered type; everything else defers to the superclass.
  override def genDefaultValue(fieldType: FieldType, mutable: Boolean = false): CodeFragment = {
    val code = fieldType match {
      case TI64 => "0L"
      case MapType(_, _, _) | SetType(_, _) | ListType(_, _) =>
        genType(fieldType, mutable).toData + "()"
      case _ => super.genDefaultValue(fieldType, mutable).toData
    }
    codify(code)
  }

  // i64 integer literals need the "L" suffix; all other constants defer to the
  // superclass rendering.
  override def genConstant(constant: RHS, mutable: Boolean = false, fieldType: Option[FieldType] = None): CodeFragment = {
    (constant, fieldType) match {
      case (IntLiteral(value), Some(TI64)) => codify(value.toString + "L")
      case _ => super.genConstant(constant, mutable, fieldType)
    }
  }

  /**
   * Generates a suffix to append to a field expression that will
   * convert the value to an immutable equivalent.
   * Non-collection types need no conversion and yield an empty fragment.
   */
  def genToImmutable(t: FieldType): CodeFragment = {
    val code = t match {
      case MapType(_, _, _) => ".toMap"
      case SetType(_, _) => ".toSet"
      case ListType(_, _) => ".toList"
      case _ => ""
    }
    codify(code)
  }

  /**
   * Generates a suffix to append to a field expression that will
   * convert the value to an immutable equivalent.
   * Optional fields apply the underlying conversion inside a .map(...).
   */
  def genToImmutable(f: Field): CodeFragment = {
    if (f.requiredness.isOptional) {
      val code = genToImmutable(f.fieldType).toData match {
        case "" => ""
        case underlyingToImmutable => ".map(_" + underlyingToImmutable + ")"
      }
      codify(code)
    } else {
      genToImmutable(f.fieldType)
    }
  }

  /**
   * Generates a prefix and suffix to wrap around a field expression that will
   * convert the value to a mutable equivalent.
   * Maps/sets are rebuilt via "mutable.X() ++= "; lists use ".toBuffer".
   */
  def toMutable(t: FieldType): (String, String) = {
    t match {
      case MapType(_, _, _) | SetType(_, _) => (genType(t, true).toData + "() ++= ", "")
      case ListType(_, _) => ("", ".toBuffer")
      case _ => ("", "")
    }
  }

  /**
   * Generates a prefix and suffix to wrap around a field expression that will
   * convert the value to a mutable equivalent.
   * Optional fields apply the underlying conversion inside a .map(...).
   */
  def toMutable(f: Field): (String, String) = {
    if (f.requiredness.isOptional) {
      toMutable(f.fieldType) match {
        case ("", "") => ("", "")
        case (prefix, suffix) => ("", ".map(" + prefix + "_" + suffix + ")")
      }
    } else {
      toMutable(f.fieldType)
    }
  }

  // Maps a thrift type to the Scala type name used in generated code; the
  // mutable flag selects mutable collection types for builder-style code.
  def genType(t: FunctionType, mutable: Boolean = false): CodeFragment = {
    val code = t match {
      case Void => "Unit"
      case OnewayVoid => "Unit"
      case TBool => "Boolean"
      case TByte => "Byte"
      case TI16 => "Short"
      case TI32 => "Int"
      case TI64 => "Long"
      case TDouble => "Double"
      case TString => "String"
      case TBinary => "ByteBuffer"
      case MapType(k, v, _) =>
        (if (mutable) "mutable." else "") + "Map[" + genType(k).toData + ", " + genType(v).toData + "]"
      case SetType(x, _) =>
        (if (mutable) "mutable." else "") + "Set[" + genType(x).toData + "]"
      case ListType(x, _) =>
        (if (mutable) "mutable.Buffer" else "Seq") + "[" + genType(x).toData + "]"
      case n: NamedType => genID(qualifyNamedType(n).toTitleCase).toData
      case r: ReferenceType =>
        // references should have been resolved by the frontend before codegen
        throw new ScroogeInternalException("ReferenceType should not appear in backend")
    }
    codify(code)
  }

  // In Scala primitive and boxed renderings are identical.
  def genPrimitiveType(t: FunctionType, mutable: Boolean = false): CodeFragment = genType(t, mutable)

  // Field type as used in generated signatures; optional fields are wrapped in
  // Option[...].
  def genFieldType(f: Field, mutable: Boolean = false): CodeFragment = {
    val baseType = genType(f.fieldType, mutable).toData
    val code = if (f.requiredness.isOptional) {
      "Option[" + baseType + "]"
    } else {
      baseType
    }
    codify(code)
  }

  // Renders a comma-separated parameter list ("name: Type = default, ...");
  // asVal prefixes each parameter with "val " for constructor parameters.
  def genFieldParams(fields: Seq[Field], asVal: Boolean = false): CodeFragment = {
    val code = fields.map {
      f =>
        val valPrefix = if (asVal) "val " else ""
        val nameAndType = genID(f.sid).toData + ": " + genFieldType(f).toData
        val defaultValue = genDefaultFieldValue(f) map {
          " = " + _.toData
        }
        valPrefix + nameAndType + defaultValue.getOrElse("")
    }.mkString(", ")
    codify(code)
  }

  // Base finagle service type over raw thrift byte payloads.
  def genBaseFinagleService: CodeFragment = codify("FinagleService[Array[Byte], Array[Byte]]")

  // Names of the generated finagle service/client classes for a parent service.
  def getParentFinagleService(p: ServiceParent): CodeFragment =
    genID(Identifier(getServiceParentID(p).fullName + "$FinagleService"))

  def getParentFinagleClient(p: ServiceParent): CodeFragment =
    genID(Identifier(getServiceParentID(p).fullName + "$FinagleClient"))

  // Output file for the finagle client; only produced when the WithFinagle
  // option is set.
  override def finagleClientFile(
    packageDir: File,
    service: Service, options:
    Set[ServiceOption]
  ): Option[File] =
    options.find(_ == WithFinagle) map { _ =>
      new File(packageDir, service.sid.toTitleCase.name + "$FinagleClient" + fileExtension)
    }

  // Output file for the finagle service; only produced when the WithFinagle
  // option is set.
  override def finagleServiceFile(
    packageDir: File,
    service: Service, options:
    Set[ServiceOption]
  ): Option[File] =
    options.find(_ == WithFinagle) map { _ =>
      new File(packageDir, service.sid.toTitleCase.name + "$FinagleService" + fileExtension)
    }
}
| tellapart/scrooge | scrooge-generator/src/main/scala/com/twitter/scrooge/backend/ScalaGenerator.scala | Scala | apache-2.0 | 10,627 |
package mesosphere.marathon
package api.akkahttp
import java.net.URI

import scala.util.Try

import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Route
import com.typesafe.scalalogging.StrictLogging
/**
 * The ResourceController delivers static content (UI, API console, and public
 * assets) from the classpath.
 */
class ResourceController extends Controller with StrictLogging {

  import Directives._

  /**
   * This route serves resources from the class path.
   * It makes sure only resources below `base` are sent: the requested path is
   * normalized first so traversal via `..` cannot escape the base directory.
   *
   * @param base     classpath prefix the resource must live under
   * @param resource the resource path requested by the client; an empty path
   *                 is mapped to `index.html`
   */
  private[this] def fromResource(base: String, resource: String): Route = {
    logger.info(s"Serve static resource from base $base resource: $resource")
    val effectiveResource = if (resource.isEmpty) "index.html" else resource
    val effectivePath = s"$base/$effectiveResource"
    // make sure a request does not escape its base path via `..`; a resource
    // path that is not a syntactically valid URI is rejected (-> 404) instead
    // of letting the URISyntaxException fail the route (-> 500)
    val normalized = Try(new URI(effectivePath).normalize().getPath)
    if (normalized.toOption.exists(_.startsWith(base))) getFromResource(effectivePath)
    else reject
  }

  // serves files bundled inside the given webjar artifact
  private[this] def webJarResource(jar: String)(resource: String): Route = {
    fromResource(s"META-INF/resources/webjars/$jar", resource)
  }

  // serves files from the `public` folder on the classpath
  private[this] def publicResource(resource: String): Route = {
    fromResource("public", resource)
  }

  override val route: Route = get {
    pathSingleSlash { redirect("ui/", StatusCodes.TemporaryRedirect) } ~
    path("ui") { redirect("ui/", StatusCodes.TemporaryRedirect) } ~
    path("help") { redirect("help/", StatusCodes.TemporaryRedirect) } ~
    path("api-console") { redirect("api-console/", StatusCodes.TemporaryRedirect) } ~
    path("public") { redirect("public/", StatusCodes.TemporaryRedirect) } ~
    path("ui" / Remaining) { webJarResource("ui") } ~
    path("help" / Remaining) { webJarResource("api-console") } ~
    path("api-console" / Remaining) { webJarResource("api-console") } ~
    path("public" / Remaining) { publicResource }
  }
}
| janisz/marathon | src/main/scala/mesosphere/marathon/api/akkahttp/ResourceController.scala | Scala | apache-2.0 | 1,912 |
/* Copyright (c) 2008 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.gdata.client
/**
 * An authentication token used by GData services. It encapsulates the logic
 * necessary to fill the 'Auth' HTTP header in GData requests. Such tokens are
 * obtained using an AuthFactory.
 *
 * @see http://code.google.com/apis/gdata/auth.html for information on Google
 *      authentication.
 */
trait AuthToken {

  /**
   * Return the contents of the Auth: header field to be passed in GData
   * requests that are using this authentication token.
   *
   * @return the value to place in the 'Auth' HTTP header (without the header name)
   */
  def getAuthHeader: String
}
| mjanson/gdata-scala-client | src/com/google/gdata/client/AuthToken.scala | Scala | apache-2.0 | 1,125 |
package blended.updater.config
/**
 * Used as HTTP response for a [[ContainerInfo]] update; carries any update
 * actions the registry wants the container to perform.
 *
 * @param id identifier the response is addressed to (presumably the container
 *           id reported in the [[ContainerInfo]] — confirm against the caller)
 * @param actions update actions to apply; empty when there is nothing to do
 */
case class ContainerRegistryResponseOK(id: String, actions: List[UpdateAction] = List.empty)
| lefou/blended | blended.updater.config/shared/src/main/scala/blended/updater/config/ContainerRegistryResponseOK.scala | Scala | apache-2.0 | 256 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rdd
import org.apache.spark.SparkContext._
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.models.{
MultiContigNonoverlappingRegions,
ReferenceRegion
}
import scala.Predef._
import scala.reflect.ClassTag
/**
 * Contains multiple implementations of a 'region join', an operation that joins two sets of
 * regions based on the spatial overlap between the regions.
 *
 * Different implementations will have different performance characteristics -- and new implementations
 * will likely be added in the future, see the notes to each individual method for more details.
 */
sealed trait BroadcastRegionJoin[T, U, RT] extends RegionJoin[T, U, RT, U] {

  /**
   * Performs a region join between two RDDs (broadcast join).
   *
   * This implementation first _collects_ the left-side RDD; therefore, if the left-side RDD is large
   * or otherwise idiosyncratic in a spatial sense (i.e. contains a set of regions whose unions overlap
   * a significant fraction of the genome) then the performance of this implementation will likely be
   * quite bad.
   *
   * Once the left-side RDD is collected, its elements are reduced to their distinct unions;
   * these can then be used to define the partitions over which the region-join will be computed.
   *
   * The regions in the left-side are keyed by their corresponding partition (each such region should have
   * exactly one partition). The regions in the right-side are also keyed by their corresponding partitions
   * (here there can be more than one partition for a region, since a region may cross the boundaries of
   * the partitions defined by the left-side).
   *
   * Finally, within each separate partition, we essentially perform a cartesian-product-and-filter
   * operation. The result is the region-join.
   *
   * @param baseRDD The 'left' side of the join
   * @param joinedRDD The 'right' side of the join
   * @param tManifest implicit type of baseRDD
   * @param uManifest implicit type of joinedRDD
   * @tparam T type of baseRDD
   * @tparam U type of joinedRDD
   * @return An RDD of pairs (x, y), where x is from baseRDD, y is from joinedRDD, and the region
   *         corresponding to x overlaps the region corresponding to y.
   */
  def partitionAndJoin(
    baseRDD: RDD[(ReferenceRegion, T)],
    joinedRDD: RDD[(ReferenceRegion, U)])(implicit tManifest: ClassTag[T],
                                          uManifest: ClassTag[U]): RDD[(RT, U)] = {
    val sc = baseRDD.context

    /**
     * Original Join Design:
     *
     * Parameters:
     *   (1) f : (Range, Range) => T  // an aggregation function
     *   (2) a : RDD[Range]
     *   (3) b : RDD[Range]
     *
     * Return type: RDD[(Range,T)]
     *
     * Algorithm:
     *   1. a.collect() (where a is smaller than b)
     *   2. build a non-overlapping partition on a
     *   3. ak = a.map( v => (partition(v), v) )
     *   4. bk = b.flatMap( v => partitions(v).map( i=>(i,v) ) )
     *   5. joined = ak.join(bk).filter( (i, (r1, r2)) => r1.overlaps(r2) ).map( (i, (r1,r2))=>(r1, r2) )
     *   6. return: joined.reduceByKey(f)
     *
     * Ways in which we've generalized this plan:
     * - removed the aggregation step altogether
     * - carry a sequence dictionary through the computation.
     */

    // First, we group the regions in the left side of the join by their referenceName,
    // and collect them.
    val collectedLeft: Seq[(String, Iterable[ReferenceRegion])] =
      baseRDD
        .map(_._1) // RDD[ReferenceRegion]
        .keyBy(_.referenceName) // RDD[(String,ReferenceRegion)]
        .groupByKey() // RDD[(String,Seq[ReferenceRegion])]
        .collect() // Iterable[(String,Seq[ReferenceRegion])]
        .toSeq // Seq[(String,Seq[ReferenceRegion])]

    // Next, we turn that into a data structure that reduces those regions to their non-overlapping
    // pieces, which we will use as a partition.
    val multiNonOverlapping = new MultiContigNonoverlappingRegions(collectedLeft)

    // Then, we broadcast those partitions -- this will be the function that allows us to
    // partition all the regions on the right side of the join.
    val regions = sc.broadcast(multiNonOverlapping)

    // each element of the left-side RDD should have exactly one partition.
    val smallerKeyed: RDD[(ReferenceRegion, (ReferenceRegion, T))] =
      baseRDD.map(t => (regions.value.regionsFor(t).head, t))

    // each element of the right-side RDD may have 0, 1, or more than 1 corresponding partition.
    val largerKeyed: RDD[(ReferenceRegion, (ReferenceRegion, U))] =
      joinedRDD.flatMap(t => regionsFor(t, regions).map((r: ReferenceRegion) => (r, t)))

    // The subclass decides how the two partition-keyed RDDs are joined and filtered
    // (inner vs. right outer), see joinAndFilterFn below.
    joinAndFilterFn(smallerKeyed, largerKeyed)
  }

  /**
   * Hook for subclasses: returns the partition region(s) under which the given
   * right-side element should be keyed. Subclasses use this to decide what happens
   * to right-side elements that overlap none of the broadcast left-side partitions
   * (drop them for an inner join, keep them for an outer join).
   */
  protected def regionsFor(u: (ReferenceRegion, U),
                           regions: Broadcast[MultiContigNonoverlappingRegions]): Iterable[ReferenceRegion]

  /**
   * Hook for subclasses: joins the two partition-keyed RDDs and filters the
   * per-partition candidate pairs down to the final join result. The join flavor
   * (inner, right outer, ...) is implemented here.
   */
  protected def joinAndFilterFn(tRdd: RDD[(ReferenceRegion, (ReferenceRegion, T))],
                                uRdd: RDD[(ReferenceRegion, (ReferenceRegion, U))]): RDD[(RT, U)]
}
/**
 * Extends the BroadcastRegionJoin trait to implement an inner join.
 */
case class InnerBroadcastRegionJoin[T, U]() extends BroadcastRegionJoin[T, U, T] {

  /**
   * Joins the partition-keyed RDDs and keeps only those pairs whose regions
   * genuinely overlap; partition keys and regions are dropped from the result.
   */
  protected def joinAndFilterFn(tRdd: RDD[(ReferenceRegion, (ReferenceRegion, T))],
                                uRdd: RDD[(ReferenceRegion, (ReferenceRegion, U))]): RDD[(T, U)] = {
    // The join on partition keys is essentially a per-partition cartesian
    // product, so each candidate pair must still be checked for real overlap.
    tRdd.join(uRdd).flatMap {
      case (_, ((tRegion, tValue), (uRegion, uValue))) =>
        if (tRegion.overlaps(uRegion)) Iterator((tValue, uValue)) else Iterator.empty
    }
  }

  /**
   * For an inner join a right-side element is keyed only by the left-side
   * partitions it overlaps; elements overlapping no partition simply vanish.
   */
  protected def regionsFor(u: (ReferenceRegion, U),
                           regions: Broadcast[MultiContigNonoverlappingRegions]): Iterable[ReferenceRegion] =
    regions.value.regionsFor(u)
}
/**
 * Extends the BroadcastRegionJoin trait to implement a right outer join.
 */
case class RightOuterBroadcastRegionJoin[T, U]() extends BroadcastRegionJoin[T, U, Option[T]] {

  /**
   * Right-outer joins the partition-keyed RDDs: every right-side element is kept,
   * paired with Some(left value) when the regions truly overlap and None otherwise.
   */
  protected def joinAndFilterFn(tRdd: RDD[(ReferenceRegion, (ReferenceRegion, T))],
                                uRdd: RDD[(ReferenceRegion, (ReferenceRegion, U))]): RDD[(Option[T], U)] = {
    // The partition-key join over-approximates the result, so the optional left
    // side is cleared whenever its region does not actually overlap.
    tRdd.rightOuterJoin(uRdd).map {
      case (_, (leftOpt, (uRegion, uValue))) =>
        val overlapped = leftOpt match {
          case Some((tRegion, tValue)) if tRegion.overlaps(uRegion) => Some(tValue)
          case _                                                    => None
        }
        (overlapped, uValue)
    }
  }

  /**
   * For a right outer join, a right-side element overlapping no left-side
   * partition must still survive, so it is keyed by its own region.
   */
  protected def regionsFor(u: (ReferenceRegion, U),
                           regions: Broadcast[MultiContigNonoverlappingRegions]): Iterable[ReferenceRegion] = {
    val overlapping = regions.value.regionsFor(u)
    if (overlapping.nonEmpty) overlapping else Iterable(u._1)
  }
}
| tdanford/adam | adam-core/src/main/scala/org/bdgenomics/adam/rdd/BroadcastRegionJoin.scala | Scala | apache-2.0 | 8,080 |
import euler._
object Main extends App {
  // Project Euler problem 1: sum of all natural numbers below 1000 that are
  // divisible by 3 or 5. `.sum` replaces the manual `foldLeft(0)(_ + _)`.
  val answer = (1 until 1000).filter(x => x % 3 == 0 || x % 5 == 0).sum
  println(answer)
}
| yaworsw/euler-manager | example/1/scala/1.scala | Scala | mit | 152 |
package com.sfxcode.sapphire.extension.filter
import javafx.scene.layout.Pane
import com.sfxcode.sapphire.core.control.{ FXTableCellFactory, FXTableValueFactory }
import com.sfxcode.sapphire.core.value.FXBean
import com.sfxcode.sapphire.extension.control.table.TableColumnFactory
import javafx.beans.property.ReadOnlyObjectProperty
import scala.collection.mutable
import scala.reflect.ClassTag
import scala.reflect.runtime.{ universe => ru }
import javafx.beans.property.ObjectProperty
import javafx.collections.ObservableList
import javafx.scene.control.{ TableView, _ }
import javafx.scene.text.TextAlignment
import com.sfxcode.sapphire.core.CollectionExtensions._
/**
 * Filterable wrapper around a JavaFX TableView of FXBean-wrapped rows of type S.
 *
 * Column value/cell factories are derived reflectively from the vals/vars of S;
 * the inherited DataFilter pushes filtered rows back into the table whenever
 * `filterResult` changes.
 *
 * @param table the table to manage
 * @param items the full (unfiltered) item list
 * @param pane  container the filter controls are placed in (handled by DataFilter)
 * @param ct    class tag of S, used for runtime reflection over its members
 */
class DataTableFilter[S <: AnyRef](
    table: TableView[FXBean[S]],
    items: ObjectProperty[ObservableList[FXBean[S]]],
    pane: ObjectProperty[Pane])(implicit ct: ClassTag[S])
  extends DataFilter[S](items, pane) {

  // columns
  // maps a column key (header or property name) to the created TableColumn
  val columnMapping = new mutable.HashMap[String, TableColumn[FXBean[S], _]]()
  // optional remapping: column key -> bean property path used by the value factory
  val columnPropertyMap = new mutable.HashMap[String, String]()
  // optional remapping: member name -> header text shown in the table
  val columnHeaderMap = new mutable.HashMap[String, String]()

  // reflection
  // reversed so members appear in declaration order
  private val mirror = ru.runtimeMirror(ct.runtimeClass.getClassLoader)
  private val members = mirror.classSymbol(ct.runtimeClass).asType.typeSignature.members.toList.reverse

  // log the val/var member names discovered for S (debug aid)
  logger.debug(
    members
      .collect({ case x if x.isTerm => x.asTerm })
      .filter(t => t.isVal || t.isVar)
      .map(m => m.name.toString)
      .toString())

  // keep the table contents in sync with the current filter result,
  // re-applying the table's sort order after each change
  filterResult.addChangeListener { _ =>
    table.getItems.clear()
    table.getItems.addAll(filterResult)
    table.sort()
  }

  // apply the (initially empty) filter once so the table is populated
  filter()

  override def itemsHasChanged(): Unit = {
    super.itemsHasChanged()
    table.autosize()
    table.layout()
  }

  /** Adds a prebuilt column to the table and registers it under `key`. */
  def addColumn(key: String, column: TableColumn[FXBean[S], _]): Unit = {
    table.getColumns.add(column)
    columnMapping.put(key, column)
  }

  /**
   * Reflectively creates one column per val/var member of S (honoring
   * columnHeaderMap/columnPropertyMap overrides) and adds them all.
   */
  def addColumns[T](
      editable: Boolean = false,
      numberFormat: String = "#,##0",
      decimalFormat: String = "#,##0.00"): Unit = {
    val columnList = TableColumnFactory.columnListFromMembers[S, T](
      members,
      columnHeaderMap.toMap,
      columnPropertyMap.toMap,
      editable,
      numberFormat,
      decimalFormat)
    columnList._1.foreach(key => addColumn(key, columnList._2(key)))
  }

  /**
   * Creates and adds a single column with the given header, bound to `property`
   * (optionally remapped via columnPropertyMap), returning the new column.
   */
  def addColumn[T](
      header: String,
      property: String,
      alignment: TextAlignment = TextAlignment.LEFT): TableColumn[FXBean[S], T] = {
    val valueFactory = new FXTableValueFactory[FXBean[S], T]()
    valueFactory.setProperty(columnPropertyMap.getOrElse(property, property))
    val cellFactory = new FXTableCellFactory[FXBean[S], T]()
    cellFactory.setAlignment(alignment)
    val result = TableColumnFactory.columnFromFactories[S, T](header, valueFactory, Some(cellFactory))
    // NOTE: the column is registered under its header text here, not `property`
    addColumn(header, result)
    result
  }

  /** Looks up a previously added column by the key it was registered under. */
  def getColumn[T](property: String): Option[TableColumn[FXBean[S], _]] =
    columnMapping.get(property)

  def getTable: TableView[FXBean[S]] = table

  def getItems: ObservableList[FXBean[S]] = table.getItems

  /** Hides the named columns (unknown names are silently ignored). */
  def hideColumn(name: String*): Unit = name.foreach(name => getColumn(name).foreach(c => c.setVisible(false)))

  /** Shows the named columns (unknown names are silently ignored). */
  def showColumn(name: String*): Unit = name.foreach(name => getColumn(name).foreach(c => c.setVisible(true)))

  def setColumnText(name: String, text: String): Unit = getColumn(name).foreach(c => c.setText(text))

  def setColumnPrefWidth(name: String, value: Double): Unit = getColumn(name).foreach(c => c.setPrefWidth(value))

  /** Currently selected row, or null when nothing is selected. */
  def selectedBean: FXBean[S] = table.getSelectionModel.selectedItemProperty.get

  /** Observable selection property, for listeners/bindings. */
  def selectedItem: ReadOnlyObjectProperty[FXBean[S]] = table.getSelectionModel.selectedItemProperty

  def selectedItems: ObservableList[FXBean[S]] = table.getSelectionModel.getSelectedItems
}
| sfxcode/sapphire-extension | src/main/scala/com/sfxcode/sapphire/extension/filter/DataTableFilter.scala | Scala | apache-2.0 | 3,755 |
package com.thangiee.lolhangouts.ui.profile
import android.content.Context
import android.support.v7.widget.{LinearLayoutManager, RecyclerView}
import android.util.AttributeSet
import android.view.View
import android.widget.FrameLayout
import com.pnikosis.materialishprogress.ProgressWheel
import com.skocken.efficientadapter.lib.adapter.SimpleAdapter
import com.thangiee.lolhangouts.R
import com.thangiee.lolhangouts.data.usecases.entities.Match
import com.thangiee.lolhangouts.data.usecases.ViewProfileUseCaseImpl
import com.thangiee.lolhangouts.ui.core.CustomView
import com.thangiee.lolhangouts.ui.regionselection.RegionViewHolder
import com.thangiee.lolhangouts.ui.utils._
import jp.wasabeef.recyclerview.animators.adapters.AlphaInAnimationAdapter
import tr.xip.errorview.ErrorView
import scala.collection.JavaConversions._
/**
 * Android custom view showing a summoner's match history as a RecyclerView of
 * match cards, with a loading spinner and a retryable error view. Data loading
 * is driven by ProfileMatchHistPresenter; this class only renders states.
 */
class ProfileMatchHistView(implicit ctx: Context, a: AttributeSet) extends FrameLayout(ctx, a) with CustomView {
  private lazy val loadingWheel = find[ProgressWheel](R.id.loading_wheel)
  private lazy val errorView = find[ErrorView](R.id.error_view)
  private lazy val matchRecyclerView = find[RecyclerView](R.id.rv_suggestions)

  override protected val presenter = new ProfileMatchHistPresenter(this, ViewProfileUseCaseImpl())

  override def onAttached(): Unit = {
    super.onAttached()
    addView(layoutInflater.inflate(R.layout.profile_match_hist, this, false))

    val llm = new LinearLayoutManager(ctx)
    llm.setSmoothScrollbarEnabled(true)
    matchRecyclerView.setLayoutManager(llm)
    // items are fixed-size cards, so let RecyclerView skip re-measuring
    matchRecyclerView.setHasFixedSize(true)
    matchRecyclerView.setItemViewCacheSize(10)
  }

  /** Kicks off loading for the given summoner; retry re-issues the same request. */
  def setProfile(name: String, regionId: String) = {
    presenter.handleSetProfile(name, regionId)
    errorView.setOnRetryListener(() => presenter.handleSetProfile(name, regionId))
  }

  /** Populates the list with the loaded matches, fading each card in. */
  def initializeViewData(matches: List[Match]): Unit = {
    delay(500) { // wait for loading wheel to hide
      // NOTE(review): the cast to RecyclerView.Adapter[RegionViewHolder] looks
      // erasure-driven (holder type is actually MatchViewHolder) — verify.
      val adapter = new SimpleAdapter[Match](R.layout.card_match_hist, classOf[MatchViewHolder], matches).asInstanceOf[RecyclerView.Adapter[RegionViewHolder]]
      val alphaInAdapter = new AlphaInAnimationAdapter(adapter)
      alphaInAdapter.setDuration(500)
      matchRecyclerView.setAdapter(alphaInAdapter)
    }
  }

  /** Shows the spinner and hides both the list and any previous error. */
  def showLoading(): Unit = {
    loadingWheel.spin()
    loadingWheel.setVisibility(View.VISIBLE)
    errorView.setVisibility(View.GONE)
    loadingWheel.fadeInDown(duration = 1)
    matchRecyclerView.setVisibility(View.INVISIBLE)
  }

  /** Animates the spinner out and the list in (list fade starts after the spinner). */
  def hideLoading(): Unit = {
    loadingWheel.setProgress(1)
    loadingWheel.fadeOutUp(duration = 750, delay = 1000)
    matchRecyclerView.fadeIn(duration = 1, delay = 1750)
  }

  def showDataNotFound(): Unit = showError("No Result", R.string.no_match_hist.r2String)

  // message depends on whether the failure looks like a connectivity problem
  def showGetDataError(): Unit = showError(
    title = (if (hasWifiConnection) R.string.server_busy else R.string.no_wifi).r2String,
    subTitle = R.string.err_get_data.r2String
  )

  /** Fades the spinner out, then (after its animation) swaps in the error view. */
  private def showError(title: String, subTitle: String): Unit = {
    loadingWheel.fadeOutUp(duration = 500, delay = 1000)
    delay(1500) {
      errorView.setTitle(title)
      errorView.setSubtitle(subTitle)
      errorView.setVisibility(View.VISIBLE)
    }
  }
}
| Thangiee/LoL-Hangouts | src/com/thangiee/lolhangouts/ui/profile/ProfileMatchHistView.scala | Scala | apache-2.0 | 3,219 |
package io.iteratee.benchmark
import org.scalatest.FlatSpec
class InMemoryBenchmarkSpec extends FlatSpec {
  // One shared fixture: every test sums the same integer sequence through a
  // different streaming implementation and must produce the same total.
  val benchmark: InMemoryBenchmark = new InMemoryBenchmark
  // Expected total: sum of the integers 0 until 10000.
  val sum = 49995000

  behavior of "The in-memory benchmark"

  it should "correctly calculate the sum using io.iteratee.modules.id" in {
    assertResult(sum)(benchmark.sumInts0II)
  }

  it should "correctly calculate the sum using io.iteratee.monix" in {
    assertResult(sum)(benchmark.sumInts1IM)
  }

  it should "correctly calculate the sum using io.iteratee.scalaz" in {
    assertResult(sum)(benchmark.sumInts2IT)
  }

  it should "correctly calculate the sum using scalaz-stream" in {
    assertResult(sum)(benchmark.sumInts3S)
  }

  it should "correctly calculate the sum using scalaz-iteratee" in {
    assertResult(sum)(benchmark.sumInts4Z)
  }

  it should "correctly calculate the sum using fs2" in {
    assertResult(sum)(benchmark.sumInts5F)
  }

  it should "correctly calculate the sum using the collections library" in {
    assertResult(sum)(benchmark.sumInts6C)
  }
}
| flyingwalrusllc/iteratee | benchmark/src/test/scala/io/iteratee/benchmark/InMemoryBenchmarkSpec.scala | Scala | apache-2.0 | 1,021 |
import scala.quoted._
object Macro {
  // Inline front end: each call site of `foo` is replaced by splicing in the
  // expression produced by `fooImpl`, with the argument passed as a quoted Expr.
  inline def foo[X](x: X): Unit = ${fooImpl('x)}
  // Macro implementation: receives the quoted argument (and its Type) but
  // discards both, always expanding to the empty unit expression '{}.
  def fooImpl[X: quoted.Type](x: Expr[X])(using QuoteContext): Expr[Unit] = '{}
}
| som-snytt/dotty | tests/run-macros/i4515/Macro_1.scala | Scala | apache-2.0 | 168 |
package ch.wsl.box.client.views.components.widget
import java.util.UUID
import ch.wsl.box.client.routes.Routes
import ch.wsl.box.client.services.{ClientConf, REST}
import ch.wsl.box.client.styles.{BootstrapCol, GlobalStyles}
import ch.wsl.box.client.views.components.Debug
import ch.wsl.box.model.shared._
import io.circe.Json
import io.udash._
import io.udash.bindings.Bindings
import io.udash.bootstrap.BootstrapStyles
import org.scalajs.dom
import org.scalajs.dom.File
import scalatags.JsDom
import scribe.Logging
import scala.concurrent.Future
import scala.util.Random
/**
 * Form widget for a file-typed field: shows a thumbnail preview plus a download
 * link for the stored file, and (in edit mode) a file input whose selection is
 * uploaded in `afterSave` once the row id is known.
 *
 * @param id     observable JSONID string of the current row (None for new rows)
 * @param data   holds the file name (as a JSON string) of the stored/selected file
 * @param field  the metadata field; `field.file` carries the file column info
 * @param entity entity name used to build the server file URLs
 */
case class FileWidget(id:ReadableProperty[Option[String]], data:Property[Json], field:JSONField, entity:String) extends Widget with HasData with Logging {
  import ch.wsl.box.client.Context._
  import scalatags.JsDom.all._
  import scalacss.ScalatagsCss._
  import io.udash.css.CssView._
  import ch.wsl.box.shared.utils.JSONUtils._
  import io.circe.syntax._

  // unique per widget instance
  val instanceId = UUID.randomUUID().toString

  // Server path of the stored file for a given row id, None if the id string
  // cannot be parsed into a JSONID.
  def url(idString:String):Option[String] = {
    JSONID.fromString(idString).map{ id =>
      s"/file/${entity}.${field.file.get.file_field}/${idString}"
    }
  }

  val urlProp:Property[Option[String]] = Property(id.get.flatMap(url))
  val fileName:Property[String] = Property("")
  // files picked via the FileInput (at most one is ever used)
  val selectedFile: SeqProperty[File] = SeqProperty(Seq.empty[File])

  // When the row id changes (e.g. after save or navigation): drop any pending
  // selection, refresh the displayed file name and recompute the file URL.
  autoRelease(id.listen({ idString =>
    selectedFile.set(Seq())
    fileName.set(data.get.string)
    val newUrl = idString.flatMap(url)
    if(urlProp.get != newUrl) {
      urlProp.set(newUrl)
    }
  },true))

  // Mirror the selected file's name into the bound JSON data field.
  autoRelease(selectedFile.listen{ files =>
    logger.info(s"selected file changed ${files.map(_.name)}")
    data.set(files.headOption.map(_.name).asJson)
  })

  /**
   * Uploads the selected file(s) once the row has been saved and its id is
   * available; returns the (unchanged) save result.
   */
  override def afterSave(result:Json, metadata: JSONMetadata) = {
    logger.debug(s"FileWidget afterSave json: $result")
    val jsonid = result.ID(metadata.keys)
    for{
      idfile <- Future.sequence{
        val r: Seq[Future[Int]] = selectedFile.get.map(services.rest.sendFile(_,jsonid.get,s"${metadata.entity}.${field.file.get.file_field}")).toSeq
        r
      }
    } yield {
      logger.info("image saved")
      //id.touch()
      result
    }
  }

  // Thumbnail + download link for the currently stored file; empty when the
  // row has no usable URL. The random query string busts the browser cache.
  private def showImage = {
    logger.debug("showImage")
    autoRelease(produceWithNested(urlProp) { (url,nested) =>
      val randomString = UUID.randomUUID().toString
      url match {
        case Some(u) => div(
          //need to understand why is been uploaded two times
          img(src := Routes.apiV1(s"${u}/thumb?$randomString"),ClientConf.style.imageThumb) ,br,
          nested(produce(fileName) { name => a(href := Routes.apiV1(s"$u?name=$name"), name).render })
        ).render
        case None => div().render
      }
    })
  }

  // Read-only rendering: label plus preview/link.
  override protected def show(): JsDom.all.Modifier = div(BootstrapCol.md(12),ClientConf.style.noPadding,
    label(field.title),
    showImage,
    div(BootstrapStyles.Visibility.clearfix),
  ).render

  // Edit rendering: adds a file input; the input is re-created whenever the id
  // changes so a stale selection cannot survive a row switch.
  override def edit() = {
    div(BootstrapCol.md(12),ClientConf.style.noPadding,
      WidgetUtils.toLabel(field),
      showImage,
      autoRelease(produce(id) { _ => div(FileInput(selectedFile, Property(false))("file")).render }),
      div(BootstrapStyles.Visibility.clearfix)
    ).render
  }
}
/** Registers [[FileWidget]] under the `fileWithPreview` widget name. */
object FileWidgetFactory extends ComponentWidgetFactory {

  override def name: String = WidgetsNames.fileWithPreview

  override def create(params: WidgetParams): Widget =
    FileWidget(params.id, params.prop, params.field, params.metadata.entity)
}
| Insubric/box | client/src/main/scala/ch/wsl/box/client/views/components/widget/FileWidget.scala | Scala | apache-2.0 | 3,605 |
package upickle.example
import acyclic.file
import utest._
import upickle.example.Simple.Thing
// Test fixture: a case class with two optional fields, used to exercise the
// null-as-None Option encoding of OptionPickler below.
case class Opt(a: Option[String], b: Option[Int])
object Opt{
  // derived against OptionPickler so its custom Option handling applies
  implicit def rw: OptionPickler.ReadWriter[Opt] = OptionPickler.macroRW
}
// Custom upickle configuration that serializes Option[T] as a bare value:
// Some(x) is written as x itself and None as JSON null (instead of upickle's
// default array encoding for Options).
object OptionPickler extends upickle.AttributeTagged {
  // Writing: delegate to the underlying T writer; the null.asInstanceOf[T]
  // trick makes None render as JSON null (safe because the value is only
  // passed to the writer, never used as a real T).
  override implicit def OptionWriter[T: Writer]: Writer[Option[T]] =
    implicitly[Writer[T]].comap[Option[T]] {
      case None => null.asInstanceOf[T]
      case Some(x) => x
    }
  // Reading: wrap the T reader so any non-null value becomes Some(value),
  // while an explicit JSON null is intercepted and mapped to None.
  override implicit def OptionReader[T: Reader]: Reader[Option[T]] = {
    new Reader.Delegate[Any, Option[T]](implicitly[Reader[T]].map(Some(_))){
      override def visitNull(index: Int) = None
    }
  }
}
// end_ex
// Verifies OptionPickler's null-as-None behavior end to end: primitives,
// bare Options, case classes with optional fields, and custom bimap picklers.
// NOTE(review): the suite mixes the `test("...")` and symbol (`'name`) utest
// registration styles; both work, the difference is purely stylistic.
object OptionsAsNullTests extends TestSuite {
  import OptionPickler._
  // pickler for the shared Thing fixture, derived against OptionPickler
  implicit def rw: OptionPickler.ReadWriter[Thing] = OptionPickler.macroRW
  val tests = TestSuite {
    test("nullAsNone"){

      // Quick check to ensure we didn't break anything
      test("primitive"){
        write("A String") ==> "\\"A String\\""
        read[String]("\\"A String\\"") ==> "A String"
        write(1) ==> "1"
        read[Int]("1") ==> 1
        write(Thing(1, "gg")) ==> """{"myFieldA":1,"myFieldB":"gg"}"""
        read[Thing]("""{"myFieldA":1,"myFieldB":"gg"}""") ==> Thing(1, "gg")
      }

      // None must round-trip through JSON null
      test("none"){
        write[None.type](None) ==> "null"
        read[None.type]("null") ==> None
      }

      // Some(x) is written as the bare value x
      test("some"){
        write(Some("abc")) ==> "\\"abc\\""
        read[Some[String]]("\\"abc\\"") ==> Some("abc")
        write(Some(1)) ==> "1"
        read[Some[Int]]("1") ==> Some(1)
        write(Some(3.14159)) ==> "3.14159"
        read[Some[Double]]("3.14159") ==> Some(3.14159)
      }

      test("option"){
        write(Option("abc")) ==> "\\"abc\\""
        read[Option[String]]("\\"abc\\"") ==> Some("abc")
        read[Option[String]]("null") ==> None
      }

      // optional fields of a case class serialize as null / bare values
      test("caseClass"){
        write(Opt(None, None)) ==> """{"a":null,"b":null}"""
        read[Opt]("""{"a":null,"b":null}""") ==> Opt(None, None)
        write(Opt(Some("abc"), Some(1))) ==> """{"a":"abc","b":1}"""
      }

      test("optionCaseClass"){
        implicit val thingReader = implicitly[Reader[Thing]]
        implicit val thingWriter = implicitly[Writer[Thing]]

        write(Opt(None, None)) ==> """{"a":null,"b":null}"""
        read[Opt]("""{"a":null,"b":null}""") ==> Opt(None, None)
        write(Opt(Some("abc"), Some(1))) ==> """{"a":"abc","b":1}"""

        write(Option(Thing(1, "gg"))) ==> """{"myFieldA":1,"myFieldB":"gg"}"""
        read[Option[Thing]]("""{"myFieldA":1,"myFieldB":"gg"}""") ==> Option(Thing(1, "gg"))
      }

      // New tests. Work as expected.
      'customPickler {
        // Custom pickler copied from the documentation
        class CustomThing2(val i: Int, val s: String)
        object CustomThing2 {
          implicit val rw = /*upickle.default*/ OptionPickler.readwriter[String].bimap[CustomThing2](
            x => x.i + " " + x.s,
            str => {
              val Array(i, s) = str.split(" ", 2)
              new CustomThing2(i.toInt, s)
            }
          )
        }

        'customClass {
          write(new CustomThing2(10, "Custom")) ==> "\\"10 Custom\\""
          val r = read[CustomThing2]("\\"10 Custom\\"")
          assert(r.i == 10, r.s == "Custom")
        }
        'optCustomClass_Some {
          write(Some(new CustomThing2(10, "Custom"))) ==> "\\"10 Custom\\""
          val r = read[Option[CustomThing2]]("\\"10 Custom\\"")
          assert(r.get.i == 10, r.get.s == "Custom")
        }
        'optCustomClass_None {
          read[Option[CustomThing2]]("null") ==> None
        }

      }

      // Copied from ExampleTests
      'Js {
        import OptionPickler._ // changed from upickle.default._
        case class Bar(i: Int, s: String)
        implicit val fooReadWrite: ReadWriter[Bar] =
          readwriter[ujson.Value].bimap[Bar](
            x => ujson.Arr(x.s, x.i),
            json => new Bar(json(1).num.toInt, json(0).str)
          )

        write(Bar(123, "abc")) ==> """["abc",123]"""
        read[Bar]("""["abc",123]""") ==> Bar(123, "abc")

        // New tests. Last one fails. Why?
        'option {
          'write {write(Some(Bar(123, "abc"))) ==> """["abc",123]"""}
          'readSome {read[Option[Bar]]("""["abc",123]""") ==> Some(Bar(123, "abc"))}
          'readNull {read[Option[Bar]]("""null""") ==> None}
        }
      }
    }
  }
}
} | lihaoyi/upickle-pprint | upickle/test/src/upickle/example/OptionsAsNullTests.scala | Scala | mit | 4,435 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.examples.funsuite.ignore
import org.scalatest.FunSuite
class SetSuite extends FunSuite {

  // Registered but skipped: `ignore` keeps the test visible in reports
  // without executing its body.
  ignore("An empty Set should have size 0") {
    val emptySetSize = Set.empty.size
    assert(emptySetSize === 0)
  }

  // `intercept` fails the test unless the body throws the expected exception,
  // and returns the caught exception for further inspection.
  test("Invoking head on an empty Set should produce NoSuchElementException") {
    val thrown = intercept[NoSuchElementException] {
      Set.empty.head
    }
    assert(thrown ne null)
  }
}
| travisbrown/scalatest | examples/src/main/scala/org/scalatest/examples/funsuite/ignore/SetSuite.scala | Scala | apache-2.0 | 952 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.TimeZone
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.StringType
/**
 * End-to-end tests for the `window` time-windowing function: tumbling and
 * sliding windows, custom start offsets, multi-column projections, joins on
 * windows, null/negative timestamps, millisecond precision, and the SQL
 * `window(...)` syntax with one, two, and three arguments.
 */
class DataFrameTimeWindowingSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach {

  import testImplicits._

  test("tumbling window groupBy statement") {
    val df = Seq(
      ("2016-03-27 19:39:34", 1, "a"),
      ("2016-03-27 19:39:56", 2, "a"),
      ("2016-03-27 19:39:27", 4, "b")).toDF("time", "value", "id")
    checkAnswer(
      df.groupBy(window($"time", "10 seconds"))
        .agg(count("*").as("counts"))
        .orderBy($"window.start".asc)
        .select("counts"),
      Seq(Row(1), Row(1), Row(1))
    )
  }

  test("tumbling window groupBy statement with startTime") {
    val df = Seq(
      ("2016-03-27 19:39:34", 1, "a"),
      ("2016-03-27 19:39:56", 2, "a"),
      ("2016-03-27 19:39:27", 4, "b")).toDF("time", "value", "id")

    checkAnswer(
      df.groupBy(window($"time", "10 seconds", "10 seconds", "5 seconds"), $"id")
        .agg(count("*").as("counts"))
        .orderBy($"window.start".asc)
        .select("counts"),
      Seq(Row(1), Row(1), Row(1)))
  }

  test("tumbling window with multi-column projection") {
    val df = Seq(
      ("2016-03-27 19:39:34", 1, "a"),
      ("2016-03-27 19:39:56", 2, "a"),
      ("2016-03-27 19:39:27", 4, "b")).toDF("time", "value", "id")

    checkAnswer(
      df.select(window($"time", "10 seconds"), $"value")
        .orderBy($"window.start".asc)
        .select($"window.start".cast("string"), $"window.end".cast("string"), $"value"),
      Seq(
        Row("2016-03-27 19:39:20", "2016-03-27 19:39:30", 4),
        Row("2016-03-27 19:39:30", "2016-03-27 19:39:40", 1),
        Row("2016-03-27 19:39:50", "2016-03-27 19:40:00", 2)
      )
    )
  }

  test("sliding window grouping") {
    val df = Seq(
      ("2016-03-27 19:39:34", 1, "a"),
      ("2016-03-27 19:39:56", 2, "a"),
      ("2016-03-27 19:39:27", 4, "b")).toDF("time", "value", "id")

    checkAnswer(
      df.groupBy(window($"time", "10 seconds", "3 seconds", "0 second"))
        .agg(count("*").as("counts"))
        .orderBy($"window.start".asc)
        .select($"window.start".cast("string"), $"window.end".cast("string"), $"counts"),
      // 2016-03-27 19:39:27 UTC -> 4 bins
      // 2016-03-27 19:39:34 UTC -> 3 bins
      // 2016-03-27 19:39:56 UTC -> 3 bins
      Seq(
        Row("2016-03-27 19:39:18", "2016-03-27 19:39:28", 1),
        Row("2016-03-27 19:39:21", "2016-03-27 19:39:31", 1),
        Row("2016-03-27 19:39:24", "2016-03-27 19:39:34", 1),
        Row("2016-03-27 19:39:27", "2016-03-27 19:39:37", 2),
        Row("2016-03-27 19:39:30", "2016-03-27 19:39:40", 1),
        Row("2016-03-27 19:39:33", "2016-03-27 19:39:43", 1),
        Row("2016-03-27 19:39:48", "2016-03-27 19:39:58", 1),
        Row("2016-03-27 19:39:51", "2016-03-27 19:40:01", 1),
        Row("2016-03-27 19:39:54", "2016-03-27 19:40:04", 1))
    )
  }

  test("sliding window projection") {
    val df = Seq(
      ("2016-03-27 19:39:34", 1, "a"),
      ("2016-03-27 19:39:56", 2, "a"),
      ("2016-03-27 19:39:27", 4, "b")).toDF("time", "value", "id")

    checkAnswer(
      df.select(window($"time", "10 seconds", "3 seconds", "0 second"), $"value")
        .orderBy($"window.start".asc, $"value".desc).select("value"),
      // 2016-03-27 19:39:27 UTC -> 4 bins
      // 2016-03-27 19:39:34 UTC -> 3 bins
      // 2016-03-27 19:39:56 UTC -> 3 bins
      Seq(Row(4), Row(4), Row(4), Row(4), Row(1), Row(1), Row(1), Row(2), Row(2), Row(2))
    )
  }

  test("windowing combined with explode expression") {
    val df = Seq(
      ("2016-03-27 19:39:34", 1, Seq("a", "b")),
      ("2016-03-27 19:39:56", 2, Seq("a", "c", "d"))).toDF("time", "value", "ids")

    checkAnswer(
      df.select(window($"time", "10 seconds"), $"value", explode($"ids"))
        .orderBy($"window.start".asc).select("value"),
      // first window exploded to two rows for "a", and "b", second window exploded to 3 rows
      Seq(Row(1), Row(1), Row(2), Row(2), Row(2))
    )
  }

  test("null timestamps") {
    val df = Seq(
      ("2016-03-27 09:00:05", 1),
      ("2016-03-27 09:00:32", 2),
      (null, 3),
      (null, 4)).toDF("time", "value")

    checkDataset(
      df.select(window($"time", "10 seconds"), $"value")
        .orderBy($"window.start".asc)
        .select("value")
        .as[Int],
      1, 2) // null columns are dropped
  }

  test("time window joins") {
    val df = Seq(
      ("2016-03-27 09:00:05", 1),
      ("2016-03-27 09:00:32", 2),
      (null, 3),
      (null, 4)).toDF("time", "value")

    val df2 = Seq(
      ("2016-03-27 09:00:02", 3),
      ("2016-03-27 09:00:35", 6)).toDF("time", "othervalue")

    checkAnswer(
      df.select(window($"time", "10 seconds"), $"value").join(
        df2.select(window($"time", "10 seconds"), $"othervalue"), Seq("window"))
        .groupBy("window")
        .agg((sum("value") + sum("othervalue")).as("total"))
        .orderBy($"window.start".asc).select("total"),
      Seq(Row(4), Row(8)))
  }

  test("negative timestamps") {
    val df4 = Seq(
      ("1970-01-01 00:00:02", 1),
      ("1970-01-01 00:00:12", 2)).toDF("time", "value")
    checkAnswer(
      df4.select(window($"time", "10 seconds", "10 seconds", "5 seconds"), $"value")
        .orderBy($"window.start".asc)
        .select($"window.start".cast(StringType), $"window.end".cast(StringType), $"value"),
      Seq(
        Row("1969-12-31 23:59:55", "1970-01-01 00:00:05", 1),
        Row("1970-01-01 00:00:05", "1970-01-01 00:00:15", 2))
    )
  }

  test("multiple time windows in a single operator throws nice exception") {
    val df = Seq(
      ("2016-03-27 09:00:02", 3),
      ("2016-03-27 09:00:35", 6)).toDF("time", "value")
    val e = intercept[AnalysisException] {
      df.select(window($"time", "10 second"), window($"time", "15 second")).collect()
    }
    assert(e.getMessage.contains(
      "Multiple time window expressions would result in a cartesian product"))
  }

  test("aliased windows") {
    val df = Seq(
      ("2016-03-27 19:39:34", 1, Seq("a", "b")),
      ("2016-03-27 19:39:56", 2, Seq("a", "c", "d"))).toDF("time", "value", "ids")

    checkAnswer(
      df.select(window($"time", "10 seconds").as("time_window"), $"value")
        .orderBy($"time_window.start".asc)
        .select("value"),
      Seq(Row(1), Row(2))
    )
  }

  test("millisecond precision sliding windows") {
    val df = Seq(
      ("2016-03-27 09:00:00.41", 3),
      ("2016-03-27 09:00:00.62", 6),
      ("2016-03-27 09:00:00.715", 8)).toDF("time", "value")
    checkAnswer(
      df.groupBy(window($"time", "200 milliseconds", "40 milliseconds", "0 milliseconds"))
        .agg(count("*").as("counts"))
        .orderBy($"window.start".asc)
        .select($"window.start".cast(StringType), $"window.end".cast(StringType), $"counts"),
      Seq(
        Row("2016-03-27 09:00:00.24", "2016-03-27 09:00:00.44", 1),
        Row("2016-03-27 09:00:00.28", "2016-03-27 09:00:00.48", 1),
        Row("2016-03-27 09:00:00.32", "2016-03-27 09:00:00.52", 1),
        Row("2016-03-27 09:00:00.36", "2016-03-27 09:00:00.56", 1),
        Row("2016-03-27 09:00:00.4", "2016-03-27 09:00:00.6", 1),
        Row("2016-03-27 09:00:00.44", "2016-03-27 09:00:00.64", 1),
        Row("2016-03-27 09:00:00.48", "2016-03-27 09:00:00.68", 1),
        Row("2016-03-27 09:00:00.52", "2016-03-27 09:00:00.72", 2),
        Row("2016-03-27 09:00:00.56", "2016-03-27 09:00:00.76", 2),
        Row("2016-03-27 09:00:00.6", "2016-03-27 09:00:00.8", 2),
        Row("2016-03-27 09:00:00.64", "2016-03-27 09:00:00.84", 1),
        Row("2016-03-27 09:00:00.68", "2016-03-27 09:00:00.88", 1))
    )
  }

  // Registers a small fixed dataset as a temp view, runs `f` against it,
  // and always drops the view afterwards.
  private def withTempTable(f: String => Unit): Unit = {
    val tableName = "temp"
    Seq(
      ("2016-03-27 19:39:34", 1),
      ("2016-03-27 19:39:56", 2),
      ("2016-03-27 19:39:27", 4)).toDF("time", "value").createOrReplaceTempView(tableName)
    try {
      f(tableName)
    } finally {
      spark.catalog.dropTempView(tableName)
    }
  }

  test("time window in SQL with single string expression") {
    withTempTable { table =>
      checkAnswer(
        spark.sql(s"""select window(time, "10 seconds"), value from $table""")
          .select($"window.start".cast(StringType), $"window.end".cast(StringType), $"value"),
        Seq(
          Row("2016-03-27 19:39:20", "2016-03-27 19:39:30", 4),
          Row("2016-03-27 19:39:30", "2016-03-27 19:39:40", 1),
          Row("2016-03-27 19:39:50", "2016-03-27 19:40:00", 2)
        )
      )
    }
  }

  // NOTE: test name typo "with with" fixed (duplicated word).
  test("time window in SQL with two expressions") {
    withTempTable { table =>
      checkAnswer(
        spark.sql(
          s"""select window(time, "10 seconds", 10000000), value from $table""")
          .select($"window.start".cast(StringType), $"window.end".cast(StringType), $"value"),
        Seq(
          Row("2016-03-27 19:39:20", "2016-03-27 19:39:30", 4),
          Row("2016-03-27 19:39:30", "2016-03-27 19:39:40", 1),
          Row("2016-03-27 19:39:50", "2016-03-27 19:40:00", 2)
        )
      )
    }
  }

  // NOTE: test name typo "with with" fixed (duplicated word).
  test("time window in SQL with three expressions") {
    withTempTable { table =>
      checkAnswer(
        spark.sql(
          s"""select window(time, "10 seconds", 10000000, "5 seconds"), value from $table""")
          .select($"window.start".cast(StringType), $"window.end".cast(StringType), $"value"),
        Seq(
          Row("2016-03-27 19:39:25", "2016-03-27 19:39:35", 1),
          Row("2016-03-27 19:39:25", "2016-03-27 19:39:35", 4),
          Row("2016-03-27 19:39:55", "2016-03-27 19:40:05", 2)
        )
      )
    }
  }
}
| Panos-Bletsos/spark-cost-model-optimizer | sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala | Scala | apache-2.0 | 10,557 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.builders
import minitest.TestSuite
import monix.execution.schedulers.TestScheduler
import monix.reactive.{Observable, Observer}
import concurrent.duration._
object NeverObservableSuite extends TestSuite[TestScheduler] {
  // fresh virtual-time scheduler per test
  def setup() = TestScheduler()

  // `Observable.never` must not schedule anything, so the scheduler ends empty
  def tearDown(s: TestScheduler): Unit = {
    assert(s.state.tasks.isEmpty, "Scheduler should be left with no pending tasks")
  }

  test("should never complete") { implicit s =>
    // every callback throws: any emission, completion or error would fail the test
    Observable.never.unsafeSubscribeFn(new Observer[Any] {
      def onNext(elem: Any) = throw new IllegalStateException()
      def onComplete(): Unit = throw new IllegalStateException()
      def onError(ex: Throwable) = throw new IllegalStateException()
    })

    // advance virtual time far into the future; still nothing may happen
    s.tick(100.days)
    assert(s.state.lastReportedError == null)
  }
}
| alexandru/monifu | monix-reactive/shared/src/test/scala/monix/reactive/internal/builders/NeverObservableSuite.scala | Scala | apache-2.0 | 1,479 |
package database
import com.datastax.driver.core.{Session, Cluster, querybuilder}
import com.datastax.driver.core.querybuilder._
import com.datastax.driver.core.utils._
import scala.collection.JavaConversions._
import java.util
import constants.Db._
import com.datastax.driver.mapping.MappingManager
import com.datastax.driver.mapping
// Thin singleton wrapper around a single Cassandra cluster connection.
// NOTE(review): mutable global state (`var`s initialised to null via `_`);
// assumes connect() is called before session/manager are used — TODO confirm
// callers guarantee this ordering.
object Cassandra {
  var cluster : Cluster = _
  var session: Session = _
  var manager: MappingManager = _

  // Connects to the given contact point, opens a session on the configured
  // keyspace and builds the object-mapping manager.
  def connect(node: String) = {
    cluster = Cluster.builder()
      .addContactPoint(node).build()
    session = cluster.connect(CASSANDRA_KEYSPACE)
    manager = new mapping.MappingManager(session)
  }

  // Closes the cluster (which also closes its sessions). The vars keep their
  // stale references afterwards; reconnecting requires another connect().
  def close = {
    cluster.close()
  }
} | lequangdzung/quora-clone | api-app/app/database/Cassandra.scala | Scala | gpl-2.0 | 697 |
package org.jetbrains.plugins.scala
package lang
package parameterInfo
import java.awt.Color
import com.intellij.codeInsight.CodeInsightBundle
import com.intellij.codeInsight.lookup.LookupElement
import com.intellij.lang.parameterInfo._
import com.intellij.psi._
import com.intellij.psi.tree.IElementType
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.util.ArrayUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.base.ScStableCodeReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScConstructorPattern, ScPattern, ScPatternArgumentList}
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScClass, ScObject}
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.result.TypingContext
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
import scala.collection.mutable.ArrayBuffer
/**
* User: Alexander Podkhalyuzin
* Date: 22.02.2009
*/
class ScalaPatternParameterInfoHandler extends ParameterInfoHandlerWithTabActionSupport[ScPatternArgumentList, Any, ScPattern] {
// --- Static configuration for IDEA's parameter-info popup over pattern argument lists ---

/** Stop searching upward for an argument list once a PsiMethod is reached. */
def getArgListStopSearchClasses: java.util.Set[_ <: Class[_]] = {
  java.util.Collections.singleton(classOf[PsiMethod]) //todo: ?
}

/** Characters that close or delimit the current parameter while typing. */
def getParameterCloseChars: String = "{},);\n"

def couldShowInLookup: Boolean = true

/** Patterns inside the argument list are separated by commas. */
def getActualParameterDelimiterType: IElementType = ScalaTokenTypes.tCOMMA

def getActualParameters(patternArgumentList: ScPatternArgumentList): Array[ScPattern] = patternArgumentList.patterns.toArray

def getArgumentListClass: Class[ScPatternArgumentList] = classOf[ScPatternArgumentList]

def getActualParametersRBraceType: IElementType = ScalaTokenTypes.tRBRACE

/** Only constructor patterns (e.g. `case Foo(a, b)`) may own the argument list. */
def getArgumentListAllowedParentClasses: java.util.Set[Class[_]] = {
  val set = new java.util.HashSet[Class[_]]()
  set.add(classOf[ScConstructorPattern])
  set
}

def findElementForParameterInfo(context: CreateParameterInfoContext): ScPatternArgumentList = {
  findCall(context)
}

def findElementForUpdatingParameterInfo(context: UpdateParameterInfoContext): ScPatternArgumentList = {
  findCall(context)
}

def getParametersForDocumentation(p: Any, context: ParameterInfoContext): Array[Object] = ArrayUtil.EMPTY_OBJECT_ARRAY

def getParametersForLookup(item: LookupElement, context: ParameterInfoContext): Array[Object] = null
/**
 * Renders the parameter-info hint for an extractor (unapply/unapplySeq) call.
 * The parameter at the caret's index is made bold; pseudo-markup (<b>, <g>)
 * is translated into offsets understood by the UI component at the end.
 */
def updateUI(p: Any, context: ParameterInfoUIContext): Unit = {
  if (context == null || context.getParameterOwner == null || !context.getParameterOwner.isValid) return
  context.getParameterOwner match {
    case args: ScPatternArgumentList => {
      val color: Color = context.getDefaultParameterColor
      val index = context.getCurrentParameterIndex
      val buffer: StringBuilder = new StringBuilder("")
      p match {
        //todo: join this match statement with same in FunctionParameterHandler to fix code duplicate.
        case (sign: PhysicalSignature, i: Int) => {
          //i can be -1 (it's update method)
          val methodName = sign.method.name
          val subst = sign.substitutor
          // Return type of the extractor method, with the signature's substitutor applied.
          val returnType = sign.method match {
            case function: ScFunction => subst.subst(function.returnType.getOrAny)
            case method: PsiMethod => subst.subst(ScType.create(method.getReturnType, method.getProject))
          }
          val oneArgCaseClassMethod: Boolean = sign.method match {
            case function: ScFunction => ScPattern.isOneArgCaseClassMethod(function)
            case _ => false
          }
          val params = ScPattern.extractorParameters(returnType, args, oneArgCaseClassMethod).zipWithIndex
          if (params.length == 0) buffer.append(CodeInsightBundle.message("parameter.info.no.parameters"))
          else {
            buffer.append(params.map {
              case (param, o) =>
                // Inner buffer shadows the outer one on purpose: it builds one parameter's text.
                val buffer: StringBuilder = new StringBuilder("")
                buffer.append(ScType.presentableText(param))
                // For unapplySeq, a trailing scala.Seq[T] parameter is rendered as "T*".
                val isSeq = methodName == "unapplySeq" && (ScType.extractClass(param) match {
                  case Some(clazz) => clazz.qualifiedName == "scala.Seq"
                  case _ => false
                })
                if (isSeq) {
                  // Strip "Seq[" prefix and "]" suffix, then append the vararg marker.
                  buffer.delete(0, buffer.indexOf("[") + 1)
                  buffer.deleteCharAt(buffer.length - 1)
                  buffer.append("*")
                }
                // A vararg-like Seq parameter stays highlighted for all later indices.
                val isBold = if (o == index || (isSeq && o <= index)) true
                else {
                  //todo: check type
                  false
                }
                val paramTypeText = buffer.toString()
                val paramText = paramTextFor(sign, o, paramTypeText)
                if (isBold) "<b>" + paramText + "</b>" else paramText
            }.mkString(", "))
          }
        }
        case _ =>
      }
      // Strip pseudo-markup and convert <b>...</b> into highlight offsets for the UI.
      val isGrey = buffer.indexOf("<g>")
      if (isGrey != -1) buffer.replace(isGrey, isGrey + 3, "")
      val startOffset = buffer.indexOf("<b>")
      if (startOffset != -1) buffer.replace(startOffset, startOffset + 3, "")
      val endOffset = buffer.indexOf("</b>")
      if (endOffset != -1) buffer.replace(endOffset, endOffset + 4, "")
      if (buffer.toString != "")
        context.setupUIComponentPresentation(buffer.toString(), startOffset, endOffset, false, false, false, color)
      else
        context.setUIComponentEnabled(false)
    }
    case _ =>
  }
}
/**
 * Builds the display text for a single extractor parameter.
 *
 * @param sign the extractor method's signature
 * @param o zero-based index of the parameter
 * @param paramTypeText the already-rendered type text for the parameter
 * @return 'paramName: ParamType' if `sign` is a synthetic unapply method; otherwise 'ParamType'
 */
private def paramTextFor(sign: PhysicalSignature, o: Int, paramTypeText: String): String = {
  if (sign.method.name == "unapply") {
    sign.method match {
      // Synthetic case-class unapply: its single parameter is named "x$0";
      // recover the real name from the companion class's constructor.
      case fun: ScFunction if fun.parameters.headOption.exists(_.name == "x$0") =>
        val companionClass: Option[ScClass] = Option(fun.containingClass) match {
          case Some(x: ScObject) => ScalaPsiUtil.getCompanionModule(x) match {
            case Some(x: ScClass) => Some(x)
            case _ => None
          }
          case _ => None
        }
        companionClass match {
          case Some(cls) => ScalaPsiUtil.nthConstructorParam(cls, o) match {
            case Some(param) =>
              if (param.isRepeatedParameter) {
                paramTypeText // Not handled yet.
              } else {
                param.name + ": " + paramTypeText // SCL-3006
              }
            case None => paramTypeText
          }
          case None => paramTypeText
        }
      case fun: ScFunction =>
        // Look for a corresponding apply method beside the unapply method.
        // TODO also check types correspond, allowing for overloading
        val applyParam: Option[PsiParameter] = ScalaPsiUtil.getApplyMethods(fun.containingClass) match {
          case Seq(sig) => sig.method.getParameterList.getParameters.lift(o)
          case _ => None
        }
        applyParam match {
          case Some(param) => param.getName + ": " + paramTypeText
          case None => paramTypeText
        }
      case _ =>
        paramTypeText
    }
  } else paramTypeText
}
/** Shows the hint anchored at the start of the pattern argument list. */
def showParameterInfo(element: ScPatternArgumentList, context: CreateParameterInfoContext): Unit = {
  context.showHint(element, element.getTextRange.getStartOffset, this)
}

/**
 * Recomputes the current parameter index by counting the commas that occur
 * before the caret offset inside the argument list; removes the hint if it
 * no longer belongs to this argument list.
 */
def updateParameterInfo(o: ScPatternArgumentList, context: UpdateParameterInfoContext): Unit = {
  if (context.getParameterOwner != o) context.removeHint()
  val offset = context.getOffset
  var child = o.getNode.getFirstChildNode
  var i = 0
  while (child != null && child.getStartOffset < offset) {
    if (child.getElementType == ScalaTokenTypes.tCOMMA) i = i + 1
    child = child.getTreeNext
  }
  context.setCurrentParameter(i)
}

def tracksParameterIndex: Boolean = true
/**
 * Finds the enclosing pattern argument list at the caret. On creation,
 * additionally resolves the constructor pattern's reference and collects one
 * (PhysicalSignature, Int) item per candidate extractor to show in the popup.
 * On update, recomputes the highlighted parameter. Returns null when the
 * caret is not inside a pattern argument list.
 */
private def findCall(context: ParameterInfoContext): ScPatternArgumentList = {
  val (file, offset) = (context.getFile, context.getOffset)
  val element = file.findElementAt(offset)
  if (element == null) return null
  val args: ScPatternArgumentList = PsiTreeUtil.getParentOfType(element, getArgumentListClass)
  if (args != null) {
    context match {
      case context: CreateParameterInfoContext => {
        args.getParent match {
          case constr: ScConstructorPattern => {
            val ref: ScStableCodeReferenceElement = constr.ref
            val res: ArrayBuffer[Object] = new ArrayBuffer[Object]
            if (ref != null) {
              val name = ref.refName
              // Consider every resolve candidate (overloaded/ambiguous extractors).
              val variants: Array[ResolveResult] = ref.multiResolve(false)
              for (variant <- variants if variant.isInstanceOf[ScalaResolveResult]) {
                val r = variant.asInstanceOf[ScalaResolveResult]
                r.element match {
                  case fun: ScFunction if fun.parameters.nonEmpty =>
                    val substitutor = r.substitutor
                    // For generic extractors, refine the substitutor by conforming the
                    // expected pattern type against the unapply argument type.
                    val subst = if (fun.typeParameters.length == 0) substitutor
                    else {
                      // Replace each type parameter with an undefined type to solve for it.
                      val undefSubst = fun.typeParameters.foldLeft(ScSubstitutor.empty)((s, p) =>
                        s.bindT((p.name, ScalaPsiUtil.getPsiElementId(p)), ScUndefinedType(new ScTypeParameterType(p,
                          substitutor))))
                      val emptySubst: ScSubstitutor = fun.typeParameters.foldLeft(ScSubstitutor.empty)((s, p) =>
                        s.bindT((p.name, ScalaPsiUtil.getPsiElementId(p)), p.upperBound.getOrAny))
                      val result = fun.parameters(0).getType(TypingContext.empty)
                      if (result.isEmpty) substitutor
                      else {
                        val funType = undefSubst.subst(result.get)
                        constr.expectedType match {
                          case Some(tp) =>
                            val t = Conformance.conforms(tp, funType)
                            if (t) {
                              val undefSubst = Conformance.undefinedSubst(tp, funType)
                              undefSubst.getSubstitutor match {
                                case Some(newSubst) => newSubst.followed(substitutor)
                                case _ => substitutor
                              }
                            } else substitutor
                          case _ => substitutor
                        }
                      }
                    }
                    res += ((new PhysicalSignature(fun, subst), 0))
                  case _ =>
                }
              }
            }
            context.setItemsToShow(res.toArray)
          }
          case _ =>
        }
      }
      case context: UpdateParameterInfoContext => {
        // Walk up to the direct child of the argument list that contains the caret.
        var el = element
        while (el.getParent != args) el = el.getParent
        var index = 1
        for (pattern <- args.patterns if pattern != el) index += 1
        context.setCurrentParameter(index)
        context.setHighlightedParameter(el)
      }
      case _ =>
    }
  }
  args
}
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/parameterInfo/ScalaPatternParameterInfoHandler.scala | Scala | apache-2.0 | 11,487 |
package org.pfcoperez.dailyalgorithm.datastructures.graphs.directed
import org.scalatest.{FlatSpec, Inside, Matchers}
/**
 * Behavioural spec for the persistent DoubleLinkedList: construction by
 * appending/prepending and navigation in both directions.
 */
class DoubleLinkedListSpec extends FlatSpec with Matchers with Inside {

  import DoubleLinkedList.{Empty => DlEmpty, Node => DlNode, lastNode}
  import DoubleLinkedList.syntax._

  // Collects values front-to-back by repeatedly following `right`.
  def toListLeftToRight[T](dl: DoubleLinkedList[T], acc: List[T] = Nil): List[T] = dl match {
    case DlEmpty => acc
    case DlNode(_, v, right) => toListLeftToRight(right, acc :+ v)
  }

  // Collects values by walking to the last node first, then following `left`.
  def toListRightToLeft[T](dl: DoubleLinkedList[T]): List[T] = {
    def toListFromLast(dl: DoubleLinkedList[T], acc: List[T]): List[T] = dl match {
      case DlEmpty => acc
      case DlNode(left, v, _) => toListFromLast(left, v :: acc)
    }
    toListFromLast(lastNode(dl), Nil)
  }

  val elements = 1 to 10

  "An double linked list" should "allow to be generated by appending elements" in {
    // Fold left (`/:`), appending each element at the tail.
    val lAppending = ((DlEmpty : DoubleLinkedList[Int]) /: elements) { (acc, element) =>
      acc :+ element
    }
    toListLeftToRight(lAppending) shouldBe elements.toList
  }

  it should "allow to be generated by prepending elements" in {
    // Fold right, prepending each element at the head.
    val lPrepending = (elements :\\ (DlEmpty : DoubleLinkedList[Int])) { (element, acc) =>
      element +: acc
    }
    toListLeftToRight(lPrepending) shouldBe elements.toList
  }

  it should "allow to move over it in both directions" in {
    val l = (elements :\\ (DlEmpty : DoubleLinkedList[Int])) { (element, acc) =>
      element +: acc
    }
    // right x3 lands on the node holding 4; one step left yields 3.
    l.right.right.right.left.headOption shouldBe Some(3)
    // Stepping right past the last node yields an empty list.
    lastNode(l).left.right.right.headOption shouldBe None
    lastNode(l).left.right.headOption shouldBe Some(10)
  }
}
| pfcoperez/algorithmaday | src/test/scala/org/pfcoperez/dailyalgorithm/datastructures/graphs/directed/DoubleLinkedListSpec.scala | Scala | gpl-3.0 | 1,669 |
package io.udash.bindings.inputs
import io.udash._
import io.udash.testing.AsyncUdashFrontendTest
/**
 * Tests for the RangeInput component: two-way binding between the slider's
 * numeric DOM value and the backing Property, including clamping to min/max
 * bounds and snapping to the configured step.
 */
class RangeInputTest extends AsyncUdashFrontendTest {
  "Input" should {
    "synchronise state with property changes" in {
      val p = Property[Double](7)
      val input = RangeInput(p, 0d.toProperty, 100d.toProperty, 0.1.toProperty)()
      val inputEl = input.render
      inputEl.valueAsNumber should be(7.0)
      p.set(15.5)
      p.set(17.5)
      p.set(85.2)
      p.set(53.7)
      // Only the latest property value must be reflected in the DOM.
      inputEl.valueAsNumber should be(53.7)
      p.set(0)
      inputEl.valueAsNumber should be(0d)
      p.set(100)
      inputEl.valueAsNumber should be(100d)
      // Out-of-range values are clamped to the [min, max] bounds.
      p.set(120)
      inputEl.valueAsNumber should be(100d)
      p.set(-7.5)
      inputEl.valueAsNumber should be(0)
      // kill() must detach the component's listener from the property.
      p.listenersCount() should be(1)
      input.kill()
      p.listenersCount() should be(0)
    }
    "synchronise property with state changes" in {
      val p = Property[Double](7)
      val input = RangeInput(p, 0d.toProperty, 100d.toProperty, 0.1.toProperty)()
      val inputEl = input.render
      // Simulate user interaction: set the DOM value and fire onchange manually.
      inputEl.valueAsNumber = 78.5
      inputEl.onchange(null)
      p.get should be(78.5)
      inputEl.valueAsNumber = 18.5
      inputEl.onchange(null)
      p.get should be(18.5)
      inputEl.valueAsNumber = 18
      inputEl.onchange(null)
      p.get should be(18)
      p.listenersCount() should be(1)
      input.kill()
      p.listenersCount() should be(0)
    }
    "synchronise value on bound and step changes" in {
      val p = Property[Double](8)
      val min = Property(0d)
      val max = Property(100d)
      val step = Property(2d)
      val input = RangeInput(p, min, max, step)()
      p.get should be(8)
      // Raising min above the current value clamps the value up to min.
      min.set(20)
      p.get should be(20)
      min.set(0)
      // Lowering max below the current value clamps the value down to max.
      max.set(10)
      p.get should be(10)
      max.set(100)
      p.set(7)
      // Changing the step snaps the value onto the step grid.
      step.set(20)
      p.get should be(0)
      p.listenersCount() should be(1)
      input.kill()
      p.listenersCount() should be(0)
    }
  }
}
| UdashFramework/udash-core | core/.js/src/test/scala/io/udash/bindings/inputs/RangeInputTest.scala | Scala | apache-2.0 | 2,006 |
package com.joshcough.minecraft.ermine
import com.clarifi.reporting.ermine._
import com.joshcough.minecraft.ScalaPlugin
import org.bukkit.command.{CommandSender, Command => BukkitCommand}
import org.bukkit.event.Listener
import org.bukkit.entity.Player
import com.clarifi.reporting.ermine.session.SessionEnv
import scalaz.{-\\/, \\/, \\/-, Show}
import scalaz.std.string._
import scalaz.syntax.std._
import com.clarifi.reporting.ermine.session.Session.{SourceFile, Resource}
import ReportsCache.ModuleExpr
/**
 * Minecraft plugin whose behaviour (commands, listeners, plugin.yml content)
 * is defined in an Ermine module named after the plugin itself.
 * NOTE(review): assumes the Ermine module exposes `plugin`, `commandNames`,
 * `listeners`, `onCommand` and `yml` with the arities used below — confirm.
 */
class ErmineCraftPlugin extends ScalaPlugin {
  // The module name mirrors the plugin name; `plugin` is its entry point value.
  lazy val moduleName = this.name
  lazy val ermineModule = runErmine(moduleName, "plugin")()

  override def onEnable {
    super.onEnable()
    // Print all the command names.
    runMC("commandNames")(ermineModule).extract[List[String]].foreach(name => logInfo("command: " + name))
    // Register all the listeners.
    runMC("listeners")(ermineModule).extract[List[Listener]].foreach(registerListener)
  }

  override def onCommand(sender: CommandSender, cmd: BukkitCommand, commandName: String, args: Array[String]) = {
    println(s"$name handling $commandName [${args.mkString(",")}]")
    // NOTE(review): unconditional cast — a non-player sender (e.g. the console)
    // would fail here; confirm commands are player-only.
    runIO(runMC("onCommand")(ermineModule, sender.asInstanceOf[Player], cmd, commandName, args.toList))
    true
  }

  override def yml(author: String, version: String): String =
    runMC("yml")(ermineModule, name, getClass.getName, author, version).extract[String]

  // code for running ermine
  lazy val cache = new LoaderReportsCache[String](classloader()) {
    import ReportsCache.ModuleExpr
    type Report = ModuleExpr[String]
    lazy val loadPaths : List[String] = Nil
    override protected def initialEnv = {
      val e = new SessionEnv
      e.loadFile = classloader()
      e
    }
    override def preloads: List[String] = List("Minecraft.Minecraft")
    def showReport = Show[Report]
    protected def toME(r: Report) = r
  }

  /** Load from Java classloader, rooted at `root`. */
  def classloader()(module: String): Option[SourceFile] = {
    val path = ("modules" :: module.split('.').toList).mkString("/") + ".e"
    // Try several classloaders in turn: the module may live in different jars
    // depending on how the plugin is packaged.
    Option(classOf[Resource].getClassLoader.getResource(path)).orElse(
      Option(classOf[ErmineCraft].getClassLoader.getResource(path))).orElse(
      Option(this.getClass.getClassLoader.getResource(path))).map(Resource(module, _))
  }

  // Evaluates `expr` from `module` and applies it to the given arguments.
  def runErmine(module: String, expr: String)(args: AnyRef*): Runtime =
    cache.getReport(ModuleExpr(module, expr)).toEither match {
      case Right(r) => r(args.map(Prim(_)):_*)
      case Left(e) => throw e
    }

  // Forces an IO action via unsafePerformIO.
  // NOTE(review): parameter `r` is never used — unsafePerformIO is applied to
  // zero arguments; looks like it should receive `r`. Verify against callers.
  def runIO(r: Runtime) = runGlobal("IO.Unsafe","unsafePerformIO")().extract[Any]

  def runMC(function: String)(args: AnyRef*): Runtime = runGlobal("Minecraft.Minecraft",function)(args:_*)

  // Looks up a global term `m.f` in the session environment and applies it;
  // fails fast with sys.error when the global cannot be found.
  def runGlobal(m: String, f: String)(args: AnyRef*): Runtime =
    cache.baseEnv.termNames.get(Global(m, f)).
      flatMap(cache.baseEnv.env.get).map(_(args.map(Prim(_)):_*)).
      getOrElse(sys.error(s"global not found: $m.$f"))
}
| joshcough/ErMinecraft | erminecraft/src/main/scala/com/joshcough/minecraft/ermine/ErmineCraftPlugin.scala | Scala | mit | 2,963 |
/*
* Copyright 2016 Coral realtime streaming analytics (http://coral-streaming.github.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.coral.cluster
import java.util.UUID
import akka.actor._
import akka.cluster.Member
import akka.util.Timeout
import io.coral.actors.RuntimeAdminActor
import io.coral.api.security.AuthInfo
import io.coral.api.{Runtime, CoralConfig}
import io.coral.api.security.Authenticator._
import io.coral.cluster.ClusterDistributor.{ResetRoundRobin, RestartRuntimes, InvalidateAllAuthenticators, CreateRuntimeLocally}
import io.coral.cluster.ClusterMonitor.{GetPlatformStatistics, GetAddresses}
import io.coral.utils.Utils
import org.json4s._
import org.uncommons.maths.random.MersenneTwisterRNG
import scaldi.Injector
import scala.concurrent.{Await, ExecutionContext, Future}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import scala.util.{Failure, Success}
import akka.pattern.ask
import scala.concurrent.duration._
import akka.pattern.pipe
/** Message protocol of the [[ClusterDistributor]] actor. */
object ClusterDistributor {
  // Asks the cluster distributor to create a
  // new runtime according to its strategy.
  case class CreateRuntime(uniqueName: String,
    jsonDef: JObject, owner: UUID)
  // Asks the cluster distributor to create a new runtime
  // as a child of its own, right where it is running.
  case class CreateRuntimeLocally(uniqueName: String,
    jsonDef: JObject, owner: UUID)
  // Broadcasts an invalidation request to every authenticator in the cluster.
  case class InvalidateAllAuthenticators()
  // Received when a cluster is unreachable,
  // according to the cluster monitor
  case class RestartRuntimes(member: Member)
  // Start on the first node again for round robin.
  // For testing purposes.
  case class ResetRoundRobin()
}
/**
* Distributes a runtime across the cluster with a certain criterium.
* In the case there is just one machine in the cluster, each of these
* methods just starts the runtime on that machine. Otherwise, this
* actor decides which machine(s) to distribute the runtime on according
* to the setting in the .config file ("config.coral.distributor.mode").
*
* It also can answer questions about which runtime is running on which
* machine, which is useful for restarting a machine on which runtimes
* were running.
*
* The cluster distributor lives as a child of the RuntimeAdminActor.
* Its path is thus always "/user/root/admin/clusterDistributor".
*/
class ClusterDistributor(implicit ec: ExecutionContext,
injector: Injector, config: CoralConfig) extends Actor with ActorLogging {
// A distributor is a function that returns a list of
// machines or a single machine from a given list of machines.
type distributor = Future[List[Machine]] => Future[List[Machine]]

// Last machine chosen by the round-robin strategy (None before first use).
var previousMachine: Option[Machine] = None

// RNG backing the random distribution strategy.
val rand = new MersenneTwisterRNG()

// The cluster monitor answers membership questions about this node's cluster.
val clusterMonitor = context.actorSelection("/user/root/clusterMonitor")

implicit val formats = org.json4s.DefaultFormats
// Timeout applied to all `ask` interactions in this actor.
implicit val timeout = Timeout(5.seconds)

override def preStart() = {
}
// Message handling: distribution requests plus cluster-event housekeeping.
override def receive = {
  case InvalidateAllAuthenticators() =>
    invalidateAllAuthenticators()
  case ClusterDistributor.CreateRuntime(uniqueName, jsonDef, owner) =>
    createRuntime(uniqueName, jsonDef, owner)
  case RestartRuntimes(member: Member) =>
    restartRuntimes(member)
  case ResetRoundRobin() =>
    // Test hook: forget round-robin state so distribution restarts at node 0.
    previousMachine = None
  case _ =>
}
/**
 * Create a new runtime with the unique name, json definition and owner.
 * Replies to the original sender with the (future) JSON result of the
 * distribution; success/failure is additionally logged via onComplete.
 * @param uniqueName The unique name of the runtime
 * @param jsonDef The JSON definition of the runtime.
 * @param owner The owner of the runtime.
 */
def createRuntime(uniqueName: String, jsonDef: JObject, owner: UUID) {
  // Capture the sender before any asynchronous callback may run.
  val originalSender = sender()
  log.info(s"""Valid runtime definition found. Delegating runtime creation of runtime "$uniqueName"""")
  // Determine where this runtime actor should be placed
  // and then send the request to the proper machine
  val future = distributeRuntime(uniqueName, jsonDef, owner)
  future onComplete {
    case Success(answer) =>
      if ((answer \ "success").extract[Boolean]) {
        log.info("Succesfully created runtime.")
      } else {
        log.error("Failed to create runtime.")
        //runtimeActor ! PoisonPill
      }
    case Failure(ex) =>
      log.error("clusterDistributor failed to start runtime")
      log.error(ex.getMessage())
  }
  future pipeTo originalSender
}
/**
 * Distribute the runtime according to the distribution stategy as defined in
 * the "distribution" section of the runtime JSON definition, or when this
 * section is not defined, distribute it according to the settings in the
 * CoralConfig object.
 *
 * First, a list of machines is obtained on which the runtime could start.
 * This usually means all machines in the cluster.
 * Then, look at the json definition of the runtime and run it according to the
 * instructions in that definition. If not defined, then look at the instructions
 * in the CoralConfig object.
 *
 * When a single machine is eventually returned to run the runtime on, the request
 * is directed to the RuntimeAdminActor on that machine. A runtime can also be
 * distributed across multipe machines, but currently that is not implemented yet.
 * @param uniqueName The unique name of the runtime
 * @param jsonDef The json definition of the runtime
 * @param owner The owner UUID of the runtime
 * @return A Future JSON object that contains the result of the operation.
 */
def distributeRuntime(uniqueName: String, jsonDef: JObject, owner: UUID): Future[JObject] = {
  try {
    val distributionSection = (jsonDef \ "distribution").extractOpt[JObject]
    if (config.coral.cluster.enabled) {
      val allMachines = getAllMachines()
      // The distribution section overrides the settings in the configuration file
      val machines = distributionSection match {
        // NOTE(review): the pattern variable `config` (a JObject) shadows the
        // implicit CoralConfig named `config` within this branch.
        case Some(config) =>
          log.info("Distribution section specified in runtime configuration. " +
            "Distributing runtime over specified machines.")
          getMachinesFromConfig(config, allMachines)
        case None =>
          log.info("No distribution section specified in runtime configuration. " +
            "Distributing runtime with mode specified in configuration file.")
          // The user has not specified distribution instructions.
          // Deploy it according to the given settings in the configuration
          distribute(config.coral.distributor.mode, allMachines)
      }
      distributeToMachines(machines, uniqueName, jsonDef, owner)
    } else {
      // Always run on the local machine, no matter what the configuration is
      log.info("Distributing to local machine since platform is running in nocluster mode.")
      distributeToMachines(Future.successful(List()), uniqueName, jsonDef, owner)
    }
  } catch {
    case e: Exception =>
      val message = e.getMessage
      log.error(message)
      Future.successful(
        ("success" -> false) ~
        ("reason" -> message))
  }
}
/**
 * Get the list of machines from the config object and extract
 * a machine from that definition. In the case of a predefined distribution,
 * extract the machine from the JSON object. In all other cases,
 * call the corresponding distribution function.
 *
 * Examples of distribution sections in the runtime JSON definition:
 *
 * "distribution": {
 * "mode": "predefined",
 * "machine": {
 * "ip": "127.0.0.1",
 * "port": 2551
 * }
 * }
 *
 * OR
 *
 * "distribution": {
 * "mode": "local/round-robin/random/least-nr-actors/least-nr-runtimes/least-busy"
 * }
 *
 * @return A future list of machines on which the runtime will
 * actually run. This can be a single machine or it can
 * be multiple machines. Multiple machine deploys are
 * currently not implemented yet.
 */
def getMachinesFromConfig(distribution: JObject, allMachines: Future[List[Machine]]): Future[List[Machine]] = {
  val mode = (distribution \ "mode").extract[String]
  mode match {
    case "predefined" =>
      Future {
        val ip = (distribution \ "machine" \ "ip").extractOpt[String]
        val port = (distribution \ "machine" \ "port").extractOpt[Int]
        if (!ip.isDefined || !port.isDefined) {
          // An incomplete machine spec yields an empty selection, which
          // downstream falls back to a local deploy.
          log.error("Invalid predefined machine definition found")
          List()
        } else {
          // No alias/roles/status are known for a user-specified machine.
          List(Machine(None, ip.get, port.get, List(), None))
        }
      }
    case other =>
      distribute(mode, allMachines)
  }
}
/**
 * Get the list of existing machines currently in the cluster
 * from which a machine or multiple machines can be chosen.
 *
 * Members carrying the "debug-helper" role are excluded from the result.
 * If the cluster monitor answers with success=false (unit tests with a
 * LocalActorRefProvider), an empty list is returned. With coral clustering
 * disabled, only the local machine is returned.
 *
 * @return A future list of candidate machines for runtime placement.
 */
def getAllMachines(): Future[List[Machine]] = {
  if (config.coral.cluster.enabled) {
    clusterMonitor.ask(ClusterMonitor.GetClusterInfo())
      .asInstanceOf[Future[JObject]].map(info => {
        // Option.contains replaces the null-prone isDefined/get combination.
        if ((info \ "success").extractOpt[Boolean].contains(false)) {
          // Running unit tests, config.coral.cluster.enable = true
          // and akka.actor.provider = "akka.actor.LocalActorRefProvider"
          List()
        } else {
          val members = (info \ "members").extract[JArray]
          members.arr
            .filter(member => !(member \ "roles").extract[List[String]].contains("debug-helper"))
            .map { member =>
              val ip = (member \ "address" \ "ip").extract[String]
              val port = (member \ "address" \ "port").extract[Int]
              val roles = (member \ "roles").extract[List[String]]
              val status = (member \ "status").extractOpt[String]
              // Alias is not relevant here
              Machine(None, ip, port, roles, status)
            }
        }
      })
  } else {
    // Coral clustering disabled: the only candidate is the local machine.
    Future.successful(getLocalMachine())
  }
}
/**
 * Distribute a runtime over a number of machines.
 * @param futureMachines The future list of machines to start the runtime on.
 * @param uniqueName The unique name of the runtime
 * @param jsonDef The JSON definition of the runtime
 * @param owner The owner of the runtime.
 * @return A future JSON object containing the result of the distribution operation.
 */
def distributeToMachines(futureMachines: Future[List[Machine]], uniqueName: String,
  jsonDef: JObject, owner: UUID): Future[JObject] = {
  futureMachines.flatMap(machines => {
    // See if it has returned 0 machines, 1 machine or multiple machines
    machines.size match {
      case 0 =>
        // This happens when config.coral.cluster.enable = false,
        // or in unit tests when akka.actor.provider = LocalActorRefProvider.
        val localAddress = "/user/root/admin"
        context.actorSelection(localAddress).ask(CreateRuntimeLocally(uniqueName, jsonDef, owner))
          .asInstanceOf[Future[JObject]]
      case 1 =>
        // Contact the cluster distributor on that machine,
        // and ask it to create a runtime there
        val m = machines(0)
        // Ask the RuntimeAdminActor on that machine to create a runtime locally over there
        val remoteAddress = s"akka.tcp://coral@${m.ip}:${m.port}/user/root/admin"
        log.info(s"""Delegating runtime creation to local actor with address "$remoteAddress""""
          + s""" (currently on "${Utils.getFullSelfPath(self)}")""")
        val overThere = context.actorSelection(remoteAddress)
        overThere.ask(CreateRuntimeLocally(uniqueName, jsonDef, owner))
          .asInstanceOf[Future[JObject]]
      case n =>
        // It is a list of machines, returned by the "split..." methods.
        // Determine how to split the runtime now
        // NOTE(review): multi-machine deployment is not implemented; this
        // branch silently answers with an empty JSON object.
        // ...
        Future.successful(JObject())
    }
  })
}
/**
 * Fetches a list of machines on which the runtime should run, depending
 * on the distribution mode specified.
 * @param mode The distribution mode. Can be supplied through a "distribution"
 * section or through the CoralConfig object.
 * @param m The future list of all machines to choose from.
 * @return A selection from this list according to the criterium.
 */
def distribute(mode: String, m: Future[List[Machine]]): Future[List[Machine]] = {
  log.info(s"""Assembling list of machines based on distribution mode "$mode".""")
  mode match {
    // These return 1 machine:
    case "local" => local(m)
    case "round-robin" => roundRobin(m)
    case "least-nr-runtimes" => leastNrRuntimes(m)
    case "least-nr-actors" => leastNrActors(m)
    case "random" => random(m)
    case "least-busy" => leastBusy(m)
    // These return n machines:
    case "split-predefined" => splitPredefined(m)
    case "split-least-busy" => splitLeastBusy(m)
    case "split-random" => splitRandom(m)
    case "split-least-nr-runtimes" => splitLeastNrRuntimes(m)
    case "split-least-nr-actors" => splitLeastNrActors(m)
    // Unknown mode: empty selection (deploys locally downstream).
    case other => Future.successful(List())
  }
}
/**
 * Invalidate all authenticator actors in the entire cluster.
 * Returns a "combined" future with a single answer.
 * If any of the separate nodes does not return true or does
 * not return its answer in time, the result is Future(false).
 */
def invalidateAllAuthenticators() {
  // Capture the sender before entering asynchronous callbacks.
  val originalSender = sender()
  if (config.coral.cluster.enabled) {
    // Future[List[String]] => Future[List[Boolean]] => Future[Boolean]
    // future list of actors => future list of answers => future result
    clusterMonitor.ask(GetAddresses("authenticator")).flatMap(list => {
      val addresses = list.asInstanceOf[List[String]]
      log.info("Sending Invalidate message to following actors: " + addresses.toString)
      val clusterSize = addresses.size
      val future = Future.sequence(addresses.map(a => {
        context.actorSelection(a).ask(Invalidate())
      })) recover {
        // In case any invalidate fails to respond in time, it is a failure
        case _ => List(InvalidationFailed())
      }
      // Success requires every node to answer, and none of them to fail.
      future.map(list =>
        if (list.contains(InvalidationFailed()) || list.size != clusterSize) {
          log.error(s"Failed to invalidate $clusterSize authenticators.")
          InvalidationFailed()
        } else {
          log.info(s"Succesfully invalidated $clusterSize authenticators.")
          InvalidationComplete()
        }) pipeTo originalSender
    })
  } else {
    // If running in nocluster mode, only invalidate local authenticator
    context.actorSelection("/user/root/authenticator").ask(Invalidate()) pipeTo originalSender
  }
}
/**
 * When an "unreachable member" message is received, check if
 * there were any runtimes running on that node. If that is the case,
 * restart those runtimes on another node.
 *
 * Replies to the original sender with a JSON summary containing the overall
 * success flag and per-runtime restart results.
 * @param member The member that has become unreachable.
 */
def restartRuntimes(member: Member) {
  // Capture the sender before entering asynchronous callbacks.
  val originalSender = sender()
  log.info("Restarting stopped runtimes after node crash")
  val authenticator = context.actorSelection("/user/root/authenticator")
  val admin = context.actorSelection("/user/root/admin")
  val uniqueAddress = member.uniqueAddress.address.toString
  val crashedMachine = Utils.machineFromPath(uniqueAddress)
  log.info(s"Node that crashed had the following properties: ${crashedMachine.toString}")
  authenticator.ask(GetAllRuntimes()).asInstanceOf[Future[List[Runtime]]].map(list => {
    // Select runtimes hosted on the crashed node. Status 1 means "running":
    // a crash leaves no opportunity to update the status beforehand.
    // (Renamed from `machine` to avoid shadowing `crashedMachine` above;
    // Option.contains replaces the isDefined/get pairs.)
    val filtered: List[Runtime] = list.filter(r => {
      val runtimeMachine = Utils.machineFromPath(r.adminPath)
      member.address.host.contains(runtimeMachine.ip) &&
        member.address.port.contains(runtimeMachine.port) &&
        r.status == 1
    })
    val results: List[Future[JObject]] = filtered.map(r => {
      authenticator.ask(GetAuthInfoFromUUID(r.owner))
        .asInstanceOf[Future[Option[AuthInfo]]].flatMap(authInfo => {
          authInfo match {
            // NOTE(review): an unknown owner yields an empty JObject, on which
            // the name/success extraction below will fail — confirm owners
            // always resolve for restartable runtimes.
            case None => Future.successful(JObject())
            case Some(a) => admin.ask(RuntimeAdminActor.CreateRuntime(r.jsonDef, a))
              .asInstanceOf[Future[JObject]]
          }
        })
    })
    Future.sequence(results).map((result: List[JObject]) => {
      log.info(s"There were ${filtered.length} runtimes running on the " +
        s"crashed node: ${filtered.toString}")
      // It is an overall success if all restarts are a success
      val success = result.forall((r: JValue) => {
        (r \ "success").extract[Boolean]
      })
      if (!success) {
        log.error("Failed to restart runtime from unreachable member")
      }
      // The individual results per runtime of the restart operation.
      val individualSuccess = result.map((r: JValue) => {
        val name = (r \ "name").extract[String]
        val ok = (r \ "success").extract[Boolean]
        JField(name, ok)
      })
      val answer =
        ("action" -> "Restart runtimes") ~
        ("success" -> success) ~
        ("individualSuccess" -> individualSuccess)
      originalSender ! answer
    })
  })
}
/**
 * Deploy the runtime on the same machine this cluster
 * distributor is also running on. Useful for test purposes
 * and single-node setups.
 * @return A future holding the single-element list containing this machine.
 */
def local: distributor = (list) => {
  // The local machine is already known synchronously, so complete the
  // future eagerly instead of scheduling work on the execution context.
  Future.successful(getLocalMachine())
}
/**
 * Returns the local machine in a list.
 * The machine description is derived from the full actor path of this
 * actor (`self`), so it reflects the node this distributor runs on.
 */
def getLocalMachine(): List[Machine] = {
  val path = Utils.getFullSelfPath(self)
  List(Utils.machineFromPath(path))
}
/**
 * Run the runtime on the next machine since the previous roundRobin
 * distribution. This means that if the previous runtime was also
 * distributed according to the roundRobin method and it was started
 * on machine 1, start the next runtime on machine 2. If there are
 * a total of 3 machines, runtimes are started on machine 1, 2, 3,
 * 1, 2, 3, etc respectively.
 * @return The selected machine.
 */
def roundRobin: distributor = (list) => {
  log.info("Choosing next machine for round-robin distribution mode")
  list.map(machines => {
    // `previousMachine` is mutable state on the enclosing actor; safe here
    // only as long as these futures run on the actor's own dispatcher flow.
    previousMachine match {
      case None =>
        // First round-robin selection ever: start at the head of the list.
        if (machines.size > 0) {
          previousMachine = Some(machines(0))
          List(machines(0))
        } else {
          List()
        }
      case Some(m) =>
        if (machines.size > 0) {
          log.info(s"Looking up index of machine $m")
          // If the previous machine has left the cluster, indexOf returns -1
          // and the selection wraps to index 0, which is a sane fallback.
          val previousIndex = machines.indexOf(m)
          val nextIndex = (previousIndex + 1) % machines.size
          log.info(s"Previous index was $previousIndex, next index will be $nextIndex.")
          val newPrevious = machines(nextIndex)
          previousMachine = Some(newPrevious)
          List(newPrevious)
        } else {
          List()
        }
    }
  })
}
/**
 * Run the runtime on the machine with the least number of runtimes
 * currently already running on it. In the case multiple machines
 * have the same number of runtimes running (or 0), the machine that
 * responds the fastest to the query how many runtimes are running on
 * it is chosen.
 * @return The selected machine.
 */
def leastNrRuntimes: distributor = (list) => {
  // Get a list of all machines and the number of runtimes
  // currently running on them. Input list is ignored here
  clusterMonitor.ask(GetPlatformStatistics()).mapTo[JObject].map(answer => {
    // TODO(review): `actors` is computed and then discarded, and the result
    // of this ask is never used — this strategy is an unimplemented stub.
    val actors = (answer \ "nrActors").extract[Int]
  })
  // Stub: currently always selects no machine.
  Future.successful(List())
}
/**
 * Run the runtime on the machine with the least number of actors
 * currently running on it. In case multiple machines have the same
 * number of actors, the one which responds the fastest is chosen.
 * @return The result of the operation.
 */
def leastNrActors: distributor = (list) => {
  // Get a list of all machines and the number of runtimes
  // currently running on them. Input list is ignored here
  clusterMonitor.ask(GetPlatformStatistics()).mapTo[JObject].map(answer => {
    // TODO(review): both locals are dead code; the statistics answer is
    // never acted upon — this strategy is an unimplemented stub.
    val counters = (answer \ "counters").extract[JObject]
    val printed = pretty(render(answer))
  })
  // Get a list of all machines and the number of actors currently running on them
  // Stub: currently always selects no machine.
  Future.successful(List())
}
/**
 * Run the runtime on a random machine.
 * @return The randomly selected machine, or an empty selection when the
 *         cluster currently has no machines to choose from.
 */
def random: distributor = (list) => {
  list.map(machines => {
    if (machines.isEmpty) {
      // Guard against rand.nextInt(0), which throws IllegalArgumentException.
      // Mirrors the empty-cluster behavior of the roundRobin strategy.
      List()
    } else {
      List(machines(rand.nextInt(machines.size)))
    }
  })
}
/**
 * Run the runtime on the least busy machine, according to
 * some criterium for business.
 * @return The result of the operation.
 */
def leastBusy: distributor = (list) => {
  // TODO: unimplemented stub — always selects no machine.
  Future.successful(List())
}
/**
 * Split the runtime and run it on the predefined list
 * of machines. In case any of the machines is unreachable or
 * not part of the cluster, that machine is left out.
 * If the number of machines is larger than the number of
 * actors in the runtime, the first N machines is taken from the list,
 * where N is equal to the number of actors in the runtime.
 * @return The selected machines.
 */
def splitPredefined: distributor = (list) => {
  // TODO: unimplemented stub — always selects no machine.
  Future.successful(List())
}
/**
 * Split the runtime and run it on the least busy N machines in the cluster.
 * @return The result of the operation.
 */
def splitLeastBusy: distributor = (list) => {
  // TODO: unimplemented stub — always selects no machine.
  Future.successful(List())
}
/**
 * Split the runtime and run it on a random N number of machines.
 * @return The result of the operation.
 */
def splitRandom: distributor = (list) => {
  // TODO: unimplemented stub — always selects no machine.
  Future.successful(List())
}
/**
 * Split the runtime and run it on the N machines with the least number
 * of runtimes currently running on it.
 * @return The result of the operation.
 */
def splitLeastNrRuntimes: distributor = (list) => {
  // TODO: unimplemented stub — always selects no machine.
  Future.successful(List())
}
/**
 * Split the runtime and run it on the N machines with the least number
 * of actors currently running on it.
 * @return The result of the operation.
 */
def splitLeastNrActors: distributor = (list) => {
  // TODO: unimplemented stub — always selects no machine.
  Future.successful(List())
}
} | coral-streaming/coral | src/main/scala/io/coral/cluster/ClusterDistributor.scala | Scala | apache-2.0 | 22,005 |
package pl.newicom.dddd.cluster
import akka.actor._
import akka.cluster.client.ClusterClientReceptionist
import akka.cluster.sharding.{ClusterShardingSettings, ClusterSharding}
import akka.cluster.sharding.ShardRegion.Passivate
import pl.newicom.dddd.actor.{BusinessEntityActorFactory, PassivationConfig}
import pl.newicom.dddd.aggregate.BusinessEntity
import pl.newicom.dddd.office.{LocalOfficeId, OfficeFactory}
trait ShardingSupport {
/**
 * Provides an [[OfficeFactory]] backed by Akka Cluster Sharding.
 * The shard region for the office is started lazily on first use and
 * registered with the cluster client receptionist so external clients
 * can reach it.
 */
implicit def globalOfficeFactory[A <: BusinessEntity : ShardResolution : BusinessEntityActorFactory: LocalOfficeId](implicit system: ActorSystem): OfficeFactory[A] = {
  new OfficeFactory[A] {
    val shardSettings = ClusterShardingSettings(system)
    override def getOrCreate(): ActorRef = {
      // Reuse the shard region if it is already running; otherwise start
      // sharding for this office and look the region up again.
      region().getOrElse {
        startSharding(shardSettings)
        region().get
      }
    }
    private def region(): Option[ActorRef] = {
      // ClusterSharding.shardRegion throws IllegalArgumentException when
      // the region has not been started yet; translate that into None.
      try {
        Some(ClusterSharding(system).shardRegion(officeId.id))
      } catch {
        case ex: IllegalArgumentException => None
      }
    }
    private def startSharding(shardSettings: ClusterShardingSettings): Unit = {
      val entityFactory = implicitly[BusinessEntityActorFactory[A]]
      // Entities passivate themselves (via Passivate/PoisonPill) after the
      // factory-configured inactivity timeout.
      val entityProps = entityFactory.props(new PassivationConfig(Passivate(PoisonPill), entityFactory.inactivityTimeout))
      val sr = implicitly[ShardResolution[A]]
      ClusterSharding(system).start(
        typeName = officeId.id,
        entityProps = entityProps,
        settings = shardSettings,
        extractEntityId = sr.idExtractor,
        extractShardId = sr.shardResolver)
      // Expose the freshly started region to cluster clients.
      ClusterClientReceptionist(system).registerService(region().get)
    }
  }
}
} | odd/akka-ddd | akka-ddd-core/src/main/scala/pl/newicom/dddd/cluster/ShardingSupport.scala | Scala | mit | 1,730 |
package cn.gridx.scala.lang.io.network.ftp
import org.apache.commons.net.ftp
/**
* Created by tao on 9/3/15.
*/
// TODO(review): empty placeholder — no FTP functionality implemented yet
// despite the commons-net import above.
class FtpClient {
}
| TaoXiao/Scala | lang/src/main/scala/cn/gridx/scala/lang/io/network/ftp/FtpClient.scala | Scala | apache-2.0 | 137 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This code is a modified version of the original Spark 1.0.2 implementation.
*/
package com.massivedatascience.clusterer
import org.apache.spark.mllib.linalg.Vector
import org.scalatest.exceptions.TestFailedException
object TestingUtils {
val ABS_TOL_MSG = " using absolute tolerance"
val REL_TOL_MSG = " using relative tolerance"
/**
 * Private helper function for comparing two values using relative tolerance.
 * Note that if x or y is extremely close to zero, i.e., smaller than Double.MinPositiveValue,
 * the relative tolerance is meaningless, so the exception will be raised to warn users.
 */
private def RelativeErrorComparison(x: Double, y: Double, eps: Double): Boolean = {
  val absX = math.abs(x)
  val absY = math.abs(y)
  val diff = math.abs(x - y)
  if (x == y) {
    // Exact equality short-circuits, covering the (0.0, 0.0) case that the
    // subnormal guard below would otherwise reject.
    true
  } else if (absX < Double.MinPositiveValue || absY < Double.MinPositiveValue) {
    throw new TestFailedException(
      s"$x or $y is extremely close to zero, so the relative tolerance is meaningless.", 0)
  } else {
    // Relative to the smaller magnitude, i.e. the stricter of the two bounds.
    diff < eps * math.min(absX, absY)
  }
}
/**
 * Private helper for absolute-tolerance comparison: true exactly when the
 * distance between `x` and `y` is strictly below `eps`.
 */
private def AbsoluteErrorComparison(x: Double, y: Double, eps: Double): Boolean = {
  // Compute |x - y| without math.abs; NaN operands propagate to a NaN
  // distance, and NaN < eps is false, matching the original behavior.
  val distance = if (x >= y) x - y else y - x
  distance < eps
}
// Right-hand side of a scalar comparison: the comparison function, the
// expected value `y`, the tolerance `eps`, and a message suffix for errors.
case class CompareDoubleRightSide(
  fun: (Double, Double, Double) => Boolean, y: Double, eps: Double, method: String)
/**
 * Implicit class for comparing two double values using relative tolerance or absolute tolerance.
 * Usage: `a ~== b.relTol(1e-8)` / `a ~= b.absTol(1e-3)` etc.
 */
implicit class DoubleWithAlmostEquals(val x: Double) {
  /**
   * When the difference of two values are within eps, returns true; otherwise, returns false.
   */
  def ~=(r: CompareDoubleRightSide): Boolean = r.fun(x, r.y, r.eps)
  /**
   * When the difference of two values are within eps, returns false; otherwise, returns true.
   */
  def !~=(r: CompareDoubleRightSide): Boolean = !r.fun(x, r.y, r.eps)
  /**
   * Throws exception when the difference of two values are NOT within eps;
   * otherwise, returns true. Intended for use inside `assert(...)`.
   */
  def ~==(r: CompareDoubleRightSide): Boolean = {
    if (!r.fun(x, r.y, r.eps)) {
      throw new TestFailedException(
        s"Expected $x and ${r.y} to be within ${r.eps}${r.method}.", 0)
    }
    true
  }
  /**
   * Throws exception when the difference of two values are within eps; otherwise, returns true.
   */
  def !~==(r: CompareDoubleRightSide): Boolean = {
    if (r.fun(x, r.y, r.eps)) {
      throw new TestFailedException(
        s"Did not expect $x and ${r.y} to be within ${r.eps}${r.method}.", 0)
    }
    true
  }
  /**
   * Comparison using absolute tolerance.
   */
  def absTol(eps: Double): CompareDoubleRightSide = CompareDoubleRightSide(AbsoluteErrorComparison,
    x, eps, ABS_TOL_MSG)
  /**
   * Comparison using relative tolerance.
   */
  def relTol(eps: Double): CompareDoubleRightSide = CompareDoubleRightSide(RelativeErrorComparison,
    x, eps, REL_TOL_MSG)
  override def toString = x.toString
}
// Right-hand side of a vector comparison; `fun` applies an element-wise
// tolerance check against the expected vector `y`.
case class CompareVectorRightSide(
  fun: (Vector, Vector, Double) => Boolean, y: Vector, eps: Double, method: String)
/**
 * Implicit class for comparing two vectors using relative tolerance or absolute tolerance.
 * All comparisons are element-wise over the dense representations of both vectors.
 */
implicit class VectorWithAlmostEquals(val x: Vector) {
  /**
   * When the difference of two vectors are within eps, returns true; otherwise, returns false.
   */
  def ~=(r: CompareVectorRightSide): Boolean = r.fun(x, r.y, r.eps)
  /**
   * When the difference of two vectors are within eps, returns false; otherwise, returns true.
   */
  def !~=(r: CompareVectorRightSide): Boolean = !r.fun(x, r.y, r.eps)
  /**
   * Throws exception when the difference of two vectors are NOT within eps;
   * otherwise, returns true.
   */
  def ~==(r: CompareVectorRightSide): Boolean = {
    if (!r.fun(x, r.y, r.eps)) {
      throw new TestFailedException(
        s"Expected $x and ${r.y} to be within ${r.eps}${r.method} for all elements.", 0)
    }
    true
  }
  /**
   * Throws exception when the difference of two vectors are within eps; otherwise, returns true.
   */
  def !~==(r: CompareVectorRightSide): Boolean = {
    if (r.fun(x, r.y, r.eps)) {
      throw new TestFailedException(
        s"Did not expect $x and ${r.y} to be within ${r.eps}${r.method} for all elements.", 0)
    }
    true
  }
  /**
   * Comparison using absolute tolerance.
   */
  def absTol(eps: Double): CompareVectorRightSide = CompareVectorRightSide(
    (x: Vector, y: Vector, eps: Double) => {
      // Element-wise absolute comparison; vectors of unequal length are
      // compared only over the zipped (shorter) prefix.
      x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 absTol eps)
    }, x, eps, ABS_TOL_MSG)
  /**
   * Comparison using relative tolerance. Note that comparing against sparse vector
   * with elements having value of zero will raise exception because it involves with
   * comparing against zero.
   */
  def relTol(eps: Double): CompareVectorRightSide = CompareVectorRightSide(
    (x: Vector, y: Vector, eps: Double) => {
      x.toArray.zip(y.toArray).forall(x => x._1 ~= x._2 relTol eps)
    }, x, eps, REL_TOL_MSG)
  override def toString = x.toString
}
} | derrickburns/generalized-kmeans-clustering | src/test/scala/com/massivedatascience/clusterer/TestingUtils.scala | Scala | apache-2.0 | 6,070 |
package com.github.ustc_zzzz.timezone.asm
import net.minecraft.launchwrapper.{IClassTransformer, Launch, LaunchClassLoader}
import net.minecraftforge.fml.common.asm.transformers.deobf.FMLDeobfuscatingRemapper
import org.apache.logging.log4j.LogManager
import org.objectweb.asm._
import scala.collection.mutable
/**
 * Companion state shared by all [[TimeZoneTransformer]] instances:
 * the set of class names that have registered hooks, and helpers to
 * force-load those classes through the launch class loader.
 */
object TimeZoneTransformer {
  // Fully-qualified names of all classes that have at least one hook.
  private val classes: mutable.TreeSet[String] = mutable.TreeSet()
  private val classLoader: LaunchClassLoader = Launch.classLoader
  // Force-load one class; a missing class is logged and skipped rather
  // than aborting the whole load pass.
  private def loadClass(c: String): Unit = try classLoader.findClass(c) catch {
    case e: ClassNotFoundException =>
      TimeZoneTransformer.logger.info("{}: skip class '{}'", Seq("TimeZoneTransformer", c): _*)
      TimeZoneTransformer.logger.debug("TimeZoneTransformer: ", e)
  }
  // Whether runtime (obfuscated) name mapping is in effect; set elsewhere in the package.
  private[asm] var enableRuntimeObf = false
  private[asm] def logger = LogManager.getLogger("TimeZone")
  private[asm] def loadClasses = classes foreach loadClass
}
trait TimeZoneTransformer extends IClassTransformer {
private val methods: mutable.HashMap[String, Map[String, MethodVisitor => MethodVisitor]] = mutable.HashMap()
private var currentMethod: String = ""
/**
 * Registers `methodProvider` as a bytecode hook for the given methods of
 * `className`. Multiple calls merge into the existing hook map; a later
 * registration for the same method name replaces the earlier one.
 */
protected def hook(className: String, methodNames: String*)(methodProvider: MethodVisitor => MethodVisitor) = {
  val origin = methods.getOrElse(className, Map.empty[String, MethodVisitor => MethodVisitor])
  // Fold every method name into the existing map for this class.
  methods.put(className, (methodNames :\\ origin) { (s, m) => m + ((s, methodProvider)) })
  // Remember the class so it can be force-loaded later.
  TimeZoneTransformer.classes += className
  ()
}
/**
 * Debug-logs `information` in the context of the method currently being
 * transformed, then returns it unchanged so the call can be inlined in
 * expression position. A null message or no current method logs nothing.
 */
protected def log(information: String) = {
  if (!currentMethod.isEmpty && information != null) {
    TimeZoneTransformer.logger.debug("- method '{}': {}", Seq(currentMethod, information): _*)
  }
  information
}
/**
 * Debug-logs just the name of the method currently being transformed,
 * if any. Parameterless variant of `log(information)`.
 */
protected def log = {
  if (!currentMethod.isEmpty) {
    TimeZoneTransformer.logger.debug("- method '{}'", Seq(currentMethod): _*)
  }
  ()
}
/**
 * IClassTransformer entry point: if hooks are registered for
 * `transformedName`, rewrite the class bytes by wrapping each hooked
 * method's MethodVisitor with the registered provider; otherwise return
 * the bytes untouched.
 */
override def transform(name: String, transformedName: String, basicClass: Array[Byte]) = {
  // Builds the ASM visitor that intercepts visitMethod and applies hooks.
  def generateVisitor(hooks: Map[String, MethodVisitor => MethodVisitor])(classWriter: ClassWriter): ClassVisitor = {
    new ClassVisitor(Opcodes.ASM4, classWriter) {
      val className = FMLDeobfuscatingRemapper.INSTANCE.unmap(name.replace('.', '/'))
      override def visitMethod(a: Int, n: String, d: String, s: String, e: Array[String]) = {
        val methodVisitor = super.visitMethod(a, n, d, s, e)
        // Map the raw method name to its deobfuscated form for matching.
        currentMethod = FMLDeobfuscatingRemapper.INSTANCE.mapMethodName(className, n, d)
        if (TimeZoneTransformer.enableRuntimeObf) hooks.get(currentMethod) match {
          // Runtime-obf environment: direct lookup by mapped name.
          case None => methodVisitor
          case Some(methodProvider) => methodProvider(methodVisitor)
        } else (hooks :\\ methodVisitor) {
          // Dev environment: remap each hook key and compare, chaining any
          // matching providers around the base visitor.
          case ((methodName, methodProvider), visitor) =>
            val mappedName = FMLDeobfuscatingRemapper.INSTANCE.mapMethodName(className, methodName, d)
            if (mappedName == currentMethod) methodProvider(visitor) else visitor
        }
      }
    }
  }
  methods.get(transformedName) match {
    case None => basicClass
    case Some(hooks) =>
      val classReader = new ClassReader(basicClass)
      val classWriter = new ClassWriter(classReader, ClassWriter.COMPUTE_MAXS + ClassWriter.COMPUTE_FRAMES)
      TimeZoneTransformer.logger.info("{}: inject codes into class '{}'", Seq(getClass.getSimpleName, transformedName): _*)
      classReader.accept(generateVisitor(hooks)(classWriter), ClassReader.EXPAND_FRAMES)
      classWriter.toByteArray
  }
}
} | ustc-zzzz/TimeZone | src/main/scala/com/github/ustc_zzzz/timezone/asm/TimeZoneTransformer.scala | Scala | lgpl-3.0 | 3,538 |
package scuff.web
import javax.servlet._
import scuff._
import scala.concurrent.duration._
import scala.util.Try
import java.time.Clock
import java.time.OffsetDateTime
/**
 * Companion helpers for [[CookieMonster]]: cookie-name validation data,
 * the session-cookie sentinel, the SameSite enumeration, and conversions
 * from absolute expiry timestamps to relative Max-Age durations.
 */
object CookieMonster {
  // RFC 2616 separator characters, which are illegal in cookie names.
  private val SEP = "()<>@,;:\\\\\\"/[]?={}".toSet
  private val NotSep = {
    val isSep = (SEP.apply _)
    isSep.negate
  }
  // Sentinel duration meaning "session cookie" (no Max-Age attribute emitted).
  private final val SessionDuration: FiniteDuration = -1.seconds
  sealed trait SameSite extends Enum.Value
  object SameSite extends Enum[SameSite] {
    // `omit` suppresses the SameSite attribute entirely.
    val Lax, Strict, None, omit = new Val with SameSite
  }
  /** Convert Expires timestamp to MaxAge seconds, using current time. */
  final def toMaxAge(expires: Long, unit: TimeUnit)(
    implicit
    clock: Clock): FiniteDuration = {
    val expiresMillis = unit toMillis expires
    // Truncates toward zero at second resolution.
    val diff = expiresMillis - clock.millis
    (diff / 1000).seconds
  }
  final def toMaxAge(expires: OffsetDateTime)(
    implicit
    clock: Clock): FiniteDuration =
    toMaxAge(expires.toEpochSecond, SECONDS)
  final def toMaxAge(expires: java.util.Date)(
    implicit
    clock: Clock): FiniteDuration =
    toMaxAge(expires.getTime, MILLISECONDS)
}
/**
 * Typed cookie definition: a named cookie whose value is encoded/decoded
 * through a [[Codec]], with declarative attributes (Max-Age, SameSite,
 * Secure, HttpOnly, Path, Domain).
 */
trait CookieMonster[T] {
  /**
   * Assign to `maxAge` for session cookies.
   */
  final def SessionCookie: FiniteDuration = CookieMonster.SessionDuration
  /** Max-age in seconds. Use `SessionCookie` for session cookie. */
  protected def maxAge: FiniteDuration
  /** Codec translating the typed value to/from its cookie string form. */
  protected def codec: Codec[T, String]
  /** Cookie name; validated lazily in `validName`. */
  def name: String
  /**
   * HTTP only cookie? Defaults to `true`.
   */
  protected def isHttpOnly = true
  protected def SameSite = CookieMonster.SameSite
  /** `SameSite` value. Defaults to `Lax`. */
  protected def sameSite: CookieMonster.SameSite = SameSite.Lax
  /**
   * Secure cookie? Defaults to `false`.
   */
  protected def isSecure = false
  /**
   * URL scope for cookie. Default is root.
   */
  protected def path: String = null
  /**
   * Domain scope for cookie.
   * Per the Cookie API: "By default, cookies are only returned to the server that sent them."
   */
  protected def domain(req: http.HttpServletRequest): String = null
  // Validates `name` once against RFC token rules (non-empty, no spaces,
  // no control chars, no separator chars); fails fast on first use.
  private lazy val validName: String = {
    val name = this.name
    require(name.length > 0, "Cookie name cannot be empty")
    require(name.forall(c => c > 32 && c != 127), "Cookie name cannot contain spaces or CTL chars")
    require(name.forall(CookieMonster.NotSep), "Cookie name cannot contain separator chars")
    name
  }
  /**
   * Set value as cookie on response.
   * @param res Response object
   * @param value Cookie value
   * @param overrideMaxAge Optional Max-Age override
   * @param overridePath Optional Path override
   * @param req Implicit request object
   */
  def set(res: http.HttpServletResponse, value: T, overrideMaxAge: FiniteDuration = this.maxAge, overridePath: String = this.path)(implicit req: http.HttpServletRequest): Unit = {
    // NOTE(review): the encoded value is emitted verbatim — the codec is
    // assumed to produce cookie-safe octets; verify for codecs emitting
    // ';', ',' or whitespace.
    val encodedValue = codec encode value
    val cookie = new java.lang.StringBuilder(validName.length + encodedValue.length + 200)
    cookie append validName append '=' append encodedValue
    if (sameSite != SameSite.omit) cookie append "; SameSite=" append sameSite
    if (isSecure) cookie append "; Secure"
    if (isHttpOnly) cookie append "; HttpOnly"
    // NOTE(review): compares FiniteDuration.length (unit-less number), not
    // the durations themselves, to detect the session sentinel (-1.seconds);
    // e.g. -1.minutes would also be treated as a session cookie — confirm intended.
    if (overrideMaxAge.length != SessionCookie.length) {
      cookie append "; Max-Age=" append overrideMaxAge.toSeconds
    }
    domain(req) match {
      case null => // Ignore
      case domain => cookie append "; Domain=" append domain
    }
    if (overridePath != null) cookie append "; Path=" append overridePath
    res.addHeader("Set-Cookie", cookie.toString)
  }
  /**
   * Get value from cookie on request.
   * Returns None if the cookie is absent or its value fails to decode.
   */
  def get(request: http.HttpServletRequest): Option[T] = {
    Option(request.getCookies).flatMap { array =>
      array.find(_.getName == name).flatMap { c =>
        Try(codec.decode(c.getValue)).toOption
      }
    }
  }
  /**
   * Remove cookie.
   */
  def remove(res: http.HttpServletResponse): Unit = {
    val cookie = new http.Cookie(name, "")
    cookie.setMaxAge(0) // Remove cookie
    res.addCookie(cookie)
  }
}
// A CookieMonster whose codec is an HMAC-signed codec, so cookie values
// are integrity-protected against client-side tampering.
trait HmacCookieMonster[T] extends CookieMonster[T] {
  protected def hmac: Hmac[T, String]
  protected def codec = hmac
}
| nilskp/scuff | src/main/scala/scuff/web/CookieMonster.scala | Scala | mit | 4,236 |
package org.jetbrains.plugins.scala.testingSupport.scalatest.scala2_11.scalatest3_0_1
import org.jetbrains.plugins.scala.SlowTests
import org.jetbrains.plugins.scala.testingSupport.scalatest.IgnoredSpecTest
import org.junit.experimental.categories.Category
/**
 * Runs the shared [[IgnoredSpecTest]] suite against the
 * Scala 2.11 / ScalaTest 3.0.1 test base.
 * NOTE(review): "Ingored" in the class name is a typo for "Ignored";
 * kept as-is since renaming would change the public class name.
 * @author Roman.Shein
 * @since 10.03.2017
 */
@Category(Array(classOf[SlowTests]))
class Scalatest2_11_3_0_1_IngoredTestTest extends Scalatest2_11_3_0_1_Base with IgnoredSpecTest
| triplequote/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/testingSupport/scalatest/scala2_11/scalatest3_0_1/Scalatest2_11_3_0_1_IngoredTestTest.scala | Scala | apache-2.0 | 447 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.{ByteArrayOutputStream, File}
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import java.util.UUID
import scala.util.Random
import org.scalatest.Matchers._
import org.apache.spark.SparkException
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.expressions.Uuid
import org.apache.spark.sql.catalyst.optimizer.ConvertToLocalRelation
import org.apache.spark.sql.catalyst.plans.logical.{OneRowRelation, Union}
import org.apache.spark.sql.execution.{FilterExec, QueryExecution, WholeStageCodegenExec}
import org.apache.spark.sql.execution.aggregate.HashAggregateExec
import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ReusedExchangeExec, ShuffleExchangeExec}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.{ExamplePoint, ExamplePointUDT, SharedSQLContext}
import org.apache.spark.sql.test.SQLTestData.{NullStrings, TestData2}
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
import org.apache.spark.util.random.XORShiftRandom
class DataFrameSuite extends QueryTest with SharedSQLContext {
import testImplicits._
// --- Basic DataFrame behavior: eager analysis, toString, simple access ---
test("analysis error should be eagerly reported") {
  // Unresolvable columns must fail at analysis time, not at execution.
  intercept[Exception] { testData.select('nonExistentName) }
  intercept[Exception] {
    testData.groupBy('key).agg(Map("nonExistentName" -> "sum"))
  }
  intercept[Exception] {
    testData.groupBy("nonExistentName").agg(Map("key" -> "sum"))
  }
  intercept[Exception] {
    testData.groupBy($"abcd").agg(Map("key" -> "sum"))
  }
}
test("dataframe toString") {
  assert(testData.toString === "[key: int, value: string]")
  assert(testData("key").toString === "key")
  assert($"test".toString === "test")
}
test("rename nested groupby") {
  val df = Seq((1, (1, 1))).toDF()
  checkAnswer(
    df.groupBy("_1").agg(sum("_2._1")).toDF("key", "total"),
    Row(1, 1) :: Nil)
}
test("access complex data") {
  // Item/field accessors on array, map and struct columns.
  assert(complexData.filter(complexData("a").getItem(0) === 2).count() == 1)
  assert(complexData.filter(complexData("m").getItem("1") === 1).count() == 1)
  assert(complexData.filter(complexData("s").getField("key") === 1).count() == 1)
}
test("table scan") {
  checkAnswer(
    testData,
    testData.collect().toSeq)
}
test("empty data frame") {
  assert(spark.emptyDataFrame.columns.toSeq === Seq.empty[String])
  assert(spark.emptyDataFrame.count() === 0)
}
test("head and take") {
  assert(testData.take(2) === testData.collect().take(2))
  assert(testData.head(2) === testData.collect().take(2))
  assert(testData.head(2).head.schema === testData.schema)
}
test("dataframe alias") {
  // Column resolution must work through both the original and the new alias.
  val df = Seq(Tuple1(1)).toDF("c").as("t")
  dfAlias = df.alias("t2")
  df.col("t.c")
  dfAlias.col("t2.c")
}
// --- Star expansion ("*") inside struct/array/hash builders and explode ---
test("Star Expansion - CreateStruct and CreateArray") {
  val structDf = testData2.select("a", "b").as("record")
  // CreateStruct and CreateArray in aggregateExpressions
  assert(structDf.groupBy($"a").agg(min(struct($"record.*"))).first() == Row(3, Row(3, 1)))
  assert(structDf.groupBy($"a").agg(min(array($"record.*"))).first() == Row(3, Seq(3, 1)))
  // CreateStruct and CreateArray in project list (unresolved alias)
  assert(structDf.select(struct($"record.*")).first() == Row(Row(1, 1)))
  assert(structDf.select(array($"record.*")).first().getAs[Seq[Int]](0) === Seq(1, 1))
  // CreateStruct and CreateArray in project list (alias)
  assert(structDf.select(struct($"record.*").as("a")).first() == Row(Row(1, 1)))
  assert(structDf.select(array($"record.*").as("a")).first().getAs[Seq[Int]](0) === Seq(1, 1))
}
test("Star Expansion - hash") {
  // hash($"*") must be equivalent to hashing the expanded column list.
  val structDf = testData2.select("a", "b").as("record")
  checkAnswer(
    structDf.groupBy($"a", $"b").agg(min(hash($"a", $"*"))),
    structDf.groupBy($"a", $"b").agg(min(hash($"a", $"a", $"b"))))
  checkAnswer(
    structDf.groupBy($"a", $"b").agg(hash($"a", $"*")),
    structDf.groupBy($"a", $"b").agg(hash($"a", $"a", $"b")))
  checkAnswer(
    structDf.select(hash($"*")),
    structDf.select(hash($"record.*")))
  checkAnswer(
    structDf.select(hash($"a", $"*")),
    structDf.select(hash($"a", $"record.*")))
}
test("Star Expansion - xxhash64") {
  val structDf = testData2.select("a", "b").as("record")
  checkAnswer(
    structDf.groupBy($"a", $"b").agg(min(xxhash64($"a", $"*"))),
    structDf.groupBy($"a", $"b").agg(min(xxhash64($"a", $"a", $"b"))))
  checkAnswer(
    structDf.groupBy($"a", $"b").agg(xxhash64($"a", $"*")),
    structDf.groupBy($"a", $"b").agg(xxhash64($"a", $"a", $"b")))
  checkAnswer(
    structDf.select(xxhash64($"*")),
    structDf.select(xxhash64($"record.*")))
  checkAnswer(
    structDf.select(xxhash64($"a", $"*")),
    structDf.select(xxhash64($"a", $"record.*")))
}
test("Star Expansion - explode should fail with a meaningful message if it takes a star") {
  val df = Seq(("1,2"), ("4"), ("7,8,9")).toDF("csv")
  val e = intercept[AnalysisException] {
    df.select(explode($"*"))
  }
  assert(e.getMessage.contains("Invalid usage of '*' in expression 'explode'"))
}
test("explode on output of array-valued function") {
  val df = Seq(("1,2"), ("4"), ("7,8,9")).toDF("csv")
  checkAnswer(
    df.select(explode(split($"csv", ","))),
    Row("1") :: Row("2") :: Row("4") :: Row("7") :: Row("8") :: Row("9") :: Nil)
}
test("Star Expansion - explode alias and star") {
  val df = Seq((Array("a"), 1)).toDF("a", "b")
  checkAnswer(
    df.select(explode($"a").as("a"), $"*"),
    Row("a", Seq("a"), 1) :: Nil)
}
test("sort after generate with join=true") {
  val df = Seq((Array("a"), 1)).toDF("a", "b")
  checkAnswer(
    df.select($"*", explode($"a").as("c")).sortWithinPartitions("b", "c"),
    Row(Seq("a"), 1, "a") :: Nil)
}
// --- SQL-expression-based projection and filtering ---
test("selectExpr") {
  checkAnswer(
    testData.selectExpr("abs(key)", "value"),
    testData.collect().map(row => Row(math.abs(row.getInt(0)), row.getString(1))).toSeq)
}
test("selectExpr with alias") {
  checkAnswer(
    testData.selectExpr("key as k").select("k"),
    testData.select("key").collect().toSeq)
}
test("selectExpr with udtf") {
  // A generator (explode) is allowed directly inside selectExpr.
  val df = Seq((Map("1" -> 1), 1)).toDF("a", "b")
  checkAnswer(
    df.selectExpr("explode(a)"),
    Row("1", 1) :: Nil)
}
test("filterExpr") {
  // All numeric-literal spellings below must parse to the same predicate.
  val res = testData.collect().filter(_.getInt(0) > 90).toSeq
  checkAnswer(testData.filter("key > 90"), res)
  checkAnswer(testData.filter("key > 9.0e1"), res)
  checkAnswer(testData.filter("key > .9e+2"), res)
  checkAnswer(testData.filter("key > 0.9e+2"), res)
  checkAnswer(testData.filter("key > 900e-1"), res)
  checkAnswer(testData.filter("key > 900.0E-1"), res)
  checkAnswer(testData.filter("key > 9.e+1"), res)
}
test("filterExpr using where") {
  checkAnswer(
    testData.where("key > 50"),
    testData.collect().filter(_.getInt(0) > 50).toSeq)
}
// --- Partitioning operators: repartition, repartitionByRange, coalesce ---
test("repartition") {
  // Zero partitions is rejected up front.
  intercept[IllegalArgumentException] {
    testData.select('key).repartition(0)
  }
  checkAnswer(
    testData.select('key).repartition(10).select('key),
    testData.select('key).collect().toSeq)
}
test("repartition with SortOrder") {
  // passing SortOrder expressions to .repartition() should result in an informative error
  def checkSortOrderErrorMsg[T](data: => Dataset[T]): Unit = {
    val ex = intercept[IllegalArgumentException](data)
    assert(ex.getMessage.contains("repartitionByRange"))
  }
  checkSortOrderErrorMsg {
    Seq(0).toDF("a").repartition(2, $"a".asc)
  }
  checkSortOrderErrorMsg {
    Seq((0, 0)).toDF("a", "b").repartition(2, $"a".asc, $"b")
  }
}
test("repartitionByRange") {
  val data1d = Random.shuffle(0.to(9))
  val data2d = data1d.map(i => (i, data1d.size - i))
  // One partition per value: partition id equals the value's rank.
  checkAnswer(
    data1d.toDF("val").repartitionByRange(data1d.size, $"val".asc)
      .select(spark_partition_id().as("id"), $"val"),
    data1d.map(i => Row(i, i)))
  checkAnswer(
    data1d.toDF("val").repartitionByRange(data1d.size, $"val".desc)
      .select(spark_partition_id().as("id"), $"val"),
    data1d.map(i => Row(i, data1d.size - 1 - i)))
  // A constant range key collapses everything into partition 0.
  checkAnswer(
    data1d.toDF("val").repartitionByRange(data1d.size, lit(42))
      .select(spark_partition_id().as("id"), $"val"),
    data1d.map(i => Row(0, i)))
  checkAnswer(
    data1d.toDF("val").repartitionByRange(data1d.size, lit(null), $"val".asc, rand())
      .select(spark_partition_id().as("id"), $"val"),
    data1d.map(i => Row(i, i)))
  // .repartitionByRange() assumes .asc by default if no explicit sort order is specified
  checkAnswer(
    data2d.toDF("a", "b").repartitionByRange(data2d.size, $"a".desc, $"b")
      .select(spark_partition_id().as("id"), $"a", $"b"),
    data2d.toDF("a", "b").repartitionByRange(data2d.size, $"a".desc, $"b".asc)
      .select(spark_partition_id().as("id"), $"a", $"b"))
  // at least one partition-by expression must be specified
  intercept[IllegalArgumentException] {
    data1d.toDF("val").repartitionByRange(data1d.size)
  }
  intercept[IllegalArgumentException] {
    data1d.toDF("val").repartitionByRange(data1d.size, Seq.empty: _*)
  }
}
test("coalesce") {
  intercept[IllegalArgumentException] {
    testData.select('key).coalesce(0)
  }
  assert(testData.select('key).coalesce(1).rdd.partitions.size === 1)
  checkAnswer(
    testData.select('key).coalesce(1).select('key),
    testData.select('key).collect().toSeq)
  assert(spark.emptyDataFrame.coalesce(1).rdd.partitions.size === 1)
}
// --- Column references, select, aggregates, and ordering semantics ---
test("convert $\\"attribute name\\" into unresolved attribute") {
  checkAnswer(
    testData.where($"key" === lit(1)).select($"value"),
    Row("1"))
}
test("convert Scala Symbol 'attrname into unresolved attribute") {
  checkAnswer(
    testData.where('key === lit(1)).select('value),
    Row("1"))
}
test("select *") {
  checkAnswer(
    testData.select($"*"),
    testData.collect().toSeq)
}
test("simple select") {
  checkAnswer(
    testData.where('key === lit(1)).select('value),
    Row("1"))
}
test("select with functions") {
  checkAnswer(
    testData.select(sum('value), avg('value), count(lit(1))),
    Row(5050.0, 50.5, 100))
  checkAnswer(
    testData2.select('a + 'b, 'a < 'b),
    Seq(
      Row(2, false),
      Row(3, true),
      Row(3, false),
      Row(4, false),
      Row(4, false),
      Row(5, false)))
  checkAnswer(
    testData2.select(sumDistinct('a)),
    Row(6))
}
test("sorting with null ordering") {
  // Verifies every asc/desc × nulls-first/nulls-last combination, in both
  // the Column-method and functions-API spellings.
  val data = Seq[java.lang.Integer](2, 1, null).toDF("key")
  checkAnswer(data.orderBy('key.asc), Row(null) :: Row(1) :: Row(2) :: Nil)
  checkAnswer(data.orderBy(asc("key")), Row(null) :: Row(1) :: Row(2) :: Nil)
  checkAnswer(data.orderBy('key.asc_nulls_first), Row(null) :: Row(1) :: Row(2) :: Nil)
  checkAnswer(data.orderBy(asc_nulls_first("key")), Row(null) :: Row(1) :: Row(2) :: Nil)
  checkAnswer(data.orderBy('key.asc_nulls_last), Row(1) :: Row(2) :: Row(null) :: Nil)
  checkAnswer(data.orderBy(asc_nulls_last("key")), Row(1) :: Row(2) :: Row(null) :: Nil)
  checkAnswer(data.orderBy('key.desc), Row(2) :: Row(1) :: Row(null) :: Nil)
  checkAnswer(data.orderBy(desc("key")), Row(2) :: Row(1) :: Row(null) :: Nil)
  checkAnswer(data.orderBy('key.desc_nulls_first), Row(null) :: Row(2) :: Row(1) :: Nil)
  checkAnswer(data.orderBy(desc_nulls_first("key")), Row(null) :: Row(2) :: Row(1) :: Nil)
  checkAnswer(data.orderBy('key.desc_nulls_last), Row(2) :: Row(1) :: Row(null) :: Nil)
  checkAnswer(data.orderBy(desc_nulls_last("key")), Row(2) :: Row(1) :: Row(null) :: Nil)
}
test("global sorting") {
  checkAnswer(
    testData2.orderBy('a.asc, 'b.asc),
    Seq(Row(1, 1), Row(1, 2), Row(2, 1), Row(2, 2), Row(3, 1), Row(3, 2)))
  checkAnswer(
    testData2.orderBy(asc("a"), desc("b")),
    Seq(Row(1, 2), Row(1, 1), Row(2, 2), Row(2, 1), Row(3, 2), Row(3, 1)))
  checkAnswer(
    testData2.orderBy('a.asc, 'b.desc),
    Seq(Row(1, 2), Row(1, 1), Row(2, 2), Row(2, 1), Row(3, 2), Row(3, 1)))
  checkAnswer(
    testData2.orderBy('a.desc, 'b.desc),
    Seq(Row(3, 2), Row(3, 1), Row(2, 2), Row(2, 1), Row(1, 2), Row(1, 1)))
  checkAnswer(
    testData2.orderBy('a.desc, 'b.asc),
    Seq(Row(3, 1), Row(3, 2), Row(2, 1), Row(2, 2), Row(1, 1), Row(1, 2)))
  // Sorting by an expression over a nested array element.
  checkAnswer(
    arrayData.toDF().orderBy('data.getItem(0).asc),
    arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(0)).toSeq)
  checkAnswer(
    arrayData.toDF().orderBy('data.getItem(0).desc),
    arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(0)).reverse.toSeq)
  checkAnswer(
    arrayData.toDF().orderBy('data.getItem(1).asc),
    arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(1)).toSeq)
  checkAnswer(
    arrayData.toDF().orderBy('data.getItem(1).desc),
    arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(1)).reverse.toSeq)
}
// --- limit, UDFs, and withColumn/withColumns ---
test("limit") {
  checkAnswer(
    testData.limit(10),
    testData.take(10).toSeq)
  checkAnswer(
    arrayData.toDF().limit(1),
    arrayData.take(1).map(r => Row.fromSeq(r.productIterator.toSeq)))
  checkAnswer(
    mapData.toDF().limit(1),
    mapData.take(1).map(r => Row.fromSeq(r.productIterator.toSeq)))
  // SPARK-12340: overstep the bounds of Int in SparkPlan.executeTake
  checkAnswer(
    spark.range(2).toDF().limit(2147483638),
    Row(0) :: Row(1) :: Nil
  )
}
test("udf") {
  val foo = udf((a: Int, b: String) => a.toString + b)
  checkAnswer(
    // SELECT *, foo(key, value) FROM testData
    testData.select($"*", foo('key, 'value)).limit(3),
    Row(1, "1", "11") :: Row(2, "2", "22") :: Row(3, "3", "33") :: Nil
  )
}
test("callUDF without Hive Support") {
  val df = Seq(("id1", 1), ("id2", 4), ("id3", 5)).toDF("id", "value")
  df.sparkSession.udf.register("simpleUDF", (v: Int) => v * v)
  checkAnswer(
    df.select($"id", callUDF("simpleUDF", $"value")),
    Row("id1", 1) :: Row("id2", 16) :: Row("id3", 25) :: Nil)
}
test("withColumn") {
  val df = testData.toDF().withColumn("newCol", col("key") + 1)
  checkAnswer(
    df,
    testData.collect().map { case Row(key: Int, value: String) =>
      Row(key, value, key + 1)
    }.toSeq)
  assert(df.schema.map(_.name) === Seq("key", "value", "newCol"))
}
test("withColumns") {
  val df = testData.toDF().withColumns(Seq("newCol1", "newCol2"),
    Seq(col("key") + 1, col("key") + 2))
  checkAnswer(
    df,
    testData.collect().map { case Row(key: Int, value: String) =>
      Row(key, value, key + 1, key + 2)
    }.toSeq)
  assert(df.schema.map(_.name) === Seq("key", "value", "newCol1", "newCol2"))
  // Mismatched name/column counts and (case-insensitive) duplicate names fail.
  val err = intercept[IllegalArgumentException] {
    testData.toDF().withColumns(Seq("newCol1"),
      Seq(col("key") + 1, col("key") + 2))
  }
  assert(
    err.getMessage.contains("The size of column names: 1 isn't equal to the size of columns: 2"))
  val err2 = intercept[AnalysisException] {
    testData.toDF().withColumns(Seq("newCol1", "newCOL1"),
      Seq(col("key") + 1, col("key") + 2))
  }
  assert(err2.getMessage.contains("Found duplicate column(s)"))
}
test("withColumns: case sensitive") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
val df = testData.toDF().withColumns(Seq("newCol1", "newCOL1"),
Seq(col("key") + 1, col("key") + 2))
checkAnswer(
df,
testData.collect().map { case Row(key: Int, value: String) =>
Row(key, value, key + 1, key + 2)
}.toSeq)
assert(df.schema.map(_.name) === Seq("key", "value", "newCol1", "newCOL1"))
val err = intercept[AnalysisException] {
testData.toDF().withColumns(Seq("newCol1", "newCol1"),
Seq(col("key") + 1, col("key") + 2))
}
assert(err.getMessage.contains("Found duplicate column(s)"))
}
}
test("withColumns: given metadata") {
def buildMetadata(num: Int): Seq[Metadata] = {
(0 until num).map { n =>
val builder = new MetadataBuilder
builder.putLong("key", n.toLong)
builder.build()
}
}
val df = testData.toDF().withColumns(
Seq("newCol1", "newCol2"),
Seq(col("key") + 1, col("key") + 2),
buildMetadata(2))
df.select("newCol1", "newCol2").schema.zipWithIndex.foreach { case (col, idx) =>
assert(col.metadata.getLong("key").toInt === idx)
}
val err = intercept[IllegalArgumentException] {
testData.toDF().withColumns(
Seq("newCol1", "newCol2"),
Seq(col("key") + 1, col("key") + 2),
buildMetadata(1))
}
assert(err.getMessage.contains(
"The size of column names: 2 isn't equal to the size of metadata elements: 1"))
}
test("replace column using withColumn") {
val df2 = sparkContext.parallelize(Array(1, 2, 3)).toDF("x")
val df3 = df2.withColumn("x", df2("x") + 1)
checkAnswer(
df3.select("x"),
Row(2) :: Row(3) :: Row(4) :: Nil)
}
test("replace column using withColumns") {
val df2 = sparkContext.parallelize(Array((1, 2), (2, 3), (3, 4))).toDF("x", "y")
val df3 = df2.withColumns(Seq("x", "newCol1", "newCol2"),
Seq(df2("x") + 1, df2("y"), df2("y") + 1))
checkAnswer(
df3.select("x", "newCol1", "newCol2"),
Row(2, 2, 3) :: Row(3, 3, 4) :: Row(4, 4, 5) :: Nil)
}
test("drop column using drop") {
val df = testData.drop("key")
checkAnswer(
df,
testData.collect().map(x => Row(x.getString(1))).toSeq)
assert(df.schema.map(_.name) === Seq("value"))
}
test("drop columns using drop") {
val src = Seq((0, 2, 3)).toDF("a", "b", "c")
val df = src.drop("a", "b")
checkAnswer(df, Row(3))
assert(df.schema.map(_.name) === Seq("c"))
}
test("drop unknown column (no-op)") {
val df = testData.drop("random")
checkAnswer(
df,
testData.collect().toSeq)
assert(df.schema.map(_.name) === Seq("key", "value"))
}
test("drop column using drop with column reference") {
val col = testData("key")
val df = testData.drop(col)
checkAnswer(
df,
testData.collect().map(x => Row(x.getString(1))).toSeq)
assert(df.schema.map(_.name) === Seq("value"))
}
test("SPARK-28189 drop column using drop with column reference with case-insensitive names") {
// With SQL config caseSensitive OFF, case insensitive column name should work
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val col1 = testData("KEY")
val df1 = testData.drop(col1)
checkAnswer(df1, testData.selectExpr("value"))
assert(df1.schema.map(_.name) === Seq("value"))
val col2 = testData("Key")
val df2 = testData.drop(col2)
checkAnswer(df2, testData.selectExpr("value"))
assert(df2.schema.map(_.name) === Seq("value"))
}
}
test("drop unknown column (no-op) with column reference") {
val col = Column("random")
val df = testData.drop(col)
checkAnswer(
df,
testData.collect().toSeq)
assert(df.schema.map(_.name) === Seq("key", "value"))
}
test("drop unknown column with same name with column reference") {
val col = Column("key")
val df = testData.drop(col)
checkAnswer(
df,
testData.collect().map(x => Row(x.getString(1))).toSeq)
assert(df.schema.map(_.name) === Seq("value"))
}
test("drop column after join with duplicate columns using column reference") {
val newSalary = salary.withColumnRenamed("personId", "id")
val col = newSalary("id")
// this join will result in duplicate "id" columns
val joinedDf = person.join(newSalary,
person("id") === newSalary("id"), "inner")
// remove only the "id" column that was associated with newSalary
val df = joinedDf.drop(col)
checkAnswer(
df,
joinedDf.collect().map {
case Row(id: Int, name: String, age: Int, idToDrop: Int, salary: Double) =>
Row(id, name, age, salary)
}.toSeq)
assert(df.schema.map(_.name) === Seq("id", "name", "age", "salary"))
assert(df("id") == person("id"))
}
test("drop top level columns that contains dot") {
val df1 = Seq((1, 2)).toDF("a.b", "a.c")
checkAnswer(df1.drop("a.b"), Row(2))
// Creates data set: {"a.b": 1, "a": {"b": 3}}
val df2 = Seq((1)).toDF("a.b").withColumn("a", struct(lit(3) as "b"))
// Not like select(), drop() parses the column name "a.b" literally without interpreting "."
checkAnswer(df2.drop("a.b").select("a.b"), Row(3))
// "`" is treated as a normal char here with no interpreting, "`a`b" is a valid column name.
assert(df2.drop("`a.b`").columns.size == 2)
}
test("drop(name: String) search and drop all top level columns that matchs the name") {
val df1 = Seq((1, 2)).toDF("a", "b")
val df2 = Seq((3, 4)).toDF("a", "b")
checkAnswer(df1.crossJoin(df2), Row(1, 2, 3, 4))
// Finds and drops all columns that match the name (case insensitive).
checkAnswer(df1.crossJoin(df2).drop("A"), Row(2, 4))
}
test("withColumnRenamed") {
val df = testData.toDF().withColumn("newCol", col("key") + 1)
.withColumnRenamed("value", "valueRenamed")
checkAnswer(
df,
testData.collect().map { case Row(key: Int, value: String) =>
Row(key, value, key + 1)
}.toSeq)
assert(df.schema.map(_.name) === Seq("key", "valueRenamed", "newCol"))
}
  // Fixture shared by the describe()/summary() tests below: four people with a
  // string "name" column and numeric "age"/"height" columns, so the statistics
  // cover both string and numeric behavior.
  private lazy val person2: DataFrame = Seq(
    ("Bob", 16, 176),
    ("Alice", 32, 164),
    ("David", 60, 192),
    ("Amy", 24, 180)).toDF("name", "age", "height")
test("describe") {
val describeResult = Seq(
Row("count", "4", "4", "4"),
Row("mean", null, "33.0", "178.0"),
Row("stddev", null, "19.148542155126762", "11.547005383792516"),
Row("min", "Alice", "16", "164"),
Row("max", "David", "60", "192"))
val emptyDescribeResult = Seq(
Row("count", "0", "0", "0"),
Row("mean", null, null, null),
Row("stddev", null, null, null),
Row("min", null, null, null),
Row("max", null, null, null))
def getSchemaAsSeq(df: DataFrame): Seq[String] = df.schema.map(_.name)
val describeAllCols = person2.describe()
assert(getSchemaAsSeq(describeAllCols) === Seq("summary", "name", "age", "height"))
checkAnswer(describeAllCols, describeResult)
// All aggregate value should have been cast to string
describeAllCols.collect().foreach { row =>
row.toSeq.foreach { value =>
if (value != null) {
assert(value.isInstanceOf[String], "expected string but found " + value.getClass)
}
}
}
val describeOneCol = person2.describe("age")
assert(getSchemaAsSeq(describeOneCol) === Seq("summary", "age"))
checkAnswer(describeOneCol, describeResult.map { case Row(s, _, d, _) => Row(s, d)} )
val describeNoCol = person2.select().describe()
assert(getSchemaAsSeq(describeNoCol) === Seq("summary"))
checkAnswer(describeNoCol, describeResult.map { case Row(s, _, _, _) => Row(s)} )
val emptyDescription = person2.limit(0).describe()
assert(getSchemaAsSeq(emptyDescription) === Seq("summary", "name", "age", "height"))
checkAnswer(emptyDescription, emptyDescribeResult)
}
test("summary") {
val summaryResult = Seq(
Row("count", "4", "4", "4"),
Row("mean", null, "33.0", "178.0"),
Row("stddev", null, "19.148542155126762", "11.547005383792516"),
Row("min", "Alice", "16", "164"),
Row("25%", null, "16", "164"),
Row("50%", null, "24", "176"),
Row("75%", null, "32", "180"),
Row("max", "David", "60", "192"))
val emptySummaryResult = Seq(
Row("count", "0", "0", "0"),
Row("mean", null, null, null),
Row("stddev", null, null, null),
Row("min", null, null, null),
Row("25%", null, null, null),
Row("50%", null, null, null),
Row("75%", null, null, null),
Row("max", null, null, null))
def getSchemaAsSeq(df: DataFrame): Seq[String] = df.schema.map(_.name)
val summaryAllCols = person2.summary()
assert(getSchemaAsSeq(summaryAllCols) === Seq("summary", "name", "age", "height"))
checkAnswer(summaryAllCols, summaryResult)
// All aggregate value should have been cast to string
summaryAllCols.collect().foreach { row =>
row.toSeq.foreach { value =>
if (value != null) {
assert(value.isInstanceOf[String], "expected string but found " + value.getClass)
}
}
}
val summaryOneCol = person2.select("age").summary()
assert(getSchemaAsSeq(summaryOneCol) === Seq("summary", "age"))
checkAnswer(summaryOneCol, summaryResult.map { case Row(s, _, d, _) => Row(s, d)} )
val summaryNoCol = person2.select().summary()
assert(getSchemaAsSeq(summaryNoCol) === Seq("summary"))
checkAnswer(summaryNoCol, summaryResult.map { case Row(s, _, _, _) => Row(s)} )
val emptyDescription = person2.limit(0).summary()
assert(getSchemaAsSeq(emptyDescription) === Seq("summary", "name", "age", "height"))
checkAnswer(emptyDescription, emptySummaryResult)
}
test("summary advanced") {
val stats = Array("count", "50.01%", "max", "mean", "min", "25%")
val orderMatters = person2.summary(stats: _*)
assert(orderMatters.collect().map(_.getString(0)) === stats)
val onlyPercentiles = person2.summary("0.1%", "99.9%")
assert(onlyPercentiles.count() === 2)
val fooE = intercept[IllegalArgumentException] {
person2.summary("foo")
}
assert(fooE.getMessage === "foo is not a recognised statistic")
val parseE = intercept[IllegalArgumentException] {
person2.summary("foo%")
}
assert(parseE.getMessage === "Unable to parse foo% as a percentile")
}
test("apply on query results (SPARK-5462)") {
val df = testData.sparkSession.sql("select key from testData")
checkAnswer(df.select(df("key")), testData.select('key).collect().toSeq)
}
test("inputFiles") {
Seq("csv", "").foreach { useV1List =>
withSQLConf(SQLConf.USE_V1_SOURCE_READER_LIST.key -> useV1List) {
withTempDir { dir =>
val df = Seq((1, 22)).toDF("a", "b")
val parquetDir = new File(dir, "parquet").getCanonicalPath
df.write.parquet(parquetDir)
val parquetDF = spark.read.parquet(parquetDir)
assert(parquetDF.inputFiles.nonEmpty)
val csvDir = new File(dir, "csv").getCanonicalPath
df.write.json(csvDir)
val csvDF = spark.read.json(csvDir)
assert(csvDF.inputFiles.nonEmpty)
val unioned = csvDF.union(parquetDF).inputFiles.sorted
val allFiles = (csvDF.inputFiles ++ parquetDF.inputFiles).distinct.sorted
assert(unioned === allFiles)
}
}
}
}
ignore("show") {
// This test case is intended ignored, but to make sure it compiles correctly
testData.select($"*").show()
testData.select($"*").show(1000)
}
test("getRows: truncate = [0, 20]") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = Seq(
Seq("value"),
Seq("1"),
Seq("111111111111111111111"))
assert(df.getRows(10, 0) === expectedAnswerForFalse)
val expectedAnswerForTrue = Seq(
Seq("value"),
Seq("1"),
Seq("11111111111111111..."))
assert(df.getRows(10, 20) === expectedAnswerForTrue)
}
test("getRows: truncate = [3, 17]") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = Seq(
Seq("value"),
Seq("1"),
Seq("111"))
assert(df.getRows(10, 3) === expectedAnswerForFalse)
val expectedAnswerForTrue = Seq(
Seq("value"),
Seq("1"),
Seq("11111111111111..."))
assert(df.getRows(10, 17) === expectedAnswerForTrue)
}
test("getRows: numRows = 0") {
val expectedAnswer = Seq(Seq("key", "value"), Seq("1", "1"))
assert(testData.select($"*").getRows(0, 20) === expectedAnswer)
}
test("getRows: array") {
val df = Seq(
(Array(1, 2, 3), Array(1, 2, 3)),
(Array(2, 3, 4), Array(2, 3, 4))
).toDF()
val expectedAnswer = Seq(
Seq("_1", "_2"),
Seq("[1, 2, 3]", "[1, 2, 3]"),
Seq("[2, 3, 4]", "[2, 3, 4]"))
assert(df.getRows(10, 20) === expectedAnswer)
}
test("getRows: binary") {
val df = Seq(
("12".getBytes(StandardCharsets.UTF_8), "ABC.".getBytes(StandardCharsets.UTF_8)),
("34".getBytes(StandardCharsets.UTF_8), "12346".getBytes(StandardCharsets.UTF_8))
).toDF()
val expectedAnswer = Seq(
Seq("_1", "_2"),
Seq("[31 32]", "[41 42 43 2E]"),
Seq("[33 34]", "[31 32 33 34 36]"))
assert(df.getRows(10, 20) === expectedAnswer)
}
test("showString: truncate = [0, 20]") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = """+---------------------+
||value |
|+---------------------+
||1 |
||111111111111111111111|
|+---------------------+
|""".stripMargin
assert(df.showString(10, truncate = 0) === expectedAnswerForFalse)
val expectedAnswerForTrue = """+--------------------+
|| value|
|+--------------------+
|| 1|
||11111111111111111...|
|+--------------------+
|""".stripMargin
assert(df.showString(10, truncate = 20) === expectedAnswerForTrue)
}
test("showString: truncate = [0, 20], vertical = true") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = "-RECORD 0----------------------\\n" +
" value | 1 \\n" +
"-RECORD 1----------------------\\n" +
" value | 111111111111111111111 \\n"
assert(df.showString(10, truncate = 0, vertical = true) === expectedAnswerForFalse)
val expectedAnswerForTrue = "-RECORD 0---------------------\\n" +
" value | 1 \\n" +
"-RECORD 1---------------------\\n" +
" value | 11111111111111111... \\n"
assert(df.showString(10, truncate = 20, vertical = true) === expectedAnswerForTrue)
}
test("showString: truncate = [3, 17]") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = """+-----+
||value|
|+-----+
|| 1|
|| 111|
|+-----+
|""".stripMargin
assert(df.showString(10, truncate = 3) === expectedAnswerForFalse)
val expectedAnswerForTrue = """+-----------------+
|| value|
|+-----------------+
|| 1|
||11111111111111...|
|+-----------------+
|""".stripMargin
assert(df.showString(10, truncate = 17) === expectedAnswerForTrue)
}
test("showString: truncate = [3, 17], vertical = true") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = "-RECORD 0----\\n" +
" value | 1 \\n" +
"-RECORD 1----\\n" +
" value | 111 \\n"
assert(df.showString(10, truncate = 3, vertical = true) === expectedAnswerForFalse)
val expectedAnswerForTrue = "-RECORD 0------------------\\n" +
" value | 1 \\n" +
"-RECORD 1------------------\\n" +
" value | 11111111111111... \\n"
assert(df.showString(10, truncate = 17, vertical = true) === expectedAnswerForTrue)
}
test("showString(negative)") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|+---+-----+
|only showing top 0 rows
|""".stripMargin
assert(testData.select($"*").showString(-1) === expectedAnswer)
}
test("showString(negative), vertical = true") {
val expectedAnswer = "(0 rows)\\n"
assert(testData.select($"*").showString(-1, vertical = true) === expectedAnswer)
}
test("showString(0)") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|+---+-----+
|only showing top 0 rows
|""".stripMargin
assert(testData.select($"*").showString(0) === expectedAnswer)
}
test("showString(Int.MaxValue)") {
val df = Seq((1, 2), (3, 4)).toDF("a", "b")
val expectedAnswer = """+---+---+
|| a| b|
|+---+---+
|| 1| 2|
|| 3| 4|
|+---+---+
|""".stripMargin
assert(df.showString(Int.MaxValue) === expectedAnswer)
}
test("showString(0), vertical = true") {
val expectedAnswer = "(0 rows)\\n"
assert(testData.select($"*").showString(0, vertical = true) === expectedAnswer)
}
test("showString: array") {
val df = Seq(
(Array(1, 2, 3), Array(1, 2, 3)),
(Array(2, 3, 4), Array(2, 3, 4))
).toDF()
val expectedAnswer = """+---------+---------+
|| _1| _2|
|+---------+---------+
||[1, 2, 3]|[1, 2, 3]|
||[2, 3, 4]|[2, 3, 4]|
|+---------+---------+
|""".stripMargin
assert(df.showString(10) === expectedAnswer)
}
test("showString: array, vertical = true") {
val df = Seq(
(Array(1, 2, 3), Array(1, 2, 3)),
(Array(2, 3, 4), Array(2, 3, 4))
).toDF()
val expectedAnswer = "-RECORD 0--------\\n" +
" _1 | [1, 2, 3] \\n" +
" _2 | [1, 2, 3] \\n" +
"-RECORD 1--------\\n" +
" _1 | [2, 3, 4] \\n" +
" _2 | [2, 3, 4] \\n"
assert(df.showString(10, vertical = true) === expectedAnswer)
}
test("showString: binary") {
val df = Seq(
("12".getBytes(StandardCharsets.UTF_8), "ABC.".getBytes(StandardCharsets.UTF_8)),
("34".getBytes(StandardCharsets.UTF_8), "12346".getBytes(StandardCharsets.UTF_8))
).toDF()
val expectedAnswer = """+-------+----------------+
|| _1| _2|
|+-------+----------------+
||[31 32]| [41 42 43 2E]|
||[33 34]|[31 32 33 34 36]|
|+-------+----------------+
|""".stripMargin
assert(df.showString(10) === expectedAnswer)
}
test("showString: binary, vertical = true") {
val df = Seq(
("12".getBytes(StandardCharsets.UTF_8), "ABC.".getBytes(StandardCharsets.UTF_8)),
("34".getBytes(StandardCharsets.UTF_8), "12346".getBytes(StandardCharsets.UTF_8))
).toDF()
val expectedAnswer = "-RECORD 0---------------\\n" +
" _1 | [31 32] \\n" +
" _2 | [41 42 43 2E] \\n" +
"-RECORD 1---------------\\n" +
" _1 | [33 34] \\n" +
" _2 | [31 32 33 34 36] \\n"
assert(df.showString(10, vertical = true) === expectedAnswer)
}
test("showString: minimum column width") {
val df = Seq(
(1, 1),
(2, 2)
).toDF()
val expectedAnswer = """+---+---+
|| _1| _2|
|+---+---+
|| 1| 1|
|| 2| 2|
|+---+---+
|""".stripMargin
assert(df.showString(10) === expectedAnswer)
}
test("showString: minimum column width, vertical = true") {
val df = Seq(
(1, 1),
(2, 2)
).toDF()
val expectedAnswer = "-RECORD 0--\\n" +
" _1 | 1 \\n" +
" _2 | 1 \\n" +
"-RECORD 1--\\n" +
" _1 | 2 \\n" +
" _2 | 2 \\n"
assert(df.showString(10, vertical = true) === expectedAnswer)
}
test("SPARK-7319 showString") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|""".stripMargin
assert(testData.select($"*").showString(1) === expectedAnswer)
}
test("SPARK-7319 showString, vertical = true") {
val expectedAnswer = "-RECORD 0----\\n" +
" key | 1 \\n" +
" value | 1 \\n" +
"only showing top 1 row\\n"
assert(testData.select($"*").showString(1, vertical = true) === expectedAnswer)
}
test("SPARK-23023 Cast rows to strings in showString") {
val df1 = Seq(Seq(1, 2, 3, 4)).toDF("a")
assert(df1.showString(10) ===
s"""+------------+
|| a|
|+------------+
||[1, 2, 3, 4]|
|+------------+
|""".stripMargin)
val df2 = Seq(Map(1 -> "a", 2 -> "b")).toDF("a")
assert(df2.showString(10) ===
s"""+----------------+
|| a|
|+----------------+
||[1 -> a, 2 -> b]|
|+----------------+
|""".stripMargin)
val df3 = Seq(((1, "a"), 0), ((2, "b"), 0)).toDF("a", "b")
assert(df3.showString(10) ===
s"""+------+---+
|| a| b|
|+------+---+
||[1, a]| 0|
||[2, b]| 0|
|+------+---+
|""".stripMargin)
}
test("SPARK-7327 show with empty dataFrame") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|+---+-----+
|""".stripMargin
assert(testData.select($"*").filter($"key" < 0).showString(1) === expectedAnswer)
}
test("SPARK-7327 show with empty dataFrame, vertical = true") {
assert(testData.select($"*").filter($"key" < 0).showString(1, vertical = true) === "(0 rows)\\n")
}
test("SPARK-18350 show with session local timezone") {
val d = Date.valueOf("2016-12-01")
val ts = Timestamp.valueOf("2016-12-01 00:00:00")
val df = Seq((d, ts)).toDF("d", "ts")
val expectedAnswer = """+----------+-------------------+
||d |ts |
|+----------+-------------------+
||2016-12-01|2016-12-01 00:00:00|
|+----------+-------------------+
|""".stripMargin
assert(df.showString(1, truncate = 0) === expectedAnswer)
withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> "GMT") {
val expectedAnswer = """+----------+-------------------+
||d |ts |
|+----------+-------------------+
||2016-12-01|2016-12-01 08:00:00|
|+----------+-------------------+
|""".stripMargin
assert(df.showString(1, truncate = 0) === expectedAnswer)
}
}
test("SPARK-18350 show with session local timezone, vertical = true") {
val d = Date.valueOf("2016-12-01")
val ts = Timestamp.valueOf("2016-12-01 00:00:00")
val df = Seq((d, ts)).toDF("d", "ts")
val expectedAnswer = "-RECORD 0------------------\\n" +
" d | 2016-12-01 \\n" +
" ts | 2016-12-01 00:00:00 \\n"
assert(df.showString(1, truncate = 0, vertical = true) === expectedAnswer)
withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> "GMT") {
val expectedAnswer = "-RECORD 0------------------\\n" +
" d | 2016-12-01 \\n" +
" ts | 2016-12-01 08:00:00 \\n"
assert(df.showString(1, truncate = 0, vertical = true) === expectedAnswer)
}
}
test("createDataFrame(RDD[Row], StructType) should convert UDTs (SPARK-6672)") {
val rowRDD = sparkContext.parallelize(Seq(Row(new ExamplePoint(1.0, 2.0))))
val schema = StructType(Array(StructField("point", new ExamplePointUDT(), false)))
val df = spark.createDataFrame(rowRDD, schema)
df.rdd.collect()
}
test("SPARK-6899: type should match when using codegen") {
checkAnswer(decimalData.agg(avg('a)), Row(new java.math.BigDecimal(2)))
}
test("SPARK-7133: Implement struct, array, and map field accessor") {
assert(complexData.filter(complexData("a")(0) === 2).count() == 1)
assert(complexData.filter(complexData("m")("1") === 1).count() == 1)
assert(complexData.filter(complexData("s")("key") === 1).count() == 1)
assert(complexData.filter(complexData("m")(complexData("s")("value")) === 1).count() == 1)
assert(complexData.filter(complexData("a")(complexData("s")("key")) === 1).count() == 1)
}
test("SPARK-7551: support backticks for DataFrame attribute resolution") {
withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
val df = spark.read.json(Seq("""{"a.b": {"c": {"d..e": {"f": 1}}}}""").toDS())
checkAnswer(
df.select(df("`a.b`.c.`d..e`.`f`")),
Row(1)
)
val df2 = spark.read.json(Seq("""{"a b": {"c": {"d e": {"f": 1}}}}""").toDS())
checkAnswer(
df2.select(df2("`a b`.c.d e.f")),
Row(1)
)
def checkError(testFun: => Unit): Unit = {
val e = intercept[org.apache.spark.sql.AnalysisException] {
testFun
}
assert(e.getMessage.contains("syntax error in attribute name:"))
}
checkError(df("`abc.`c`"))
checkError(df("`abc`..d"))
checkError(df("`a`.b."))
checkError(df("`a.b`.c.`d"))
}
}
test("SPARK-7324 dropDuplicates") {
val testData = sparkContext.parallelize(
(2, 1, 2) :: (1, 1, 1) ::
(1, 2, 1) :: (2, 1, 2) ::
(2, 2, 2) :: (2, 2, 1) ::
(2, 1, 1) :: (1, 1, 2) ::
(1, 2, 2) :: (1, 2, 1) :: Nil).toDF("key", "value1", "value2")
checkAnswer(
testData.dropDuplicates(),
Seq(Row(2, 1, 2), Row(1, 1, 1), Row(1, 2, 1),
Row(2, 2, 2), Row(2, 1, 1), Row(2, 2, 1),
Row(1, 1, 2), Row(1, 2, 2)))
checkAnswer(
testData.dropDuplicates(Seq("key", "value1")),
Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))
checkAnswer(
testData.dropDuplicates(Seq("value1", "value2")),
Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))
checkAnswer(
testData.dropDuplicates(Seq("key")),
Seq(Row(2, 1, 2), Row(1, 1, 1)))
checkAnswer(
testData.dropDuplicates(Seq("value1")),
Seq(Row(2, 1, 2), Row(1, 2, 1)))
checkAnswer(
testData.dropDuplicates(Seq("value2")),
Seq(Row(2, 1, 2), Row(1, 1, 1)))
checkAnswer(
testData.dropDuplicates("key", "value1"),
Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))
}
test("SPARK-8621: support empty string column name") {
val df = Seq(Tuple1(1)).toDF("").as("t")
// We should allow empty string as column name
df.col("")
df.col("t.``")
}
test("SPARK-8797: sort by float column containing NaN should not crash") {
val inputData = Seq.fill(10)(Tuple1(Float.NaN)) ++ (1 to 1000).map(x => Tuple1(x.toFloat))
val df = Random.shuffle(inputData).toDF("a")
df.orderBy("a").collect()
}
test("SPARK-8797: sort by double column containing NaN should not crash") {
val inputData = Seq.fill(10)(Tuple1(Double.NaN)) ++ (1 to 1000).map(x => Tuple1(x.toDouble))
val df = Random.shuffle(inputData).toDF("a")
df.orderBy("a").collect()
}
test("NaN is greater than all other non-NaN numeric values") {
val maxDouble = Seq(Double.NaN, Double.PositiveInfinity, Double.MaxValue)
.map(Tuple1.apply).toDF("a").selectExpr("max(a)").first()
assert(java.lang.Double.isNaN(maxDouble.getDouble(0)))
val maxFloat = Seq(Float.NaN, Float.PositiveInfinity, Float.MaxValue)
.map(Tuple1.apply).toDF("a").selectExpr("max(a)").first()
assert(java.lang.Float.isNaN(maxFloat.getFloat(0)))
}
test("SPARK-8072: Better Exception for Duplicate Columns") {
// only one duplicate column present
val e = intercept[org.apache.spark.sql.AnalysisException] {
Seq((1, 2, 3), (2, 3, 4), (3, 4, 5)).toDF("column1", "column2", "column1")
.write.format("parquet").save("temp")
}
assert(e.getMessage.contains("Found duplicate column(s) when inserting into"))
assert(e.getMessage.contains("column1"))
assert(!e.getMessage.contains("column2"))
// multiple duplicate columns present
val f = intercept[org.apache.spark.sql.AnalysisException] {
Seq((1, 2, 3, 4, 5), (2, 3, 4, 5, 6), (3, 4, 5, 6, 7))
.toDF("column1", "column2", "column3", "column1", "column3")
.write.format("json").save("temp")
}
assert(f.getMessage.contains("Found duplicate column(s) when inserting into"))
assert(f.getMessage.contains("column1"))
assert(f.getMessage.contains("column3"))
assert(!f.getMessage.contains("column2"))
}
test("SPARK-6941: Better error message for inserting into RDD-based Table") {
withTempDir { dir =>
val tempParquetFile = new File(dir, "tmp_parquet")
val tempJsonFile = new File(dir, "tmp_json")
val df = Seq(Tuple1(1)).toDF()
val insertion = Seq(Tuple1(2)).toDF("col")
// pass case: parquet table (HadoopFsRelation)
df.write.mode(SaveMode.Overwrite).parquet(tempParquetFile.getCanonicalPath)
val pdf = spark.read.parquet(tempParquetFile.getCanonicalPath)
pdf.createOrReplaceTempView("parquet_base")
insertion.write.insertInto("parquet_base")
// pass case: json table (InsertableRelation)
df.write.mode(SaveMode.Overwrite).json(tempJsonFile.getCanonicalPath)
val jdf = spark.read.json(tempJsonFile.getCanonicalPath)
jdf.createOrReplaceTempView("json_base")
insertion.write.mode(SaveMode.Overwrite).insertInto("json_base")
// error cases: insert into an RDD
df.createOrReplaceTempView("rdd_base")
val e1 = intercept[AnalysisException] {
insertion.write.insertInto("rdd_base")
}
assert(e1.getMessage.contains("Inserting into an RDD-based table is not allowed."))
// error case: insert into a logical plan that is not a LeafNode
val indirectDS = pdf.select("_1").filter($"_1" > 5)
indirectDS.createOrReplaceTempView("indirect_ds")
val e2 = intercept[AnalysisException] {
insertion.write.insertInto("indirect_ds")
}
assert(e2.getMessage.contains("Inserting into an RDD-based table is not allowed."))
// error case: insert into an OneRowRelation
Dataset.ofRows(spark, OneRowRelation()).createOrReplaceTempView("one_row")
val e3 = intercept[AnalysisException] {
insertion.write.insertInto("one_row")
}
assert(e3.getMessage.contains("Inserting into an RDD-based table is not allowed."))
}
}
test("SPARK-8608: call `show` on local DataFrame with random columns should return same value") {
val df = testData.select(rand(33))
assert(df.showString(5) == df.showString(5))
// We will reuse the same Expression object for LocalRelation.
val df1 = (1 to 10).map(Tuple1.apply).toDF().select(rand(33))
assert(df1.showString(5) == df1.showString(5))
}
test("SPARK-8609: local DataFrame with random columns should return same value after sort") {
checkAnswer(testData.sort(rand(33)), testData.sort(rand(33)))
// We will reuse the same Expression object for LocalRelation.
val df = (1 to 10).map(Tuple1.apply).toDF()
checkAnswer(df.sort(rand(33)), df.sort(rand(33)))
}
test("SPARK-9083: sort with non-deterministic expressions") {
val seed = 33
val df = (1 to 100).map(Tuple1.apply).toDF("i").repartition(1)
val random = new XORShiftRandom(seed)
val expected = (1 to 100).map(_ -> random.nextDouble()).sortBy(_._2).map(_._1)
val actual = df.sort(rand(seed)).collect().map(_.getInt(0))
assert(expected === actual)
}
test("Sorting columns are not in Filter and Project") {
checkAnswer(
upperCaseData.filter('N > 1).select('N).filter('N < 6).orderBy('L.asc),
Row(2) :: Row(3) :: Row(4) :: Row(5) :: Nil)
}
test("SPARK-9323: DataFrame.orderBy should support nested column name") {
val df = spark.read.json(Seq("""{"a": {"b": 1}}""").toDS())
checkAnswer(df.orderBy("a.b"), Row(Row(1)))
}
test("SPARK-9950: correctly analyze grouping/aggregating on struct fields") {
val df = Seq(("x", (1, 1)), ("y", (2, 2))).toDF("a", "b")
checkAnswer(df.groupBy("b._1").agg(sum("b._2")), Row(1, 1) :: Row(2, 2) :: Nil)
}
test("SPARK-10093: Avoid transformations on executors") {
val df = Seq((1, 1)).toDF("a", "b")
df.where($"a" === 1)
.select($"a", $"b", struct($"b"))
.orderBy("a")
.select(struct($"b"))
.collect()
}
test("SPARK-10185: Read multiple Hadoop Filesystem paths and paths with a comma in it") {
withTempDir { dir =>
val df1 = Seq((1, 22)).toDF("a", "b")
val dir1 = new File(dir, "dir,1").getCanonicalPath
df1.write.format("json").save(dir1)
val df2 = Seq((2, 23)).toDF("a", "b")
val dir2 = new File(dir, "dir2").getCanonicalPath
df2.write.format("json").save(dir2)
checkAnswer(spark.read.format("json").load(dir1, dir2),
Row(1, 22) :: Row(2, 23) :: Nil)
checkAnswer(spark.read.format("json").load(dir1),
Row(1, 22) :: Nil)
}
}
test("Alias uses internally generated names 'aggOrder' and 'havingCondition'") {
val df = Seq(1 -> 2).toDF("i", "j")
val query1 = df.groupBy('i)
.agg(max('j).as("aggOrder"))
.orderBy(sum('j))
checkAnswer(query1, Row(1, 2))
// In the plan, there are two attributes having the same name 'havingCondition'
// One is a user-provided alias name; another is an internally generated one.
val query2 = df.groupBy('i)
.agg(max('j).as("havingCondition"))
.where(sum('j) > 0)
.orderBy('havingCondition.asc)
checkAnswer(query2, Row(1, 2))
}
test("SPARK-10316: respect non-deterministic expressions in PhysicalOperation") {
withTempDir { dir =>
(1 to 10).toDF("id").write.mode(SaveMode.Overwrite).json(dir.getCanonicalPath)
val input = spark.read.json(dir.getCanonicalPath)
val df = input.select($"id", rand(0).as('r))
df.as("a").join(df.filter($"r" < 0.5).as("b"), $"a.id" === $"b.id").collect().foreach { row =>
assert(row.getDouble(1) - row.getDouble(3) === 0.0 +- 0.001)
}
}
}
test("SPARK-10743: keep the name of expression if possible when do cast") {
val df = (1 to 10).map(Tuple1.apply).toDF("i").as("src")
assert(df.select($"src.i".cast(StringType)).columns.head === "i")
assert(df.select($"src.i".cast(StringType).cast(IntegerType)).columns.head === "i")
}
test("SPARK-11301: fix case sensitivity for filter on partitioned columns") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
withTempPath { path =>
Seq(2012 -> "a").toDF("year", "val").write.partitionBy("year").parquet(path.getAbsolutePath)
val df = spark.read.parquet(path.getAbsolutePath)
checkAnswer(df.filter($"yEAr" > 2000).select($"val"), Row("a"))
}
}
}
/**
* Verifies that there is no Exchange between the Aggregations for `df`
*/
private def verifyNonExchangingAgg(df: DataFrame) = {
var atFirstAgg: Boolean = false
df.queryExecution.executedPlan.foreach {
case agg: HashAggregateExec =>
atFirstAgg = !atFirstAgg
case _ =>
if (atFirstAgg) {
fail("Should not have operators between the two aggregations")
}
}
}
/**
* Verifies that there is an Exchange between the Aggregations for `df`
*/
private def verifyExchangingAgg(df: DataFrame) = {
var atFirstAgg: Boolean = false
df.queryExecution.executedPlan.foreach {
case agg: HashAggregateExec =>
if (atFirstAgg) {
fail("Should not have back to back Aggregates")
}
atFirstAgg = true
case e: ShuffleExchangeExec => atFirstAgg = false
case _ =>
}
}
test("distributeBy and localSort") {
val original = testData.repartition(1)
assert(original.rdd.partitions.length == 1)
val df = original.repartition(5, $"key")
assert(df.rdd.partitions.length == 5)
checkAnswer(original.select(), df.select())
val df2 = original.repartition(10, $"key")
assert(df2.rdd.partitions.length == 10)
checkAnswer(original.select(), df2.select())
// Group by the column we are distributed by. This should generate a plan with no exchange
// between the aggregates
val df3 = testData.repartition($"key").groupBy("key").count()
verifyNonExchangingAgg(df3)
verifyNonExchangingAgg(testData.repartition($"key", $"value")
.groupBy("key", "value").count())
// Grouping by just the first distributeBy expr, need to exchange.
verifyExchangingAgg(testData.repartition($"key", $"value")
.groupBy("key").count())
val data = spark.sparkContext.parallelize(
(1 to 100).map(i => TestData2(i % 10, i))).toDF()
// Distribute and order by.
val df4 = data.repartition($"a").sortWithinPartitions($"b".desc)
// Walk each partition and verify that it is sorted descending and does not contain all
// the values.
df4.rdd.foreachPartition { p =>
// Skip empty partition
if (p.hasNext) {
var previousValue: Int = -1
var allSequential: Boolean = true
p.foreach { r =>
val v: Int = r.getInt(1)
if (previousValue != -1) {
if (previousValue < v) throw new SparkException("Partition is not ordered.")
if (v + 1 != previousValue) allSequential = false
}
previousValue = v
}
if (allSequential) throw new SparkException("Partition should not be globally ordered")
}
}
// Distribute and order by with multiple order bys
val df5 = data.repartition(2, $"a").sortWithinPartitions($"b".asc, $"a".asc)
// Walk each partition and verify that it is sorted ascending
df5.rdd.foreachPartition { p =>
var previousValue: Int = -1
var allSequential: Boolean = true
p.foreach { r =>
val v: Int = r.getInt(1)
if (previousValue != -1) {
if (previousValue > v) throw new SparkException("Partition is not ordered.")
if (v - 1 != previousValue) allSequential = false
}
previousValue = v
}
if (allSequential) throw new SparkException("Partition should not be all sequential")
}
// Distribute into one partition and order by. This partition should contain all the values.
val df6 = data.repartition(1, $"a").sortWithinPartitions("b")
// Walk each partition and verify that it is sorted ascending and not globally sorted.
df6.rdd.foreachPartition { p =>
var previousValue: Int = -1
var allSequential: Boolean = true
p.foreach { r =>
val v: Int = r.getInt(1)
if (previousValue != -1) {
if (previousValue > v) throw new SparkException("Partition is not ordered.")
if (v - 1 != previousValue) allSequential = false
}
previousValue = v
}
if (!allSequential) throw new SparkException("Partition should contain all sequential values")
}
}
test("fix case sensitivity of partition by") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
withTempPath { path =>
val p = path.getAbsolutePath
Seq(2012 -> "a").toDF("year", "val").write.partitionBy("yEAr").parquet(p)
checkAnswer(spark.read.parquet(p).select("YeaR"), Row(2012))
}
}
}
// This test case is to verify a bug when making a new instance of LogicalRDD.
test("SPARK-11633: LogicalRDD throws TreeNode Exception: Failed to Copy Node") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val rdd = sparkContext.makeRDD(Seq(Row(1, 3), Row(2, 1)))
val df = spark.createDataFrame(
rdd,
new StructType().add("f1", IntegerType).add("f2", IntegerType))
.select($"F1", $"f2".as("f2"))
val df1 = df.as("a")
val df2 = df.as("b")
checkAnswer(df1.join(df2, $"a.f2" === $"b.f2"), Row(1, 3, 1, 3) :: Row(2, 1, 2, 1) :: Nil)
}
}
test("SPARK-10656: completely support special chars") {
val df = Seq(1 -> "a").toDF("i_$.a", "d^'a.")
checkAnswer(df.select(df("*")), Row(1, "a"))
checkAnswer(df.withColumnRenamed("d^'a.", "a"), Row(1, "a"))
}
test("SPARK-11725: correctly handle null inputs for ScalaUDF") {
val df = sparkContext.parallelize(Seq(
java.lang.Integer.valueOf(22) -> "John",
null.asInstanceOf[java.lang.Integer] -> "Lucy")).toDF("age", "name")
// passing null into the UDF that could handle it
val boxedUDF = udf[java.lang.Integer, java.lang.Integer] {
(i: java.lang.Integer) => if (i == null) -10 else null
}
checkAnswer(df.select(boxedUDF($"age")), Row(null) :: Row(-10) :: Nil)
spark.udf.register("boxedUDF",
(i: java.lang.Integer) => (if (i == null) -10 else null): java.lang.Integer)
checkAnswer(sql("select boxedUDF(null), boxedUDF(-1)"), Row(-10, null) :: Nil)
val primitiveUDF = udf((i: Int) => i * 2)
checkAnswer(df.select(primitiveUDF($"age")), Row(44) :: Row(null) :: Nil)
}
test("SPARK-12398 truncated toString") {
val df1 = Seq((1L, "row1")).toDF("id", "name")
assert(df1.toString() === "[id: bigint, name: string]")
val df2 = Seq((1L, "c2", false)).toDF("c1", "c2", "c3")
assert(df2.toString === "[c1: bigint, c2: string ... 1 more field]")
val df3 = Seq((1L, "c2", false, 10)).toDF("c1", "c2", "c3", "c4")
assert(df3.toString === "[c1: bigint, c2: string ... 2 more fields]")
val df4 = Seq((1L, Tuple2(1L, "val"))).toDF("c1", "c2")
assert(df4.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string>]")
val df5 = Seq((1L, Tuple2(1L, "val"), 20.0)).toDF("c1", "c2", "c3")
assert(df5.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string> ... 1 more field]")
val df6 = Seq((1L, Tuple2(1L, "val"), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(df6.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string> ... 2 more fields]")
val df7 = Seq((1L, Tuple3(1L, "val", 2), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(
df7.toString ===
"[c1: bigint, c2: struct<_1: bigint, _2: string ... 1 more field> ... 2 more fields]")
val df8 = Seq((1L, Tuple7(1L, "val", 2, 3, 4, 5, 6), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(
df8.toString ===
"[c1: bigint, c2: struct<_1: bigint, _2: string ... 5 more fields> ... 2 more fields]")
val df9 =
Seq((1L, Tuple4(1L, Tuple4(1L, 2L, 3L, 4L), 2L, 3L), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(
df9.toString ===
"[c1: bigint, c2: struct<_1: bigint," +
" _2: struct<_1: bigint," +
" _2: bigint ... 2 more fields> ... 2 more fields> ... 2 more fields]")
}
test("reuse exchange") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "2") {
val df = spark.range(100).toDF()
val join = df.join(df, "id")
val plan = join.queryExecution.executedPlan
checkAnswer(join, df)
assert(
join.queryExecution.executedPlan.collect { case e: ShuffleExchangeExec => true }.size === 1)
assert(
join.queryExecution.executedPlan.collect { case e: ReusedExchangeExec => true }.size === 1)
val broadcasted = broadcast(join)
val join2 = join.join(broadcasted, "id").join(broadcasted, "id")
checkAnswer(join2, df)
assert(
join2.queryExecution.executedPlan.collect { case e: ShuffleExchangeExec => true }.size == 1)
assert(
join2.queryExecution.executedPlan
.collect { case e: BroadcastExchangeExec => true }.size === 1)
assert(
join2.queryExecution.executedPlan.collect { case e: ReusedExchangeExec => true }.size == 4)
}
}
test("sameResult() on aggregate") {
val df = spark.range(100)
val agg1 = df.groupBy().count()
val agg2 = df.groupBy().count()
// two aggregates with different ExprId within them should have same result
assert(agg1.queryExecution.executedPlan.sameResult(agg2.queryExecution.executedPlan))
val agg3 = df.groupBy().sum()
assert(!agg1.queryExecution.executedPlan.sameResult(agg3.queryExecution.executedPlan))
val df2 = spark.range(101)
val agg4 = df2.groupBy().count()
assert(!agg1.queryExecution.executedPlan.sameResult(agg4.queryExecution.executedPlan))
}
test("SPARK-12512: support `.` in column name for withColumn()") {
val df = Seq("a" -> "b").toDF("col.a", "col.b")
checkAnswer(df.select(df("*")), Row("a", "b"))
checkAnswer(df.withColumn("col.a", lit("c")), Row("c", "b"))
checkAnswer(df.withColumn("col.c", lit("c")), Row("a", "b", "c"))
}
test("SPARK-12841: cast in filter") {
checkAnswer(
Seq(1 -> "a").toDF("i", "j").filter($"i".cast(StringType) === "1"),
Row(1, "a"))
}
test("SPARK-12982: Add table name validation in temp table registration") {
val df = Seq("foo", "bar").map(Tuple1.apply).toDF("col")
// invalid table names
Seq("11111", "t~", "#$@sum", "table!#").foreach { name =>
val m = intercept[AnalysisException](df.createOrReplaceTempView(name)).getMessage
assert(m.contains(s"Invalid view name: $name"))
}
// valid table names
Seq("table1", "`11111`", "`t~`", "`#$@sum`", "`table!#`").foreach { name =>
df.createOrReplaceTempView(name)
}
}
  test("assertAnalyzed shouldn't replace original stack trace") {
    val e = intercept[AnalysisException] {
      // 'b is neither grouped nor aggregated, so analysis must fail here.
      spark.range(1).select('id as 'a, 'id as 'b).groupBy('a).agg('b)
    }
    // The exception's stack trace must point at user code, not be rewritten to
    // start inside QueryExecution internals.
    assert(e.getStackTrace.head.getClassName != classOf[QueryExecution].getName)
  }
test("SPARK-13774: Check error message for non existent path without globbed paths") {
val uuid = UUID.randomUUID().toString
val baseDir = Utils.createTempDir()
try {
val e = intercept[AnalysisException] {
spark.read.format("csv").load(
new File(baseDir, "file").getAbsolutePath,
new File(baseDir, "file2").getAbsolutePath,
new File(uuid, "file3").getAbsolutePath,
uuid).rdd
}
assert(e.getMessage.startsWith("Path does not exist"))
} finally {
}
}
test("SPARK-13774: Check error message for not existent globbed paths") {
// Non-existent initial path component:
val nonExistentBasePath = "/" + UUID.randomUUID().toString
assert(!new File(nonExistentBasePath).exists())
val e = intercept[AnalysisException] {
spark.read.format("text").load(s"$nonExistentBasePath/*")
}
assert(e.getMessage.startsWith("Path does not exist"))
// Existent initial path component, but no matching files:
val baseDir = Utils.createTempDir()
val childDir = Utils.createTempDir(baseDir.getAbsolutePath)
assert(childDir.exists())
try {
val e1 = intercept[AnalysisException] {
spark.read.json(s"${baseDir.getAbsolutePath}/*/*-xyz.json").rdd
}
assert(e1.getMessage.startsWith("Path does not exist"))
} finally {
Utils.deleteRecursively(baseDir)
}
}
test("SPARK-15230: distinct() does not handle column name with dot properly") {
val df = Seq(1, 1, 2).toDF("column.with.dot")
checkAnswer(df.distinct(), Row(1) :: Row(2) :: Nil)
}
test("SPARK-16181: outer join with isNull filter") {
val left = Seq("x").toDF("col")
val right = Seq("y").toDF("col").withColumn("new", lit(true))
val joined = left.join(right, left("col") === right("col"), "left_outer")
checkAnswer(joined, Row("x", null, null))
checkAnswer(joined.filter($"new".isNull), Row("x", null, null))
}
test("SPARK-16664: persist with more than 200 columns") {
val size = 201L
val rdd = sparkContext.makeRDD(Seq(Row.fromSeq(Seq.range(0, size))))
val schemas = List.range(0, size).map(a => StructField("name" + a, LongType, true))
val df = spark.createDataFrame(rdd, StructType(schemas))
assert(df.persist.take(1).apply(0).toSeq(100).asInstanceOf[Long] == 100)
}
test("SPARK-17409: Do Not Optimize Query in CTAS (Data source tables) More Than Once") {
withTable("bar") {
withTempView("foo") {
withSQLConf(SQLConf.DEFAULT_DATA_SOURCE_NAME.key -> "json") {
sql("select 0 as id").createOrReplaceTempView("foo")
val df = sql("select * from foo group by id")
// If we optimize the query in CTAS more than once, the following saveAsTable will fail
// with the error: `GROUP BY position 0 is not in select list (valid range is [1, 1])`
df.write.mode("overwrite").saveAsTable("bar")
checkAnswer(spark.table("bar"), Row(0) :: Nil)
val tableMetadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier("bar"))
assert(tableMetadata.provider == Some("json"),
"the expected table is a data source table using json")
}
}
}
}
test("copy results for sampling with replacement") {
val df = Seq((1, 0), (2, 0), (3, 0)).toDF("a", "b")
val sampleDf = df.sample(true, 2.00)
val d = sampleDf.withColumn("c", monotonically_increasing_id).select($"c").collect
assert(d.size == d.distinct.size)
}
private def verifyNullabilityInFilterExec(
df: DataFrame,
expr: String,
expectedNonNullableColumns: Seq[String]): Unit = {
val dfWithFilter = df.where(s"isnotnull($expr)").selectExpr(expr)
dfWithFilter.queryExecution.executedPlan.collect {
// When the child expression in isnotnull is null-intolerant (i.e. any null input will
// result in null output), the involved columns are converted to not nullable;
// otherwise, no change should be made.
case e: FilterExec =>
assert(e.output.forall { o =>
if (expectedNonNullableColumns.contains(o.name)) !o.nullable else o.nullable
})
}
}
test("SPARK-17957: no change on nullability in FilterExec output") {
val df = sparkContext.parallelize(Seq(
null.asInstanceOf[java.lang.Integer] -> java.lang.Integer.valueOf(3),
java.lang.Integer.valueOf(1) -> null.asInstanceOf[java.lang.Integer],
java.lang.Integer.valueOf(2) -> java.lang.Integer.valueOf(4))).toDF()
verifyNullabilityInFilterExec(df,
expr = "Rand()", expectedNonNullableColumns = Seq.empty[String])
verifyNullabilityInFilterExec(df,
expr = "coalesce(_1, _2)", expectedNonNullableColumns = Seq.empty[String])
verifyNullabilityInFilterExec(df,
expr = "coalesce(_1, 0) + Rand()", expectedNonNullableColumns = Seq.empty[String])
verifyNullabilityInFilterExec(df,
expr = "cast(coalesce(cast(coalesce(_1, _2) as double), 0.0) as int)",
expectedNonNullableColumns = Seq.empty[String])
}
test("SPARK-17957: set nullability to false in FilterExec output") {
val df = sparkContext.parallelize(Seq(
null.asInstanceOf[java.lang.Integer] -> java.lang.Integer.valueOf(3),
java.lang.Integer.valueOf(1) -> null.asInstanceOf[java.lang.Integer],
java.lang.Integer.valueOf(2) -> java.lang.Integer.valueOf(4))).toDF()
verifyNullabilityInFilterExec(df,
expr = "_1 + _2 * 3", expectedNonNullableColumns = Seq("_1", "_2"))
verifyNullabilityInFilterExec(df,
expr = "_1 + _2", expectedNonNullableColumns = Seq("_1", "_2"))
verifyNullabilityInFilterExec(df,
expr = "_1", expectedNonNullableColumns = Seq("_1"))
// `constructIsNotNullConstraints` infers the IsNotNull(_2) from IsNotNull(_2 + Rand())
// Thus, we are able to set nullability of _2 to false.
// If IsNotNull(_2) is not given from `constructIsNotNullConstraints`, the impl of
// isNullIntolerant in `FilterExec` needs an update for more advanced inference.
verifyNullabilityInFilterExec(df,
expr = "_2 + Rand()", expectedNonNullableColumns = Seq("_2"))
verifyNullabilityInFilterExec(df,
expr = "_2 * 3 + coalesce(_1, 0)", expectedNonNullableColumns = Seq("_2"))
verifyNullabilityInFilterExec(df,
expr = "cast((_1 + _2) as boolean)", expectedNonNullableColumns = Seq("_1", "_2"))
}
test("SPARK-17897: Fixed IsNotNull Constraint Inference Rule") {
val data = Seq[java.lang.Integer](1, null).toDF("key")
checkAnswer(data.filter(!$"key".isNotNull), Row(null))
checkAnswer(data.filter(!(- $"key").isNotNull), Row(null))
}
test("SPARK-17957: outer join + na.fill") {
withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
val df1 = Seq((1, 2), (2, 3)).toDF("a", "b")
val df2 = Seq((2, 5), (3, 4)).toDF("a", "c")
val joinedDf = df1.join(df2, Seq("a"), "outer").na.fill(0)
val df3 = Seq((3, 1)).toDF("a", "d")
checkAnswer(joinedDf.join(df3, "a"), Row(3, 0, 4, 1))
}
}
test("SPARK-18070 binary operator should not consider nullability when comparing input types") {
val rows = Seq(Row(Seq(1), Seq(1)))
val schema = new StructType()
.add("array1", ArrayType(IntegerType))
.add("array2", ArrayType(IntegerType, containsNull = false))
val df = spark.createDataFrame(spark.sparkContext.makeRDD(rows), schema)
assert(df.filter($"array1" === $"array2").count() == 1)
}
test("SPARK-17913: compare long and string type column may return confusing result") {
val df = Seq(123L -> "123", 19157170390056973L -> "19157170390056971").toDF("i", "j")
checkAnswer(df.select($"i" === $"j"), Row(true) :: Row(false) :: Nil)
}
test("SPARK-19691 Calculating percentile of decimal column fails with ClassCastException") {
val df = spark.range(1).selectExpr("CAST(id as DECIMAL) as x").selectExpr("percentile(x, 0.5)")
checkAnswer(df, Row(BigDecimal(0)) :: Nil)
}
test("SPARK-20359: catalyst outer join optimization should not throw npe") {
val df1 = Seq("a", "b", "c").toDF("x")
.withColumn("y", udf{ (x: String) => x.substring(0, 1) + "!" }.apply($"x"))
val df2 = Seq("a", "b").toDF("x1")
df1
.join(df2, df1("x") === df2("x1"), "left_outer")
.filter($"x1".isNotNull || !$"y".isin("a!"))
.count
}
// The fix of SPARK-21720 avoid an exception regarding JVM code size limit
// TODO: When we make a threshold of splitting statements (1024) configurable,
// we will re-enable this with max threshold to cause an exception
// See https://github.com/apache/spark/pull/18972/files#r150223463
ignore("SPARK-19372: Filter can be executed w/o generated code due to JVM code size limit") {
val N = 400
val rows = Seq(Row.fromSeq(Seq.fill(N)("string")))
val schema = StructType(Seq.tabulate(N)(i => StructField(s"_c$i", StringType)))
val df = spark.createDataFrame(spark.sparkContext.makeRDD(rows), schema)
val filter = (0 until N)
.foldLeft(lit(false))((e, index) => e.or(df.col(df.columns(index)) =!= "string"))
withSQLConf(SQLConf.CODEGEN_FALLBACK.key -> "true") {
df.filter(filter).count()
}
withSQLConf(SQLConf.CODEGEN_FALLBACK.key -> "false") {
val e = intercept[SparkException] {
df.filter(filter).count()
}.getMessage
assert(e.contains("grows beyond 64 KiB"))
}
}
test("SPARK-20897: cached self-join should not fail") {
// force to plan sort merge join
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "0") {
val df = Seq(1 -> "a").toDF("i", "j")
val df1 = df.as("t1")
val df2 = df.as("t2")
assert(df1.join(df2, $"t1.i" === $"t2.i").cache().count() == 1)
}
}
test("order-by ordinal.") {
checkAnswer(
testData2.select(lit(7), 'a, 'b).orderBy(lit(1), lit(2), lit(3)),
Seq(Row(7, 1, 1), Row(7, 1, 2), Row(7, 2, 1), Row(7, 2, 2), Row(7, 3, 1), Row(7, 3, 2)))
}
test("SPARK-22271: mean overflows and returns null for some decimal variables") {
val d = 0.034567890
val df = Seq(d, d, d, d, d, d, d, d, d, d).toDF("DecimalCol")
val result = df.select('DecimalCol cast DecimalType(38, 33))
.select(col("DecimalCol")).describe()
val mean = result.select("DecimalCol").where($"summary" === "mean")
assert(mean.collect().toSet === Set(Row("0.0345678900000000000000000000000000000")))
}
test("SPARK-22520: support code generation for large CaseWhen") {
val N = 30
var expr1 = when($"id" === lit(0), 0)
var expr2 = when($"id" === lit(0), 10)
(1 to N).foreach { i =>
expr1 = expr1.when($"id" === lit(i), -i)
expr2 = expr2.when($"id" === lit(i + 10), i)
}
val df = spark.range(1).select(expr1, expr2.otherwise(0))
checkAnswer(df, Row(0, 10) :: Nil)
assert(df.queryExecution.executedPlan.isInstanceOf[WholeStageCodegenExec])
}
test("SPARK-24165: CaseWhen/If - nullability of nested types") {
val rows = new java.util.ArrayList[Row]()
rows.add(Row(true, ("x", 1), Seq("x", "y"), Map(0 -> "x")))
rows.add(Row(false, (null, 2), Seq(null, "z"), Map(0 -> null)))
val schema = StructType(Seq(
StructField("cond", BooleanType, true),
StructField("s", StructType(Seq(
StructField("val1", StringType, true),
StructField("val2", IntegerType, false)
)), false),
StructField("a", ArrayType(StringType, true)),
StructField("m", MapType(IntegerType, StringType, true))
))
val sourceDF = spark.createDataFrame(rows, schema)
def structWhenDF: DataFrame = sourceDF
.select(when('cond, struct(lit("a").as("val1"), lit(10).as("val2"))).otherwise('s) as "res")
.select('res.getField("val1"))
def arrayWhenDF: DataFrame = sourceDF
.select(when('cond, array(lit("a"), lit("b"))).otherwise('a) as "res")
.select('res.getItem(0))
def mapWhenDF: DataFrame = sourceDF
.select(when('cond, map(lit(0), lit("a"))).otherwise('m) as "res")
.select('res.getItem(0))
def structIfDF: DataFrame = sourceDF
.select(expr("if(cond, struct('a' as val1, 10 as val2), s)") as "res")
.select('res.getField("val1"))
def arrayIfDF: DataFrame = sourceDF
.select(expr("if(cond, array('a', 'b'), a)") as "res")
.select('res.getItem(0))
def mapIfDF: DataFrame = sourceDF
.select(expr("if(cond, map(0, 'a'), m)") as "res")
.select('res.getItem(0))
def checkResult(): Unit = {
checkAnswer(structWhenDF, Seq(Row("a"), Row(null)))
checkAnswer(arrayWhenDF, Seq(Row("a"), Row(null)))
checkAnswer(mapWhenDF, Seq(Row("a"), Row(null)))
checkAnswer(structIfDF, Seq(Row("a"), Row(null)))
checkAnswer(arrayIfDF, Seq(Row("a"), Row(null)))
checkAnswer(mapIfDF, Seq(Row("a"), Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
checkResult()
// Test with cached relation, the Project will be evaluated with codegen
sourceDF.cache()
checkResult()
}
test("Uuid expressions should produce same results at retries in the same DataFrame") {
val df = spark.range(1).select($"id", new Column(Uuid()))
checkAnswer(df, df.collect())
}
test("SPARK-24313: access map with binary keys") {
val mapWithBinaryKey = map(lit(Array[Byte](1.toByte)), lit(1))
checkAnswer(spark.range(1).select(mapWithBinaryKey.getItem(Array[Byte](1.toByte))), Row(1))
}
test("SPARK-24781: Using a reference from Dataset in Filter/Sort") {
val df = Seq(("test1", 0), ("test2", 1)).toDF("name", "id")
val filter1 = df.select(df("name")).filter(df("id") === 0)
val filter2 = df.select(col("name")).filter(col("id") === 0)
checkAnswer(filter1, filter2.collect())
val sort1 = df.select(df("name")).orderBy(df("id"))
val sort2 = df.select(col("name")).orderBy(col("id"))
checkAnswer(sort1, sort2.collect())
}
test("SPARK-24781: Using a reference not in aggregation in Filter/Sort") {
withSQLConf(SQLConf.DATAFRAME_RETAIN_GROUP_COLUMNS.key -> "false") {
val df = Seq(("test1", 0), ("test2", 1)).toDF("name", "id")
val aggPlusSort1 = df.groupBy(df("name")).agg(count(df("name"))).orderBy(df("name"))
val aggPlusSort2 = df.groupBy(col("name")).agg(count(col("name"))).orderBy(col("name"))
checkAnswer(aggPlusSort1, aggPlusSort2.collect())
val aggPlusFilter1 = df.groupBy(df("name")).agg(count(df("name"))).filter(df("name") === 0)
val aggPlusFilter2 = df.groupBy(col("name")).agg(count(col("name"))).filter(col("name") === 0)
checkAnswer(aggPlusFilter1, aggPlusFilter2.collect())
}
}
test("SPARK-25159: json schema inference should only trigger one job") {
withTempPath { path =>
// This test is to prove that the `JsonInferSchema` does not use `RDD#toLocalIterator` which
// triggers one Spark job per RDD partition.
Seq(1 -> "a", 2 -> "b").toDF("i", "p")
// The data set has 2 partitions, so Spark will write at least 2 json files.
// Use a non-splittable compression (gzip), to make sure the json scan RDD has at least 2
// partitions.
.write.partitionBy("p").option("compression", "gzip").json(path.getCanonicalPath)
var numJobs = 0
sparkContext.addSparkListener(new SparkListener {
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
numJobs += 1
}
})
val df = spark.read.json(path.getCanonicalPath)
assert(df.columns === Array("i", "p"))
spark.sparkContext.listenerBus.waitUntilEmpty(10000)
assert(numJobs == 1)
}
}
test("SPARK-25402 Null handling in BooleanSimplification") {
val schema = StructType.fromDDL("a boolean, b int")
val rows = Seq(Row(null, 1))
val rdd = sparkContext.parallelize(rows)
val df = spark.createDataFrame(rdd, schema)
checkAnswer(df.where("(NOT a) OR a"), Seq.empty)
}
test("SPARK-25714 Null handling in BooleanSimplification") {
withSQLConf(SQLConf.OPTIMIZER_EXCLUDED_RULES.key -> ConvertToLocalRelation.ruleName) {
val df = Seq(("abc", 1), (null, 3)).toDF("col1", "col2")
checkAnswer(
df.filter("col1 = 'abc' OR (col1 != 'abc' AND col2 == 3)"),
Row ("abc", 1))
}
}
test("SPARK-25816 ResolveReferences works with nested extractors") {
val df = Seq((1, Map(1 -> "a")), (2, Map(2 -> "b"))).toDF("key", "map")
val swappedDf = df.select($"key".as("map"), $"map".as("key"))
checkAnswer(swappedDf.filter($"key"($"map") > "a"), Row(2, Map(2 -> "b")))
}
test("SPARK-26057: attribute deduplication on already analyzed plans") {
withTempView("a", "b", "v") {
val df1 = Seq(("1-1", 6)).toDF("id", "n")
df1.createOrReplaceTempView("a")
val df3 = Seq("1-1").toDF("id")
df3.createOrReplaceTempView("b")
spark.sql(
"""
|SELECT a.id, n as m
|FROM a
|WHERE EXISTS(
| SELECT 1
| FROM b
| WHERE b.id = a.id)
""".stripMargin).createOrReplaceTempView("v")
val res = spark.sql(
"""
|SELECT a.id, n, m
| FROM a
| LEFT OUTER JOIN v ON v.id = a.id
""".stripMargin)
checkAnswer(res, Row("1-1", 6, 6))
}
}
test("SPARK-27671: Fix analysis exception when casting null in nested field in struct") {
val df = sql("SELECT * FROM VALUES (('a', (10, null))), (('b', (10, 50))), " +
"(('c', null)) AS tab(x, y)")
checkAnswer(df, Row("a", Row(10, null)) :: Row("b", Row(10, 50)) :: Row("c", null) :: Nil)
val cast = sql("SELECT cast(struct(1, null) AS struct<a:int,b:int>)")
checkAnswer(cast, Row(Row(1, null)) :: Nil)
}
test("SPARK-27439: Explain result should match collected result after view change") {
withTempView("test", "test2", "tmp") {
spark.range(10).createOrReplaceTempView("test")
spark.range(5).createOrReplaceTempView("test2")
spark.sql("select * from test").createOrReplaceTempView("tmp")
val df = spark.sql("select * from tmp")
spark.sql("select * from test2").createOrReplaceTempView("tmp")
val captured = new ByteArrayOutputStream()
Console.withOut(captured) {
df.explain(extended = true)
}
checkAnswer(df, spark.range(10).toDF)
val output = captured.toString
assert(output.contains(
"""== Parsed Logical Plan ==
|'Project [*]
|+- 'UnresolvedRelation [tmp]""".stripMargin))
assert(output.contains(
"""== Physical Plan ==
|*(1) Range (0, 10, step=1, splits=2)""".stripMargin))
}
}
}
| actuaryzhang/spark | sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala | Scala | apache-2.0 | 83,331 |
package at.forsyte.apalache.tla.lir
import java.util.concurrent.atomic.AtomicLong
/**
 * This is a basic class for keeping expression identifiers. The most important feature is the method unique
 * in the companion object, which allows us to assign unique identifiers to different expressions.
 *
 * Equality and hashing are based solely on the wrapped `id` value.
 *
 * @param id the value of the identifier.
 */
class UID protected(val id: Long) extends Serializable {
  override def hashCode(): Int = id.hashCode()

  def canEqual(other: Any): Boolean = other.isInstanceOf[UID]

  override def equals(other: Any): Boolean = other match {
    case that: UID =>
      // Consult canEqual so a subclass that refines equality can veto the
      // comparison and symmetry of == is preserved (canonical equals recipe).
      that.canEqual(this) && id == that.id
    case _ => false
  }

  override def toString: String = id.toString
}
object UID {
  /**
   * The value of the id that will be assigned by the next call to unique(). We start with 1, to omit the
   * default value 0. By using AtomicLong, we make sure that unique() is assigning unique identifiers in the
   * concurrent setting. Declared as a `val`: the mutability lives inside the AtomicLong, and the reference
   * itself must never be reassigned.
   */
  private val nextId: AtomicLong = new AtomicLong(1)

  // TODO: remove this method in the future, as it allows one to work around uniqueness
  def apply(id: Long) = new UID(id)

  /**
   * Create a unique identifier, provided that all identifiers have been created only by calling this method.
   * This method is thread-safe.
   *
   * @return a new unique identifier
   */
  def unique: UID = {
    val newId = nextId.getAndAdd(1)
    if (newId == Long.MaxValue) {
      throw new IllegalStateException("Too many identifiers, change the underlying representation of UID.")
    }
    new UID(newId)
  }
}
/**
 * Mixin that assigns every instance a unique identifier at construction time.
 * Instances are ordered by the numeric value of that identifier, i.e. by
 * creation order.
 */
trait Identifiable extends Ordered[Identifiable] {
  val ID : UID = UID.unique

  override def compare(that: Identifiable): Int = java.lang.Long.compare(ID.id, that.ID.id)
}
/**
* This file is part of mycollab-web.
*
* mycollab-web is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* mycollab-web is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with mycollab-web. If not, see <http://www.gnu.org/licenses/>.
*/
package com.esofthead.mycollab.module.project.events
import com.esofthead.mycollab.eventmanager.ApplicationEvent
/**
 * Application events related to project tasks. Each event simply carries the
 * event source and an opaque payload up to the `ApplicationEvent` base class.
 *
 * @author MyCollab Ltd.
 * @since 5.0.3
 */
object TaskEvent {

  /** Request to run a task search with the given criteria payload. */
  class Search(source: AnyRef, data: AnyRef) extends ApplicationEvent(source, data)

  /** Navigate to the "add task" view. */
  class GotoAdd(source: AnyRef, data: AnyRef) extends ApplicationEvent(source, data)

  /** Navigate to the "edit task" view for the task in the payload. */
  class GotoEdit(source: AnyRef, data: AnyRef) extends ApplicationEvent(source, data)

  /** Navigate to the read-only task detail view. */
  class GotoRead(source: AnyRef, data: AnyRef) extends ApplicationEvent(source, data)

  /** Navigate to the Gantt chart view. */
  class GotoGanttChart(source: AnyRef, data: AnyRef) extends ApplicationEvent(source, data)
}
| uniteddiversity/mycollab | mycollab-web/src/main/scala/com/esofthead/mycollab/module/project/events/TaskEvent.scala | Scala | agpl-3.0 | 1,334 |
import java.net.URLEncoder
import cats.instances.future._
import cats.syntax.functor._
import com.bot4s.telegram.Implicits._
import com.bot4s.telegram.api.declarative._
import com.bot4s.telegram.api.ChatActions
import com.bot4s.telegram.future.Polling
import com.bot4s.telegram.methods._
import com.bot4s.telegram.models._
import scala.concurrent.Future
/**
* Text-to-speech bot (using Google TTS API)
*
* Google will rightfully block your IP in case of abuse.
* '''Usage:''' /speak Hello World
* '''Inline mode:''' @YourBot This is awesome
*/
class TextToSpeechBot(token: String)
  extends ExampleBot(token)
    with Polling
    with Commands[Future]
    with InlineQueries[Future]
    with ChatActions[Future] {

  // Builds the (unofficial) Google Translate TTS endpoint URL for the given text.
  // NOTE(review): plain HTTP and the "tw-ob" client token are undocumented;
  // Google may block abusive IPs, as the file header warns.
  def ttsUrl(text: String): String =
    s"http://translate.google.com/translate_tts?client=tw-ob&tl=en-us&q=${URLEncoder.encode(text, "UTF-8")}"

  // Handles /speak, /say and /talk: fetch the synthesized audio and reply
  // with a voice message.
  onCommand("speak" | "say" | "talk") { implicit msg =>
    withArgs { args =>
      val text = args.mkString(" ")
      for {
        r <- Future(scalaj.http.Http(ttsUrl(text)).asBytes)
        // On a non-2xx response this guard fails the Future; the failure is
        // not reported back to the chat (best-effort behavior).
        if r.isSuccess
        bytes = r.body
        _ <- uploadingAudio // hint the user
        voiceMp3 = InputFile("voice.mp3", bytes)
        _ <- request(SendVoice(msg.source, voiceMp3))
      } yield ()
    }
  }

  // Only inline queries that carry some text are worth answering.
  def nonEmptyQuery(iq: InlineQuery): Boolean = iq.query.nonEmpty

  // Non-empty queries get two results: a directly playable voice preview and
  // an article that expands to the equivalent /speak command.
  whenOrElse(onInlineQuery, nonEmptyQuery) { implicit iq =>
    answerInlineQuery(
      Seq(
        // Inline "playable" preview
        InlineQueryResultVoice("inline: " + iq.query, ttsUrl(iq.query), iq.query),
        // Redirection to /speak command
        InlineQueryResultArticle(
          "command: " + iq.query,
          iq.query,
          inputMessageContent = InputTextMessageContent("/speak " + iq.query),
          description = "/speak " + iq.query
        )
      )
    ).void
  } /* empty query */ {
    // Empty query: answer with no results so the client shows nothing.
    answerInlineQuery(Seq())(_).void
  }
}
| mukel/telegrambot4s | examples/src-jvm/TextToSpeechBot.scala | Scala | apache-2.0 | 1,927 |
/*
* Copyright (C) 2016 University of Basel, Graphics and Vision Research Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package scalismo.ui.view
import javax.swing.border.TitledBorder
import javax.swing.{BorderFactory, SwingConstants}
import scalismo.ui.control.SlicingPosition
import scalismo.ui.event.ScalismoPublisher
import scalismo.ui.model.{Axis, BoundingBox, Scene}
import scalismo.ui.rendering.{RendererPanel, RendererState}
import scalismo.ui.resources.icons.BundledIcon
import scalismo.ui.util.FileIoMetadata
import scalismo.ui.view.action.SaveAction
import scalismo.ui.view.util.{AxisColor, ScalableUI}
import scala.swing._
import scala.swing.event.{Event, ValueChanged}
/** Companion object holding the events published by viewport panels. */
object ViewportPanel {
  object event {
    /** Published when the bounding box of the viewport's rendered content changes. */
    case class BoundingBoxChanged(source: ViewportPanel) extends Event
    /** Published when the viewport is detached from the frame (see setAttached(false)). */
    case class Detached(source: ViewportPanel) extends Event
  }
}
/**
 * Base class for panels that display a rendered view of the scene.
 *
 * Provides a tool bar (camera reset, screenshot) above a [[RendererPanel]].
 * Subclasses may extend the tool bar via setupToolBar(), which is invoked
 * from this constructor — subclass fields are not yet initialized at that
 * point, so overrides must only use constructor parameters and inherited state.
 */
sealed abstract class ViewportPanel(val frame: ScalismoFrame) extends BorderPanel with ScalismoPublisher {
  /** Human-readable name of this viewport, shown in the titled border. */
  def name: String
  def scene: Scene = frame.scene
  val rendererPanel = new RendererPanel(this)
  protected val toolBar: ToolBar = new ToolBar()
  toolBar.peer.setFloatable(false)
  toolBar.peer.setRollover(true)
  toolBar.peer.setOrientation(java.awt.Adjustable.HORIZONTAL)
  /** Populates the tool bar; called once from the constructor. */
  def setupToolBar(): Unit = {
    // Camera reset button.
    toolBar.contents += new Button(new Action(null) {
      override def apply(): Unit = rendererPanel.resetCamera()
    }) {
      tooltip = "Reset Camera"
      icon = BundledIcon.Reset.standardSized()
    }
    // Screenshot button: prompts for a PNG target file.
    toolBar.contents += new Button(new Action(null) {
      override def apply(): Unit = {
        new SaveAction(rendererPanel.screenshot, FileIoMetadata.Png, "Save screenshot")(frame).apply()
      }
    }) {
      tooltip = "Screenshot"
      icon = BundledIcon.Screenshot.standardSized()
    }
  }
  /** Lays out the tool bar above the renderer; called once from the constructor. */
  def setupLayout(): Unit = {
    layout(toolBar) = BorderPanel.Position.North
    layout(rendererPanel) = BorderPanel.Position.Center
  }
  /** Attaches/detaches this viewport; detaching publishes a Detached event first. */
  def setAttached(attached: Boolean): Unit = {
    if (!attached) {
      publishEvent(ViewportPanel.event.Detached(this))
    }
    rendererPanel.setAttached(attached)
  }
  def currentBoundingBox: BoundingBox = rendererPanel.currentBoundingBox
  def rendererState: RendererState = rendererPanel.rendererState
  override def toString: String = name
  // constructor
  border = new TitledBorder(name)
  setupToolBar()
  setupLayout()
}
/** A viewport showing a freely rotatable 3D rendering of the scene. */
class ViewportPanel3D(frame: ScalismoFrame, override val name: String = "3D") extends ViewportPanel(frame) {

  /** Adds one camera-alignment button per axis after the inherited buttons. */
  override def setupToolBar(): Unit = {
    super.setupToolBar()
    for (axis <- List(Axis.X, Axis.Y, Axis.Z)) {
      toolBar.contents += new Button(new Action(axis.toString) {
        override def apply(): Unit = rendererPanel.setCameraToAxis(axis)
      }) {
        // Tint each button with its axis color.
        foreground = AxisColor.forAxis(axis).darker()
      }
    }
  }
}
/**
 * A 2D viewport showing the scene slice orthogonal to a fixed axis.
 *
 * A vertical slider (with +/- buttons) controls the slicing position along
 * the axis; slider and the frame-wide SlicingPosition are kept in sync in
 * both directions, using deafTo/listenTo to avoid feedback loops.
 */
class ViewportPanel2D(frame: ScalismoFrame, val axis: Axis) extends ViewportPanel(frame) {
  override def name: String = axis.toString
  // Slider controlling the slicing position along this panel's axis.
  private lazy val positionSlider = new Slider {
    peer.setOrientation(SwingConstants.VERTICAL)
  }
  // Step the slider up by one, clamped to its maximum.
  lazy val positionPlusButton = new Button(new Action("+") {
    override def apply(): Unit = {
      if (positionSlider.value < positionSlider.max) {
        positionSlider.value = positionSlider.value + 1
      }
    }
  })
  // Step the slider down by one, clamped to its minimum.
  lazy val positionMinusButton = new Button(new Action("-") {
    override def apply(): Unit = {
      if (positionSlider.value > positionSlider.min) {
        positionSlider.value = positionSlider.value - 1
      }
    }
  })
  private lazy val sliderPanel = new BorderPanel {
    layout(positionPlusButton) = BorderPanel.Position.North
    layout(positionSlider) = BorderPanel.Position.Center
    layout(positionMinusButton) = BorderPanel.Position.South
  }
  override def setupLayout(): Unit = {
    super.setupLayout()
    layout(sliderPanel) = BorderPanel.Position.East
  }
  // constructor
  border match {
    case titled: TitledBorder => titled.setTitleColor(AxisColor.forAxis(axis).darker())
    case _ => // unexpected, can't handle
  }
  rendererPanel.border = BorderFactory.createLineBorder(AxisColor.forAxis(axis), ScalableUI.scale(3))
  listenTo(frame.sceneControl.slicingPosition, positionSlider)
  /** Syncs the slider to the model point without echoing the change back to the model. */
  def updateSliderValue(p: scalismo.geometry.Point3D): Unit = {
    val v = axis match {
      case Axis.X => p.x
      case Axis.Y => p.y
      case Axis.Z => p.z
    }
    deafTo(positionSlider)
    positionSlider.value = Math.round(v).toInt
    listenTo(positionSlider)
  }
  /** Syncs the slider range to the bounding box extent along this axis. */
  def updateSliderMinMax(b: BoundingBox): Unit = {
    val (min, max) = axis match {
      case Axis.X => (b.xMin, b.xMax)
      case Axis.Y => (b.yMin, b.yMax)
      case Axis.Z => (b.zMin, b.zMax)
    }
    deafTo(positionSlider)
    positionSlider.min = Math.floor(min).toInt
    positionSlider.max = Math.ceil(max).toInt
    listenTo(positionSlider)
  }
  /** Pushes a user-initiated slider change into the shared slicing position. */
  def sliderValueChanged(): Unit = {
    val pos = frame.sceneControl.slicingPosition
    axis match {
      case Axis.X => pos.x = positionSlider.value.toFloat
      case Axis.Y => pos.y = positionSlider.value.toFloat
      case Axis.Z => pos.z = positionSlider.value.toFloat
    }
  }
  reactions += {
    case SlicingPosition.event.PointChanged(_, _, current) => updateSliderValue(current)
    case SlicingPosition.event.BoundingBoxChanged(s) => updateSliderMinMax(s.boundingBox)
    case SlicingPosition.event.PerspectiveChanged(s) =>
      updateSliderMinMax(s.boundingBox)
      updateSliderValue(s.point)
    case ValueChanged(s) if s eq positionSlider => sliderValueChanged()
  }
}
| unibas-gravis/scalismo-ui | src/main/scala-2.13+/scalismo/ui/view/ViewportPanel.scala | Scala | gpl-3.0 | 6,237 |
package scala.meta
package internal
package prettyprinters
import org.scalameta.show.{Show, enquote, SingleQuotes, DoubleQuotes, TripleQuotes}
import org.scalameta.adt._
import org.scalameta.unreachable
import Show.{ sequence => s, repeat => r, indent => i, newline => n }
import scala.compat.Platform.EOL
import scala.annotation.implicitNotFound
import scala.collection.mutable.StringBuilder
import scala.Console._
import scala.meta.prettyprinters._
/** Switch controlling how tree positions are rendered: plain text or ANSI-colored. */
@root trait PositionStyle
object PositionStyle {
  @leaf object BlackAndWhite extends PositionStyle
  @leaf object Colorful extends PositionStyle
  // Plain output unless a Colorful style is explicitly brought into scope.
  implicit val default: PositionStyle = BlackAndWhite
}
/** Prettyprinter that renders a value together with its source positions. */
@implicitNotFound(msg = "don't know how to show[Positions] for ${T} (if you're prettyprinting a tree, be sure to import a dialect, e.g. scala.meta.dialects.Scala211)")
trait Positions[T] extends Show[T]
object Positions {
  def apply[T](f: T => Show.Result): Positions[T] = new Positions[T] { def apply(input: T) = f(input) }
  // Importing Positions.Colorful switches rendering to ANSI colors.
  implicit val Colorful: PositionStyle = PositionStyle.Colorful
  // Renders a tree as its case-class structure, annotating every node that has
  // authentic tokens with its [start..end] offsets. With the Colorful style,
  // nodes with authentic tokens are green and the rest are red.
  implicit def positionsTree[T <: Tree : Syntax](implicit style: PositionStyle): Positions[T] = Positions { x =>
    def loopTree(x: Tree): Show.Result = {
      implicit class XtensionString(s: String) {
        // Wraps s in ANSI color codes only when the Colorful style is active.
        def colored(color: String) = if (style == PositionStyle.Colorful) (color + s + RESET) else s
      }
      // Renders a single field of a tree node: strings are quoted, subtrees
      // recurse, and lists/options are spelled out structurally.
      def loopField(x: Any, color: String): Show.Result = x match {
        case el: String => s(enquote(el, DoubleQuotes).colored(color))
        case el: Tree => loopTree(el)
        case el: Nil.type => s("Nil".colored(color))
        case el @ List(List()) => s("List(List())".colored(color))
        case el: ::[_] => s("List(".colored(color), r(el.map(el => loopField(el, color)), ", ".colored(color)), ")".colored(color))
        case el: None.type => s("None".colored(color))
        case el: Some[_] => s("Some(".colored(color), loopField(el.get, color), ")".colored(color))
        case el => s(el.toString.colored(color))
      }
      // Offsets are only meaningful when the node's tokens are authentic.
      def position(x: Tree): String = {
        if (x.tokens.isAuthentic) s"[${x.start.offset}..${x.end.offset}]" else ""
      }
      def color(x: Tree): String = if (x.tokens.isAuthentic) GREEN else RED
      val prefix = (x.productPrefix + position(x) + "(").colored(color(x))
      val fields = r(x.productIterator.toList.map(el => loopField(el, color(x))), ", ".colored(color(x)))
      val suffix = ")".colored(color(x))
      s(prefix, fields, suffix)
    }
    loopTree(x)
  }
}
| beni55/scalameta | scalameta/trees/src/main/scala/scala/meta/internal/prettyprinters/TreePositions.scala | Scala | bsd-3-clause | 2,490 |
package net.francesbagual.tdc.monads.in.practice.main
import java.sql.ResultSet
import java.sql.DriverManager
import java.sql.Connection
import java.sql.Statement
object JdbcTemplate {

  /**
   * Opens a connection, executes `sql` on a fresh statement, applies `f` to
   * the resulting `ResultSet`, and always closes the connection afterwards
   * (which also releases the statement and result set).
   *
   * If `getConnection` itself fails, the exception propagates and there is
   * nothing to close — matching the original null-guarded behavior.
   */
  def withStatement[T](user: String, password: String)(sql: String, f: ResultSet => T): T = {
    val connection = DriverManager.getConnection("jdbc:myDriver:myDatabase", user, password)
    try {
      val resultSet = connection.createStatement().executeQuery(sql)
      f(resultSet)
    } finally {
      connection.close()
    }
  }
}
| toff63/monads-in-practice-tdc | src/main/scala/net/francesbagual/tdc/monads/in/practice/main/JdbcTemplate.scala | Scala | unlicense | 642 |
// Wei Chen - Extreme Learning
// 2020-03-08
package com.scalaml.algorithm
import com.scalaml.general.MatrixFunc._
/**
 * Extreme Learning Machine: a single hidden layer with fixed random input
 * weights; only the output weights are fitted, via a least-squares solve.
 */
class ExtremeLearning(val neuronNumber: Int, val featureNumber: Int, val outputNumber: Int) {
    // Input weights (features -> hidden) and output weights (hidden -> outputs),
    // both drawn uniformly from [-1, 1).
    var wIn = matrixrandom(featureNumber, neuronNumber, -1, 1)
    var wOut = matrixrandom(neuronNumber, outputNumber, -1, 1)

    /** Re-draws both weight matrices, discarding anything learned so far. */
    def clear() = {
        wIn = matrixrandom(featureNumber, neuronNumber, -1, 1)
        wOut = matrixrandom(neuronNumber, outputNumber, -1, 1)
    }
    clear()

    // Hidden-layer activations: (x * wIn) passed through ReLU element-wise.
    private def reluLayer(x: Array[Array[Double]]): Array[Array[Double]] =
        matrixdot(x, wIn).map(row => row.map(v => math.max(0, v)))

    /** Fits wOut as the least-squares solution wOut = (Hᵀ H)⁻¹ Hᵀ y. */
    def train(x: Array[Array[Double]], y: Array[Array[Double]]): Unit = {
        val hidden = reluLayer(x)
        val hiddenT = hidden.transpose
        val gram = matrixdot(hiddenT, hidden)
        wOut = matrixdot(inverse(gram), matrixdot(hiddenT, y))
    }

    /** Forward pass: hidden activations times the learned output weights. */
    def predict(x: Array[Array[Double]]): Array[Array[Double]] =
        matrixdot(reluLayer(x), wOut)
}
| Wei-1/Scala-Machine-Learning | src/main/scala/algorithm/classification/ExtremeLearning.scala | Scala | mit | 1,010 |
package depthblur
import scalafx.application.JFXApp
import scalafx.application.JFXApp.PrimaryStage
import scalafx.geometry.Insets
import scalafx.Includes._
import scalafx.scene.Scene
import scalafx.scene.control.Label
import scalafx.scene.layout.BorderPane
import scalafx.scene.image.{Image, ImageView}
import scalafx.scene.control.{Button, RadioButton, ToggleGroup, Slider}
import scalafx.scene.input.MouseEvent
import scalafx.scene.layout.{VBox, HBox}
import scalafx.scene.input.MouseEvent
import scalafx.stage.FileChooser
import scalafx.stage.FileChooser.ExtensionFilter
import javafx.scene.control.{RadioButton => JfxRadioBtn}
import scalafx.event.ActionEvent
import depthblur.DepthBlurAlg
import depthblur.FilterType._
import java.awt.image.BufferedImage
import java.io.File
import javafx.embed.swing.SwingFXUtils
import javax.imageio.ImageIO
/**
 * ScalaFX GUI for interactively applying depth-aware blur filters to an image.
 *
 * The user loads an image and a matching depth map, picks a filter, and clicks
 * a point in the displayed image; DepthBlurAlg applies the filter using the
 * depth at the clicked pixel.
 */
object DepthBlur extends JFXApp {
  val stageWidth = 800
  val stageHeight = 600
  stage = new PrimaryStage {
    width = stageWidth
    height = stageHeight
    resizable = false
    title = "DepthBlur"
    // Currently loaded image and its depth map (placeholders until loaded).
    var img = new Image("file:resources/placeholder.png")
    var dpt = new Image("file:resources/placeholder.png")
    // Whether the depth map (instead of the image) is currently displayed.
    var showDepth = false
    val fileChooser = new FileChooser {
      extensionFilters ++= Seq(
        new ExtensionFilter("Image Files", Seq("*.png", "*.jpg"))
      )
      initialFileName = "image.png"
    }
    scene = new Scene{
      val defaultInfoMessage = "Click image to apply filter"
      val defaultDepthSigma = 60
      val defaultSpatialSigma = 60
      // Action buttons. Depth-dependent controls stay disabled until both an
      // image and a matching depth map have been loaded.
      val mapToggle = new Button("Show depth map")
      val reset = new Button("Reset")
      val save = new Button("Save image")
      val load = new Button("Load image")
      val loadDepth = new Button("Load depth map")
      loadDepth.disable = true
      mapToggle.disable = true
      // Filter selection: exactly one of the three radio buttons is active.
      val filterGroup = new ToggleGroup()
      val rbBoxFilter = new RadioButton("Box filter")
      val rbBilateralFilter = new RadioButton("Bilateral filter")
      val rbNegation = new RadioButton("Negation")
      rbBoxFilter.setToggleGroup(filterGroup)
      rbBilateralFilter.setToggleGroup(filterGroup)
      rbNegation.setToggleGroup(filterGroup)
      rbNegation.setSelected(true)
      rbBoxFilter.disable = true
      rbBilateralFilter.disable = true
      // Bilateral filter parameters, adjustable in [1, 255].
      val spatialSigma = new Slider(1, 255, defaultSpatialSigma)
      val depthSigma = new Slider(1, 255, defaultDepthSigma)
      val spatialSigmaL = new Label(s"Spatial sigma: $defaultSpatialSigma")
      val depthSigmaL = new Label(s"Depth sigma: $defaultDepthSigma")
      val buttons = new HBox(5.0, reset, save, load, loadDepth, mapToggle)
      val info = new Label(defaultInfoMessage)
      // Scaled-down preview of the current image (or depth map).
      val display = new ImageView(img)
      display.fitWidth = stageWidth.toDouble * 0.8
      display.fitHeight = stageHeight.toDouble * 0.8
      display.preserveRatio = true
      val displayControl = new VBox(5.0, display, info, buttons)
      val bilateralControl = new VBox(3.0, rbBilateralFilter,
                                      spatialSigmaL, spatialSigma,
                                      depthSigmaL, depthSigma)
      val filterSelect = new VBox(5.0, rbNegation,
                                  rbBoxFilter,
                                  bilateralControl)
      // Restores the display to the unfiltered image view.
      def resetScene {
        showDepth = false
        display.image = img
        mapToggle.text = "Show depth map"
        info.text =defaultInfoMessage
      }
      // Shows an open dialog; returns the image and its path, or None when
      // the dialog is cancelled (Option(null) evaluates to None).
      def loadImage : Option[(Image, String)] = {
        val file = fileChooser.showOpenDialog(stage)
        if(file != null) {
          return Option((new Image(s"file:$file"), file.toString))
        }
        return Option(null)
      }
      // Keep the sigma labels in sync with their sliders.
      spatialSigma.valueProperty.addListener {
        (o: javafx.beans.value.ObservableValue[_ <: Number], oldVal: Number, newVal: Number) =>
          val s = newVal.doubleValue
          spatialSigmaL.text = f"Spatial sigma: $s%.2f"
      }
      depthSigma.valueProperty.addListener {
        (o: javafx.beans.value.ObservableValue[_ <: Number], oldVal: Number, newVal: Number) =>
          val s = newVal.doubleValue
          depthSigmaL.text = f"Depth sigma: $s%.2f"
      }
      // Toggle between showing the image and its depth map.
      mapToggle.onAction = handle{
        if(showDepth == true) {
          mapToggle.text = "Show depth map"
          display.image = img
          showDepth = false
        }
        else {
          mapToggle.text = "Show original image"
          display.image = dpt
          showDepth = true
        }
      }
      reset.onAction = handle {
        resetScene
      }
      // Save whatever is currently displayed (filtered or not) as PNG.
      save.onAction = handle {
        val imageFile = fileChooser.showSaveDialog(stage)
        if(imageFile != null) {
          val bImage = SwingFXUtils.fromFXImage(display.image.value, null)
          ImageIO.write(bImage, "png", imageFile)
          println(s"saved in: $imageFile")
          info.text = s"Picture saved in: $imageFile"
        }
      }
      // Loading a new image invalidates any previously loaded depth map, so
      // the depth-dependent filters and toggles are disabled again.
      load.onAction = handle {
        loadImage match {
          case Some(image) => {
            img = image._1
            val filename = image._2
            loadDepth.disable = false
            println("loaded new image")
            resetScene
            info.text = s"Loaded image from: $filename"
            rbBilateralFilter.disable = true
            rbBoxFilter.disable = true
            rbNegation.setSelected(true)
            mapToggle.disable = true
          }
          case None => println("no image specified")
        }
      }
      // A depth map is accepted only if its dimensions match the image.
      loadDepth.onAction = handle {
        loadImage match{
          case Some(image) => {
            val w = image._1.width.toInt
            val h = image._1.height.toInt
            if(h == img.height.toInt && w == img.width.toInt) {
              dpt = image._1
              val filename = image._2
              println("loaded new depth map")
              info.text = s"Loaded depth map from: $filename"
              rbBilateralFilter.disable = false
              rbBoxFilter.disable = false
              mapToggle.disable = false
            }
            else {
              info.text = "Image and depth map size does not match"
              println("no matching image/depth map")
            }
          }
          case None => println("no depthmap specified")
        }
      }
      // Apply the selected filter at the clicked pixel.
      display.onMouseClicked = (event: MouseEvent) => {
        // Scale the click coordinates back to original image size.
        // NOTE(review): sceneX/sceneY are scene-relative, not ImageView-relative —
        // verify this maps correctly when the view is not at the scene origin.
        val bounds = display.layoutBounds()
        val scaleX = bounds.width.toDouble / img.width.toDouble
        val scaleY = bounds.height.toDouble / img.height.toDouble
        val x = (event.sceneX / scaleX).toInt
        val y = (event.sceneY / scaleY).toInt
        // Cast to javafx.scene.control.RadioButton to access getText().
        val filterBtn = filterGroup.selectedToggle().asInstanceOf[JfxRadioBtn]
        val filterName = filterBtn.getText
        filterName match {
          case "Box filter" => {
            display.image = DepthBlurAlg.blurFilter(x, y,
                                                    img, dpt, BoxFilter)
          }
          case "Bilateral filter" => {
            display.image = DepthBlurAlg.blurFilter(x, y,
                                                    img, dpt, BilateralFilter,
                                                    spatialSigma.value.toDouble, depthSigma.value.toDouble)
          }
          case "Negation" => {
            display.image = DepthBlurAlg.negation(img)
          }
        }
        info.text = s"Applying $filterName at [$x, $y]."
      }
      content = new HBox(5.0, displayControl, filterSelect)
    }
  }
} | ggljzr/mi-psl | src/main/scala/depthblur/DepthBlur.scala | Scala | mit | 7,527 |
package com.twitter.finagle.filter
import com.twitter.util.{Monitor, Future}
import com.twitter.finagle.{SimpleFilter, Service}
/*
 * A filter that reports all exceptions (including raw ones) raised while
 * handling a request to the given monitor.
 */
class MonitorFilter[Req, Rep](monitor: Monitor) extends SimpleFilter[Req, Rep] {

  /**
   * Invokes the service inside `Future.monitored`, so that exceptions thrown
   * both synchronously and asynchronously are captured, and reports any
   * failure to the configured monitor.
   */
  def apply(request: Req, service: Service[Req, Rep]): Future[Rep] = {
    val monitored = Future.monitored(service(request))
    monitored.onFailure { exc => monitor.handle(exc) }
  }
}
| firebase/finagle | finagle-core/src/main/scala/com/twitter/finagle/filter/MonitorFilter.scala | Scala | apache-2.0 | 500 |
package net.liftmodules.extras
import net.liftweb._
import common._
import http._
import http.js._
import JsCmds._
import JE._
import json._
import JsExtras.IIFE
/**
* Contains Scala JsExps for Angular behaviors.
*
* These functions are meant to be combined using the ~> operator. For
* example:
*
* <pre>NgModule("LiftServer", Nil) ~> NgFactory(name, AnonFunc(...))</pre>
*
*/
object NgJE {

  /**
   * Wraps a query expression in a call to `angular.element`, yielding the
   * element set that the other expressions in this object operate on.
   */
  case class NgElement(query: JsExp) extends JsExp {
    override def toJsCmd = s"angular.element(${query.toJsCmd})"
  }

  /** Selects an element by id via `angular.element('#' + id)`. */
  case class NgId(id: JsExp) extends JsExp {
    override def toJsCmd = s"angular.element('#'+${id.toJsCmd})"
  }

  /** Declares an Angular module with the given name and dependency list. */
  case class NgModule(name: String, dependencies: Seq[String]) extends JsExp {
    def toJsCmd = {
      val deps = dependencies.map(d => s"'$d'").mkString(", ")
      s"angular.module('$name', [$deps])"
    }
  }

  /** Registers a factory on a module; combine with ~> after an NgModule. */
  case class NgFactory(name: String, func: AnonFunc) extends JsExp with JsMember {
    def toJsCmd = s"factory('$name', ${func.toJsCmd})"
  }

  /** Registers a service on a module. */
  case class NgService(name: String, func: AnonFunc) extends JsExp with JsMember {
    def toJsCmd = s"service('$name', ${func.toJsCmd})"
  }

  /** Registers a provider on a module. */
  case class NgProvider(name: String, func: AnonFunc) extends JsExp with JsMember {
    def toJsCmd = s"provider('$name', ${func.toJsCmd})"
  }

  /** Registers a constant on a module. */
  case class NgConstant(name: String, value: JsExp) extends JsExp with JsMember {
    def toJsCmd = s"constant('$name', ${value.toJsCmd})"
  }

  /** Registers a value on a module. */
  case class NgValue(name: String, value: JsExp) extends JsExp with JsMember {
    def toJsCmd = s"value('$name', ${value.toJsCmd})"
  }

  /** Registers a config block on a module. */
  case class NgConfig(func: AnonFunc) extends JsExp with JsMember {
    def toJsCmd = s"config(${func.toJsCmd})"
  }
}
object NgJsCmds {

  // Renders a boxed JValue as a JsExp; an empty or failed box becomes JS null.
  private implicit def boxedJValueToJsExp(in: Box[JValue]): JsExp = {
    def render(jv: JValue): JsExp = new JsExp { def toJsCmd = compactRender(jv) }
    in.map(render).openOr(JsNull)
  }

  /** Calls `$scope.$apply` on the scope of the element with the given id. */
  case class NgApply(elementId: String, cmd: JsCmd) extends JsCmd {
    def toJsCmd = WithScopeVar(elementId, Call("scope.$apply", AnonFunc(cmd))).toJsCmd
  }

  /** Calls `$scope.$broadcast` on the scope of the element with the given id. */
  case class NgBroadcast(elementId: String, event: String, json: Box[JValue] = Empty) extends JsCmd {
    def toJsCmd = WithScopeVar(elementId, Call("scope.$broadcast", event, json)).toJsCmd
  }

  /**
   * Binds a local `scope` variable to the Angular scope of the element with
   * the given id and runs `cmd`, all wrapped in an IIFE.
   */
  case class WithScopeVar(elementId: String, cmd: JsCmd) extends JsCmd {
    def toJsCmd = {
      val fetchScope = JsCrVar("scope", Call(s"angular.element('#$elementId').scope"))
      IIFE(fetchScope & cmd).toJsCmd
    }
  }
}
| eltimn/lift-extras | src/main/scala/net/liftmodules/extras/NgJsCmds.scala | Scala | apache-2.0 | 3,103 |
/*******************************************************************************
Copyright (c) 2013, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models.Tizen
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
import kr.ac.kaist.jsaf.analysis.cfg.{CFG, CFGExpr}
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.models._
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T}
import kr.ac.kaist.jsaf.analysis.typing._
import kr.ac.kaist.jsaf.analysis.typing.models.AbsInternalFunc
import kr.ac.kaist.jsaf.analysis.typing.domain.Heap
import kr.ac.kaist.jsaf.analysis.typing.domain.Context
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import kr.ac.kaist.jsaf.analysis.typing.models.builtin.BuiltinDate
/**
 * Abstract-semantics model of the Tizen ContactAnniversary API for the
 * JSAF static analyzer: declares the constructor/prototype objects and the
 * transfer function for `new tizen.ContactAnniversary(date[, label])`.
 */
object TIZENContactAnniversary extends Tizen {
  private val name = "ContactAnniversary"
  /* predefined locations */
  val loc_cons = newSystemRecentLoc(name + "Cons")
  val loc_proto = newSystemRecentLoc(name + "Proto")
  /* constructor or object*/
  private val prop_cons: List[(String, AbsProperty)] = List(
    ("@class", AbsConstValue(PropValue(AbsString.alpha("Function")))),
    ("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
    ("@extensible", AbsConstValue(PropValue(T))),
    ("@scope", AbsConstValue(PropValue(Value(NullTop)))),
    ("@construct", AbsInternalFunc("tizen.ContactAnniversary.constructor")),
    ("@hasinstance", AbsConstValue(PropValue(Value(NullTop)))),
    ("prototype", AbsConstValue(PropValue(ObjectValue(Value(loc_proto), F, F, F))))
  )
  /* prototype */
  private val prop_proto: List[(String, AbsProperty)] = List(
    ("@class", AbsConstValue(PropValue(AbsString.alpha("CallbackObject")))),
    ("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
    ("@extensible", AbsConstValue(PropValue(BoolTrue)))
  )
  override def getInitList(): List[(Loc, List[(String, AbsProperty)])] = List(
    (loc_cons, prop_cons), (loc_proto, prop_proto)
  )
  override def getSemanticMap(): Map[String, SemanticFun] = {
    Map(
      ("tizen.ContactAnniversary.constructor" -> (
        (sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
          val lset_this = h(SinglePureLocalLoc)("@this")._1._2._2
          // First argument must be a Date instance; otherwise raise TypeMismatchError.
          val v_1 = getArgValue(h, ctx, args, "0")
          val n_arglen = Operator.ToUInt32(getArgValue(h, ctx, args, "length"))
          val (b_1, es) = TizenHelper.instanceOf(h, v_1, Value(BuiltinDate.ProtoLoc))
          val es_1 =
            if (b_1._1._3 <= F) Set[WebAPIException](TypeMismatchError)
            else TizenHelper.TizenExceptionBot
          // Fresh abstract object for the new anniversary, with its "date" property set.
          val o_new = ObjEmpty.
            update("@class", PropValue(AbsString.alpha("Object"))).
            update("@proto", PropValue(ObjectValue(Value(TIZENContactAnniversary.loc_proto), F, F, F))).
            update("@extensible", PropValue(T)).
            update("date", PropValue(ObjectValue(Value(v_1._2), T, T, T)))
          // "label" is null with one argument, or the string value of the second.
          val (h_2, es_2) = n_arglen match {
            case UIntSingle(n) if n == 1 =>
              val o_new2 = o_new.update("label", PropValue(ObjectValue(Value(NullTop), T, T, T)))
              val h_2 = lset_this.foldLeft(h)((_h, l) => _h.update(l, o_new2))
              (h_2, TizenHelper.TizenExceptionBot)
            case UIntSingle(n) if n == 2 =>
              val v_2 = getArgValue(h, ctx, args, "1")
              val o_new2 = o_new.update("label", PropValue(ObjectValue(Value(Helper.toString(v_2._1)), T, T, T)))
              val h_2 = lset_this.foldLeft(h)((_h, l) => _h.update(l, o_new2))
              (h_2, TizenHelper.TizenExceptionBot)
            case _ => {
              // Unknown/other argument counts: heap unchanged, no new exceptions.
              (h, TizenHelper.TizenExceptionBot)
            }
          }
          val (h_e, ctx_e) = TizenHelper.TizenRaiseException(h, ctx, es ++ es_1 ++ es_2)
          ((Helper.ReturnStore(h_2, Value(lset_this)), ctx), (he + h_e, ctxe + ctx_e))
        }
      ))
    )
  }
  override def getPreSemanticMap(): Map[String, SemanticFun] = {
    Map()
  }
  override def getDefMap(): Map[String, AccessFun] = {
    Map()
  }
  override def getUseMap(): Map[String, AccessFun] = {
    Map()
  }
} | daejunpark/jsaf | src/kr/ac/kaist/jsaf/analysis/typing/models/Tizen/TIZENContactAnniversary.scala | Scala | bsd-3-clause | 4,502 |
package views.html
package tournament
import lila.api.Context
import lila.app.templating.Environment._
import lila.app.ui.ScalatagsTemplate._
import controllers.routes
/** Renders the tournament FAQ, both as a standalone page and as an embeddable fragment. */
object faq {
  import trans.arena._
  // Full standalone FAQ page, wrapped in the standard site layout.
  def page(implicit ctx: Context) =
    views.html.base.layout(
      title = trans.tournamentFAQ.txt(),
      moreCss = cssTag("page")
    ) {
      main(cls := "page-small box box-pad page")(
        h1(
          a(href := routes.Tournament.home, dataIcon := "", cls := "text"),
          trans.tournamentFAQ()
        ),
        div(cls := "body")(apply())
      )
    }
  // FAQ body fragment. `rated` tailors the rating explanation when known;
  // `privateId` prepends a private-tournament note with a shareable URL.
  def apply(rated: Option[Boolean] = None, privateId: Option[String] = None)(implicit ctx: Context) =
    frag(
      privateId.map { id =>
        frag(
          h2(trans.arena.thisIsPrivate()),
          p(trans.arena.shareUrl(s"$netBaseUrl${routes.Tournament.show(id)}")) // TODO(review): raw URL bypasses i18n-safe link rendering — confirm intended
        )
      },
      p(trans.arena.willBeNotified()),
      h2(trans.arena.isItRated()),
      rated match {
        case Some(true) => p(trans.arena.isRated())
        case Some(false) => p(trans.arena.isNotRated())
        case None => p(trans.arena.someRated())
      },
      h2(howAreScoresCalculated()),
      p(howAreScoresCalculatedAnswer()),
      h2(berserk()),
      p(berserkAnswer()),
      h2(howIsTheWinnerDecided()),
      p(howIsTheWinnerDecidedAnswer()),
      h2(howDoesPairingWork()),
      p(howDoesPairingWorkAnswer()),
      h2(howDoesItEnd()),
      p(howDoesItEndAnswer()),
      h2(otherRules()),
      p(thereIsACountdown()),
      p(drawingWithinNbMoves.pluralSame(10)),
      p(drawStreak(30))
    )
}
| luanlv/lila | app/views/tournament/faq.scala | Scala | mit | 1,614 |
/*
* Copyright 2015 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.couchdb.api
import com.ibm.couchdb.api.builders.{ListQueryBuilder, ShowQueryBuilder, ViewQueryBuilder}
import com.ibm.couchdb.core.Client
import scalaz.Scalaz._
/** Entry point for building CouchDB view/show/list queries against `db`. */
class Query(client: Client, db: String) {

  /** Builder for querying a view; None when `design` or `view` is empty. */
  def view[K, V](design: String, view: String)
                (implicit
                 kr: upickle.Reader[K],
                 kw: upickle.Writer[K],
                 vr: upickle.Reader[V]): Option[ViewQueryBuilder[K, V]] = {
    if (design.nonEmpty && view.nonEmpty) ViewQueryBuilder[K, V](client, db, design, view).some
    else none[ViewQueryBuilder[K, V]]
  }

  /** Builder for a show function; None when `design` or `show` is empty. */
  def show(design: String, show: String): Option[ShowQueryBuilder] = {
    if (design.nonEmpty && show.nonEmpty) ShowQueryBuilder(client, db, design, show).some
    else none[ShowQueryBuilder]
  }

  /** Builder for a list function; None when `design` or `list` is empty. */
  def list(design: String, list: String): Option[ListQueryBuilder] = {
    if (design.nonEmpty && list.nonEmpty) ListQueryBuilder(client, db, design, list).some
    else none[ListQueryBuilder]
  }
}
| KimStebel/couchdb-scala | src/main/scala/com/ibm/couchdb/api/Query.scala | Scala | apache-2.0 | 1,581 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
/**
* Some elements of this were copied from:
*
* https://gist.github.com/casualjim/1819496
*/
package play.it.http.websocket
import java.net.URI
import java.util.concurrent.atomic.AtomicBoolean
import akka.stream.scaladsl._
import akka.stream.stage._
import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
import akka.util.ByteString
import com.typesafe.netty.{ HandlerPublisher, HandlerSubscriber }
import io.netty.bootstrap.Bootstrap
import io.netty.buffer.{ ByteBufHolder, Unpooled }
import io.netty.channel._
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.SocketChannel
import io.netty.channel.socket.nio.NioSocketChannel
import io.netty.handler.codec.http._
import io.netty.handler.codec.http.websocketx._
import io.netty.util.ReferenceCountUtil
import play.api.http.websocket._
import play.it.http.websocket.WebSocketClient.ExtendedMessage
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{ Future, Promise }
import scala.language.implicitConversions
/**
* A basic WebSocketClient. Basically wraps Netty's WebSocket support into something that's much easier to use and much
* more Scala friendly.
*/
/**
 * A basic WebSocket client interface: connect to a URL, exchange messages
 * through an Akka Streams Flow, and shut the client down when done.
 */
trait WebSocketClient {

  /**
   * Connect to the given URI.
   *
   * `onConnect` receives a Flow carrying incoming/outgoing messages once the
   * handshake completes.
   *
   * @return A future that will be redeemed when the connection is closed.
   */
  def connect(url: URI, version: WebSocketVersion = WebSocketVersion.V13)(onConnect: Flow[ExtendedMessage, ExtendedMessage, _] => Unit): Future[_]

  /**
   * Shutdown the client and release all associated resources.
   */
  def shutdown()
}
object WebSocketClient {
  /** A WebSocket message together with its fragmentation flag. */
  trait ExtendedMessage {
    def finalFragment: Boolean
  }
  object ExtendedMessage {
    // Lets plain Messages be used wherever ExtendedMessages are expected,
    // treating them as complete (unfragmented) messages.
    implicit def messageToExtendedMessage(message: Message): ExtendedMessage =
      SimpleMessage(message, finalFragment = true)
  }
  /** A message, or the first fragment of a fragmented message. */
  case class SimpleMessage(message: Message, finalFragment: Boolean) extends ExtendedMessage
  /** A continuation fragment of a previously started message. */
  case class ContinuationMessage(data: ByteString, finalFragment: Boolean) extends ExtendedMessage

  /** Creates a new client backed by its own Netty event loop. */
  def create(): WebSocketClient = new DefaultWebSocketClient

  /** Runs `block` with a fresh client, always shutting the client down afterwards. */
  def apply[T](block: WebSocketClient => T) = {
    val client = WebSocketClient.create()
    try {
      block(client)
    } finally {
      client.shutdown()
    }
  }
  // Adapts a Netty ChannelFuture to a Scala Future that completes with the
  // channel on success, fails with the cause on error, or fails with a
  // RuntimeException when the operation was cancelled.
  private implicit class ToFuture(chf: ChannelFuture) {
    def toScala: Future[Channel] = {
      val promise = Promise[Channel]()
      chf.addListener(new ChannelFutureListener {
        def operationComplete(future: ChannelFuture) = {
          if (future.isSuccess) {
            promise.success(future.channel())
          } else if (future.isCancelled) {
            promise.failure(new RuntimeException("Future cancelled"))
          } else {
            promise.failure(future.cause())
          }
        }
      })
      promise.future
    }
  }
  private class DefaultWebSocketClient extends WebSocketClient {
    val eventLoop = new NioEventLoopGroup()
    // Netty bootstrap: HTTP codec + aggregator for the handshake; auto-read is
    // disabled so reads are driven explicitly after the pipeline is set up.
    val client = new Bootstrap()
      .group(eventLoop)
      .channel(classOf[NioSocketChannel])
      .option(ChannelOption.AUTO_READ, java.lang.Boolean.FALSE)
      .handler(new ChannelInitializer[SocketChannel] {
        def initChannel(ch: SocketChannel) = {
          ch.pipeline().addLast(new HttpClientCodec, new HttpObjectAggregator(8192))
        }
      })

    /**
     * Connect to the given URI
     */
    def connect(url: URI, version: WebSocketVersion)(onConnected: (Flow[ExtendedMessage, ExtendedMessage, _]) => Unit) = {
      val normalized = url.normalize()
      // An empty path is normalized to "/" so the handshake request is valid.
      val tgt = if (normalized.getPath == null || normalized.getPath.trim().isEmpty) {
        new URI(normalized.getScheme, normalized.getAuthority, "/", normalized.getQuery, normalized.getFragment)
      } else normalized
      val disconnected = Promise[Unit]()
      client.connect(tgt.getHost, tgt.getPort).toScala.map { channel =>
        val handshaker = WebSocketClientHandshakerFactory.newHandshaker(tgt, version, null, false, new DefaultHttpHeaders())
        // The supervisor completes `disconnected` and invokes `onConnected`
        // once the handshake finishes.
        channel.pipeline().addLast("supervisor", new WebSocketSupervisor(disconnected, handshaker, onConnected))
        handshaker.handshake(channel)
        channel.read()
      }.onFailure {
        case t => disconnected.tryFailure(t)
      }
      disconnected.future
    }

    def shutdown() = eventLoop.shutdownGracefully()
  }
/**
 * Netty inbound handler that supervises a client WebSocket connection: it finishes
 * the HTTP upgrade handshake, replaces itself in the pipeline with a reactive-streams
 * publisher/subscriber pair, and exposes the connection to the caller as an Akka
 * Streams Flow of ExtendedMessage.
 *
 * The `disconnected` promise is completed exactly once (via trySuccess/tryFailure)
 * when the connection terminates, successfully or otherwise.
 */
private class WebSocketSupervisor(disconnected: Promise[Unit], handshaker: WebSocketClientHandshaker,
  onConnected: Flow[ExtendedMessage, ExtendedMessage, _] => Unit) extends ChannelInboundHandlerAdapter {

  override def channelRead(ctx: ChannelHandlerContext, msg: Object) {
    msg match {
      // An HTTP response arriving after the handshake completed is a protocol violation.
      case resp: HttpResponse if handshaker.isHandshakeComplete =>
        throw new WebSocketException("Unexpected HttpResponse (status=" + resp.getStatus + ")")
      case resp: FullHttpResponse =>
        // Setup the pipeline
        val publisher = new HandlerPublisher(ctx.executor, classOf[WebSocketFrame])
        val subscriber = new HandlerSubscriber[WebSocketFrame](ctx.executor)
        ctx.pipeline.addAfter(ctx.executor, ctx.name, "websocket-subscriber", subscriber)
        ctx.pipeline.addAfter(ctx.executor, ctx.name, "websocket-publisher", publisher)
        // Now remove ourselves from the chain
        ctx.pipeline.remove(ctx.name)
        handshaker.finishHandshake(ctx.channel(), resp)
        // Bridge the Netty channel into an Akka Streams Flow and hand it to the caller.
        val clientConnection = Flow.fromSinkAndSource(Sink.fromSubscriber(subscriber), Source.fromPublisher(publisher))
        onConnected(webSocketProtocol(clientConnection))
      case _ => throw new WebSocketException("Unexpected message: " + msg)
    }
  }

  // Set when the server (rather than this client) initiated the close handshake;
  // used below to decide whether a later channel failure counts as a clean shutdown.
  val serverInitiatedClose = new AtomicBoolean

  /**
   * Wraps the raw frame-level connection into the ExtendedMessage protocol, echoing
   * server-initiated close frames back to complete the close handshake and tracking
   * which side closed first.
   */
  def webSocketProtocol(clientConnection: Flow[WebSocketFrame, WebSocketFrame, _]): Flow[ExtendedMessage, ExtendedMessage, _] = {
    val clientInitiatedClose = new AtomicBoolean

    // Pass-through stage that records when this client sends a close frame.
    val captureClientClose = Flow[WebSocketFrame].via(new GraphStage[FlowShape[WebSocketFrame, WebSocketFrame]] {
      val in = Inlet[WebSocketFrame]("WebSocketFrame.in")
      val out = Outlet[WebSocketFrame]("WebSocketFrame.out")
      val shape: FlowShape[WebSocketFrame, WebSocketFrame] = FlowShape.of(in, out)
      def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler {
        def onPush(): Unit = {
          grab(in) match {
            case close: CloseWebSocketFrame =>
              clientInitiatedClose.set(true)
              push(out, close)
            case other => push(out, other)
          }
        }
        def onPull(): Unit = pull(in)
        setHandlers(in, out, this)
      }
    })

    // Encode outgoing ExtendedMessages into Netty WebSocket frames.
    val messagesToFrames = Flow[ExtendedMessage].map {
      case SimpleMessage(TextMessage(data), finalFragment) => new TextWebSocketFrame(finalFragment, 0, data)
      case SimpleMessage(BinaryMessage(data), finalFragment) => new BinaryWebSocketFrame(finalFragment, 0, Unpooled.wrappedBuffer(data.asByteBuffer))
      case SimpleMessage(PingMessage(data), finalFragment) => new PingWebSocketFrame(finalFragment, 0, Unpooled.wrappedBuffer(data.asByteBuffer))
      case SimpleMessage(PongMessage(data), finalFragment) => new PongWebSocketFrame(finalFragment, 0, Unpooled.wrappedBuffer(data.asByteBuffer))
      case SimpleMessage(CloseMessage(statusCode, reason), finalFragment) => new CloseWebSocketFrame(finalFragment, 0, statusCode.getOrElse(CloseCodes.NoStatus), reason)
      case ContinuationMessage(data, finalFragment) => new ContinuationWebSocketFrame(finalFragment, 0, Unpooled.wrappedBuffer(data.asByteBuffer))
    }

    // Decode incoming frames into ExtendedMessages, releasing each frame's buffer
    // after its contents have been copied out.
    val framesToMessages = Flow[WebSocketFrame].map { frame =>
      val message = frame match {
        case text: TextWebSocketFrame => SimpleMessage(TextMessage(text.text()), text.isFinalFragment)
        case binary: BinaryWebSocketFrame => SimpleMessage(BinaryMessage(toByteString(binary)), binary.isFinalFragment)
        case ping: PingWebSocketFrame => SimpleMessage(PingMessage(toByteString(ping)), ping.isFinalFragment)
        case pong: PongWebSocketFrame => SimpleMessage(PongMessage(toByteString(pong)), pong.isFinalFragment)
        case close: CloseWebSocketFrame => SimpleMessage(CloseMessage(Some(close.statusCode()), close.reasonText()), close.isFinalFragment)
        case continuation: ContinuationWebSocketFrame => ContinuationMessage(toByteString(continuation), continuation.isFinalFragment)
      }
      ReferenceCountUtil.release(frame)
      message
    }

    messagesToFrames via captureClientClose via Flow.fromGraph(GraphDSL.create[FlowShape[WebSocketFrame, WebSocketFrame]]() { implicit b =>
      import GraphDSL.Implicits._

      val broadcast = b.add(Broadcast[WebSocketFrame](2))
      val merge = b.add(Merge[WebSocketFrame](2, eagerComplete = true))

      // Feed a server-initiated close frame back into the outgoing stream so the
      // close handshake is completed; everything else on this leg is dropped.
      val handleServerClose = Flow[WebSocketFrame].filter { frame =>
        if (frame.isInstanceOf[CloseWebSocketFrame] && !clientInitiatedClose.get()) {
          serverInitiatedClose.set(true)
          true
        } else {
          // If we're going to drop it, we need to release it first
          ReferenceCountUtil.release(frame)
          false
        }
      }

      // Completes the `disconnected` promise when the connection finishes, treating
      // a failure after a server-initiated close as a normal termination.
      val handleConnectionTerminated = Flow[WebSocketFrame].via(new GraphStage[FlowShape[WebSocketFrame, WebSocketFrame]] {
        val in = Inlet[WebSocketFrame]("WebSocketFrame.in")
        val out = Outlet[WebSocketFrame]("WebSocketFrame.out")
        val shape: FlowShape[WebSocketFrame, WebSocketFrame] = FlowShape.of(in, out)
        def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler {
          def onPush(): Unit = {
            push(out, grab(in))
          }
          override def onUpstreamFinish(): Unit = {
            disconnected.trySuccess(())
            super.onUpstreamFinish()
          }
          override def onUpstreamFailure(cause: Throwable): Unit = {
            if (serverInitiatedClose.get()) {
              // Expected after the server closed the socket: complete cleanly.
              disconnected.trySuccess(())
              completeStage()
            } else {
              disconnected.tryFailure(cause)
              fail(out, cause)
            }
          }
          def onPull(): Unit = pull(in)
          setHandlers(in, out, this)
        }
      })

      /**
       * Since we've got two consumers of the messages when we broadcast, we need to ensure that they get retained for each.
       */
      val retainForBroadcast = Flow[WebSocketFrame].map { frame =>
        ReferenceCountUtil.retain(frame)
        frame
      }

      merge.out ~> clientConnection ~> handleConnectionTerminated ~> retainForBroadcast ~> broadcast.in
      merge.in(0) <~ handleServerClose <~ broadcast.out(0)
      FlowShape(merge.in(1), broadcast.out(1))
    }) via framesToMessages
  }

  // Copies a Netty buffer's readable bytes into an immutable Akka ByteString.
  def toByteString(data: ByteBufHolder) = {
    val builder = ByteString.newBuilder
    data.content().readBytes(builder.asOutputStream, data.content().readableBytes())
    val bytes = builder.result()
    bytes
  }

  override def exceptionCaught(ctx: ChannelHandlerContext, e: Throwable) {
    // A failure after the server closed the connection is expected; otherwise
    // surface the error through the `disconnected` promise.
    if (serverInitiatedClose.get()) {
      disconnected.trySuccess(())
    } else {
      disconnected.tryFailure(e)
    }
    ctx.channel.close()
    ctx.fireExceptionCaught(e)
  }

  override def channelInactive(ctx: ChannelHandlerContext) = {
    // Channel went away without an error: a no-op if already completed.
    disconnected.trySuccess(())
  }
}
/**
 * Signals a WebSocket handshake or protocol failure (e.g. an unexpected HTTP
 * response or message type received from the server).
 */
class WebSocketException(s: String, th: Throwable) extends java.io.IOException(s, th) {
  /** Creates an exception without an underlying cause. */
  def this(s: String) = this(s, null)
}
}
| aradchykov/playframework | framework/src/play-integration-test/src/test/scala/play/it/http/websocket/WebSocketClient.scala | Scala | apache-2.0 | 11,759 |
package nl.cowbird.sparkbenchmark
import nl.cowbird.streamingbenchmarkcommon._
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming._
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.codehaus.jettison.json.JSONObject
/**
* Created by gdibernardo on 26/05/2017.
*/
object Consumer {

  val CACHED_STREAM_URI = "s3n://emr-cluster-spark-bucket/cache_stream.txt"
  val CHECKPOINT_URI = "s3n://emr-cluster-spark-bucket/checkpoint/"

  /**
   * State-update function for `mapWithState`: appends the incoming message to the
   * per-key stream state and emits the completed state once the expected number
   * of values has been collected, or the partial state when the key times out.
   *
   * @return Some(state) when the stream is complete or timing out, None otherwise.
   */
  def updateMessages(key: String,
                     value: Option[Message],
                     state: State[StreamState]): Option[StreamState] = {

    def updateMessagesStream(newMessage: Message): Option[StreamState] = {
      val currentState = state.getOption().getOrElse(new StreamState(newMessage.getId))
      currentState.appendMessage(newMessage)
      if (currentState.size() >= newMessage.getValues) {
        // All expected values have arrived: mark complete, clear state, emit it.
        currentState.isReadyForReduction = true
        state.remove()
        Some(currentState)
      } else {
        state.update(currentState)
        None
      }
    }

    value match {
      case Some(newMessage) => updateMessagesStream(newMessage)
      case _ if state.isTimingOut() => state.getOption()
      // Fix: the match was previously non-exhaustive -- a None value while not
      // timing out would have thrown a MatchError.
      case _ => None
    }
  }

  /**
   * Builds a ResultMessage for a completed stream, tagging it with the operator
   * name and the elapsed time since the first message was ingested.
   */
  private def reductionResult(stream: StreamState, operator: String, resultValue: Double): ResultMessage = {
    val deltaTime = System.currentTimeMillis() - stream.firstTimestampInStream()
    new ResultMessage(stream.getId, resultValue, deltaTime, operator)
  }

  /** Reduces the stream to the arithmetic mean of its payloads. */
  def applyMeanReduction(stream: StreamState): ResultMessage =
    reductionResult(stream, "MEAN", stream.sum() / stream.size())

  /** Reduces the stream to the maximum payload. */
  def applyMaxReduction(stream: StreamState): ResultMessage =
    reductionResult(stream, "MAX", stream.max())

  /** Reduces the stream to the minimum payload. */
  def applyMinReduction(stream: StreamState): ResultMessage =
    reductionResult(stream, "MIN", stream.min())

  /** Reduces the stream to the sum of its payloads. */
  def applySumReduction(stream: StreamState): ResultMessage =
    reductionResult(stream, "SUM", stream.sum())

  /**
   * Applies the reduction operator recorded in the stream, if the stream is
   * complete. Returns None for incomplete streams and unknown operators.
   */
  def applyReduction(stream: StreamState): Option[ResultMessage] = {
    if (!stream.isReadyForReduction) {
      None
    } else {
      stream.reductionOperator() match {
        case "MEAN" => Some(applyMeanReduction(stream))
        case "MAX" => Some(applyMaxReduction(stream))
        case "MIN" => Some(applyMinReduction(stream))
        case "SUM" => Some(applySumReduction(stream))
        case _ => None
      }
    }
  }

  def main(args: Array[String]): Unit = {
    if (args.length < 3) {
      System.exit(1)
    }

    val broker = args(0)
    val inputTopic = args(1)
    val outputTopic = args(2)

    // An optional fourth argument enables a join against a second Kafka topic.
    val joinTopic = if (args.length == 4) Some(args(3)) else None

    val sparkConf = new SparkConf().setAppName("SparkConsumer")
    val streamingContext = new StreamingContext(sparkConf, Seconds(1))
    streamingContext.checkpoint(CHECKPOINT_URI)

    // Per-key state with a 5-minute idle timeout.
    val stateSpec = StateSpec.function(updateMessages _).timeout(Minutes(5))

    val kafkaParameters = Map[String, Object](
      "bootstrap.servers" -> broker,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "use_a_separate_group_id_for_each_stream",
      "auto.offset.reset" -> "latest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val messageStream = KafkaUtils.createDirectStream[String, String](
      streamingContext,
      PreferConsistent,
      Subscribe[String, String](Array(inputTopic), kafkaParameters)
    )

    // Parse each JSON record into an (id, Message) pair keyed by stream id.
    val messages = messageStream.map(message => {
      val jsonMessage = new JSONObject(message.value())
      (jsonMessage.getString("id"), new Message(jsonMessage.getString("id"), message.timestamp(), System.currentTimeMillis(), jsonMessage.getDouble("payload"), jsonMessage.getString("reduction_mode"), jsonMessage.getInt("values")))
    })

    // When a join topic is configured, average the payloads of the joined pairs;
    // otherwise pass the parsed messages through unchanged.
    val readyForMapMessages = joinTopic match {
      case Some(topic) =>
        val joinedMessageStream = KafkaUtils.createDirectStream[String, String](
          streamingContext,
          PreferConsistent,
          Subscribe[String, String](Array(topic), kafkaParameters)).map(line => {
          val jsonMessage = new JSONObject(line.value())
          (jsonMessage.getString("id"), new Message(jsonMessage.getString("id"), System.currentTimeMillis(), System.currentTimeMillis(), jsonMessage.getDouble("payload"), "-", 10000))
        })
        messages.join(joinedMessageStream).map(element => {
          val firstTupleMessage = element._2._1
          val message = Message.initFromMessage(firstTupleMessage)
          message.setPayload((message.getPayload + element._2._2.getPayload) / 2)
          /* We can add some kind of reduction. */
          (element._1, message)
        })
      case None => messages
    }

    val messagesWithState = readyForMapMessages.mapWithState(stateSpec)
    // Keep only the completed stream states emitted by the state function.
    val readyForReductionMessages = messagesWithState.filter(!_.isEmpty).map(_.get).filter(_.isReadyForReduction)

    val defaults = ClientProducer.defaultProperties()
    defaults.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, broker)

    readyForReductionMessages.foreachRDD(rdd => {
      rdd.foreachPartition(partition => {
        // One producer per partition to amortise connection setup.
        val producer = new KafkaProducer[String, String](defaults)
        partition.foreach(element => {
          applyReduction(element).foreach(unwrappedResult => {
            val jsonPayload = new JSONObject()
            jsonPayload.put("id", unwrappedResult.getId)
            jsonPayload.put("result_value", unwrappedResult.getResultValue)
            jsonPayload.put("processing_time", unwrappedResult.getProcessingTime)
            val message = new ProducerRecord[String, String](outputTopic, jsonPayload.toString)
            producer.send(message)
          })
        })
        producer.close()
      })
    })

    /* Start the streaming context. */
    streamingContext.start()
    streamingContext.awaitTermination()
  }
}
| gdibernardo/streaming-engines-benchmark | spark-streaming/sparkbenchmark-kafka010/src/main/scala/nl/cowbird/sparkbenchmark/Consumer.scala | Scala | apache-2.0 | 7,095 |
package info.fotm.clustering.implementations
import info.fotm.clustering.Clusterer
import scala.util.Random
/**
 * A baseline clusterer that ignores feature similarity entirely: it shuffles the
 * input at random and slices it into consecutive groups of the requested size.
 */
class RandomClusterer extends Clusterer {
  import Clusterer._

  val rng = new Random()

  // Note: the final group may hold fewer than `groupSize` elements when the
  // input size is not an exact multiple of the group size.
  override def clusterize(input: Cluster, groupSize: Int): Set[Cluster] = {
    val shuffled = rng.shuffle(input)
    shuffled.grouped(groupSize).toSet
  }
}
| Groz/fotm-info | core/src/main/scala/info/fotm/clustering/implementations/RandomClusterer.scala | Scala | mit | 324 |
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._ // for implicit conversations
//import org.apache.spark.sql._
object SQL01 {

  /**
   * Schema for one row of the Northwind employees CSV. The field names become
   * DataFrame column names via reflection, so they must not be renamed.
   */
  case class Employee(EmployeeID : Int,
    LastName : String, FirstName : String, Title : String,
    BirthDate : String, HireDate : String,
    City : String, State : String, Zip : String, Country : String,
    ReportsTo : String)

  def main(args: Array[String]): Unit = {
    val sc = new SparkContext("local", "Chapter 7")
    println(s"Running Spark Version ${sc.version}")

    val sqlContext = new org.apache.spark.sql.SQLContext(sc)
    import sqlContext.implicits._ // enables .toDF() on RDDs of case classes

    val employeeFile = sc.textFile("/Volumes/sdxc-01/fdps-vii/data/NW-Employees-NoHdr.csv")
    println("Employee File has %d Lines.".format(employeeFile.count()))

    // Parse each CSV line into an Employee. Assumes well-formed rows with 11
    // fields and no quoted/embedded commas -- TODO confirm for real data.
    val employees = employeeFile.map(_.split(",")).
      map(e => Employee( e(0).trim.toInt,
        e(1), e(2), e(3),
        e(4), e(5),
        e(6), e(7), e(8), e(9), e(10)))
    println(employees.count)

    employees.toDF().registerTempTable("Employees")

    // Fix: use a separate immutable val per query instead of reusing a mutable var.
    val allEmployees = sqlContext.sql("SELECT * from Employees")
    allEmployees.foreach(println)

    val waEmployees = sqlContext.sql("SELECT * from Employees WHERE State = 'WA'")
    waEmployees.foreach(println)

    System.out.println("** Done **")
  }
}
package com.twitter.zipkin.config
import com.sun.net.httpserver.HttpExchange
import com.twitter.ostrich.admin.CustomHttpHandler
import com.twitter.zipkin.config.sampler.AdjustableRateConfig
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSuite, Matchers}
/**
* Test endpoints for getting and setting configurations for sample rate and storage request rate
*/
/**
 * Verifies the admin endpoints used to read and update the sample rate
 * configuration.
 */
class ConfigRequestHandlerSpec extends FunSuite with Matchers with MockitoSugar {

  val sampleRateConfig = mock[AdjustableRateConfig]
  val exchange = mock[HttpExchange]
  val customHttpHandler = mock[CustomHttpHandler]

  // Route rendering through a mock so body and status can be verified.
  val handler = new ConfigRequestHandler(sampleRateConfig) {
    override def render(body: String, exchange: HttpExchange, code: Int) {
      customHttpHandler.render(body, exchange, code)
    }
  }

  test("sampleRate get") {
    when(exchange.getRequestMethod).thenReturn("GET")
    when(sampleRateConfig.get).thenReturn(0.5)

    handler.handle(exchange, List("config", "sampleRate"), List.empty[(String, String)])

    verify(customHttpHandler).render("0.5", exchange, 200)
  }

  test("sampleRate set") {
    when(exchange.getRequestMethod).thenReturn("POST")

    handler.handle(exchange, List("config", "sampleRate"), List(("value", "0.3")))

    verify(sampleRateConfig).set(0.3)
    verify(customHttpHandler).render("success", exchange, 200)
  }
}
| srijs/zipkin | zipkin-collector-service/src/test/scala/com/twitter/zipkin/config/ConfigRequestHandlerSpec.scala | Scala | apache-2.0 | 1,384 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.optim.aggregator
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.internal.Logging
import org.apache.spark.ml.feature.Instance
import org.apache.spark.ml.linalg.{DenseVector, Vector}
import org.apache.spark.mllib.util.MLUtils
/**
* LogisticAggregator computes the gradient and loss for binary or multinomial logistic (softmax)
* loss function, as used in classification for instances in sparse or dense vector in an online
* fashion.
*
* Two LogisticAggregators can be merged together to have a summary of loss and gradient of
* the corresponding joint dataset.
*
* For improving the convergence rate during the optimization process and also to prevent against
* features with very large variances exerting an overly large influence during model training,
* packages like R's GLMNET perform the scaling to unit variance and remove the mean in order to
* reduce the condition number. The model is then trained in this scaled space, but returns the
* coefficients in the original scale. See page 9 in
* http://cran.r-project.org/web/packages/glmnet/glmnet.pdf
*
* However, we don't want to apply the [[org.apache.spark.ml.feature.StandardScaler]] on the
* training dataset, and then cache the standardized dataset since it will create a lot of overhead.
* As a result, we perform the scaling implicitly when we compute the objective function (though
* we do not subtract the mean).
*
* Note that there is a difference between multinomial (softmax) and binary loss. The binary case
* uses one outcome class as a "pivot" and regresses the other class against the pivot. In the
* multinomial case, the softmax loss function is used to model each class probability
* independently. Using softmax loss produces `K` sets of coefficients, while using a pivot class
* produces `K - 1` sets of coefficients (a single coefficient vector in the binary case). In the
* binary case, we can say that the coefficients are shared between the positive and negative
* classes. When regularization is applied, multinomial (softmax) loss will produce a result
* different from binary loss since the positive and negative don't share the coefficients while the
* binary regression shares the coefficients between positive and negative.
*
* The following is a mathematical derivation for the multinomial (softmax) loss.
*
* The probability of the multinomial outcome $y$ taking on any of the K possible outcomes is:
*
* <blockquote>
* $$
* P(y_i=0|\\vec{x}_i, \\beta) = \\frac{e^{\\vec{x}_i^T \\vec{\\beta}_0}}{\\sum_{k=0}^{K-1}
* e^{\\vec{x}_i^T \\vec{\\beta}_k}} \\\\
* P(y_i=1|\\vec{x}_i, \\beta) = \\frac{e^{\\vec{x}_i^T \\vec{\\beta}_1}}{\\sum_{k=0}^{K-1}
* e^{\\vec{x}_i^T \\vec{\\beta}_k}}\\\\
* P(y_i=K-1|\\vec{x}_i, \\beta) = \\frac{e^{\\vec{x}_i^T \\vec{\\beta}_{K-1}}\\,}{\\sum_{k=0}^{K-1}
* e^{\\vec{x}_i^T \\vec{\\beta}_k}}
* $$
* </blockquote>
*
* The model coefficients $\\beta = (\\beta_0, \\beta_1, \\beta_2, ..., \\beta_{K-1})$ become a matrix
* which has dimension of $K \\times (N+1)$ if the intercepts are added. If the intercepts are not
* added, the dimension will be $K \\times N$.
*
* Note that the coefficients in the model above lack identifiability. That is, any constant scalar
* can be added to all of the coefficients and the probabilities remain the same.
*
* <blockquote>
* $$
* \\begin{align}
* \\frac{e^{\\vec{x}_i^T \\left(\\vec{\\beta}_0 + \\vec{c}\\right)}}{\\sum_{k=0}^{K-1}
* e^{\\vec{x}_i^T \\left(\\vec{\\beta}_k + \\vec{c}\\right)}}
* = \\frac{e^{\\vec{x}_i^T \\vec{\\beta}_0}e^{\\vec{x}_i^T \\vec{c}}\\,}{e^{\\vec{x}_i^T \\vec{c}}
* \\sum_{k=0}^{K-1} e^{\\vec{x}_i^T \\vec{\\beta}_k}}
* = \\frac{e^{\\vec{x}_i^T \\vec{\\beta}_0}}{\\sum_{k=0}^{K-1} e^{\\vec{x}_i^T \\vec{\\beta}_k}}
* \\end{align}
* $$
* </blockquote>
*
* However, when regularization is added to the loss function, the coefficients are indeed
* identifiable because there is only one set of coefficients which minimizes the regularization
* term. When no regularization is applied, we choose the coefficients with the minimum L2
* penalty for consistency and reproducibility. For further discussion see:
*
* Friedman, et al. "Regularization Paths for Generalized Linear Models via Coordinate Descent"
*
* The loss of objective function for a single instance of data (we do not include the
* regularization term here for simplicity) can be written as
*
* <blockquote>
* $$
* \\begin{align}
* \\ell\\left(\\beta, x_i\\right) &= -log{P\\left(y_i \\middle| \\vec{x}_i, \\beta\\right)} \\\\
* &= log\\left(\\sum_{k=0}^{K-1}e^{\\vec{x}_i^T \\vec{\\beta}_k}\\right) - \\vec{x}_i^T \\vec{\\beta}_y\\\\
* &= log\\left(\\sum_{k=0}^{K-1} e^{margins_k}\\right) - margins_y
* \\end{align}
* $$
* </blockquote>
*
* where ${margins}_k = \\vec{x}_i^T \\vec{\\beta}_k$.
*
* For optimization, we have to calculate the first derivative of the loss function, and a simple
* calculation shows that
*
* <blockquote>
* $$
* \\begin{align}
* \\frac{\\partial \\ell(\\beta, \\vec{x}_i, w_i)}{\\partial \\beta_{j, k}}
* &= x_{i,j} \\cdot w_i \\cdot \\left(\\frac{e^{\\vec{x}_i \\cdot \\vec{\\beta}_k}}{\\sum_{k'=0}^{K-1}
* e^{\\vec{x}_i \\cdot \\vec{\\beta}_{k'}}\\,} - I_{y=k}\\right) \\\\
* &= x_{i, j} \\cdot w_i \\cdot multiplier_k
* \\end{align}
* $$
* </blockquote>
*
* where $w_i$ is the sample weight, $I_{y=k}$ is an indicator function
*
* <blockquote>
* $$
* I_{y=k} = \\begin{cases}
* 1 & y = k \\\\
* 0 & else
* \\end{cases}
* $$
* </blockquote>
*
* and
*
* <blockquote>
* $$
* multiplier_k = \\left(\\frac{e^{\\vec{x}_i \\cdot \\vec{\\beta}_k}}{\\sum_{k=0}^{K-1}
* e^{\\vec{x}_i \\cdot \\vec{\\beta}_k}} - I_{y=k}\\right)
* $$
* </blockquote>
*
* If any of margins is larger than 709.78, the numerical computation of multiplier and loss
* function will suffer from arithmetic overflow. This issue occurs when there are outliers in
* data which are far away from the hyperplane, and this will cause the failing of training once
* infinity is introduced. Note that this is only a concern when max(margins) > 0.
*
* Fortunately, when max(margins) = maxMargin > 0, the loss function and the multiplier can
* easily be rewritten into the following equivalent numerically stable formula.
*
* <blockquote>
* $$
* \\ell\\left(\\beta, x\\right) = log\\left(\\sum_{k=0}^{K-1} e^{margins_k - maxMargin}\\right) -
* margins_{y} + maxMargin
* $$
* </blockquote>
*
* Note that each term, $(margins_k - maxMargin)$ in the exponential is no greater than zero; as a
* result, overflow will not happen with this formula.
*
* For $multiplier$, a similar trick can be applied as the following,
*
* <blockquote>
* $$
* multiplier_k = \\left(\\frac{e^{\\vec{x}_i \\cdot \\vec{\\beta}_k - maxMargin}}{\\sum_{k'=0}^{K-1}
* e^{\\vec{x}_i \\cdot \\vec{\\beta}_{k'} - maxMargin}} - I_{y=k}\\right)
* $$
* </blockquote>
*
*
* @param bcCoefficients The broadcast coefficients corresponding to the features.
* @param bcFeaturesStd The broadcast standard deviation values of the features.
* @param numClasses the number of possible outcomes for k classes classification problem in
* Multinomial Logistic Regression.
* @param fitIntercept Whether to fit an intercept term.
* @param multinomial Whether to use multinomial (softmax) or binary loss
* @note In order to avoid unnecessary computation during calculation of the gradient updates
* we lay out the coefficients in column major order during training. This allows us to
* perform feature standardization once, while still retaining sequential memory access
* for speed. We convert back to row major order when we create the model,
* since this form is optimal for the matrix operations used for prediction.
*/
private[ml] class LogisticAggregator(
    bcFeaturesStd: Broadcast[Array[Double]],
    numClasses: Int,
    fitIntercept: Boolean,
    multinomial: Boolean)(bcCoefficients: Broadcast[Vector])
  extends DifferentiableLossAggregator[Instance, LogisticAggregator] with Logging {

  private val numFeatures = bcFeaturesStd.value.length
  // One extra coefficient slot (per class) when an intercept is fitted.
  private val numFeaturesPlusIntercept = if (fitIntercept) numFeatures + 1 else numFeatures
  private val coefficientSize = bcCoefficients.value.size
  protected override val dim: Int = coefficientSize

  // Validate the coefficient layout: K sets for multinomial, a single set for binary.
  if (multinomial) {
    require(numClasses == coefficientSize / numFeaturesPlusIntercept, s"The number of " +
      s"coefficients should be ${numClasses * numFeaturesPlusIntercept} but was $coefficientSize")
  } else {
    require(coefficientSize == numFeaturesPlusIntercept, s"Expected $numFeaturesPlusIntercept " +
      s"coefficients but got $coefficientSize")
    require(numClasses == 1 || numClasses == 2, s"Binary logistic aggregator requires numClasses " +
      s"in {1, 2} but found $numClasses.")
  }

  // Dense backing array of the coefficients; only dense vectors are supported.
  @transient private lazy val coefficientsArray: Array[Double] = bcCoefficients.value match {
    case DenseVector(values) => values
    case _ => throw new IllegalArgumentException(s"coefficients only supports dense vector but " +
      s"got type ${bcCoefficients.value.getClass}.)")
  }

  if (multinomial && numClasses <= 2) {
    logInfo(s"Multinomial logistic regression for binary classification yields separate " +
      s"coefficients for positive and negative classes. When no regularization is applied, the" +
      s"result will be effectively the same as binary logistic regression. When regularization" +
      s"is applied, multinomial loss will produce a result different from binary loss.")
  }

  /** Update gradient and loss using binary loss function. */
  private def binaryUpdateInPlace(features: Vector, weight: Double, label: Double): Unit = {

    val localFeaturesStd = bcFeaturesStd.value
    val localCoefficients = coefficientsArray
    val localGradientArray = gradientSumArray
    // Margin is computed on implicitly standardized features (value / std);
    // zero-variance features are skipped to avoid division by zero.
    val margin = - {
      var sum = 0.0
      features.foreachActive { (index, value) =>
        if (localFeaturesStd(index) != 0.0 && value != 0.0) {
          sum += localCoefficients(index) * value / localFeaturesStd(index)
        }
      }
      if (fitIntercept) sum += localCoefficients(numFeaturesPlusIntercept - 1)
      sum
    }

    val multiplier = weight * (1.0 / (1.0 + math.exp(margin)) - label)

    features.foreachActive { (index, value) =>
      if (localFeaturesStd(index) != 0.0 && value != 0.0) {
        localGradientArray(index) += multiplier * value / localFeaturesStd(index)
      }
    }

    if (fitIntercept) {
      localGradientArray(numFeaturesPlusIntercept - 1) += multiplier
    }

    if (label > 0) {
      // The following is equivalent to log(1 + exp(margin)) but more numerically stable.
      lossSum += weight * MLUtils.log1pExp(margin)
    } else {
      lossSum += weight * (MLUtils.log1pExp(margin) - margin)
    }
  }

  /** Update gradient and loss using multinomial (softmax) loss function. */
  private def multinomialUpdateInPlace(features: Vector, weight: Double, label: Double): Unit = {
    // TODO: use level 2 BLAS operations
    /*
      Note: this can still be used when numClasses = 2 for binary
      logistic regression without pivoting.
     */
    val localFeaturesStd = bcFeaturesStd.value
    val localCoefficients = coefficientsArray
    val localGradientArray = gradientSumArray

    // marginOfLabel is margins(label) in the formula
    var marginOfLabel = 0.0
    var maxMargin = Double.NegativeInfinity

    // NOTE(review): unlike the binary path, this margin loop does not skip
    // zero-std features before dividing -- confirm constant features are
    // handled upstream.
    val margins = new Array[Double](numClasses)
    features.foreachActive { (index, value) =>
      val stdValue = value / localFeaturesStd(index)
      var j = 0
      while (j < numClasses) {
        margins(j) += localCoefficients(index * numClasses + j) * stdValue
        j += 1
      }
    }
    var i = 0
    while (i < numClasses) {
      if (fitIntercept) {
        // Intercepts are stored after the feature coefficients, one per class.
        margins(i) += localCoefficients(numClasses * numFeatures + i)
      }
      if (i == label.toInt) marginOfLabel = margins(i)
      if (margins(i) > maxMargin) {
        maxMargin = margins(i)
      }
      i += 1
    }

    /**
     * When maxMargin is greater than 0, the original formula could cause overflow.
     * We address this by subtracting maxMargin from all the margins, so it's guaranteed
     * that all of the new margins will be smaller than zero to prevent arithmetic overflow.
     */
    val multipliers = new Array[Double](numClasses)
    val sum = {
      var temp = 0.0
      var i = 0
      while (i < numClasses) {
        if (maxMargin > 0) margins(i) -= maxMargin
        val exp = math.exp(margins(i))
        temp += exp
        multipliers(i) = exp
        i += 1
      }
      temp
    }

    // multiplier_k = softmax_k - I(label == k)
    margins.indices.foreach { i =>
      multipliers(i) = multipliers(i) / sum - (if (label == i) 1.0 else 0.0)
    }
    features.foreachActive { (index, value) =>
      if (localFeaturesStd(index) != 0.0 && value != 0.0) {
        val stdValue = value / localFeaturesStd(index)
        var j = 0
        while (j < numClasses) {
          localGradientArray(index * numClasses + j) += weight * multipliers(j) * stdValue
          j += 1
        }
      }
    }
    if (fitIntercept) {
      var i = 0
      while (i < numClasses) {
        localGradientArray(numFeatures * numClasses + i) += weight * multipliers(i)
        i += 1
      }
    }

    // Add maxMargin back into the loss if it was subtracted from the margins above.
    val loss = if (maxMargin > 0) {
      math.log(sum) - marginOfLabel + maxMargin
    } else {
      math.log(sum) - marginOfLabel
    }
    lossSum += weight * loss
  }

  /**
   * Add a new training instance to this LogisticAggregator, and update the loss and gradient
   * of the objective function.
   *
   * @param instance The instance of data point to be added.
   * @return This LogisticAggregator object.
   */
  def add(instance: Instance): this.type = {
    instance match { case Instance(label, weight, features) =>
      require(numFeatures == features.size, s"Dimensions mismatch when adding new instance." +
        s" Expecting $numFeatures but got ${features.size}.")
      require(weight >= 0.0, s"instance weight, $weight has to be >= 0.0")

      // Zero-weight instances contribute nothing; skip them entirely.
      if (weight == 0.0) return this

      if (multinomial) {
        multinomialUpdateInPlace(features, weight, label)
      } else {
        binaryUpdateInPlace(features, weight, label)
      }
      weightSum += weight
      this
    }
  }
}
| mike0sv/spark | mllib/src/main/scala/org/apache/spark/ml/optim/aggregator/LogisticAggregator.scala | Scala | apache-2.0 | 15,145 |
package org.airpnp.airplay.protocol
import org.airpnp.http.Response._
import scala.concurrent.ExecutionContext.Implicits.global
import org.airpnp.http.RouteHandler
import org.airpnp.http.Response
import org.airpnp.airplay.AirPlayDevice
import org.airpnp.http.Request
import org.airpnp.plist.PropertyList
import org.airpnp.plist.BinaryPropertyListDecoder
import org.airpnp.plist.Dict
import java.io.InputStream
import java.util.Properties
import java.io.InputStreamReader
import org.airpnp.Util
import scala.util.Success
import scala.util.Failure
import java.io.ByteArrayInputStream
import org.airpnp.Logging
private[protocol] object RouteHelper {
  /**
   * Answers the request with HTTP 500, using the throwable's message as the
   * body, or a fallback text when the throwable carries no message.
   */
  def internalServerError(response: Response, t: Throwable) = {
    val msg = Option(t.getMessage).getOrElse("Unknown error")
    response.respond(withText(msg).andStatusCode(500))
  }
}
class PhotoRoute(private val apDevice: AirPlayDevice) extends RouteHandler with Logging {

  /**
   * Handles PUT /photo: reads the raw image bytes from the request body and
   * forwards them to the AirPlay device with the requested transition.
   */
  override def handlePUT(request: Request, response: Response) = {
    // Fix: previously the Unit result of a logging-only match was bound to a
    // misleading `val assetKey`; the key is only logged, so use foreach.
    request.getHeader("X-Apple-AssetKey").headOption.foreach(key =>
      trace("Photo has asset key {}.", key))

    val transition = request.getHeader("X-Apple-Transition").headOption.getOrElse("")
    val length = request.getHeader("Content-Length").headOption.getOrElse("0").toInt

    //TODO: For now, we consume all data and then publish streams off of it. In the
    // future, do these two things concurrently!
    val data = Util.readAllBytes(request.getInputStream)
    apDevice.showPhoto(() => new ByteArrayInputStream(data), length, transition).onComplete {
      case Success(_) => response.respond(withStatus(200))
      case Failure(t) => RouteHelper.internalServerError(response, t)
    }
  }
}
class PlaybackInfoRoute(private val apDevice: AirPlayDevice) extends RouteHandler {

  /**
   * Handles GET /playback-info: combines the current scrub position and the
   * playing flag into a text plist response.
   */
  override def handleGET(request: Request, response: Response) = {
    // Kick off both device queries up front so they run concurrently.
    val fScrub = apDevice.getScrub
    val fPlaying = apDevice.isPlaying

    val combined = for {
      scrub <- fScrub
      playing <- fPlaying
    } yield (scrub, playing)

    combined.onComplete {
      case Success((scrub, playing)) =>
        val plist = new PlaybackInfo(scrub, playing).get
        response.respond(withUtf8Text(plist.toXml).andContentType(PropertyList.CT_TEXT_PLIST))
      case Failure(t) => RouteHelper.internalServerError(response, t)
    }
  }
}
class PlayRoute(private val apDevice: AirPlayDevice) extends RouteHandler {

  /**
   * Handles POST /play. The body carries the content location and start
   * position, either as a binary property list or as plain "key: value" text,
   * selected by the Content-Type header.
   */
  override def handlePOST(request: Request, response: Response) = {
    //TODO: error handling
    val ct = request.getHeader("Content-Type").head
    val (loc, pos) = ct match {
      case PropertyList.CT_BINARY_PLIST => readBinaryPropertyList(request.getInputStream)
      case _ => readTextValues(request.getInputStream)
    }
    apDevice.play(loc, pos).onComplete {
      case Success(_) => response.respond(withStatus(200))
      case Failure(t) => RouteHelper.internalServerError(response, t)
    }
  }

  // Extracts (Content-Location, Start-Position) from a binary plist body.
  // NOTE(review): assumes both keys are present -- `.get` throws otherwise.
  private def readBinaryPropertyList(inputStream: InputStream): (String, Double) = {
    val decoder = new BinaryPropertyListDecoder(inputStream)
    val plist = decoder.decode
    val d = plist.root.asInstanceOf[Dict].getValue()
    val loc = d.get("Content-Location").get.toString
    val pos = d.get("Start-Position").get.asInstanceOf[Double]
    (loc, pos)
  }

  // Extracts the same pair from an ASCII java.util.Properties-style text body.
  // Start-Position defaults to 0.0 when absent.
  private def readTextValues(inputStream: InputStream) = {
    val props = new Properties
    props.load(new InputStreamReader(inputStream, "ASCII"))
    val loc = props.get("Content-Location").toString
    val startPos = props.get("Start-Position")
    val pos = if (startPos != null) java.lang.Double.parseDouble(startPos.toString()) else 0.0d
    (loc, pos)
  }
}
class RateRoute(private val apDevice: AirPlayDevice) extends RouteHandler {

  /** Handles POST /rate?value=...: forwards the playback rate to the device. */
  override def handlePOST(request: Request, response: Response) = {
    //TODO: error checking
    val rate = java.lang.Double.parseDouble(request.getArgument("value").head)
    apDevice.setRate(rate).onComplete {
      case Success(_) => response.respond(withStatus(200))
      case Failure(t) => RouteHelper.internalServerError(response, t)
    }
  }
}
class ScrubRoute(private val apDevice: AirPlayDevice) extends RouteHandler {
  /** GET: reports the current duration and position as plain text. */
  override def handleGET(request: Request, response: Response) = {
    apDevice.getScrub.onComplete {
      case Success(scrub) =>
        response.respond(withText(s"duration: ${scrub.duration}\nposition: ${scrub.position}"))
      case Failure(t) =>
        RouteHelper.internalServerError(response, t)
    }
  }
  /** POST: seeks the device to the "position" query argument. */
  override def handlePOST(request: Request, response: Response) = {
    //TODO: error checking
    val position = java.lang.Double.parseDouble(request.getArgument("position").head)
    apDevice.setScrub(position).onComplete {
      case Failure(t) => RouteHelper.internalServerError(response, t)
      case Success(_) => response.respond(withStatus(200))
    }
  }
}
class ServerInfoRoute(private val apDevice: AirPlayDevice) extends RouteHandler {
  /** GET: describes this device (id, feature bits, model) as a text plist. */
  override def handleGET(request: Request, response: Response) = {
    //TODO: We can extract X-apple-client-name here
    val info = new ServerInfo(apDevice.getDeviceId, apDevice.getFeatures, apDevice.getModel)
    response.respond(withUtf8Text(info.get.toXml).andContentType(PropertyList.CT_TEXT_PLIST))
  }
}
class SetPropertyRoute(private val apDevice: AirPlayDevice) extends RouteHandler {
  /**
   * PUT: decodes a binary plist body of the form {"value": ...} and forwards
   * the property named by the unnamed query argument, with that value, to the
   * device. Answers 200 on success.
   */
  override def handlePUT(request: Request, response: Response) = {
    //TODO: Check CT + error handling
    val decoder = new BinaryPropertyListDecoder(request.getInputStream)
    val plist = decoder.decode
    val propName = request.getArgument("").head // unnamed arg
    // NOTE(review): `map.get("value").get` throws if the client omits "value" —
    // confirm this is acceptable or add explicit error handling.
    val map = plist.root.asInstanceOf[Dict].getValue
    apDevice.setProperty(propName, map.get("value").get).onComplete {
      case Success(_) => response.respond(withStatus(200))
      case Failure(t) => RouteHelper.internalServerError(response, t)
    }
  }
}
class SlideshowFeaturesRoute extends RouteHandler {
  /** GET: advertises the slideshow features as a text plist. */
  override def handleGET(request: Request, response: Response) = {
    val features = new SlideshowFeatures
    response.respond(withUtf8Text(features.get.toXml).andContentType(PropertyList.CT_TEXT_PLIST))
  }
}
class StopRoute(private val apDevice: AirPlayDevice) extends RouteHandler {
  /** POST: tells the device to stop playback. */
  override def handlePOST(request: Request, response: Response) = {
    // NOTE(review): the result of stop() is not awaited — we answer 200 whether
    // or not the device actually stopped. Confirm this fire-and-forget is intended.
    apDevice.stop()
    response.respond(withStatus(200))
  }
}
class ReverseRoute(apDevice: AirPlayDevice) extends RouteHandler {
override def handlePOST(request: Request, response: Response) {
//Upgrade: PTTH/1.0
//X-apple-device-id: 0x............
//Content-length: 0
//Connection: Upgrade
//X-apple-purpose: event
//User-agent: AirPlay/160.10 (Photos)
//X-apple-session-id: <guid>
//X-apple-client-name: ...
//NOTE: We cannot do anything with the connection, because Oracle's HTTP
// server doesn't allow us to do anything beyond responding with 101. If
// we only could get hold of the socket channel...
response.dontClose()
response.addHeader("Upgrade", "PTTH/1.0")
response.addHeader("Connection", "Upgrade")
response.respond(withStatus(101))
}
} | provegard/ScAirPnp | src/main/scala/org/airpnp/airplay/protocol/Routes.scala | Scala | mit | 7,315 |
package org.me.hotel
/** An immutable hotel room; check-in/check-out return updated copies. */
case class Room(number: Int, guest: Option[Guest] = None){ room =>
  /** True when nobody occupies the room. */
  def isFree(): Boolean =
    guest.isEmpty
  /** Returns a copy of this room occupied by `guest`.
    * @throws IllegalArgumentException if the room is already occupied */
  def checkin(guest: Guest): Room = {
    // `room.isFree()` reads the field, not the parameter shadowing it.
    require(room.isFree(), "Room is occupied")
    copy(guest = Some(guest))
  }
  /** Returns a copy of this room with its guest checked out.
    * @throws IllegalArgumentException if the room is already free */
  def checkout(): Room = {
    require(!isFree(), "Room is already free")
    copy(guest = None)
  }
}
| Scalera/scalatest-handson-introduction | hotel-management/src/main/scala/org/me/hotel/Room.scala | Scala | apache-2.0 | 368 |
package org.scalameter
import org.scalameter.api._
import org.scalameter.execution.JvmRunner
import org.scalatest.{FunSuite, Matchers}
abstract class MeasurerTest[V, M <: Measurer[V]] extends FunSuite with Matchers {
  /**
   * Drives `measurer` through its full lifecycle — prepareContext ->
   * beforeExecution -> measure (run via JvmRunner, presumably in a separate
   * JVM — confirm) -> afterExecution — over a single invocation of `snippet`,
   * asserts exactly one measurement was produced, and hands it to `f`.
   */
  def measureWith(measurer: M)(snippet: => Any)(f: V => Any): Any = {
    val ctx = measurer.prepareContext(Context.topLevel)
    val runner = new JvmRunner
    // No-op setup/teardown used for the measurement callbacks below.
    val dummy: Unit => Any = _ => ()
    measurer.beforeExecution(ctx)
    val result = runner.run(ctx) {
      // 1 repetition; see Measurer.measure for the exact parameter meanings.
      measurer.measure(ctx, 1, dummy, dummy, () => (), (_: Unit) => snippet).map(_.value)
    }
    measurer.afterExecution(ctx)
    result.isSuccess should === (true)
    result.get.length should === (1)
    f(result.get.head)
  }
}
| storm-enroute/scalameter | src/test/scala/org/scalameter/MeasurerTest.scala | Scala | bsd-3-clause | 723 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.streaming.dsl.javaapi.functions
import org.apache.gearpump.streaming.dsl.api.functions.MapFunction
/**
 * Maps each input element to the group it belongs to, so a grouping function
 * can be used anywhere a [[MapFunction]] is expected.
 *
 * @tparam T Input value type
 * @tparam GROUP Group value type
 */
abstract class GroupByFunction[T, GROUP] extends MapFunction[T, GROUP] {
  /** Delegates straight to [[groupBy]]. */
  override def map(t: T): GROUP = groupBy(t)
  /** Returns the group `t` is assigned to. */
  def groupBy(t: T): GROUP
}
| manuzhang/incubator-gearpump | streaming/src/main/scala/org/apache/gearpump/streaming/dsl/javaapi/functions/GroupByFunction.scala | Scala | apache-2.0 | 1,211 |
package models
import java.util.UUID
import java.time.Year
import MajorType._
case class Major(id: UUID, nameKo: String, nameEn: Option[String], majorType: MajorType, year: Year) {
  //TODO move this to other appropriate place
  /** Required credits for this major type; only defined for curricula up to
    * 2014 — later years (and a few types) require 0. */
  def required: Int =
    if (year.getValue > 2014) 0
    else majorType match {
      case FirstMajor => 54
      case SecondMajor => 54
      case FirstMajorMinor => 75
      case Minor => 21
      case MultipleMajor => 0
      case TeacherCourse => 0
      case LiberalArts => 26
      case FreeCourse => 0
      case PracticalFL => 4
    }
}
| yoo-haemin/hufs-planner | project/app/models/Major.scala | Scala | agpl-3.0 | 604 |
/*
* Copyright (C) 2009-2020 Lightbend Inc. <https://www.lightbend.com>
*/
package akka.pattern
import scala.concurrent.{ ExecutionContextExecutor, Future }
import akka.testkit.Await
import scala.concurrent.duration._
import language.postfixOps
import akka.actor.Scheduler
import akka.testkit.AkkaSpec
class RetrySpec extends AkkaSpec with RetrySupport {
  implicit val ec: ExecutionContextExecutor = system.dispatcher
  implicit val scheduler: Scheduler = system.scheduler
  "pattern.retry" must {
    "run a successful Future immediately" in {
      val retried = retry(() => Future.successful(5), 5, 1 second)
      within(3 seconds) {
        Await.result(retried, remaining) should ===(5)
      }
    }
    "run a successful Future only once" in {
      @volatile var counter = 0
      val retried = retry(
        () =>
          Future.successful({
            counter += 1
            counter
          }),
        5,
        1 second)
      within(3 seconds) {
        Await.result(retried, remaining) should ===(1)
      }
    }
    "eventually return a failure for a Future that will never succeed" in {
      val retried = retry(() => Future.failed(new IllegalStateException("Mexico")), 5, 100 milliseconds)
      within(3 second) {
        intercept[IllegalStateException] { Await.result(retried, remaining) }.getMessage should ===("Mexico")
      }
    }
    "return a success for a Future that succeeds eventually" in {
      @volatile var failCount = 0
      def attempt() = {
        if (failCount < 5) {
          failCount += 1
          Future.failed(new IllegalStateException(failCount.toString))
        } else Future.successful(5)
      }
      val retried = retry(() => attempt(), 10, 100 milliseconds)
      within(3 seconds) {
        Await.result(retried, remaining) should ===(5)
      }
    }
    // Fixed typo in the two descriptions below: "retires" -> "retries".
    "return a failure for a Future that would have succeeded but retries were exhausted" in {
      @volatile var failCount = 0
      def attempt() = {
        if (failCount < 10) {
          failCount += 1
          Future.failed(new IllegalStateException(failCount.toString))
        } else Future.successful(5)
      }
      val retried = retry(() => attempt(), 5, 100 milliseconds)
      within(3 seconds) {
        intercept[IllegalStateException] { Await.result(retried, remaining) }.getMessage should ===("6")
      }
    }
    "return a failure for a Future that would have succeeded but retries were exhausted with delay function" in {
      @volatile var failCount = 0
      @volatile var attemptedCount = 0
      def attempt() = {
        if (failCount < 10) {
          failCount += 1
          Future.failed(new IllegalStateException(failCount.toString))
        } else Future.successful(5)
      }
      val retried = retry(() => attempt(), 5, attempted => {
        attemptedCount = attempted
        Some(100.milliseconds * attempted)
      })
      // Was `within(30000000 seconds)` — an obvious debugging leftover. The total
      // backoff here is 100ms * (1+2+3+4+5) = 1.5s, so 3 seconds is ample and
      // consistent with the other tests.
      within(3 seconds) {
        intercept[IllegalStateException] { Await.result(retried, remaining) }.getMessage should ===("6")
        attemptedCount shouldBe 5
      }
    }
    //    "retry can be attempted without any delay" in {
    //      @volatile var failCount = 0
    //      def attempt() = {
    //        if (failCount < 1000) {
    //          failCount += 1
    //          Future.failed(new IllegalStateException(failCount.toString))
    //        } else Future.successful(1)
    //      }
    //      val start = System.currentTimeMillis()
    //      val retried = retry(() => attempt(), 999)
    //      within(1 seconds) {
    //        intercept[IllegalStateException] {
    //          Await.result(retried, remaining)
    //        }.getMessage should ===("1000")
    //        val elapse = System.currentTimeMillis() - start
    //        elapse <= 100 shouldBe true
    //      }
    //    }
  }
}
| unicredit/akka.js | akka-js-actor-tests/js/src/test/scala/akka/pattern/RetrySpec.scala | Scala | bsd-3-clause | 3,790 |
package com.alanjz.meerkat.pieces
import com.alanjz.meerkat.pieces.Color._
/**
 * The base class of all colored pieces.
 *
 * Sealed so that matches over pieces are exhaustiveness-checked; all concrete
 * pieces live in the [[Piece]] companion object.
 */
sealed trait Piece {
  /**
   * The name of this piece.
   */
  val name : String
  /**
   * The color of this piece.
   */
  val color : Color
}
object Piece {
  /**
   * All pieces which can be promoted to.
   * (Deliberately not extended by [[Pawn]] and [[King]].)
   */
  sealed trait Promoted
  /**
   * A pawn.
   * @param color a color.
   */
  case class Pawn(val color : Color) extends Piece {
    val name = "pawn"
  }
  /**
   * A knight.
   * @param color a color.
   */
  case class Knight(val color : Color) extends Piece with Promoted {
    val name = "knight"
  }
  /**
   * A bishop.
   * @param color a color.
   */
  case class Bishop(val color : Color) extends Piece with Promoted {
    val name = "bishop"
  }
  /**
   * A rook.
   * @param color a color.
   */
  case class Rook(val color : Color) extends Piece with Promoted {
    val name = "rook"
  }
  /**
   * A queen.
   * @param color a color.
   */
  case class Queen(color : Color) extends Piece with Promoted {
    val name = "queen"
  }
  /**
   * A king.
   * @param color a color.
   */
  case class King(color : Color) extends Piece {
    val name = "king"
  }
  /**
   * Converts a piece to its character class: white pieces map to uppercase
   * letters and black pieces to lowercase ones (the usual FEN letters).
   * @param lhs a piece.
   * @return a character representing the colored piece.
   */
  implicit def Piece2Char(lhs : Piece) : Char = lhs match {
    case Pawn(White) => 'P'
    case Knight(White) => 'N'
    case Bishop(White) => 'B'
    case Rook(White) => 'R'
    case Queen(White) => 'Q'
    case King(White) => 'K'
    case Pawn(Black) => 'p'
    case Knight(Black) => 'n'
    case Bishop(Black) => 'b'
    case Rook(Black) => 'r'
    case Queen(Black) => 'q'
    case King(Black) => 'k'
  }
}
/**
 * Standard piece color base class.
 */
sealed abstract class Color(val name : String) {
  /**
   * A reference to the other team.
   */
  val other : Color
  /**
   * Gets the other color.
   * NOTE(review): this is declared `implicit`, which also makes it an implicit
   * conversion Color => Color — presumably only the `!color` operator use is
   * intended; confirm before relying on the conversion.
   * @see other
   * @return the other color.
   */
  implicit def unary_! : Color = other
  /**
   * Gets the name of this color as a string.
   * @return the color as a string.
   */
  override def toString = name
}
/**
 * Standard piece color values.
 */
object Color {
  /**
   * Implicit casts a color to an integer, giving each side a signed
   * multiplier (e.g. for direction or evaluation sign — confirm at call sites).
   * @param color a color.
   * @return `1` if White; `-1` if black.
   */
  implicit def Color2Int(color : Color) : Int = color match {
    case White => 1
    case Black => -1
  }
  /**
   * The standard color `White`.
   */
  case object White extends Color("white") {
    val other = Black
  }
  /**
   * The standard color `Black`.
   */
  case object Black extends Color("black") {
    val other = White
  }
}
| spacenut/meerkat-chess | src/com/alanjz/meerkat/pieces/Piece.scala | Scala | gpl-2.0 | 2,735 |
package de.htwg.scala.akkaStreams
import akka.{Done, NotUsed}
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._
import scala.concurrent.Future
object SimpleStreamExample {
def main(args: Array[String]): Unit = {
implicit val system = ActorSystem("Sys")
implicit val materializer = ActorMaterializer()
val numbers = 1 to 1000
//We create a Source that will iterate over the number sequence
val numberSource: Source[Int, NotUsed] = Source.fromIterator(() => numbers.iterator)
//Only let pass even numbers through the Flow
val isEvenFlow: Flow[Int, Int, NotUsed] = Flow[Int].filter((num) => num % 2 == 0)
//Create a Source of even random numbers by combining the random number Source with the even number filter Flow
val evenNumbersSource: Source[Int, NotUsed] = numberSource.via(isEvenFlow)
//A Sink that will write its input onto the console
val consoleSink: Sink[Int, Future[Done]] = Sink.foreach[Int](println)
//Connect the Source with the Sink and run it using the materializer
evenNumbersSource.runWith(consoleSink)
}
} | markoboger/de.htwg.scala.inAction | src/main/scala/de/htwg/scala/akkaStreams/SimpleNumberExample.scala | Scala | mit | 1,141 |
package imperial
package mixins
/**
 * The mixin trait for classes that create metrics: implementors only provide
 * the [[Armoury]] against which the measures they create are registered, e.g.:
 *
 * {{{
 * trait Instrumented extends imperial.mixins.Instrumented {
 *   val armoury = Application.armoury   // application-wide armoury
 * }
 * }}}
 *
 * NOTE(review): the previous scaladoc here described a `metricRegistry` /
 * `InstrumentedBuilder` API (apparently copied from metrics-scala) that does
 * not exist on this trait; it has been rewritten against the actual member.
 */
trait Instrumented {
  /** The Armoury where created measures are registered. */
  def armoury: Armoury
}
| thecoda/scala-imperial | src/main/scala/imperial/mixins/Instrumented.scala | Scala | apache-2.0 | 1,075 |
package org.openurp.edu.eams.web.dwr
import java.util.Date
import javax.servlet.http.HttpServletRequest
import org.apache.commons.collections.CollectionUtils
import org.beangle.commons.bean.comparators.PropertyComparator
import org.beangle.commons.collection.Collections
import org.beangle.data.model.dao.EntityDao
import org.beangle.data.jpa.dao.OqlBuilder
import org.beangle.commons.lang.Strings
import org.beangle.security.blueprint.Profile
import org.beangle.security.blueprint.User
import org.beangle.security.blueprint.function.FuncResource
import org.beangle.security.blueprint.model.UserProfileBean
import org.beangle.security.blueprint.service.UserToken
import org.beangle.security.core.context.SecurityContext
import org.beangle.security.web.context.HttpSessionContextFilter
import org.openurp.base.Department
import org.openurp.edu.base.Direction
import org.openurp.edu.base.Major
import org.openurp.edu.base.Project
import org.openurp.code.edu.Education
import org.openurp.edu.base.code.StdType
import org.openurp.edu.eams.web.helper.RestrictionHelper
class ProjectMajorSelect {
var entityDao: EntityDao = _
var restrictionHelper: RestrictionHelper = _
def projects(): String = null
  /**
   * Returns three parallel lists of (id, name) pairs for the given project —
   * educations, departments and student types — each intersected with what the
   * current user's profile permits for the resolved function resource (when a
   * resource name is given). Used by DWR front-end selects.
   */
  def educationAndDeparts(request: HttpServletRequest, projectId: java.lang.Integer, resourceName: String): Array[List[_]] = {
    val project = entityDao.get(classOf[Project], projectId)
    val user = getUser(request)
    var resource: FuncResource = null
    val profiles = entityDao.get(classOf[UserProfileBean], "id", request.getSession.getAttribute("security.profileId").asInstanceOf[java.lang.Long]).asInstanceOf[List[_]]
    // Strip ".method" / "!method" suffixes to obtain the bare resource name.
    var fineResourceName = resourceName
    if (Strings.isNotEmpty(resourceName)) {
      if (fineResourceName.contains(".")) fineResourceName = Strings.substringBeforeLast(resourceName,
        ".")
      if (fineResourceName.contains("!")) fineResourceName = Strings.substringBeforeLast(resourceName,
        "!")
    }
    if (Strings.isNotEmpty(fineResourceName)) {
      resource = restrictionHelper.getFuncPermissionService.getResource(fineResourceName)
    }
    // Departments: restricted to the user's allowed departments when a resource
    // was resolved; otherwise all of the project's departments.
    // NOTE(review): this leans on Beangle's Collections/Buffer API (`add`,
    // `sort`) and on these buffers satisfying the declared Array[List[_]] —
    // verify against the beangle-commons version in use.
    var departs = Collections.newBuffer[Any]
    if (null != resource) {
      val departs2 = restrictionHelper.getProperties(user, profiles, "departs", resource).asInstanceOf[List[Department]]
      for (d <- project.departments if departs2.contains(d)) departs.add(d)
    } else {
      departs = project.departments
    }
    val departInfos = Collections.newBuffer[Any]
    for (depart <- departs) {
      departInfos.add(Array(depart.id, depart.getName))
    }
    // Educations: project list intersected with the profile-allowed list.
    val educations1 = project.educations
    var educations2: List[Education] = null
    educations2 = if (null != resource) restrictionHelper.getProperties(user, profiles, "educations",
      resource).asInstanceOf[List[Education]] else educations1
    val educations = CollectionUtils.intersection(educations1, educations2).asInstanceOf[List[Education]]
    Collections.sort(educations, new PropertyComparator("code"))
    val educationInfos = Collections.newBuffer[Any]
    for (education <- educations) {
      educationInfos.add(Array(education.id, education.getName))
    }
    // Student types: same intersection scheme as educations.
    val stdTypes1 = project.getTypes
    var stdTypes2: List[StdType] = null
    stdTypes2 = if (null != resource) restrictionHelper.getProperties(user, profiles, "stdTypes", resource).asInstanceOf[List[StdType]] else stdTypes1
    val stdTypes = CollectionUtils.intersection(stdTypes1, stdTypes2).asInstanceOf[List[StdType]]
    Collections.sort(stdTypes, new PropertyComparator("code"))
    val stdTypeInfos = Collections.newBuffer[Any]
    for (stdType <- stdTypes) {
      stdTypeInfos.add(Array(stdType.id, stdType.getName))
    }
    Array(educationInfos, departInfos, stdTypeInfos)
  }
private def getUser(request: HttpServletRequest): User = {
val scontext = request.getSession.getAttribute(HttpSessionContextFilter.SECURITY_CONTEXT_KEY).asInstanceOf[SecurityContext]
if (scontext == null) {
}
val userToken = scontext.getAuthentication.getPrincipal.asInstanceOf[UserToken]
val user = entityDao.get(classOf[User], userToken.id)
user
}
def majors(projectId: java.lang.Integer, educationId: java.lang.Integer, departId: java.lang.Integer): List[_] = {
if (null == departId || projectId == null) {
return Collections.emptyList()
}
val now = new Date()
val query = OqlBuilder.from(classOf[Major], "s")
query.select("s.id, s.name, s.engName").where("s.effectiveAt<=:now", now)
.where("(s.invalidAt is null or s.invalidAt >= :now)", now)
.where("exists(from s.journals md where md.major=s and md.depart.id = :departId)", departId)
.where("s.project.id = :projectId", projectId)
if (null != educationId) {
query.where("exists(from s.educations edu where edu.id = :educationId)", educationId)
}
query.orderBy("s.name")
entityDao.search(query)
}
def directions(majorId: java.lang.Integer): List[Array[Any]] = {
if (null == majorId) {
return Collections.emptyList()
}
val now = new Date()
val query = OqlBuilder.from(classOf[Direction], "s")
query.select("s.id, s.name, s.engName").where("s.effectiveAt<=:now", now)
.where("(s.invalidAt is null or s.invalidAt >= :now)", now)
.where("s.major.id = :majorId", majorId)
query.orderBy("s.name")
entityDao.search(query)
}
  /**
   * Lists (id, name) of currently valid administrative classes for a major,
   * optionally filtered by direction and grade. The HQL is string-built but all
   * values are bound as named parameters, so it is not injectable.
   */
  def adminClasses(grade: String, directionId: java.lang.Integer, majorId: java.lang.Integer): List[Array[Any]] = {
    if (null == majorId) {
      return Collections.emptyList()
    }
    var hql = "select s.id, s.name " + "from org.openurp.edu.base.Adminclass as s" +
      " where s.effectiveAt<=:now and (s.invalidAt is null or s.invalidAt>=:now) and s.major.id=:majorId"
    if (null != directionId) {
      hql += " and s.direction.id =:directionId"
    }
    if (null != grade) {
      hql += " and s.grade = :grade"
    }
    hql += " order by s.name"
    val builder = OqlBuilder.from(hql)
    builder.param("majorId", majorId)
    builder.param("now", new Date())
    if (null != directionId) {
      builder.param("directionId", directionId)
    }
    if (null != grade) {
      builder.param("grade", grade)
    }
    entityDao.search(builder)
  }
  // Setter injection point for the IoC container.
  def setEntityDao(entityDao: EntityDao) {
    this.entityDao = entityDao
  }
  // Setter injection point for the IoC container.
  def setRestrictionHelper(restrictionHelper: RestrictionHelper) {
    this.restrictionHelper = restrictionHelper
  }
}
| openurp/edu-eams-webapp | web/src/main/scala/org/openurp/edu/eams/web/dwr/ProjectMajorSelect.scala | Scala | gpl-3.0 | 6,432 |
/*
* Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see http://www.gnu.org/licenses/agpl.html.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package lancet
package interpreter
import lancet.api._
/** Runs the shared interpreter test-suite against the TIR optimizing interpreter. */
class TestInterpreter3 extends BaseTestInterpreter3 {
  val prefix = "test-out/test-interpreter-3"
  def newCompiler = new BytecodeInterpreter_TIR_Opt with Compiler {
    initialize()
    debugBlockKeys = false
  }
}
/** Runs the shared test-suite against the LMS optimizing interpreter. */
class TestInterpreter3LMS extends BaseTestInterpreter3 {
  val prefix = "test-out/test-interpreter-3-LMS-"
  def newCompiler = new BytecodeInterpreter_LMS_Opt with Compiler {
    initialize()
    debugBlockKeys = false
    def compile[A:Manifest,B:Manifest](f: A => B): A=>B = compile0(f)
  }
}
/** LMS variant that additionally uses the post-dominator method handler. */
class TestInterpreter3LMSD extends BaseTestInterpreter3 {
  val prefix = "test-out/test-interpreter-3-LMSD-"
  def newCompiler = new BytecodeInterpreter_LMS_Opt with Compiler {
    initialize()
    override def defaultHandler = execMethodPostDom
    debugBlockKeys = false
    def compile[A:Manifest,B:Manifest](f: A => B): A=>B = compile0(f)
  }
}
/** Minimal compiler interface: turn a unary function into a compiled one. */
trait Compiler {
  def compile[A:Manifest,B:Manifest](f: A => B): A=>B
}
trait BaseTestInterpreter3 extends FileDiffSuite {
def newCompiler: Compiler
val prefix: String
  // Mutable test fixture: the suites below exercise reads/writes of these fields.
  final class Bar {
    var intField: Int = _
    var objField: AnyRef = _
  }
  // test the following:
  // - reads/writes on allocs and constants
  // - conditionals and loops
  // dynamically allocated object
  // Conditional write to a freshly allocated object: positive input takes the branch.
  def testA1 = withOutFileChecked(prefix+"A1") {
    val it = newCompiler
    val f = it.compile { (x:Int) =>
      val b = new Bar
      b.intField = 7
      if (x > 0) {
        b.intField = 9
      }
      b.intField
    }
    printcheck(f(7), 9)
  }
  // Loop that increments an allocated object's field x times: 7 + 7 = 14.
  def testA2 = withOutFileChecked(prefix+"A2") {
    val it = newCompiler
    val f = it.compile { (x:Int) =>
      val b = new Bar
      b.intField = 7
      var y = x
      while (y > 0) {
        b.intField += 1
        y -= 1
      }
      b.intField
    }
    printcheck(f(7), 14)
  }
  // Conditional write followed by a loop whose condition reads the field.
  def testA3 = withOutFileChecked(prefix+"A3") {
    val it = newCompiler
    val f = it.compile { (x:Int) =>
      val b = new Bar
      b.intField = 7
      if (x > 0) {
        b.intField = 9
      }
      var y = 0
      while (b.intField > 0) {
        b.intField -= 1
        y += 1
      }
      y
    }
    printcheck(f(7), 9)
  }
  // static object
  // Same scenarios as A1-A3, but the Bar instance is allocated OUTSIDE the
  // compiled code, so the compiler sees reads/writes on a pre-existing object.
  def testB1 = withOutFileChecked(prefix+"B1") {
    val it = newCompiler
    val b = new Bar
    b.intField = 7
    val f = it.compile { (x:Int) =>
      if (x > 0) {
        b.intField = 9
      }
      b.intField
    }
    printcheck(f(7), 9)
  }
  def testB2 = withOutFileChecked(prefix+"B2") {
    val it = newCompiler
    val b = new Bar
    b.intField = 7
    val f = it.compile { (x:Int) =>
      var y = x
      while (y > 0) {
        b.intField += 1
        y -= 1
      }
      b.intField
    }
    printcheck(f(7),14)
  }
  def testB3 = withOutFileChecked(prefix+"B3") {
    val it = newCompiler
    val b = new Bar
    b.intField = 7
    val f = it.compile { (x:Int) =>
      if (x > 0) {
        b.intField = 9
      }
      var y = 0
      while (b.intField > 0) {
        b.intField -= 1
        y += 1
      }
      y
    }
    printcheck(f(7),9)
  }
/*
test that needs multiple speculative iterations:
var x = 0
var y = 0
while (x < 10) {
if (x > 0) {
y += 1
}
x += 1
}
iteration 0:
before: x = 0, y = 0
after : x = 1, y = 0
generalize x
iteration 1:
before: x = ?, y = 0
after : x = ?, y = ?
generalize y
(do we need iteration 2 or can we just take result as gen?)
*/
} | TiarkRompf/lancet | src/test/scala/lancet/interpreter/test3.scala | Scala | agpl-3.0 | 4,555 |
package fr.simply
import org.simpleframework.http.core.Container
import org.simpleframework.http.Response
import org.simpleframework.http.Request
import org.simpleframework.transport.Server
import java.nio.charset.Charset
class SimplyScala(defaultResponse: StaticServerResponse, routes: List[ServerRoute]) extends Container {
  /** Entry point for every HTTP request: stamps common headers, then either
    * lets a configured route write the response or falls back to the default
    * response, and finally closes the stream/response. */
  def handle(request: Request, response: Response) {
    val time = System.currentTimeMillis
    response.setValue("Server", "SimplyScalaServer/1.0 (Simple 4.0)")
    response.setDate("Date", time)
    response.setDate("Last-Modified", time)
    if(requestMatchWithRoute(request, response, routes)) println("one route match with request")
    else defaultReponse(response, request)
    response.getPrintStream.close()
    response.close()
  }
private def requestMatchWithRoute(request: Request, response: Response, routes: List[ServerRoute]): Boolean = {
routes.exists {
route =>
route.response match {
case staticResponse: StaticServerResponse => makeStaticResponse(request, response, route)
case dynamicResponse: DynamicServerResponse => makeDynamicResponse(request, response, route)
}
}
}
  // Fallback used when no configured route matched the request.
  // NOTE(review): the method name keeps its historical typo ("Reponse") —
  // renaming it would touch the caller in handle().
  private def defaultReponse(response: Response, request: Request) {
    println(s"defaultResponse from request : ${request.getMethod} - ${request.getAddress}")
    response.setValue("Content-Type", defaultResponse.contentType.toString)
    response.setCode(defaultResponse.code)
    response.getPrintStream.println(defaultResponse.body)
    /*response.getPrintStream.println("names : " + request.getNames)
    response.getPrintStream.println("attributes : " + request.getAttributes)
    response.getPrintStream.println("parameters : " + request.getParameter("param1"))
    response.getPrintStream.println("request path : " + request.getPath.getPath)
    response.getPrintStream.println("request verb : " + request.getMethod + "\\n")*/
    /*response.getPrintStream.println("verb : " + routes.head.restVerb)
    response.getPrintStream.println("path : " + routes.head.path)*/
  }
private def makeStaticResponse(request: Request, response: Response, route: ServerRoute): Boolean = {
if(testRoute(request, route)) {
val staticResponse = route.response.asInstanceOf[StaticServerResponse]
makeResponse(response, staticResponse)
true
} else false
}
private def makeDynamicResponse(request: Request, response: Response, route: ServerRoute): Boolean = {
if(testRoute(request, route)) {
val dynamicResponse = route.response.asInstanceOf[DynamicServerResponse].response(request)
makeResponse(response, dynamicResponse)
true
} else false
}
  /** A route matches when verb, path and all declared parameters agree with the request. */
  private def testRoute(request: Request, route: ServerRoute): Boolean = {
    request.getMethod.equalsIgnoreCase(route.restVerb.toString) &&
    testPath(request, route) &&
    testParams(request, route.params)
  }
  // Copies status code, content type, custom headers and the UTF-8 encoded body
  // onto the outgoing response.
  private def makeResponse(response: Response, serverResponse: StaticServerResponse) {
    response.setContentType(serverResponse.contentType.toString)
    response.setCode(serverResponse.code)
    serverResponse.headers.foreach { case (k,v) => response.setValue(k,v) }
    response.getPrintStream.write(serverResponse.body.getBytes(Charset.forName("UTF-8")))
  }
  // A route path with a leading or trailing '*' wildcard is matched as a
  // substring (after stripping the '*'); otherwise an exact match is required.
  private def testPath(request: Request, route: ServerRoute): Boolean = {
    val routePath = route.path
    if (routePath.startsWith("*") || routePath.endsWith("*") ) request.getPath.getPath contains routePath.replaceAll("\\\\*", "")
    else request.getPath.getPath == route.path
  }
  /** Every declared route parameter must be present on the request with the exact same value. */
  private def testParams(request: Request, params: Map[String,String]): Boolean =
    params.forall { case (key,value) => request.getParameter(key) != null && request.getParameter(key) == value }
} | SimplyScala/simplyscala-server | src/main/scala/fr/simply/SimplyScala.scala | Scala | gpl-3.0 | 3,995 |
/*
* @author Flavio Keller
*
* Copyright 2014 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.sna.parser
import scala.io.Codec.string2codec
import scala.io.Source
import com.signalcollect.DefaultEdge
import com.signalcollect.GraphBuilder
import com.signalcollect.Vertex
import com.signalcollect.sna.constants.SNAClassNames
import com.signalcollect.sna.metrics.DegreeEdge
import com.signalcollect.sna.metrics.DegreeVertex
import com.signalcollect.sna.metrics.LabelPropagationEdge
import com.signalcollect.sna.metrics.LabelPropagationVertex
import com.signalcollect.sna.metrics.LocalClusterCoefficientEdge
import com.signalcollect.sna.metrics.LocalClusterCoefficientVertex
import com.signalcollect.sna.metrics.PageRankEdge
import com.signalcollect.sna.metrics.PageRankVertex
import com.signalcollect.sna.metrics.PathCollectorEdge
import com.signalcollect.sna.metrics.PathCollectorVertex
import com.signalcollect.sna.metrics.TriadCensusEdge
import com.signalcollect.sna.metrics.TriadCensusVertex
import com.signalcollect.Graph
/**
* Makes use of the {@link com.signalcollect.sna.parser.GMLParser}
* in order to parse files to a Signal/Collect graph
*/
object ParserImplementor {
  /**
   * Creates a Signal/Collect graph out of a GML file using the
   * {@link com.signalcollect.sna.parser.GmlParser}. Undirected graphs get each
   * edge added in both directions; directed graphs only source->target.
   * @param fileName a path to a File that should be parsed
   * @param className determines what kind of vertices/edges should be created
   * @param signalSteps only used for Label Propagation: number of signal/collect steps
   * @return the built graph
   * @throws IllegalArgumentException when the file cannot be parsed
   */
  def getGraph(fileName: String, className: SNAClassNames, signalSteps: Option[Integer]): com.signalcollect.Graph[Any, Any] = {
    val parser = new GmlParser
    val graph = GraphBuilder.build
    try {
      val parsedGraphs: List[Graph] = parser.parse(Source.fromFile(fileName)("ISO8859_1")) // may also be a File object
      parsedGraphs foreach {
        case ug: UndirectedGraph =>
          ug.nodes.foreach({ n: Node =>
            className match {
              case SNAClassNames.LABELPROPAGATION => graph.addVertex(new LabelPropagationVertex(n.id, n.id.toString, signalSteps.getOrElse(0).asInstanceOf[Int]))
              case _ => graph.addVertex(createVertex(n, className))
            }
          })
          // Undirected: materialize each edge in both directions.
          ug.edges.foreach({ e: Edge =>
            graph.addEdge(e.source, createEdge(e.target, className))
          })
          ug.edges.foreach({ e: Edge =>
            graph.addEdge(e.target, createEdge(e.source, className))
          })
        case dg: DirectedGraph =>
          dg.nodes.foreach({ n: Node =>
            className match {
              case SNAClassNames.LABELPROPAGATION => graph.addVertex(new LabelPropagationVertex(n.id, n.id.toString, signalSteps.getOrElse(0).asInstanceOf[Int]))
              case _ => graph.addVertex(createVertex(n, className))
            }
          })
          dg.edges.foreach({ e: Edge =>
            graph.addEdge(e.source, createEdge(e.target, className))
          })
      }
      graph
    } catch {
      // Shut the partially built graph down before surfacing the error.
      case p: ParseException => {
        graph.shutdown
        throw new IllegalArgumentException("Error when reading graph file " + fileName + ": " + p.getMessage())
      }
      case t: Throwable => {
        graph.shutdown
        throw new IllegalArgumentException("Error when reading graph file " + fileName + ": " + t.getMessage())
      }
    }
  }
  /**
   * Creates a vertex object for a graph.
   * NOTE(review): this match has no LABELPROPAGATION arm (that case is handled
   * directly in getGraph), so calling it with LABELPROPAGATION throws a MatchError.
   * @param node the node object of the parsed graph
   * @param vertexClass determines what kind of vertex should be created
   * @return the vertex object
   */
  def createVertex(node: Node, vertexClass: SNAClassNames): Vertex[Any, _, Any, Any] = {
    vertexClass match {
      case SNAClassNames.DEGREE => new DegreeVertex(node.id)
      case SNAClassNames.PAGERANK => new PageRankVertex(node.id)
      case SNAClassNames.PATH => new PathCollectorVertex(node.id)
      case SNAClassNames.BETWEENNESS => new PathCollectorVertex(node.id)
      case SNAClassNames.CLOSENESS => new PathCollectorVertex(node.id)
      case SNAClassNames.LOCALCLUSTERCOEFFICIENT => new LocalClusterCoefficientVertex(node.id)
      case SNAClassNames.TRIADCENSUS => new TriadCensusVertex(node.id)
      //case LABELPROPAGATION omitted (is treated above)
    }
  }
/**
* Creates an edge object for a graph
* @param targetId: the id of the target vertex
* @param edgeClass: determines what kind of edge should be created
* @return the edge object
*/
def createEdge(targetId: Int, edgeClass: SNAClassNames): DefaultEdge[_] = {
edgeClass match {
case SNAClassNames.DEGREE => new DegreeEdge(targetId)
case SNAClassNames.PAGERANK => new PageRankEdge(targetId)
case SNAClassNames.PATH => new PathCollectorEdge(targetId)
case SNAClassNames.BETWEENNESS => new PathCollectorEdge(targetId)
case SNAClassNames.CLOSENESS => new PathCollectorEdge(targetId)
case SNAClassNames.LOCALCLUSTERCOEFFICIENT => new LocalClusterCoefficientEdge(targetId)
case SNAClassNames.TRIADCENSUS => new TriadCensusEdge(targetId)
case SNAClassNames.LABELPROPAGATION => new LabelPropagationEdge(targetId)
}
}
} | fkzrh/signal-collect-sna | src/main/scala/com/signalcollect/sna/parser/ParserImplementor.scala | Scala | apache-2.0 | 5,777 |
package com.twitter.server.lint
import com.twitter.finagle.stats.{
DelegatingStatsReceiver,
LoadedStatsReceiver,
StatsReceiver,
StatsReceiverWithCumulativeGauges
}
import com.twitter.util.lint.Rule
/** Builds lint rules flagging stats receivers that hold too many cumulative gauges. */
object TooManyCumulativeGaugesRules {

  /** Rules for every stats receiver loaded in this process. */
  def apply(): Seq[Rule] =
    apply(DelegatingStatsReceiver.all(LoadedStatsReceiver))

  /** Exposed for testing */
  private[lint] def apply(statsReceivers: Seq[StatsReceiver]): Seq[Rule] =
    statsReceivers.flatMap {
      // Only receivers that track cumulative gauges know how to lint themselves.
      case withGauges: StatsReceiverWithCumulativeGauges => Seq(withGauges.largeGaugeLinterRule)
      case _ => Nil
    }
}
| twitter/twitter-server | server/src/main/scala/com/twitter/server/lint/TooManyCumulativeGaugesRules.scala | Scala | apache-2.0 | 571 |
/* The Computer Language Benchmarks Game
http://shootout.alioth.debian.org/
contributed by Isaac Gouy
modified by Meiko Rachimow
updated for 2.8 by Rex Kerr
*/
import math._
object nbody {
  /** Prints the system energy, advances the simulation n steps, prints energy again. */
  def main(args: Array[String]) = {
    val steps = args(0).toInt
    printf("%.9f\\n", JovianSystem.energy )
    // Fixed step size of 0.01 is part of the benchmark's specification.
    for (_ <- 0 until steps) JovianSystem.advance(0.01)
    printf("%.9f\\n", JovianSystem.energy )
  }
}
/** Skeleton of an n-body simulator: subclasses supply the body array, this class
 *  provides the total-energy metric and the per-step integration.
 *  NOTE(review): floating-point accumulation order is deliberately fixed — the
 *  benchmark compares output to 9 decimal places.
 */
abstract class NBodySystem {
  /** Total energy of the system: kinetic term (0.5 * m * v^2 per body) minus the
   *  pairwise gravitational potential (m_i * m_j / distance per unordered pair).
   */
  def energy() = {
    var e = 0.0
    for (i <- 0 until bodies.length) {
      e += 0.5 * bodies(i).mass * bodies(i).speedSq
      for (j <- i+1 until bodies.length) {
        val dx = bodies(i).x - bodies(j).x
        val dy = bodies(i).y - bodies(j).y
        val dz = bodies(i).z - bodies(j).z
        val distance = sqrt(dx*dx + dy*dy + dz*dz)
        e -= (bodies(i).mass * bodies(j).mass) / distance
      }
    }
    e
  }
  /** Advances the simulation by one step of length dt: first applies the pairwise
   *  gravitational impulses to every body's velocity, then moves each body along
   *  its new velocity. Uses while loops for speed (benchmark hot path).
   */
  def advance(dt: Double) = {
    var i = 0
    while (i < bodies.length){
      var j = i+1
      while (j < bodies.length){
        val dx = bodies(i).x - bodies(j).x
        val dy = bodies(i).y - bodies(j).y
        val dz = bodies(i).z - bodies(j).z
        val distance = sqrt(dx*dx + dy*dy + dz*dz)
        // dt / d^3, so (dx,dy,dz) * mass * mag is the velocity change over dt.
        val mag = dt / (distance * distance * distance)
        // Equal and opposite impulses: total momentum stays conserved.
        bodies(i).advance(dx,dy,dz,-bodies(j).mass*mag)
        bodies(j).advance(dx,dy,dz,bodies(i).mass*mag)
        j += 1
      }
      i += 1
    }
    i = 0
    while (i < bodies.length){
      bodies(i).move(dt)
      i += 1
    }
  }
  // Concrete systems provide the bodies (see JovianSystem below).
  protected val bodies: Array[Body]
  /** Mutable state of one body: position, velocity and mass. */
  class Body(){
    var x,y,z = 0.0
    var vx,vy,vz = 0.0
    var mass = 0.0
    // Squared speed; feeds the kinetic-energy term in energy().
    def speedSq = vx*vx + vy*vy + vz*vz
    // Moves the body along its current velocity for a step of length dt.
    def move(dt: Double) {
      x += dt*vx
      y += dt*vy
      z += dt*vz
    }
    // Applies an impulse: adds the delta-scaled displacement to the velocity.
    def advance(dx: Double, dy: Double, dz: Double, delta: Double) {
      vx += dx*delta
      vy += dy*delta
      vz += dz*delta
    }
  }
}
/** The Sun plus the four Jovian planets, with initial conditions from the
 *  benchmark specification. All arithmetic is performed in the same order as the
 *  reference implementation, so results are bit-identical.
 */
object JovianSystem extends NBodySystem {
  protected val bodies = initialValues
  private def initialValues() = {
    val SOLAR_MASS = 4 * Pi * Pi
    val DAYS_PER_YEAR = 365.24
    // Local factory: velocities are given per day and scaled to per year,
    // masses are given relative to the solar mass.
    def body(x: Double, y: Double, z: Double,
             vx: Double, vy: Double, vz: Double, massInSuns: Double): Body = {
      val b = new Body
      b.x = x
      b.y = y
      b.z = z
      b.vx = vx * DAYS_PER_YEAR
      b.vy = vy * DAYS_PER_YEAR
      b.vz = vz * DAYS_PER_YEAR
      b.mass = massInSuns * SOLAR_MASS
      b
    }
    val sun = body(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0)
    val jupiter = body(
      4.84143144246472090e+00, -1.16032004402742839e+00, -1.03622044471123109e-01,
      1.66007664274403694e-03, 7.69901118419740425e-03, -6.90460016972063023e-05,
      9.54791938424326609e-04)
    val saturn = body(
      8.34336671824457987e+00, 4.12479856412430479e+00, -4.03523417114321381e-01,
      -2.76742510726862411e-03, 4.99852801234917238e-03, 2.30417297573763929e-05,
      2.85885980666130812e-04)
    val uranus = body(
      1.28943695621391310e+01, -1.51111514016986312e+01, -2.23307578892655734e-01,
      2.96460137564761618e-03, 2.37847173959480950e-03, -2.96589568540237556e-05,
      4.36624404335156298e-05)
    val neptune = body(
      1.53796971148509165e+01, -2.59193146099879641e+01, 1.79258772950371181e-01,
      2.68067772490389322e-03, 1.62824170038242295e-03, -9.51592254519715870e-05,
      5.15138902046611451e-05)
    val initialValues = Array ( sun, jupiter, saturn, uranus, neptune )
    // Offset the sun's velocity so the system's net momentum is exactly zero.
    var px = 0.0; var py = 0.0; var pz = 0.0;
    for (b <- initialValues){
      px += (b.vx * b.mass)
      py += (b.vy * b.mass)
      pz += (b.vz * b.mass)
    }
    sun.vx = -px / SOLAR_MASS
    sun.vy = -py / SOLAR_MASS
    sun.vz = -pz / SOLAR_MASS
    initialValues
  }
}
| kragen/shootout | bench/nbody/nbody.scala | Scala | bsd-3-clause | 4,060 |
class C
object Holder {
  implicit object O extends C
}
def f(implicit p: C) = {}
import Holder._
println(/* offset: 62 */ f)
// NOTE(review): IDE resolve-test fixture — `offset: 62` encodes a character offset
// from the start of this file to the expected resolve target, so edits to any
// earlier text would shift it; only trailing comments like this one are safe.
| ilinum/intellij-scala | testdata/resolve2/function/implicit/ImportObjectImplicit.scala | Scala | apache-2.0 | 131 |
package io.finch
import java.util.UUID
import java.util.concurrent.TimeUnit
import cats.data.NonEmptyList
import cats.effect.{IO, Resource}
import cats.laws._
import cats.laws.discipline._
import cats.laws.discipline.AlternativeTests
import cats.laws.discipline.SemigroupalTests.Isomorphisms
import com.twitter.finagle.http.{Cookie, Method, Request}
import com.twitter.io.Buf
import io.finch.data.Foo
import java.io.{ByteArrayInputStream, InputStream}
import scala.concurrent.duration.Duration
import shapeless._
/** Property- and law-based tests for Finch's `Endpoint`, covering matching,
 *  mapping, output propagation, error accumulation and coproduct composition.
 */
class EndpointSpec extends FinchSpec {
  type EndpointIO[A] = Endpoint[IO, A]
  implicit val isomorphisms: Isomorphisms[EndpointIO] =
    Isomorphisms.invariant[EndpointIO](Endpoint.endpointInstances)
  // Law checks: Endpoint is a lawful Applicative/Alternative, and each typed path
  // extractor obeys the extraction laws.
  checkAll("Endpoint[String]", AlternativeTests[EndpointIO].applicative[String, String, String])
  checkAll("ExtractPath[String]", ExtractPathLaws[IO, String].all)
  checkAll("ExtractPath[Int]", ExtractPathLaws[IO, Int].all)
  checkAll("ExtractPath[Long]", ExtractPathLaws[IO, Long].all)
  checkAll("ExtractPath[UUID]", ExtractPathLaws[IO, UUID].all)
  checkAll("ExtractPath[Boolean]", ExtractPathLaws[IO, Boolean].all)
  behavior of "Endpoint"
  private[this] val emptyRequest = Request()
  it should "support very basic map" in {
    check { i: Input =>
      path[String].map(_ * 2).apply(i).awaitValueUnsafe() === i.route.headOption.map(_ * 2)
    }
  }
  it should "correctly run mapF" in {
    check { e: Endpoint[IO, String] =>
      val fn: String => Int = _.length
      e.transformF(_.map(fn)) <-> e.map(fn)
    }
  }
  it should "support transform" in {
    check { i: Input =>
      val fn = (fs: IO[Output[String]]) => fs.map(_.map(_ * 2))
      path[String].transform(fn).apply(i).awaitValueUnsafe() === i.route.headOption.map(_ * 2)
    }
  }
  it should "propagate the default (Ok) output" in {
    check { i: Input =>
      path[String].apply(i).awaitOutputUnsafe() === i.route.headOption.map(s => Ok(s))
    }
  }
  it should "propagate the default (Ok) output through its map'd/mapAsync'd version" in {
    check { i: Input =>
      val expected = i.route.headOption.map(s => Ok(s.length))
      path[String].map(s => s.length).apply(i).awaitOutputUnsafe() === expected &&
        path[String].mapAsync(s => IO.pure(s.length)).apply(i).awaitOutputUnsafe() === expected
    }
  }
  it should "propagate the output through mapOutputAsync and /" in {
    def expected(i: Int): Output[Int] =
      Created(i)
        .withHeader("A" -> "B")
        .withCookie(new Cookie("C", "D"))
    check { i: Input =>
      path[String].mapOutputAsync(s => IO.pure(expected(s.length))).apply(i).awaitOutputUnsafe() ===
        i.route.headOption.map(s => expected(s.length))
    }
    // Same check, but with the extractor at the end of a chain of literal matchers.
    check { i: Input =>
      val e = i.route.dropRight(1)
        .map(s => path(s))
        .foldLeft[Endpoint[IO, HNil]](zero)((acc, ee) => acc :: ee)
      val v = (e :: path[String]).mapOutputAsync(s => IO.pure(expected(s.length))).apply(i)
      v.awaitOutputUnsafe() === i.route.lastOption.map(s => expected(s.length))
    }
  }
  it should "match one patch segment" in {
    check { i: Input =>
      val v = i.route.headOption
        .flatMap(s => path(s).apply(i).remainder)
      v.isEmpty|| v === Some(i.withRoute(i.route.tail))
    }
  }
  it should "always match the entire input with *" in {
    check { i: Input =>
      pathAny.apply(i).remainder === Some(i.copy(route = Nil))
    }
  }
  it should "match empty path" in {
    check { i: Input =>
      (i.route.isEmpty && pathEmpty.apply(i).isMatched) ||
        (!i.route.isEmpty && !pathEmpty.apply(i).isMatched)
    }
  }
  it should "match the HTTP method" in {
    // A method endpoint leaves the input untouched on a match and is a no-match otherwise.
    def matchMethod(
      m: Method,
      f: Endpoint[IO, HNil] => Endpoint[IO, HNil]): Input => Boolean = { i: Input =>
      val v = f(zero)(i)
      (i.request.method === m && v.remainder === Some(i)) ||
      (i.request.method != m && v.remainder === None)
    }
    check(matchMethod(Method.Get, get))
    check(matchMethod(Method.Post, post))
    check(matchMethod(Method.Trace, trace))
    check(matchMethod(Method.Put, put))
    check(matchMethod(Method.Patch, patch))
    check(matchMethod(Method.Head, head))
    check(matchMethod(Method.Options, options))
    check(matchMethod(Method.Delete, delete))
  }
  it should "always match the identity instance" in {
    check { i: Input =>
      zero.apply(i).remainder === Some(i)
    }
  }
  it should "match the entire input" in {
    check { i: Input =>
      val e = i.route.map(s => path(s)).foldLeft[Endpoint[IO, HNil]](zero)((acc, e) => acc :: e)
      e(i).remainder === Some(i.copy(route = Nil))
    }
  }
  it should "not match the entire input if one of the underlying endpoints is failed" in {
    check { (i: Input, s: String) =>
      (pathAny :: s).apply(i).remainder === None
    }
  }
  it should "match the input if one of the endpoints succeed" in {
    def matchOneOfTwo(f: String => Endpoint[IO, HNil]): Input => Boolean = { i: Input =>
      val v = i.route.headOption.map(f).flatMap(e => e(i).remainder)
      v.isEmpty || v === Some(i.withRoute(i.route.tail))
    }
    check(matchOneOfTwo(s => path(s).coproduct(path(s.reverse))))
    check(matchOneOfTwo(s => path(s.reverse).coproduct(path(s))))
  }
  it should "have the correct string representation" in {
    def standaloneMatcher[A]: A => Boolean = { a: A =>
      path(a.toString).toString == a.toString
    }
    check(standaloneMatcher[String])
    check(standaloneMatcher[Int])
    check(standaloneMatcher[Boolean])
    def methodMatcher(
      m: Method,
      f: Endpoint[IO, HNil] => Endpoint[IO, HNil]
    ): String => Boolean = { s: String => f(s).toString === m.toString.toUpperCase + " /" + s }
    check(methodMatcher(Method.Get, get))
    check(methodMatcher(Method.Post, post))
    check(methodMatcher(Method.Trace, trace))
    check(methodMatcher(Method.Put, put))
    check(methodMatcher(Method.Patch, patch))
    check(methodMatcher(Method.Head, head))
    check(methodMatcher(Method.Options, options))
    check(methodMatcher(Method.Delete, delete))
    check { (s: String, i: Int) => path(s).map(_ => i).toString === s }
    check { (s: String, t: String) => (path(s) :+: path(t)).toString === s"($s :+: $t)" }
    check { (s: String, t: String) => (path(s) :: path(t)).toString === s"$s :: $t" }
    check { s: String => path(s).product[String](pathAny.map(_ => "foo")).toString === s }
    check { (s: String, t: String) => path(s).mapAsync(_ => IO.pure(t)).toString === s }
    pathEmpty.toString shouldBe ""
    pathAny.toString shouldBe "*"
    path[Int].toString shouldBe ":int"
    path[String].toString shouldBe ":string"
    path[Long].toString shouldBe ":long"
    path[UUID].toString shouldBe ":uuid"
    path[Boolean].toString shouldBe ":boolean"
    paths[Int].toString shouldBe ":int*"
    paths[String].toString shouldBe ":string*"
    paths[Long].toString shouldBe ":long*"
    paths[UUID].toString shouldBe ":uuid*"
    paths[Boolean].toString shouldBe ":boolean*"
    (path[Int] :: path[String]).toString shouldBe ":int :: :string"
    (path[Boolean] :+: path[Long]).toString shouldBe "(:boolean :+: :long)"
  }
  it should "always respond with the same output if it's a constant Endpoint" in {
    check { s: String =>
      const(s).apply(Input.get("/")).awaitValueUnsafe() === Some(s) &&
        lift(s).apply(Input.get("/")).awaitValueUnsafe() === Some(s) &&
        liftAsync(IO.pure(s)).apply(Input.get("/")).awaitValueUnsafe() === Some(s)
    }
    check { o: Output[String] =>
      liftOutput(o).apply(Input.get("/")).awaitOutputUnsafe() === Some(o) &&
        liftOutputAsync(IO.pure(o)).apply(Input.get("/")).awaitOutputUnsafe() === Some(o)
    }
  }
  it should "support the as[A] method for HList" in {
    case class Foo(s: String, i: Int, b: Boolean)
    val foo = (path[String] :: path[Int] :: path[Boolean]).as[Foo]
    check { (s: String, i: Int, b: Boolean) =>
      foo(Input(emptyRequest, List(s, i.toString, b.toString))).awaitValueUnsafe() ===
        Some(Foo(s, i, b))
    }
  }
  it should "rescue the exception occurred in it" in {
    check { (i: Input, s: String, e: Exception) =>
      val result = liftAsync[String](IO.raiseError(e)).handle {
        case _ => Created(s)
      }.apply(i).awaitOutput()
      result === Some(Right(Created(s)))
    }
  }
  it should "re-raise the exception if it wasn't handled" in {
    case object CustomException extends Exception
    check { (i: Input, s: String, e: Exception) =>
      val result = liftAsync[String](IO.raiseError(e)).handle {
        case CustomException => Created(s)
      }.apply(i).awaitOutput()
      result === Some(Left(e))
    }
  }
  it should "not split comma separated param values" in {
    val i = Input.get("/index", "foo" -> "a,b")
    val e = params("foo")
    e(i).awaitValueUnsafe() shouldBe Some(Seq("a,b"))
  }
  it should "throw NotPresent if an item is not found" in {
    val i = Input.get("/")
    Seq(
      param("foo"), header("foo"), cookie("foo").map(_.value),
      multipartFileUpload("foo").map(_.fileName), paramsNel("foo").map(_.toList.mkString),
      paramsNel("foor").map(_.toList.mkString), binaryBody.map(new String(_)), stringBody
    ).foreach { ii => ii(i).awaitValue() shouldBe Some(Left(Error.NotPresent(ii.item))) }
  }
  it should "maps lazily to values" in {
    // The mapped body must re-run on every request, not be cached at construction.
    val i = Input(emptyRequest, List.empty)
    var c = 0
    val e = get(pathAny) { c = c + 1; Ok(c) }
    e(i).awaitValueUnsafe() shouldBe Some(1)
    e(i).awaitValueUnsafe() shouldBe Some(2)
  }
  it should "not evaluate Futures until matched" in {
    // Matching alone must not trigger the effect of a non-selected alternative.
    val i = Input(emptyRequest, List("a", "10"))
    var flag = false
    val endpointWithFailedFuture = "a".mapAsync { nil =>
      IO { flag = true; nil }
    }
    val e = ("a" :: "10") :+: endpointWithFailedFuture
    e(i).isMatched shouldBe true
    flag shouldBe false
  }
  it should "be greedy in terms of | compositor" in {
    // Regardless of alternative order, the longest match wins.
    val a = Input(emptyRequest, List("a", "10"))
    val b = Input(emptyRequest, List("a"))
    val e1 = "a".coproduct("b").coproduct("a" :: "10")
    val e2 = ("a" :: "10").coproduct("b").coproduct("a")
    e1(a).remainder shouldBe Some(a.withRoute(a.route.drop(2)))
    e1(b).remainder shouldBe Some(b.withRoute(b.route.drop(2)))
    e2(a).remainder shouldBe Some(a.withRoute(a.route.drop(2)))
    e2(b).remainder shouldBe Some(b.withRoute(b.route.drop(2)))
  }
  it should "accumulate errors on its product" in {
    // Both sides fail with Error/Errors: the product reports the union of them,
    // independent of composition order.
    check { (a: Either[Error, Errors], b: Either[Error, Errors]) =>
      val aa = a.fold[Exception](identity, identity)
      val bb = b.fold[Exception](identity, identity)
      val left = liftAsync[Unit](IO.raiseError(aa))
      val right = liftAsync[Unit](IO.raiseError(bb))
      val lr = left.product(right)
      val rl = right.product(left)
      val all =
        a.fold[Set[Error]](e => Set(e), es => es.errors.toList.toSet) ++
          b.fold[Set[Error]](e => Set(e), es => es.errors.toList.toSet)
      val Some(Left(first)) = lr(Input.get("/")).awaitValue()
      val Some(Left(second)) = rl(Input.get("/")).awaitValue()
      first.asInstanceOf[Errors].errors.toList.toSet === all &&
        second.asInstanceOf[Errors].errors.toList.toSet === all
    }
  }
  it should "fail-fast with the first non-error observed" in {
    // A non-Error/Errors exception short-circuits accumulation on either side.
    check { (a: Error, b: Errors, e: Exception) =>
      val aa = liftAsync[Unit](IO.raiseError(a))
      val bb = liftAsync[Unit](IO.raiseError(b))
      val ee = liftAsync[Unit](IO.raiseError(e))
      val aaee = aa.product(ee)
      val eeaa = ee.product(aa)
      val bbee = bb.product(ee)
      val eebb = ee.product(bb)
      aaee(Input.get("/")).awaitValue() === Some(Left(e)) &&
        eeaa(Input.get("/")).awaitValue() === Some(Left(e)) &&
        bbee(Input.get("/")).awaitValue() === Some(Left(e)) &&
        eebb(Input.get("/")).awaitValue() === Some(Left(e))
    }
  }
  it should "accumulate EndpointResult.NotMatched in its | compositor" in {
    // A method mismatch on every alternative surfaces as MethodNotAllowed listing
    // all methods the coproduct would have accepted.
    val a = get("foo")
    val b = post("foo")
    val ab = a.coproduct(b)
    ab(Input.get("/foo")).isMatched shouldBe true
    ab(Input.post("/foo")).isMatched shouldBe true
    val put = ab(Input.put("/foo"))
    put.isMatched shouldBe false
    put.asInstanceOf[EndpointResult.NotMatched.MethodNotAllowed[IO]].allowed.toSet shouldBe {
      Set(Method.Post, Method.Get)
    }
  }
  it should "support the as[A] method on Endpoint[Seq[String]]" in {
    val foos = params[Foo]("testEndpoint")
    foos(Input.get("/index", "testEndpoint" -> "a")).awaitValueUnsafe() shouldBe Some(Seq(Foo("a")))
  }
  it should "collect errors on Endpoint[Seq[String]] failure" in {
    val endpoint = params[UUID]("testEndpoint")
    an[Errors] shouldBe thrownBy (
      endpoint(Input.get("/index", "testEndpoint" -> "a")).awaitValueUnsafe()
    )
  }
  it should "support the as[A] method on Endpoint[NonEmptyList[A]]" in {
    val foos = paramsNel[Foo]("testEndpoint")
    foos(Input.get("/index", "testEndpoint" -> "a")).awaitValueUnsafe() shouldBe
      Some(NonEmptyList.of(Foo("a")))
  }
  it should "collect errors on Endpoint[NonEmptyList[String]] failure" in {
    val endpoint = paramsNel[UUID]("testEndpoint")
    an[Errors] shouldBe thrownBy (
      endpoint(Input.get("/index", "testEndpoint" -> "a")).awaitValueUnsafe(Duration(10, TimeUnit.SECONDS))
    )
  }
  it should "fromInputStream" in {
    val bytes = Array[Byte](1, 2, 3, 4, 5)
    val bis = Resource.fromAutoCloseable[IO, InputStream](IO.delay(new ByteArrayInputStream(bytes)))
    val is = fromInputStream(bis)
    is(Input.get("/")).awaitValueUnsafe() shouldBe Some(Buf.ByteArray.Owned(bytes))
  }
  it should "classpathAsset" in {
    val r = classpathAsset("/test.txt")
    r(Input.get("/foo")).awaitOutputUnsafe() shouldBe None
    r(Input.post("/")).awaitOutputUnsafe() shouldBe None
    r(Input.get("/test.txt")).awaitValueUnsafe() shouldBe Some(Buf.Utf8("foo bar baz\\n"))
  }
}
| ImLiar/finch | core/src/test/scala/io/finch/EndpointSpec.scala | Scala | apache-2.0 | 13,917 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.