| code (stringlengths, 5-1M) | repo_name (stringlengths, 5-109) | path (stringlengths, 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M) |
|---|---|---|---|---|---|
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.raster.data
import org.locationtech.geomesa.utils.geohash.BoundingBox
/**
* This class contains the parameters needed to build a query that
* retrieves raster chunks from an Accumulo table.
*
* @param bbox Bounding box defining the geometric area of the desired raster
* @param resolution Desired resolution of the grid
*/
case class RasterQuery(bbox: BoundingBox, resolution: Double)
// TODO: WCS: include a list of bands as an optional parameter
// ticket is GEOMESA-559
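// ---- Editor's illustration (not part of the original file) ----
// A RasterQuery simply pairs a spatial extent with the grid resolution the caller wants
// back. Assuming `bbox` is an already-constructed BoundingBox (its factory is not shown
// here), building a query looks like:
//   val query = RasterQuery(bbox, resolution = 0.5)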
|
ddseapy/geomesa
|
geomesa-accumulo/geomesa-accumulo-raster/src/main/scala/org/locationtech/geomesa/raster/data/RasterQuery.scala
|
Scala
|
apache-2.0
| 956
|
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.lang._
object ExpressionOrder1 {
def test1 = {
var x = 0
def bar(y: Int) = {
def fun(z: Int) = 1 * x * (y + z)
fun(3)
}
bar(2) == 0
}.holds
}
|
regb/leon
|
src/test/resources/regression/verification/xlang/valid/ExpressionOrder1.scala
|
Scala
|
gpl-3.0
| 234
|
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2014 Adobe Systems Incorporated. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////
package com.adobe
// Akka
import akka.actor.{Actor,ActorRefFactory}
import akka.pattern.ask
import akka.util.Timeout
// Spray
import spray.http._
import spray.routing.HttpService
import MediaTypes._
// Scala
import scala.concurrent._
import scala.concurrent.duration._
import ExecutionContext.Implicits.global
import queries._
// Actor accepting Http requests for the Scala collector.
class QueryServiceActor extends Actor with HttpService {
implicit val timeout: Timeout = 1000.second // For the actor 'asks'
import context.dispatcher
def actorRefFactory = context
// Use QueryService so the same route can be accessed differently
// in the testing framework.
private val collectorService = new QueryService(context)
// Message loop for the Spray service.
def receive = handleTimeouts orElse runRoute(collectorService.collectorRoute)
def handleTimeouts: Receive = {
case Timedout(_) => sender !
HttpResponse(status = 408, entity = "Error: Page timed out.")
}
}
class QueryService(context: ActorRefFactory) extends HttpService {
implicit def actorRefFactory = context
val collectorRoute = {
get {
pathSingleSlash {
redirect("/index", StatusCodes.Found)
}~
path("index") {
respondWithMediaType(`text/html`) {
complete{
val unzippedMap = QueryMeta.info.unzip
html.index(unzippedMap._1.toSeq).toString
}
}
}~
path("favicon.ico") {
complete(StatusCodes.NotFound)
}~
path(Rest) { path =>
getFromResource("bootstrap/%s" format path)
}~
path(Rest) { path =>
getFromResource("bootstrap-daterangepicker/%s" format path)
}~
path(Rest) { path =>
getFromResource("terminus/%s" format path)
}~
path("query") {
parameters('name.as[String], 'profile ? false, 'cache ? false,
'start_ymd.as[String], 'finish_ymd.as[String],
'targetPartitionSize ? 100000L) {
(name, profile, cache, start_ymd, finish_ymd, targetPartitionSize) =>
respondWithMediaType(if (profile) `text/plain` else `text/html`) {
complete(
future {
QueryHandler.handle(name, profile, cache,
start_ymd, finish_ymd, targetPartitionSize)
}
)
}
}
}~
path("adhoc") {
parameters('start_ymd.as[String], 'finish_ymd.as[String]) {
(start_ymd, finish_ymd) =>
respondWithMediaType(`text/html`) {
complete(
future {
QueryHandler.generateAdhocPage(start_ymd, finish_ymd)
}
)
}
}
}~
path("adhoc-sql") {
parameters('sqlQuery.as[String], 'numResults.as[Int],
'start_ymd.as[String], 'finish_ymd.as[String]) {
(sqlQuery, numResults, start_ymd, finish_ymd) =>
respondWithMediaType(`text/plain`) {
complete(
future {
QueryHandler.handleSQL(sqlQuery, numResults,
start_ymd, finish_ymd)
}
)
}
}
}~
path("stats") {
complete{
QueryHandler.getStats()
}
}~
path("loadTime") {
complete{
QueryHandler.loadTime()
}
}~
path("kill") {
complete{
QueryHandler.stopSparkContext()
System.exit(0)
""
}
}~
path("restartSparkContext") {
parameters('memory ? "1g", 'cores ? 4) { (memory, cores) =>
respondWithMediaType(`text/plain`) {
complete(
future {
QueryHandler.restartSparkContext(memory,cores)
}
)
}
}
}
}~
complete(HttpResponse(status = 404, entity = "404 Not found"))
}
}
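// ---- Editor's illustration (not part of the original file) ----
// Why QueryService takes an ActorRefFactory: the route can be exercised directly with
// spray-testkit, without starting QueryServiceActor. A hypothetical spec (assumes the
// spray-testkit and specs2 test dependencies; names are made up):
//   class QueryServiceSpec extends org.specs2.mutable.Specification with spray.testkit.Specs2RouteTest {
//     val service = new QueryService(system) // `system` is the ActorSystem provided by the testkit
//     "GET /" should {
//       "redirect to /index" in {
//         Get("/") ~> service.collectorRoute ~> check { status === StatusCodes.Found }
//       }
//     }
//   }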
|
alexanderfield/spindle
|
src/main/scala/QueryServiceActor.scala
|
Scala
|
apache-2.0
| 4,699
|
object Test {
implicit class Foo
implicit def Foo = new Foo
}
|
som-snytt/dotty
|
tests/untried/neg/t5728.scala
|
Scala
|
apache-2.0
| 69
|
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.data.hibernate
import org.beangle.commons.lang.ClassLoaders
import org.beangle.data.hibernate.model.{ExtendRole, Role, User}
import org.beangle.data.hibernate.spring.{LocalSessionFactoryBean, SessionUtils}
import org.beangle.data.model.meta.SingularProperty
import org.beangle.data.model.util.ConvertPopulator
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpec
import org.springframework.core.io.UrlResource
class HibernateConfigTest extends AnyFunSpec with Matchers {
val ormLocations = ClassLoaders.getResource("META-INF/beangle/orm.xml").toList
val resources = ormLocations map (url => new UrlResource(url.toURI))
val ds = Tests.buildTestH2()
val builder = new LocalSessionFactoryBean(ds)
builder.ormLocations = resources.toArray
builder.properties.put("hibernate.show_sql", "true")
builder.properties.put("hibernate.hbm2ddl.auto", "create")
builder.properties.put("hibernate.cache.use_second_level_cache","true")
builder.properties.put("hibernate.javax.cache.provider","org.ehcache.jsr107.EhcacheCachingProvider")
builder.properties.put("hibernate.cache.use_query_cache", "true")
builder.properties.put("hibernate.ejb.metamodel.population", "disabled")
builder.init()
val sf = builder.result
val entityDao = new HibernateEntityDao(sf)
val domain = entityDao.domain
SessionUtils.enableBinding(sf)
SessionUtils.openSession(sf)
val roleMetaOption = domain.getEntity(classOf[Role])
val userMetaOption = domain.getEntity(classOf[User])
it("Should support option and collection") {
UserCrudTest.testCrud(sf)
}
it("Role's parent is entityType") {
assert(roleMetaOption.isDefined)
val parentMeta = roleMetaOption.get.getProperty("parent")
assert(parentMeta.isDefined)
assert(parentMeta.get.isInstanceOf[SingularProperty])
assert(parentMeta.get.asInstanceOf[SingularProperty].propertyType.clazz == classOf[ExtendRole])
}
it("populate to option entity") {
val populator = new ConvertPopulator()
val roleMeta = roleMetaOption.get
val role = new Role();
populator.populate(role, roleMeta, "parent.id", "1");
assert(role.parent != null)
assert(role.parent.isDefined)
assert(role.parent.get.id == 1)
role.parent = Some(new ExtendRole(1))
val oldParent = role.parent.get
populator.populate(role, roleMeta, "parent.id", "2");
assert(role.parent != null)
assert(role.parent.isDefined)
assert(role.parent.get.id == 2)
assert(oldParent.id == 1)
populator.populate(role, roleMeta, "parent.id", "");
assert(role.parent != null)
assert(role.parent.isEmpty)
val u = new User
val userMeta = userMetaOption.get
role.creator = Some(u)
populator.populate(u, userMeta, "age", "2");
assert(u.age.contains(2))
populator.populate(u, userMeta, "age", "");
assert(u.age.isEmpty)
populator.populate(role, roleMeta, "creator.age", "2");
assert(u.age.contains(2))
}
it("get java.sql.Date on Role.expiredOn") {
val roleMeta = domain.getEntity(classOf[Role])
assert(None != roleMeta)
roleMeta.foreach { rm =>
assert(classOf[java.sql.Timestamp] == rm.getProperty("updatedAt").get.clazz)
assert(classOf[java.util.Date] == rm.getProperty("createdAt").get.clazz)
assert(classOf[java.util.Calendar] == rm.getProperty("s").get.clazz)
assert(classOf[java.sql.Date] == rm.getProperty("expiredOn").get.clazz)
}
}
}
|
beangle/data
|
hibernate/src/test/scala/org/beangle/data/hibernate/HibernateConfigTest.scala
|
Scala
|
lgpl-3.0
| 4,185
|
package com.dslplatform.test
import com.dslplatform.api.patterns.{PersistableRepository, Repository, SearchableRepository, ServiceLocator}
import com.dslplatform.test.simple.{Self, Selfy, SimpleRoot, SimpleSnow}
import org.specs2.mutable._
import org.specs2.specification.Step
class SnowflakeTest extends Specification with Common {
override def is = sequential ^ s2"""
Snowflake test ${Step(located.clean[SimpleRoot])}
query repository ${located(queryRepository)}
search over roots specification ${located(searchOverRootsSpecification)}
search over snows specification ${located(searchOverSnowsSpecification)}
self reference ${located(selfReference)}
${Step(located.close())}
"""
private val numOfRoots = 13
private val numOfNames = 4
private val names = for (i <- 1 to 4) yield rName()
private val myName = names(0)
private val arrSR = for (i <- 0 to numOfRoots - 1) yield SimpleRoot(rInt(), rFloat(), names(rInt(numOfNames)))
// private def countOdds(e: Int*) = e.count(_ & 1 == 1)
val located = new Located
def queryRepository = { implicit locator: ServiceLocator =>
await(locator.resolve[PersistableRepository[SimpleRoot]].insert(arrSR))
val snow = await(locator.resolve[SearchableRepository[SimpleSnow]].search())
snow.size === numOfRoots
}
def searchOverRootsSpecification = { implicit locator: ServiceLocator =>
/*
val specification = SimpleRoot.withS(myName)
val snow = await(locator.resolve[SearchableRepository[SimpleSnow]].search(specification))
snow.size === numOfRoots
*/
pending
}
def searchOverSnowsSpecification = { implicit locator: ServiceLocator =>
val specification = SimpleSnow.withSInSnow(myName)
val snow = await(locator.resolve[SearchableRepository[SimpleSnow]].search(specification))
snow.size === arrSR.filter(_.s == myName).size
}
def selfReference = { implicit locator: ServiceLocator =>
val sr1 = Self().create
val sr2 = Self(self = Some(sr1)).create
val sr3 = Self(self = Some(sr2)).create
val rep = locator.resolve[Repository[Selfy]]
val selfs = await(rep.find(Seq(sr1.URI, sr2.URI, sr3.URI)))
selfs.size === 3
selfs(0).slim.size === 0
selfs(0).fat.size === 0
selfs(1).slim.size === 1
selfs(1).fat.size === 1
selfs(1).slim(0) === sr1.ID
selfs(2).slim.size === 2
selfs(2).fat.size === 2
selfs(2).slim(0) === sr2.ID
selfs(2).slim(1) === sr1.ID
}
}
|
ngs-doo/dsl-client-scala
|
http/src/test/scala/com/dslplatform/test/SnowflakeTest.scala
|
Scala
|
bsd-3-clause
| 2,587
|
package spgui.widgets.itemexplorer
trait DirectoryItem {
val name: String
// TODO: will be a string/UUID
val id: String
}
case class Directory(name: String, id: String, childrenIds: Seq[String]) extends DirectoryItem
class RootDirectory(private var _items: Seq[DirectoryItem]) {
private val nestedItemIds = _items.flatMap{
case Directory(_, id, childrenIds) => childrenIds
case item: DirectoryItem => Nil
}.toSet
private var _rootLevelItemIds = _items.map(_.id).filterNot(nestedItemIds)
// parentIdMap(str) points to the parent of the item with id str; undefined if the item is at root level
private var parentIdMap: Map[String, String] = _items.flatMap{
case Directory(_, id, childrenIds) => childrenIds.map((_, id))
case _ => Nil
}.toMap
// apparently the only Scala way of stopping the client from modifying a var
def items = _items
def rootLevelItemIds = _rootLevelItemIds
// return self, to make $.modstate calls look cleaner
def addItem(item: DirectoryItem) = {
_items = item +: _items
_rootLevelItemIds = item.id +: _rootLevelItemIds
this
}
// move item to parent of target if target is not a directory
def moveItem(movedItemId: String, newParentId: String) = {
_items.find(_.id == newParentId).get match {
case mapp: Directory =>
moveItemToDir(movedItemId, newParentId)
case item: DirectoryItem if(parentIdMap.isDefinedAt(newParentId)) =>
moveItemToDir(movedItemId, parentIdMap(newParentId))
case item: DirectoryItem =>
moveItemToRoot(movedItemId)
}
this
}
private def moveItemToDir(movedItemId: String, newDirId: String) = {
_items = _items.map{
case Directory(name, `newDirId`, childrenIds) => Directory(name, newDirId, movedItemId +: childrenIds)
case Directory(name, id, childrenIds) => Directory(name, id, childrenIds.filter(_ != movedItemId))
case item: DirectoryItem => item
}
if(!parentIdMap.contains(movedItemId)) _rootLevelItemIds = _rootLevelItemIds.filter(_ != movedItemId)
parentIdMap = parentIdMap.filterKeys(_ != movedItemId) ++ Map(movedItemId -> newDirId)
}
private def moveItemToRoot(movedItemId: String) = {
_items = _items.map{
case Directory(name, id, childrenIds) => Directory(name, id, childrenIds.filter(_ != movedItemId))
case item: DirectoryItem => item
}
_rootLevelItemIds = movedItemId +: _rootLevelItemIds
parentIdMap = parentIdMap.filterKeys(_ != movedItemId)
}
}
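// ---- Editor's sketch (not part of the original file) ----
// Minimal usage of RootDirectory with a hypothetical leaf item type, showing how moveItem
// re-parents an item into a Directory and drops it from the root level.
case class PlainItem(name: String, id: String) extends DirectoryItem

object RootDirectoryExample {
  def demo(): Seq[String] = {
    val root = new RootDirectory(Seq(
      Directory("docs", "dir1", Seq.empty),
      PlainItem("readme", "item1")))
    root.moveItem("item1", "dir1") // "dir1" is a Directory, so "item1" becomes its child
    root.rootLevelItemIds          // now Seq("dir1"); "item1" is tracked via parentIdMap
  }
}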
|
kristoferB/SP
|
spgui/src/main/scala/spgui/widgets/itemexplorer/RootDirectory.scala
|
Scala
|
mit
| 2,494
|
package org.pgscala
package builder
package converters
object PGNullableIntegerConverterBuilder extends PGPredefNullableConverterBuilder {
val pgType = "integer"
val clazz = "java.lang.Integer"
val to = "Integer.toString(i)"
val from = "Integer.valueOf(i)"
}
|
melezov/pgscala
|
builder/src/main/scala/org/pgscala/builder/converters/java/PGNullableIntegerConverterBuilder.scala
|
Scala
|
bsd-3-clause
| 271
|
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.compiler.gpu_operator.expression
import cogx.compiler.parser.semantics.SemanticError
/** Expression which takes a single floating-point argument. This assumes
* that the type of the result equals the type of the argument.
*
* @param operator The operation performed to produce the expression.
* @param arg The argument to the operation.
* @return Result expression.
*/
private[gpu_operator]
class UnaryFloatExpression(operator: Operator, arg: GPUExpression)
extends GPUExpression(operator, arg.gpuType, Array(arg))
with SemanticError
{
check(arg.gpuType.isFloat, "requires non-integer argument")
}
|
hpe-cct/cct-core
|
src/main/scala/cogx/compiler/gpu_operator/expression/UnaryFloatExpression.scala
|
Scala
|
apache-2.0
| 1,269
|
package twentysix.playr.wrappers
import twentysix.playr.ResourceCaps
import twentysix.playr.core.BaseResource
import play.api.mvc.EssentialAction
import twentysix.playr.core.ResourceUpdate
import twentysix.playr.RouteFilterContext
import play.api.mvc.Handler
import play.api.mvc.RequestHeader
import twentysix.playr.core.ResourceRouteFilter
trait UpdateResourceWrapper[T <: BaseResource] extends ResourceWrapperBase {
def apply(
obj: T,
sid: String,
requestHeader: RequestHeader,
path: String,
parentContext: Option[RouteFilterContext[_]]
): Option[Handler]
}
trait DefaultUpdateResourceWrapper {
implicit def defaultImpl[T <: BaseResource] = new UpdateResourceWrapper[T] with DefaultApply[T]
}
trait DefaultFilteredUpdateResourceWrapper extends DefaultUpdateResourceWrapper {
implicit def defaultFilteredImpl[T <: BaseResource with ResourceRouteFilter] =
new UpdateResourceWrapper[T] with DefaultCaps {
def apply(
obj: T,
sid: String,
requestHeader: RequestHeader,
path: String,
parentContext: Option[RouteFilterContext[_]]
) =
obj.routeFilter.filterUpdate(
requestHeader,
RouteFilterContext(path, Some(sid), obj.parseId(sid), parentContext),
() => Some(methodNotAllowed(obj.Action))
)
}
}
object UpdateResourceWrapper extends DefaultFilteredUpdateResourceWrapper {
implicit def updateResourceImpl[T <: BaseResource with ResourceUpdate] = new UpdateResourceWrapper[T] {
def apply(
obj: T,
sid: String,
requestHeader: RequestHeader,
path: String,
parentContext: Option[RouteFilterContext[_]]
) =
obj.parseId(sid).flatMap(obj.updateResource(_))
val caps = ResourceCaps.ValueSet(ResourceCaps.Update)
}
implicit def updateFilteredResourceImpl[T <: BaseResource with ResourceUpdate with ResourceRouteFilter] =
new UpdateResourceWrapper[T] {
def apply(
obj: T,
sid: String,
requestHeader: RequestHeader,
path: String,
parentContext: Option[RouteFilterContext[_]]
) = {
val id = obj.parseId(sid)
obj.routeFilter.filterUpdate(
requestHeader,
RouteFilterContext(path, Some(sid), id, parentContext),
nextFct(id, obj.updateResource)
)
}
val caps = ResourceCaps.ValueSet(ResourceCaps.Update)
}
}
|
26lights/PlayR
|
src/main/scala/twentysix/playr/wrappers/UpdateResourceWrapper.scala
|
Scala
|
bsd-3-clause
| 2,445
|
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.execution.Cancelable
import monix.execution.cancelables.AssignableCancelable
import monix.reactive.Observable
import monix.reactive.observables.ChainedObservable
import monix.reactive.observers.Subscriber
/** Implementation for `Observable.uncancelable`. */
private[reactive] final class UncancelableObservable[A](source: Observable[A])
extends ChainedObservable[A] {
override def unsafeSubscribeFn(conn: AssignableCancelable.Multi, out: Subscriber[A]): Unit = {
ChainedObservable.subscribe(source, AssignableCancelable.dummy, out)
}
override def unsafeSubscribeFn(out: Subscriber[A]): Cancelable = {
out.scheduler.executeTrampolined(() => source.unsafeSubscribeFn(out))
Cancelable.empty
}
}
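// ---- Editor's illustration (not part of the original file) ----
// Seen from the public API (a sketch; scheduler and subscription setup omitted):
//   import monix.reactive.Observable
//   val source = Observable.range(0, 1000).uncancelable
// Cancelling a subscription to `source` is a no-op: unsafeSubscribeFn above hands the
// chain a dummy AssignableCancelable and returns Cancelable.empty, so the underlying
// subscription to `range` keeps running.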
|
Wogan/monix
|
monix-reactive/shared/src/main/scala/monix/reactive/internal/operators/UncancelableObservable.scala
|
Scala
|
apache-2.0
| 1,448
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.stream.sql
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.functions.timestamps.AscendingTimestampExtractor
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.planner.factories.TestValuesTableFactory
import org.apache.flink.table.planner.runtime.utils.StreamingWithMiniBatchTestBase.MiniBatchMode
import org.apache.flink.table.planner.runtime.utils.StreamingWithStateTestBase.StateBackendMode
import org.apache.flink.table.planner.runtime.utils._
import org.apache.flink.table.utils.LegacyRowResource
import org.apache.flink.types.Row
import org.junit.Assert._
import org.junit.{Rule, Test}
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import scala.collection.mutable
import scala.collection.JavaConversions._
@RunWith(classOf[Parameterized])
class DeduplicateITCase(miniBatch: MiniBatchMode, mode: StateBackendMode)
extends StreamingWithMiniBatchTestBase(miniBatch, mode) {
@Rule
def usesLegacyRows: LegacyRowResource = LegacyRowResource.INSTANCE
lazy val rowtimeTestData = new mutable.MutableList[(Int, Long, String)]
rowtimeTestData.+=((1, 1L, "Hi"))
rowtimeTestData.+=((1, 3L, "Hello"))
rowtimeTestData.+=((1, 2L, "Hello world"))
rowtimeTestData.+=((2, 3L, "I am fine."))
rowtimeTestData.+=((2, 6L, "Comment#1"))
rowtimeTestData.+=((3, 5L, "Comment#2"))
rowtimeTestData.+=((3, 4L, "Comment#2"))
rowtimeTestData.+=((4, 4L, "Comment#3"))
@Test
def testFirstRowOnProctime(): Unit = {
val t = failingDataSource(TestData.tupleData3)
.toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime)
tEnv.registerTable("T", t)
val sql =
"""
|SELECT a, b, c
|FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY b ORDER BY proctime) as rowNum
| FROM T
|)
|WHERE rowNum = 1
""".stripMargin
val sink = new TestingRetractSink
tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,1,Hi", "2,2,Hello", "4,3,Hello world, how are you?",
"7,4,Comment#1", "11,5,Comment#5", "16,6,Comment#10")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testFirstRowOnBuiltinProctime(): Unit = {
val t = failingDataSource(TestData.tupleData3).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("T", t)
val sql =
"""
|SELECT a, b, c
|FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY b ORDER BY proctime()) as rowNum
| FROM T
|)
|WHERE rowNum = 1
""".stripMargin
val sink = new TestingRetractSink
tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,1,Hi", "2,2,Hello", "4,3,Hello world, how are you?",
"7,4,Comment#1", "11,5,Comment#5", "16,6,Comment#10")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testLastRowOnProctime(): Unit = {
val t = failingDataSource(TestData.tupleData3)
.toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime)
tEnv.registerTable("T", t)
val sql =
"""
|SELECT a, b, c
|FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY b ORDER BY proctime DESC) as rowNum
| FROM T
|)
|WHERE rowNum = 1
""".stripMargin
val sink = new TestingRetractSink
tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,1,Hi", "3,2,Hello world", "6,3,Luke Skywalker",
"10,4,Comment#4", "15,5,Comment#9", "21,6,Comment#15")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testLastRowOnBuiltinProctime(): Unit = {
val t = failingDataSource(TestData.tupleData3).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("T", t)
val sql =
"""
|SELECT a, b, c
|FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY b ORDER BY proctime() DESC) as rowNum
| FROM T
|)
|WHERE rowNum = 1
""".stripMargin
val sink = new TestingRetractSink
tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,1,Hi", "3,2,Hello world", "6,3,Luke Skywalker",
"10,4,Comment#4", "15,5,Comment#9", "21,6,Comment#15")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testFirstRowOnRowtime(): Unit = {
val t = env.fromCollection(rowtimeTestData)
.assignTimestampsAndWatermarks(new RowtimeExtractor)
.toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime())
tEnv.registerTable("T", t)
createSinkTable("rowtime_sink")
val sql =
"""
|INSERT INTO rowtime_sink
| SELECT a, b, c, rowtime
| FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY a ORDER BY rowtime) as rowNum
| FROM T
| )
| WHERE rowNum = 1
""".stripMargin
tEnv.executeSql(sql).await()
val rawResult = TestValuesTableFactory.getRawResults("rowtime_sink")
val expected = List(
"+I(1,1,Hi,1970-01-01T00:00:00.001)",
"+I(2,3,I am fine.,1970-01-01T00:00:00.003)",
"+I(3,5,Comment#2,1970-01-01T00:00:00.005)",
"-U(3,5,Comment#2,1970-01-01T00:00:00.005)",
"+U(3,4,Comment#2,1970-01-01T00:00:00.004)",
"+I(4,4,Comment#3,1970-01-01T00:00:00.004)")
assertEquals(expected.sorted, rawResult.sorted)
}
@Test
def testFirstRowOnRowTimeFollowedByUnboundedAgg(): Unit = {
val t = env.fromCollection(rowtimeTestData)
.assignTimestampsAndWatermarks(new RowtimeExtractor)
.toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime())
tEnv.registerTable("T", t)
tEnv.executeSql(
s"""
|CREATE TABLE rowtime_sink (
| cnt BIGINT
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'false',
| 'changelog-mode' = 'I,UA,D'
|)
|""".stripMargin)
val sql =
"""
|INSERT INTO rowtime_sink
|SELECT COUNT(b) FROM (
| SELECT a, b, c, rowtime
| FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY b ORDER BY rowtime) as rowNum
| FROM T
| )
| WHERE rowNum = 1
| )
""".stripMargin
tEnv.executeSql(sql).await()
val rawResult = TestValuesTableFactory.getResults("rowtime_sink")
val expected = List("6")
assertEquals(expected.sorted, rawResult.sorted)
}
@Test
def testLastRowOnRowtime(): Unit = {
val t = env.fromCollection(rowtimeTestData)
.assignTimestampsAndWatermarks(new RowtimeExtractor)
.toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime())
tEnv.registerTable("T", t)
createSinkTable("rowtime_sink")
val sql =
"""
|INSERT INTO rowtime_sink
| SELECT a, b, c, rowtime
| FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY b ORDER BY rowtime DESC) as rowNum
| FROM T
| )
| WHERE rowNum = 1
""".stripMargin
tEnv.executeSql(sql).await()
val rawResult = TestValuesTableFactory.getRawResults("rowtime_sink")
val expected = List(
"+I(1,1,Hi,1970-01-01T00:00:00.001)",
"+I(1,3,Hello,1970-01-01T00:00:00.003)",
"+I(1,2,Hello world,1970-01-01T00:00:00.002)",
"-U(1,3,Hello,1970-01-01T00:00:00.003)",
"+U(2,3,I am fine.,1970-01-01T00:00:00.003)",
"+I(2,6,Comment#1,1970-01-01T00:00:00.006)",
"+I(3,5,Comment#2,1970-01-01T00:00:00.005)",
"+I(3,4,Comment#2,1970-01-01T00:00:00.004)",
"-U(3,4,Comment#2,1970-01-01T00:00:00.004)",
"+U(4,4,Comment#3,1970-01-01T00:00:00.004)")
assertEquals(expected.sorted, rawResult.sorted)
}
@Test
def testLastRowOnRowTimeFollowedByUnboundedAgg(): Unit = {
val t = env.fromCollection(rowtimeTestData)
.assignTimestampsAndWatermarks(new RowtimeExtractor)
.toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime())
tEnv.registerTable("T", t)
tEnv.executeSql(
s"""
|CREATE TABLE rowtime_sink (
| cnt BIGINT
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'false',
| 'changelog-mode' = 'I,UA,D'
|)
|""".stripMargin)
val sql =
"""
|INSERT INTO rowtime_sink
|SELECT COUNT(b) FROM (
| SELECT a, b, c, rowtime
| FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY b ORDER BY rowtime DESC) as rowNum
| FROM T
| )
| WHERE rowNum = 1
| )
""".stripMargin
tEnv.executeSql(sql).await()
val rawResult = TestValuesTableFactory.getResults("rowtime_sink")
val expected = List("6")
assertEquals(expected.sorted, rawResult.sorted)
}
def createSinkTable(tableName: String): Unit = {
tEnv.executeSql(
s"""
|CREATE TABLE $tableName (
| a INT,
| b BIGINT,
| c STRING,
| rowtime TIMESTAMP(3)
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'false',
| 'changelog-mode' = 'I,UA,D'
|)
|""".stripMargin)
}
}
class RowtimeExtractor extends AscendingTimestampExtractor[(Int, Long, String)] {
override def extractAscendingTimestamp(element: (Int, Long, String)): Long = element._2
}
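// ---- Editor's note (not part of the original file) ----
// The deduplication pattern exercised by the tests above, in its generic form:
//   SELECT <cols> FROM (
//     SELECT *, ROW_NUMBER() OVER (PARTITION BY <key> ORDER BY <time attribute> [DESC]) AS rowNum
//     FROM <table>
//   ) WHERE rowNum = 1
// Ascending order keeps the first row per key, DESC keeps the last; the time attribute may be
// proctime/proctime() or a rowtime column, as in the cases above.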
|
tillrohrmann/flink
|
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/stream/sql/DeduplicateITCase.scala
|
Scala
|
apache-2.0
| 10,295
|
package fp_in_scala.chapter_03
import scala.annotation.tailrec
import scala.collection.immutable
object ListUtils {
def setHead[A](as: List[A])(newHead: A): List[A] = as match {
case Nil => Nil
case Cons(head, tail) => Cons(newHead, tail)
}
implicit def fromScala[A](scalaList: scala.List[A]): List[A] = {
@tailrec
def doFromScala(acc: List[A], scalaList: scala.List[A]): List[A] = {
scalaList match {
case immutable.Nil => acc
case head :: tail => doFromScala(Cons(head, acc), tail)
}
}
doFromScala(Nil, scalaList.reverse)
}
implicit def toScala[A](ownList: List[A]): scala.List[A] = {
@tailrec
def doToScala(acc: scala.List[A], ownList: List[A]): scala.List[A] = {
ownList match {
case Nil => acc
case Cons(head, tail) => doToScala(head :: acc, tail)
}
}
doToScala(scala.List.empty, ownList).reverse
}
def sumUsingFoldLeft(numbers: List[Int]): Int = numbers.foldLeft(0)(_ + _)
def productUsingFoldLeft(numbers: List[Int]): Int = numbers.foldLeft(1)(_ * _)
def lengthUsingFoldLeft[A](numbers: List[A]): Int = numbers.foldLeft(0)((b, a) => b+1)
def reverseUsingFoldLeft[A](list: List[A]): List[A] = list.foldLeft(Nil:List[A])((b: List[A], a: A) => Cons(a, b))
def foldRightUsingFoldLeft[A, B](list: List[A], acc: B)(f: (A, B) => B): B = {
reverseUsingFoldLeft(list).foldLeft(acc)((acc: B, a: A) => f(a, acc))
}
def foldLeftUsingFoldRight[A, B](list: List[A], acc: B)(f: (B, A) => B): B = {
list.foldRight(acc)((a: A, acc: B) => f(acc, a))
}
def appendUsingFoldLeft[A](list: List[A], other: List[A]): List[A] = {
reverseUsingFoldLeft(list).foldLeft(other)((b, a) => Cons(a, b))
}
def appendUsingFoldRight[A](list: List[A], other: List[A]): List[A] = {
foldRightUsingFoldLeft(list, other)((a, b) => Cons(a, b))
}
def concatenateListOfLists[A](list: List[List[A]]): List[A] = {
val concatenated = list.foldLeft(Nil: List[A]) {
(acc: List[A], a: List[A]) =>
a.foldLeft(acc)((b, a) => Cons(a, b))
}
reverseUsingFoldLeft(concatenated)
}
def addTwoIntLists(list: List[Int], other: List[Int]): List[Int] = {
@tailrec
def doAddTwoLists(acc: List[Int], list: List[Int], other: List[Int]): List[Int] = list match {
case Cons(head1, cons1) => other match {
case Cons(head2, cons2) =>
doAddTwoLists(Cons(head1 + head2, acc), cons1, cons2)
case Nil =>
sys.error("Lists not of the same size")
}
case Nil if other.isEmpty => acc
case Nil => sys.error("Lists not of the same size")
}
ListUtils.reverseUsingFoldLeft(doAddTwoLists(Nil, list, other))
}
@tailrec
def hasSubsequence[A](list: List[A], subsequence: List[A]): Boolean = {
@tailrec
def startsWithSubsequence(list: List[A], subsequence: List[A]): Boolean = list match {
case Nil => subsequence.isEmpty
case Cons(head, tail) if subsequence.isEmpty => true
case Cons(head, tail) if head != subsequence.head => false
case Cons(head, tail) => startsWithSubsequence(tail, subsequence.tail)
}
if (subsequence.isEmpty) true
else {
list match {
case Nil => false
case Cons(head, tail) if startsWithSubsequence(list, subsequence) => true
case Cons(head, tail) => hasSubsequence(tail, subsequence) // Continue with next
}
}
}
}
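// ---- Editor's sketch (not part of the original file) ----
// Exercising a few of the helpers above. Assumes the chapter's own List/Cons/Nil algebraic
// data type is defined elsewhere in fp_in_scala.chapter_03, as the code above implies.
object ListUtilsExample {
  import ListUtils._

  def demo(): Unit = {
    val xs: List[Int] = fromScala(scala.List(1, 2, 3, 4))
    val ys: List[Int] = fromScala(scala.List(10, 20, 30, 40))
    println(sumUsingFoldLeft(xs))                            // 10
    println(toScala(addTwoIntLists(xs, ys)))                 // List(11, 22, 33, 44)
    println(hasSubsequence(xs, fromScala(scala.List(2, 3)))) // true
  }
}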
|
jankeesvanandel/fp-in-scala
|
src/main/scala/fp_in_scala/chapter_03/ListUtils.scala
|
Scala
|
apache-2.0
| 3,421
|
package com.theseventhsense.datetime
import java.util.Date
/**
* Created by erik on 6/15/16.
*/
abstract class AbstractImplicits extends Serializable {
implicit def dateOrdering: Ordering[Date] =
Ordering.fromLessThan(_.getTime < _.getTime)
}
|
7thsense/utils-datetime
|
shared/src/main/scala/com/theseventhsense/datetime/AbstractImplicits.scala
|
Scala
|
mit
| 256
|
/*
Copyright (c) 2017, Qvantel
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Qvantel nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL Qvantel BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.qvantel.jsonapi
import com.netaporter.uri.Uri
final case class ApiRoot(apiRoot: Option[Uri])
object ApiRoot {
def empty = ApiRoot(None)
}
|
Doikor/jsonapi-scala
|
core/src/main/scala/com/qvantel/jsonapi/ApiRoot.scala
|
Scala
|
bsd-3-clause
| 1,629
|
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.sbt.scripted
import java.io.BufferedReader
import java.io.InputStreamReader
import java.net.HttpURLConnection
import com.lightbend.lagom.sbt.Internal
import com.lightbend.lagom.sbt.LagomPlugin
import com.lightbend.lagom.sbt.NonBlockingInteractionMode
import com.lightbend.lagom.core.LagomVersion
import sbt.Keys._
import sbt._
import sbt.complete.Parser
import scala.util.Try
import scala.util.control.NonFatal
object ScriptedTools extends AutoPlugin {
private val ConnectTimeout = 10000
private val ReadTimeout = 10000
override def trigger = allRequirements
override def requires = LagomPlugin
object autoImport {
val validateRequest = inputKey[Response]("Validate the given request")
val validateFile = inputKey[File]("Validate a file")
val lagomSbtScriptedLibrary = "com.lightbend.lagom" %% "lagom-sbt-scripted-library" % LagomVersion.current
}
import autoImport._
override def buildSettings: Seq[Setting[_]] = Seq(
validateRequest := {
val log = streams.value.log
val validateRequest = validateRequestParser.parsed
def attempt(): Response = {
log.info("Making request on " + validateRequest.uri.get)
val conn = validateRequest.uri.get.toURL.openConnection().asInstanceOf[HttpURLConnection]
try {
conn.setConnectTimeout(ConnectTimeout)
conn.setReadTimeout(ReadTimeout)
val status = conn.getResponseCode
if (validateRequest.shouldBeDown) {
throw ShouldBeDownException(status)
}
validateRequest.statusAssertion(status)
// HttpURLConnection throws FileNotFoundException on getInputStream when the status is 404
// HttpURLConnection throws IOException on getInputStream when the status is 500
val body =
Try {
val br = new BufferedReader(new InputStreamReader(conn.getInputStream))
Stream.continually(br.readLine()).takeWhile(_ != null).mkString("\\n")
}.recover {
case _ => ""
}.get
validateRequest.bodyAssertion(body)
Response(status, body)
} catch {
case NonFatal(ShouldBeDownException(status)) =>
val msg = s"Expected server to be down but request completed with status $status."
log.error(msg)
sys.error(msg)
case NonFatal(t) if validateRequest.shouldBeDown =>
Response(0, "")
case NonFatal(t) =>
val msg = t.getStackTrace.map(_.toString).mkString("\\n ")
log.error(msg)
sys.error(msg)
} finally {
conn.disconnect()
}
}
if (validateRequest.retry) {
repeatUntilSuccessful(log, attempt())
} else {
attempt()
}
},
aggregate in validateRequest := false,
validateFile := {
val validateFile = validateFileParser.parsed
val file = baseDirectory.value / validateFile.file.get
val log = streams.value.log
def attempt() = {
log.info("Validating file " + file)
val contents = IO.read(file)
validateFile.assertions(contents)
}
if (validateFile.retry) {
repeatUntilSuccessful(log, attempt())
} else {
attempt()
}
file
},
aggregate in validateFile := false,
Internal.Keys.interactionMode := NonBlockingInteractionMode
)
override def projectSettings: Seq[Setting[_]] = Seq(
scalaVersion := sys.props.get("scala.version").getOrElse("2.12.9")
)
private def repeatUntilSuccessful[T](log: Logger, operation: => T, times: Int = 10): T = {
try {
operation
} catch {
case NonFatal(t) =>
if (times <= 1) {
throw t
} else {
log.warn(s"Operation failed, $times attempts left")
Thread.sleep(500)
repeatUntilSuccessful(log, operation, times - 1)
}
}
}
case class ShouldBeDownException(actualStatus: Int) extends RuntimeException
case class Response(status: Int, body: String)
private case class ValidateRequest(
uri: Option[URI] = None,
retry: Boolean = false,
shouldBeDown: Boolean = false,
statusAssertion: Int => Unit = _ => (),
bodyAssertion: String => Unit = _ => ()
)
private val validateRequestParser: Parser[ValidateRequest] = {
import complete.DefaultParsers._
type ApplyOption = ValidateRequest => ValidateRequest
def optionArg[A](opt: String, parser: Parser[A])(applyOption: A => ApplyOption): Parser[ApplyOption] = {
(literal(opt) ~> Space ~> parser).map(applyOption)
}
def option(opt: String)(applyOption: ApplyOption): Parser[ApplyOption] = {
literal(opt).map(_ => applyOption)
}
def bodyAssertionOption(opt: String, description: String)(
assertion: (String, String) => Boolean
): Parser[ApplyOption] = {
optionArg(opt, StringBasic)(
expected =>
v => {
val oldAssertion = v.bodyAssertion
v.copy(bodyAssertion = body => {
// First run the existing assertion
oldAssertion(body)
// Now run this assertion
if (!assertion(body, expected)) sys.error(s"Expected body to $description '$expected' but got '$body'")
})
}
)
}
def statusAssertionOption(opt: String, description: String)(
assertion: (Int, Int) => Boolean
): Parser[ApplyOption] = {
optionArg(opt, NatBasic)(
expected =>
v => {
val oldAssertion = v.statusAssertion
v.copy(statusAssertion = status => {
oldAssertion(status)
if (!assertion(status, expected)) sys.error(s"Expected status to $description $expected but got $status")
})
}
)
}
val retry = option("retry-until-success")(_.copy(retry = true))
val shouldBeDown = option("should-be-down")(_.copy(shouldBeDown = true))
val status = statusAssertionOption("status", "equal")(_ == _)
val notStatus = statusAssertionOption("not-status", "not equal")(_ != _)
val contains = bodyAssertionOption("body-contains", "contain")(_.contains(_))
val notContains = bodyAssertionOption("body-not-contains", "not contain")(!_.contains(_))
val equals = bodyAssertionOption("body-equals", "equal")(_ == _)
val notEquals = bodyAssertionOption("body-not-equals", "not equal")(_ != _)
val matches =
bodyAssertionOption("body-matches", "match")((body, regexp) => regexp.r.pattern.matcher(body).matches())
val notMatches =
bodyAssertionOption("body-not-matches", "not match")((body, regexp) => !regexp.r.pattern.matcher(body).matches())
val uri = basicUri.map(uri => (validateRequest: ValidateRequest) => validateRequest.copy(uri = Some(uri)))
Space ~> repsep(
retry | shouldBeDown | status | notStatus | contains | notContains | matches | notMatches | equals | notEquals | uri,
Space
).map { options =>
options.foldLeft(ValidateRequest())((validateRequest, applyOption) => applyOption(validateRequest))
}
.filter(_.uri.isDefined, _ => "No URI supplied")
}
private case class ValidateFile(
file: Option[String] = None,
retry: Boolean = false,
assertions: String => Unit = _ => ()
)
private val validateFileParser: Parser[ValidateFile] = {
import complete.DefaultParsers._
def assertionOption[A](opt: String, parser: Parser[A])(
assertion: (String, A) => Unit
): Parser[ValidateFile => ValidateFile] = {
(literal(opt) ~> Space ~> parser).map(
expected =>
(v: ValidateFile) => {
val oldAssertions = v.assertions
v.copy(assertions = contents => {
// First run the existing assertion
oldAssertions(contents)
// Now run this assertion
assertion(contents, expected)
})
}
)
}
val retry = literal("retry-until-success").map(_ => (v: ValidateFile) => v.copy(retry = true))
val lineCount = assertionOption("line-count", NatBasic) { (contents, expected) =>
val count = contents.linesIterator.size
if (count != expected) sys.error(s"Expected line count of $expected but got $count")
}
val contains = assertionOption("contains", StringBasic)(
(contents, expected) =>
if (!contents.contains(expected)) sys.error(s"Expected file to contain '$expected' but got '$contents'")
)
val notContains = assertionOption("not-contains", StringBasic)(
(contents, expected) =>
if (contents.contains(expected)) sys.error(s"Expected file to not contain '$expected' but got '$contents'")
)
val file = StringBasic.map(fileName => (v: ValidateFile) => v.copy(file = Some(fileName)))
Space ~> repsep(retry | lineCount | contains | notContains | file, Space)
.map { options =>
options.foldLeft(ValidateFile())((v, applyOption) => applyOption(v))
}
.filter(_.file.isDefined, _ => "No file supplied")
}
}
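// ---- Editor's illustration (not part of the original file) ----
// Hypothetical lines from an sbt-scripted `test` script using the two input keys above
// (the URI, file name and expected values are made up):
//   > validateRequest http://localhost:9000/api/hello retry-until-success status 200 body-contains hello
//   > validateRequest http://localhost:9000/api/hello should-be-down
//   > validateFile target/server.log retry-until-success contains "Server started"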
|
rcavalcanti/lagom
|
dev/sbt-scripted-tools/src/main/scala/com/lightbend/lagom/sbt/scripted/ScriptedTools.scala
|
Scala
|
apache-2.0
| 9,265
|
/*
Copyright 2013 Josh Conrad
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package groupcache.lru
/**
* Tracks stats of LRU cache usage.
* @param bytes Total bytes of all keys and values.
* @param items Number of key/value entries in the cache.
* @param gets Number of gets that have been attempted.
* @param hits Number of gets that have resulted in a cache hit.
* @param evictions Number of entries that have been evicted
* from the cache for any reason.
*/
class CacheStats(val bytes: Long,
val items: Long,
val gets: Long,
val hits: Long,
val evictions: Long) {
}
|
jmconrad/scala-groupcache
|
src/main/scala/groupcache/lru/CacheStats.scala
|
Scala
|
apache-2.0
| 1,148
|
/**
* Copyright (C) 2010 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.test
import org.dom4j.{Document ⇒ JDocument}
import org.junit.After
import org.orbeon.oxf.processor.ProcessorUtils
import org.orbeon.oxf.xforms.state.AnnotatedTemplate
import org.orbeon.oxf.xforms.{XFormsStaticStateImpl, XFormsContainingDocument}
abstract class DocumentTestBase extends ResourceManagerTestBase with XFormsSupport with TestSupport {
private var _document: XFormsContainingDocument = _
def document = _document
def setupDocument(documentURL: String): XFormsContainingDocument =
setupDocument(ProcessorUtils.createDocumentFromURL(documentURL, null))
def setupDocument(xhtml: JDocument): XFormsContainingDocument = {
ResourceManagerTestBase.staticSetup()
val (template, staticState) = XFormsStaticStateImpl.createFromDocument(xhtml)
this._document = new XFormsContainingDocument(staticState, AnnotatedTemplate(template), null, null)
_document.afterInitialResponse()
_document.beforeExternalEvents(null)
_document
}
@After def disposeDocument() {
if (_document ne null) {
_document.afterExternalEvents()
_document.afterUpdateResponse()
_document = null
}
}
}
|
evlist/orbeon-forms
|
src/test/scala/org/orbeon/oxf/test/DocumentTestBase.scala
|
Scala
|
lgpl-2.1
| 1,888
|
package scala.scalajs.runtime
import java.lang.{Long => JLong}
/** Explicit box for longs when doing a reflective call.
* This class and its methods are only here to properly support reflective
* calls on longs.
*/
class LongReflectiveCall(value: Long) {
// Methods of java.lang.Long
def byteValue(): Byte = value.toByte
def shortValue(): Short = value.toShort
def intValue(): Int = value.toInt
def longValue(): Long = value
def floatValue(): Float = value.toFloat
def doubleValue(): Double = value.toDouble
def compareTo(that: JLong): Int =
new JLong(value).compareTo(that)
def compareTo(that: AnyRef): Int =
new JLong(value).compareTo(that.asInstanceOf[JLong])
// Methods of scala.Long
def toByte: Byte = value.toByte
def toShort: Short = value.toShort
def toChar: Char = value.toChar
def toInt: Int = value.toInt
def toLong: Long = value
def toFloat: Float = value.toFloat
def toDouble: Double = value.toDouble
def unary_~ : Long = ~value
def unary_+ : Long = value
def unary_- : Long = -value
def <<(y: Int): Long = value << y
def <<(y: Long): Long = value << y
def >>>(y: Int): Long = value >>> y
def >>>(y: Long): Long = value >>> y
def >>(y: Int): Long = value >> y
def >>(y: Long): Long = value >> y
def ==(y: Byte): Boolean = value == y
def ==(y: Short): Boolean = value == y
def ==(y: Char): Boolean = value == y
def ==(y: Int): Boolean = value == y
def ==(y: Long): Boolean = value == y
def ==(y: Float): Boolean = value == y
def ==(y: Double): Boolean = value == y
def !=(y: Byte): Boolean = value != y
def !=(y: Short): Boolean = value != y
def !=(y: Char): Boolean = value != y
def !=(y: Int): Boolean = value != y
def !=(y: Long): Boolean = value != y
def !=(y: Float): Boolean = value != y
def !=(y: Double): Boolean = value != y
def <(y: Byte): Boolean = value < y
def <(y: Short): Boolean = value < y
def <(y: Char): Boolean = value < y
def <(y: Int): Boolean = value < y
def <(y: Long): Boolean = value < y
def <(y: Float): Boolean = value < y
def <(y: Double): Boolean = value < y
def <=(y: Byte): Boolean = value <= y
def <=(y: Short): Boolean = value <= y
def <=(y: Char): Boolean = value <= y
def <=(y: Int): Boolean = value <= y
def <=(y: Long): Boolean = value <= y
def <=(y: Float): Boolean = value <= y
def <=(y: Double): Boolean = value <= y
def >(y: Byte): Boolean = value > y
def >(y: Short): Boolean = value > y
def >(y: Char): Boolean = value > y
def >(y: Int): Boolean = value > y
def >(y: Long): Boolean = value > y
def >(y: Float): Boolean = value > y
def >(y: Double): Boolean = value > y
def >=(y: Byte): Boolean = value >= y
def >=(y: Short): Boolean = value >= y
def >=(y: Char): Boolean = value >= y
def >=(y: Int): Boolean = value >= y
def >=(y: Long): Boolean = value >= y
def >=(y: Float): Boolean = value >= y
def >=(y: Double): Boolean = value >= y
def |(y: Byte): Long = value | y
def |(y: Short): Long = value | y
def |(y: Char): Long = value | y
def |(y: Int): Long = value | y
def |(y: Long): Long = value | y
def &(y: Byte): Long = value & y
def &(y: Short): Long = value & y
def &(y: Char): Long = value & y
def &(y: Int): Long = value & y
def &(y: Long): Long = value & y
def ^(y: Byte): Long = value ^ y
def ^(y: Short): Long = value ^ y
def ^(y: Char): Long = value ^ y
def ^(y: Int): Long = value ^ y
def ^(y: Long): Long = value ^ y
def +(y: Byte): Long = value + y
def +(y: Short): Long = value + y
def +(y: Char): Long = value + y
def +(y: Int): Long = value + y
def +(y: Long): Long = value + y
def +(y: Float): Float = value + y
def +(y: Double): Double = value + y
def -(y: Byte): Long = value - y
def -(y: Short): Long = value - y
def -(y: Char): Long = value - y
def -(y: Int): Long = value - y
def -(y: Long): Long = value - y
def -(y: Float): Float = value - y
def -(y: Double): Double = value - y
def *(y: Byte): Long = value * y
def *(y: Short): Long = value * y
def *(y: Char): Long = value * y
def *(y: Int): Long = value * y
def *(y: Long): Long = value * y
def *(y: Float): Float = value * y
def *(y: Double): Double = value * y
def /(y: Byte): Long = value / y
def /(y: Short): Long = value / y
def /(y: Char): Long = value / y
def /(y: Int): Long = value / y
def /(y: Long): Long = value / y
def /(y: Float): Float = value / y
def /(y: Double): Double = value / y
def %(y: Byte): Long = value % y
def %(y: Short): Long = value % y
def %(y: Char): Long = value % y
def %(y: Int): Long = value % y
def %(y: Long): Long = value % y
def %(y: Float): Float = value % y
def %(y: Double): Double = value % y
}
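// ---- Editor's illustration (not part of the original file) ----
// The kind of call this box exists to support: a structural-type (reflective) call whose
// receiver happens to be a Long. On the JVM the boxed java.lang.Long already provides such
// methods; on Scala.js longs are not java.lang.Long at runtime, so the reflective call is
// routed through this class instead.
//   import scala.language.reflectiveCalls
//   def asInt(x: Any { def intValue(): Int }): Int = x.intValue()
//   asInt(42L) // 42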
|
matthughes/scala-js
|
library/src/main/scala/scala/scalajs/runtime/LongReflectiveCall.scala
|
Scala
|
bsd-3-clause
| 4,749
|
package ozmi.lambda_core.sql
import org.kiama.output._
sealed abstract class SqlExpr extends PrettyExpression
sealed abstract class Relation extends SqlExpr
case class ObjectRef (schemaName : Option[String], objectName : String) extends Relation
case class Rename (baseRel : Relation, newName : String) extends Relation
case class Select (baseRel : Relation, selectors : Seq[Selector]) extends Relation
case class Where (baseRel : Relation, predicate : ColumnExpr) extends Relation
case class Join (baseRel : Relation, joinType : JoinType, joinedRel : Relation, joinPredicate : ColumnExpr) extends Relation
sealed trait Selector
case class SingleColumnSelector (expr : ColumnExpr, alias : Option[String]) extends Selector
case class AllColumnsSelector (objectAlias : Option[String]) extends Selector
sealed trait JoinType
case object InnerJoin extends JoinType
sealed trait ColumnExpr extends SqlExpr
case class ColumnRef (objectRef : Option[String], columnRef : String) extends ColumnExpr
case class UnaryOp (operator : String, arg : ColumnExpr) extends ColumnExpr
case class BinaryOp (operator : String, arg1 : ColumnExpr, arg2 : ColumnExpr) extends ColumnExpr with PrettyBinaryExpression {
val fixity = BinaryOp.fixity (operator)
val priority = BinaryOp.priority (operator)
val left = arg1
val op = operator
val right = arg2
}
object BinaryOp {
def fixity (op : String) : Fixity =
op match {
case "+" | "-" | "*" | "/" => Infix (LeftAssoc)
}
def priority (op : String) : Int =
// lower number is higher priority
op match {
case "+" | "-" => 2
case "*" | "/" => 1
}
}
case class ColFunCall (functionName : String, args : ColumnExpr*) extends ColumnExpr
sealed trait Literal extends ColumnExpr
case class IntegerLit (value : BigInt) extends Literal
case class StringLit (value : String) extends Literal
case class DecimalLit (value : BigDecimal) extends Literal
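// ---- Editor's sketch (not part of the original file) ----
// Building the AST for roughly: SELECT t.x + 2 * t.y AS z, t.* FROM sch.tbl t
// Note that only "+", "-", "*" and "/" are usable in BinaryOp, since fixity/priority
// pattern-match on exactly those; priority 1 (* and /) binds tighter than priority 2
// (+ and -), which is what a Kiama pretty-printer uses to decide where parentheses go.
object SqlExprExample {
  val query: Relation =
    Select(
      Rename(ObjectRef(Some("sch"), "tbl"), "t"),
      Seq(
        SingleColumnSelector(
          BinaryOp("+",
            ColumnRef(Some("t"), "x"),
            BinaryOp("*", IntegerLit(2), ColumnRef(Some("t"), "y"))),
          alias = Some("z")),
        AllColumnsSelector(Some("t"))))
}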
|
ozmi/lambda_core
|
src/main/scala/ozmi/lambda_core/sql/SqlExpr.scala
|
Scala
|
mit
| 2,006
|
package de.zalando.beard.performance
import com.github.jknack.handlebars.Handlebars
import com.github.jknack.handlebars.io.ClassPathTemplateLoader
import org.scalameter.api._
import scala.collection.JavaConverters._
/**
* @author dpersa
*/
object HandlebarsBenchmark extends Bench.LocalTime {
val loader = new ClassPathTemplateLoader("/handlebars-benchmark")
var handlebars = new Handlebars(loader)
var template = handlebars.compile("index")
val context = Map[String, AnyRef]("example" -> Map("title" -> "Mustache").asJava,
"presentations" -> Seq(Map("title" -> "Title1", "speakerName" -> "Name1", "summary" -> "Summary1").asJava,
Map("title" -> "Title2", "speakerName" -> "Name2", "summary" -> "Summary2").asJava).asJava).asJava
val sizes = Gen.range("size")(1, 100000, 20000)
val ranges = for {
size <- sizes
} yield 0 until size
performance of "Handlebars" in {
measure method "render" in {
using(ranges) in {
(r: Range) => {
r.foreach { _ =>
template.apply(context)
}
}
}
}
}
}
|
danpersa/beard
|
src/test/scala/de/zalando/beard/performance/HandlebarsBenchmark.scala
|
Scala
|
apache-2.0
| 1,094
|
// Solution-4.scala
// Solution to Exercise 4 in "Overloading"
class FunNumbers2 {
def f(i:Int, j:Int):Double = { i + j }
// This won't work (redefined function, different only in return type):
def f(i:Int, j:Int):Int = { i + j }
}
/* OUTPUT_SHOULD_CONTAIN
error: method f is defined twice
def f(i:Int, j:Int):Int = { i + j }
^
one error found
*/
|
P7h/ScalaPlayground
|
Atomic Scala/atomic-scala-solutions/24_Overloading/Solution-4.scala
|
Scala
|
apache-2.0
| 364
|
package org.jetbrains.plugins.scala.lang.psi.stubs
import com.intellij.psi.stubs.StubElement
import org.jetbrains.plugins.scala.lang.psi.api.base.ScStableCodeReference
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportSelector
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.ScTypeElementOwnerStub
trait ScImportSelectorStub extends StubElement[ScImportSelector] with ScTypeElementOwnerStub[ScImportSelector] {
def isAliasedImport: Boolean
def referenceText: Option[String]
def reference: Option[ScStableCodeReference]
def isWildcardSelector: Boolean
def importedName: Option[String]
def aliasName: Option[String]
def isGivenSelector: Boolean
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/stubs/ScImportSelectorStub.scala
|
Scala
|
apache-2.0
| 696
|
package de.csmath.scalog
object Types {
sealed trait PrologType
trait Term extends PrologType
case class Var(name: String) extends Term with PlList
trait Const extends Term
case class Atom(name: String) extends Const
case class ConstInt(value: Int) extends Const
case class ConstStr(value: String) extends Const
case class Struct(functor: Atom, terms: List[Term]) extends Term
//Prolog List
trait PlList extends Term
case object PlNil extends PlList
case class PlCons(head: Term, tail: PlList) extends PlList
//Predicates
trait Predicate extends PrologType
case class Clause(head: Struct, body: List[Struct]) extends Predicate
case class Query(predicates: List[Struct]) extends Predicate
}
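// ---- Editor's sketch (not part of the original file) ----
// Encoding the Prolog fact `parent(tom, bob).` and the query `?- parent(tom, X).`
// with the types above.
object TypesExample {
  import Types._

  // parent(tom, bob).
  val fact: Clause = Clause(Struct(Atom("parent"), List(Atom("tom"), Atom("bob"))), Nil)

  // ?- parent(tom, X).
  val query: Query = Query(List(Struct(Atom("parent"), List(Atom("tom"), Var("X")))))
}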
|
lpcsmath/scalog
|
src/main/scala/de/csmath/scalog/Types.scala
|
Scala
|
bsd-2-clause
| 731
|
package gines.simulation
class Cell(val typ: CellType)
sealed abstract class CellType {
val name: String
}
case object School extends CellType {
val name: String = "School"
}
case object Work extends CellType {
val name: String = "Work"
}
case object Home extends CellType {
val name: String = "Home"
}
case object FakeHome extends CellType {
val name : String = "FakeHome"
}
|
mikusp/gines
|
src/main/scala/gines/simulation/Cell.scala
|
Scala
|
gpl-3.0
| 391
|
package com.sksamuel.elastic4s.requests.searches.aggs
import com.sksamuel.exts.OptionImplicits._
case class MissingAggregation(name: String,
field: Option[String] = None,
subaggs: Seq[AbstractAggregation] = Nil,
metadata: Map[String, AnyRef] = Map.empty)
extends Aggregation {
type T = MissingAggregation
def field(field: String): T = copy(field = field.some)
override def subAggregations(aggs: Iterable[AbstractAggregation]): T = copy(subaggs = aggs.toSeq)
override def metadata(map: Map[String, AnyRef]): T = copy(metadata = map)
}
|
stringbean/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/aggs/MissingAggregation.scala
|
Scala
|
apache-2.0
| 659
|
package adtoyou.spark.analysis
import aiouniya.spark.MyRDDFunctions._
import aiouniya.spark.common.Constants._
import aiouniya.spark.common.MacPartitioner
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, LoadIncrementalHFiles}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, KeyValue, TableName}
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.apache.spark.{SparkConf, SparkContext}
import scala.collection.mutable
/**
* Created by tuyou006 on 2017/5/10.
*/
object MacImeiRestore {
def main(args: Array[String]): Unit = {
val conf = new SparkConf()
.setAppName("Mac Test")
.setMaster("yarn-client")
.set("spark.shuffle.file.buffer", "128k")
.set("spark.reducer.maxSizeInFlight", "96m")
.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
.set("spark.kryo.referenceTracking", "false")
val sc = new SparkContext(conf)
val hbaseConf = HBaseConfiguration.create
hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")
// set "zookeeper.znode.parent" in hbase-site.xml
hbaseConf.set("zookeeper.znode.parent", "/hbase-unsecure")
hbaseConf.set("hbase.zookeeper.quorum", "master.hdp,slave01.hdp,slave02.hdp,slave25.hdp,slave26.hdp")
val conn = ConnectionFactory.createConnection(hbaseConf)
val tableName = TableName.valueOf(MAC_TEST_TABLE)
val admin = conn.getAdmin
val table = conn.getTable(tableName)
val regionLocator = conn.getRegionLocator(tableName)
val startKeys = regionLocator.getStartKeys
val macPartitioner = new MacPartitioner(startKeys)
val job = Job.getInstance(hbaseConf)
job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
job.setMapOutputValueClass(classOf[KeyValue])
val outDir = s"/drp/tmp/$MAC_TEST_TABLE"
job.getConfiguration.set(FileOutputFormat.OUTDIR, outDir)
HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator)
val hdconf = sc.hadoopConfiguration
val hdfs = FileSystem.get(hdconf)
val hLoader = new LoadIncrementalHFiles(hbaseConf)
var out = sc.parallelize(Seq(("", "")), 1)
for (i <- 4 to 10) {
try {
hdfs.delete(new Path(outDir), true)
} catch {
case _: Throwable => {}
}
val d = "%02d".format(i)
out = sc.forPath(s"/drp/tyv2/data/mac_data_3/2017$d*")
.combineWithPath
.map(_._2.split("\t"))
.filter(arr =>
arr.length == 7 && arr(3).trim.length > 0 && arr(4).trim.length > 0)
.map(arr => (arr(4).trim, mutable.Set(arr(3).trim)))
.reduceByKey(_ ++ _)
.flatMapValues(_.toIterator)
out.map(tup =>
convertMapping(tup.productIterator.mkString("|")))
.repartitionAndSortWithinPartitions(macPartitioner)
.saveAsNewAPIHadoopFile(outDir,
classOf[ImmutableBytesWritable],
classOf[KeyValue],
classOf[HFileOutputFormat2],
job.getConfiguration)
hLoader.doBulkLoad(new Path(outDir), admin, table, regionLocator)
}
sc.stop()
}
def convertMapping(key: String) = {
val rowKey = Bytes.toBytes(key)
val cell = new KeyValue(rowKey, FAMILY, VALID_MAP, Array[Byte](1.toByte))
(new ImmutableBytesWritable(rowKey), cell)
}
}
|
7u/spark-learning
|
spark.learning/spark_test/src/main/scala/adtoyou/spark/analysis/MacImeiRestore.scala
|
Scala
|
apache-2.0
| 3,617
|
package org.scalacoin.util
import org.scalacoin.protocol.script.{ScriptPubKey, ScriptSignature}
import org.scalacoin.protocol.{CompactSizeUInt, CompactSizeUIntImpl}
import org.slf4j.LoggerFactory
/**
* Created by chris on 2/8/16.
*/
trait NumberUtil extends BitcoinSLogger {
/**
* Takes a hex number and converts it into a signed number
* used in the bitcoin numbering system
* @param hex
* @return
*/
def toLong(hex : String) : Long = toLong(BitcoinSUtil.decodeHex(hex))
/**
* Takes a list of bytes and converts it into a signed number inside of bitcoin's
* numbering system
* @param bytes
* @return
*/
def toLong(bytes : Seq[Byte]) : Long = {
logger.debug("bytes: " + bytes)
val reversedBytes = bytes.reverse
if (bytes.size == 1 && bytes.head == -128) {
//the case for negative zero
0
} else if (isPositive(bytes)) {
if (firstByteAllZeros(reversedBytes.toList) && reversedBytes.size > 1) {
parseLong(reversedBytes.slice(1,reversedBytes.size))
} else parseLong(reversedBytes)
} else {
//remove the sign bit
val removedSignBit : List[Byte] = changeSignBitToPositive(reversedBytes.toList)
if (firstByteAllZeros(removedSignBit)) -parseLong(removedSignBit.slice(1,removedSignBit.size))
else -parseLong(removedSignBit)
}
}
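  // Editor's illustrative sketch (not part of the original file): in this little-endian,
  // sign-and-magnitude encoding, hex "01" decodes to 1, "81" to -1, and "80" is the
  // negative-zero case handled above.
  protected def toLongExampleSketch: (Long, Long, Long) =
    (toLong("01"), toLong("81"), toLong("80")) // expected: (1, -1, 0)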
/**
* Converts a long number to its representation inside of Bitcoin's number system
* @param long
* @return
*/
def longToHex(long : Long) : String = {
if (long > -1) {
val bytes = toByteSeq(long)
BitcoinSUtil.flipEndianess(BitcoinSUtil.encodeHex(bytes))
} else {
val bytes = toByteSeq(long.abs)
//add sign bit
val negativeNumberBytes : List[Byte] = changeSignBitToNegative(bytes.toList)
val hex = BitcoinSUtil.encodeHex(negativeNumberBytes.reverse)
hex
}
}
/**
* Determines if a given hex string is a positive number
* @param hex
* @return
*/
def isPositive(hex : String) : Boolean = isPositive(BitcoinSUtil.decodeHex(hex))
/**
* Determines if a byte array is a positive or negative number
* @param bytes
* @return
*/
def isPositive(bytes : Seq[Byte]) : Boolean = {
if (bytes.isEmpty) false
else {
val result: Int = bytes(bytes.size-1) & 0x80
if (result == 0x80) false else true
}
}
def isNegative(hex : String) : Boolean = isNegative(BitcoinSUtil.decodeHex(hex))
def isNegative(bytes : List[Byte]) : Boolean = {
if (bytes.isEmpty) false else !isPositive(bytes)
}
/**
* Change sign bit to positive
* @param bytes
* @return
*/
def changeSignBitToPositive(bytes : List[Byte]) : List[Byte] = {
val newByte : Byte = (bytes.head & 0x7F).toByte
newByte :: bytes.tail
}
def changeSignBitToPositive(hex : String) : List[Byte] = changeSignBitToPositive(BitcoinSUtil.decodeHex(hex))
def changeSignBitToNegative(bytes : List[Byte]) : List[Byte] = {
val newByte = (bytes.head | 0x80).toByte
(newByte :: bytes.tail)
}
def changeSignBitToNegative(hex : String) : List[Byte] = changeSignBitToNegative(BitcoinSUtil.decodeHex(hex))
def firstByteAllZeros(hex : String) : Boolean = firstByteAllZeros(BitcoinSUtil.decodeHex(hex))
def firstByteAllZeros(bytes : List[Byte]) : Boolean = {
val lastByte = bytes.head
(lastByte & 0xFF) == 0
}
def toByteSeq(long : Long) : Seq[Byte] = BigInt(long).toByteArray
/**
* Parses a VarInt from a string of hex characters
* https://bitcoin.org/en/developer-reference#compactsize-unsigned-integers
* @param hex
* @return
*/
def parseCompactSizeUInt(hex : String) : CompactSizeUInt = parseCompactSizeUInt(BitcoinSUtil.decodeHex(hex))
/**
* Parses a CompactSizeUInt from a sequence of bytes
* https://bitcoin.org/en/developer-reference#compactsize-unsigned-integers
* @param bytes
* @return
*/
def parseCompactSizeUInt(bytes : Seq[Byte]) : CompactSizeUInt = {
require(bytes.size > 0, "Cannot parse a VarInt if the byte array is size 0")
//8 bit number
if (parseLong(bytes.head) < 253) CompactSizeUIntImpl(parseLong(bytes.head),1)
//16 bit number
else if (parseLong(bytes.head) == 253) CompactSizeUIntImpl(parseLong(bytes.slice(1,3).reverse),3)
//32 bit number
else if (parseLong(bytes.head) == 254) CompactSizeUIntImpl(parseLong(bytes.slice(1,5).reverse),5)
//64 bit number
else CompactSizeUIntImpl(parseLong(bytes.slice(1,9).reverse),9)
}
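  // Editor's illustrative sketch (not part of the original file): "fd0302" is the 0xfd
  // marker followed by the little-endian uint16 0x0203, so the parser above should
  // yield a value of 515 occupying 3 bytes.
  protected def parseCompactSizeUIntSketch: CompactSizeUInt =
    parseCompactSizeUInt("fd0302") // expected: CompactSizeUIntImpl(515, 3)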
/**
* Returns the size of a VarInt in the number of bytes
* https://en.bitcoin.it/wiki/Protocol_documentation#Variable_length_integer
* @param byte
* @return
*/
def parseCompactSizeUIntSize(byte : Byte) : Long = {
//8 bit number
if (parseLong(byte) < 253) 1
//16 bit number
else if (parseLong(byte) == 253) 3
//32 bit number
else if (parseLong(byte) == 254) 5
//64 bit number
else 9
}
/**
* Parses the compact size uint from a script signature
* https://bitcoin.org/en/developer-reference#compactsize-unsigned-integers
* @param script
* @return
*/
def parseCompactSizeUInt(script : ScriptSignature) : CompactSizeUInt = {
if (script.bytes.size <=252 ) {
CompactSizeUIntImpl(script.bytes.size,1)
} else if (script.bytes.size <= 0xffff) {
CompactSizeUIntImpl(script.bytes.size,3)
} else if (script.bytes.size <= 0xffffffff) {
CompactSizeUIntImpl(script.bytes.size,5)
}
else CompactSizeUIntImpl(script.bytes.size,9)
}
/**
* Parses a compact size uint from a script pubkey
* https://bitcoin.org/en/developer-reference#compactsize-unsigned-integers
* @param scriptPubKey
* @return
*/
def parseCompactSizeUInt(scriptPubKey : ScriptPubKey) : CompactSizeUInt = {
if (scriptPubKey.bytes.size <=252 ) {
CompactSizeUIntImpl(scriptPubKey.bytes.size,1)
} else if (scriptPubKey.bytes.size <= 0xffff) {
CompactSizeUIntImpl(scriptPubKey.bytes.size,3)
} else if (scriptPubKey.bytes.size <= 0xffffffff) {
CompactSizeUIntImpl(scriptPubKey.bytes.size,5)
}
else CompactSizeUIntImpl(scriptPubKey.bytes.size,9)
}
private def parseLong(hex : String) : Long = java.lang.Long.parseLong(hex,16)
private def parseLong(bytes : List[Byte]) : Long = parseLong(BitcoinSUtil.encodeHex(bytes))
private def parseLong(byte : Byte) : Long = parseLong(List(byte))
private def parseLong(bytes : Seq[Byte]) : Long = parseLong(bytes.toList)
}
|
TomMcCabe/scalacoin
|
src/main/scala/org/scalacoin/util/NumberUtil.scala
|
Scala
|
mit
| 6,487
|
/*
* ******************************************************************************
* * Copyright (C) 2013 Christopher Harris (Itszuvalex)
* * Itszuvalex@gmail.com
* *
* * This program is free software; you can redistribute it and/or
* * modify it under the terms of the GNU General Public License
* * as published by the Free Software Foundation; either version 2
* * of the License, or (at your option) any later version.
* *
* * This program is distributed in the hope that it will be useful,
* * but WITHOUT ANY WARRANTY; without even the implied warranty of
* * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* * GNU General Public License for more details.
* *
* * You should have received a copy of the GNU General Public License
* * along with this program; if not, write to the Free Software
* * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* *****************************************************************************
*/
package com.itszuvalex.femtocraft.industry.traits
import com.itszuvalex.femtocraft.core.tiles.TileEntityBase
trait IndustryBehavior extends TileEntityBase {
override def femtocraftServerUpdate() {
super.femtocraftServerUpdate()
if (!isWorking) {
if (canStartWork) startWork()
}
else continueWork()
if (isWorking && canFinishWork) {
finishWork()
}
}
def isWorking = false
protected def canStartWork = false
protected def startWork() {
}
protected def continueWork() {
}
protected def canFinishWork = true
protected def finishWork() {
}
}
|
Itszuvalex/Femtocraft-alpha-1
|
src/main/java/com/itszuvalex/femtocraft/industry/traits/IndustryBehavior.scala
|
Scala
|
gpl-2.0
| 1,623
|
package com.twitter.algebird
import scala.collection.compat._
object Scan {
/**
* Most consumers of Scan don't care about the type of the `State` type member. But for those that do,
* we make an effort to expose it in all of our combinators.
* @tparam I
* @tparam S
* @tparam O
*/
type Aux[-I, S, +O] = Scan[I, O] { type State = S }
implicit def applicative[I]: Applicative[Scan[I, *]] = new ScanApplicative[I]
def from[I, S, O](initState: S)(presentAndNextStateFn: (I, S) => (O, S)): Aux[I, S, O] =
new Scan[I, O] {
override type State = S
override val initialState = initState
override def presentAndNextState(i: I, s: State): (O, State) = presentAndNextStateFn(i, s)
}
def fromFunction[I, O](f: I => O): Aux[I, Unit, O] = new Scan[I, O] {
override type State = Unit
override val initialState = ()
override def presentAndNextState(i: I, stateBeforeProcessingI: Unit): (O, State) = (f(i), ())
}
/**
* Scans take streams of inputs to streams of outputs, but some scans have trivial inputs and just produce a
* stream of outputs. Streams can be thought of as being a hidden state that is queryable for a head
* element, and another hidden state that represents the rest of the stream.
* @param initState
* The initial state of the scan; think of this as an infinite stream.
* @param destructor
* This function decomposes a stream into its head-element and tail-stream.
* @tparam S
* The hidden state of the stream that we are turning into a Scan.
* @tparam O
* The type of the elements of the stream that we are turning into a Scan
* @return
* A Scan whose inputs are irrelevant, and whose outputs are those that we would get from implementing a
* stream using the information provided to this method.
*/
def iterate[S, O](initState: S)(destructor: S => (O, S)): Aux[Any, S, O] = new Scan[Any, O] {
override type State = S
override val initialState = initState
override def presentAndNextState(i: Any, stateBeforeProcessingI: S): (O, S) =
destructor(stateBeforeProcessingI)
}
/**
* A Scan whose `Nth` output is the number `N` (starting from 0).
*/
val index: Aux[Any, Long, Long] = iterate(0L)(n => (n, n + 1))
def identity[A]: Aux[A, Unit, A] = fromFunction[A, A](x => x)
/**
* @param initStateCreator
* A call-by-name method that allocates new mutable state
* @param presentAndUpdateStateFn
* A function that both presents the output value, and has the side-effect of updating the mutable state
* @tparam I
* @tparam S
* @tparam O
* @return
* A Scan that safely encapsulates state while it's doing its thing.
*/
def mutable[I, S, O](initStateCreator: => S)(presentAndUpdateStateFn: (I, S) => O): Aux[I, S, O] =
new Scan[I, O] {
override type State = S
override def initialState = initStateCreator
override def presentAndNextState(i: I, s: S): (O, S) = (presentAndUpdateStateFn(i, s), s)
}
/**
* The trivial scan that always returns the same value, regardless of input
* @param t
* @tparam T
*/
def const[T](t: T): Aux[Any, Unit, T] = fromFunction(_ => t)
/**
* @param aggregator
* @param initState
* @tparam A
* @tparam B
* @tparam C
* @return
* A scan which, when given `[a_1, ..., a_n]` outputs `[c_1, ..., c_n]` where `c_i = initState +
* aggregator.prepare(a_1) + ... + aggregator.prepare(a_i)`
*/
def fromAggregator[A, B, C](aggregator: Aggregator[A, B, C], initState: B): Aux[A, B, C] =
from(initState) { (a: A, stateBeforeProcessingI: B) =>
// nb: the order of the arguments to semigroup.plus here is what determines the order of the final summation;
// this matters because not all semigroups are commutative
val stateAfterProcessingA =
aggregator.append(stateBeforeProcessingI, a)
(aggregator.present(stateAfterProcessingA), stateAfterProcessingA)
}
/**
* @param monoidAggregator
* @tparam A
* @tparam B
* @tparam C
* @return
* A scan which, when given `[a_1, ..., a_n]` outputs `[c_1, ..., c_n]` where `c_i =
* monoidAggregator.monoid.zero + aggregator.prepare(a_1) + ... + aggregator.prepare(a_i)`
*/
def fromMonoidAggregator[A, B, C](monoidAggregator: MonoidAggregator[A, B, C]): Aux[A, B, C] =
fromAggregator(monoidAggregator, monoidAggregator.monoid.zero)
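  // Editor's illustrative sketch (not part of the original file): a running-sum scan
  // built with `from`, matching the semantics documented for `fromAggregator` above.
  // Applying it to List(1, 2, 3) should produce List(1, 3, 6).
  private def runningSumSketch: Aux[Int, Int, Int] =
    from(0)((i: Int, sum: Int) => (sum + i, sum + i))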
}
/**
* The Scan trait is an alternative to the `scanLeft` method on iterators/other collections for a range of
* use-cases where `scanLeft` is awkward to use. At a high level it provides some of the same functionality as
* `scanLeft`, but with a separation of "what is the state of the scan" from "what are the elements that I'm
* scanning over?". In particular, when scanning over an iterator with `N` elements, the output is an iterator
* with `N` elements (in contrast to scanLeft's `N+1`).
*
* If you find yourself writing a `scanLeft` over pairs of elements, where you only use one element of the
* pair within the `scanLeft`, then throw that element away in a `map` immediately after the scanLeft is done,
* then this abstraction is for you.
*
* The canonical method to use a scan is `apply`.
*
* @tparam I
* The type of elements that the computation is scanning over.
* @tparam O
* The output type of the scan (typically distinct from the hidden `State` of the scan).
*/
sealed abstract class Scan[-I, +O] extends Serializable {
import Scan.{from, Aux}
/**
* The computation of any given scan involves keeping track of a hidden state.
*/
type State
/**
* The state of the scan before any elements have been processed
* @return
*/
def initialState: State
/**
* @param i
* An element in the stream to process
* @param stateBeforeProcessingI
* The state of the scan before processing i
* @return
* The output of the scan corresponding to processing i with state stateBeforeProcessing, along with the
* result of updating stateBeforeProcessing with the information from i.
*/
def presentAndNextState(i: I, stateBeforeProcessingI: State): (O, State)
/**
* @param iter
* @return
* If `iter = Iterator(a_1, ..., a_n)`, return `Iterator(o_1, ..., o_n)` where `(o_(i+1), state_(i+1)) =
* presentAndNextState(a_i, state_i)` and `state_0 = initialState`
*/
def scanIterator(iter: Iterator[I]): Iterator[O] = new AbstractIterator[O] {
override def hasNext: Boolean = iter.hasNext
var state: State = initialState
override def next: O = {
val thisState = state
val thisA = iter.next
val (thisC, nextState) = presentAndNextState(thisA, thisState)
state = nextState
thisC
}
}
/**
* @param inputs
* @param bf
* @tparam In
* The type of the input collection
* @tparam Out
* The type of the output collection
* @return
* Given inputs as a collection of the form `[a_1, ..., a_n]` the output will be a collection of the form:
* `[o_1, ..., o_n]` where `(o_(i+1), state_(i+1)) = presentAndNextState(a_i, state_i)` and `state_0 =
* initialState`.
*/
def apply[In <: TraversableOnce[I], Out](
inputs: In
)(implicit bf: BuildFrom[In, O, Out]): Out =
bf.fromSpecific(inputs)(scanIterator(inputs.toIterator))
// combinators
/**
* Return a new scan that is the same as this scan, but with a different `initialState`.
* @param newInitialState
* @return
*/
def replaceState(newInitialState: => State): Aux[I, State, O] =
from(newInitialState)(presentAndNextState(_, _))
def composePrepare[I1](f: I1 => I): Aux[I1, State, O] = from(initialState) { (i, stateBeforeProcessingI) =>
presentAndNextState(f(i), stateBeforeProcessingI)
}
def andThenPresent[O1](g: O => O1): Aux[I, State, O1] = from(initialState) { (i, stateBeforeProcessingI) =>
val (c, stateAfterProcessingA) = presentAndNextState(i, stateBeforeProcessingI)
(g(c), stateAfterProcessingA)
}
/**
* Return a scan that is semantically identical to `this.join(Scan.identity[I1])`, but where we don't
* pollute the `State` by pairing it redundantly with `Unit`.
* @tparam I1
* @return
* If this Scan's `apply` method is given inputs `[a_1, ..., a_n]` resulting in outputs of the form `[o_1,
* ..., o_n]`, then this results in a Scan whose `apply` method returns `[(o_1, a_1), ..., (o_n, a_n)]`
* when given the same input.
*/
def joinWithInput[I1 <: I]: Aux[I1, State, (O, I1)] = from(initialState) { (i, stateBeforeProcessingI) =>
val (o, stateAfterProcessingI) = presentAndNextState(i, stateBeforeProcessingI)
((o, i), stateAfterProcessingI)
}
/**
* Return a scan whose output is paired with the state of the scan before each input updates the state.
* @return
* If this Scan's `apply` method is given inputs [a_1, ..., a_n] resulting in outputs of the form `[o_1,
* ..., o_n]`, where `(o_(i+1), state_(i+1)) = presentAndNextState(a_i, state_i)` and `state_0 =
* initialState`, return a scan whose apply method, when given inputs `[a_1, ..., a_n]`, will return
* `[(o_1, state_0), ..., (o_n, state_(n-1))]`.
*/
def joinWithPriorState: Aux[I, State, (State, O)] = from(initialState) { (i, stateBeforeProcessingI) =>
val (o, stateAfterProcessingA) = presentAndNextState(i, stateBeforeProcessingI)
((stateBeforeProcessingI, o), stateAfterProcessingA)
}
/**
* Return a scan whose output is paired with the state of the scan after each input updates the state.
* @return
* If this Scan's `apply` method is given inputs `[a_1, ..., a_n]` resulting in outputs of the form `[o_1,
* ..., o_n]`, where `(o_(i+1), state_(i+1)) = presentAndNextState(a_i, state_i)` and `state_0 =
* initialState`, return a scan whose apply method, when given inputs `[a_1, ..., a_n]`, will return
* `[(o_1, state_1), ..., (o_n, state_n)]`.
*/
def joinWithPosteriorState: Aux[I, State, (O, State)] = from(initialState) { (i, stateBeforeProcessingI) =>
val (c, stateAfterProcessingA) = presentAndNextState(i, stateBeforeProcessingI)
((c, stateAfterProcessingA), stateAfterProcessingA)
}
/**
* For every `foo`, `scan.joinWithIndex(foo) == scan(foo).zipWithIndex`.
* @return
* If this Scan's `apply` method is given inputs `[a_1, ..., a_n]` resulting in outputs of the form `[o_1,
* ..., o_n]`, return a scan whose apply method, when given the same input, will return `[(o_1, 0),
* ..., (o_n, n-1)]`.
*/
def joinWithIndex: Aux[I, (State, Long), (O, Long)] = join(Scan.index)
/**
* Compose two scans pairwise such that, when given pairwise zipped inputs, the resulting scan will output
* pairwise zipped outputs.
* @param scan2
* @tparam I2
* @tparam O2
* @return
* If this Scan's apply method is given inputs `[a_1, ..., a_n]` resulting in outputs of the form `[o_1,
* ..., o_n]`, and `scan2.apply([b_1, ..., b_n]) = [p_1, ..., p_n]` then `zip` will return a scan whose
* apply method, when given input `[(a_1, b_1), ..., (a_n, b_n)]`, results in the output `[(o_1, p_1), ...,
* (o_n, p_n)]`. In other words: `scan.zip(scan2)(foo.zip(bar)) == scan(foo).zip(scan2(bar))`
*/
def zip[I2, O2](scan2: Scan[I2, O2]): Aux[(I, I2), (State, scan2.State), (O, O2)] =
from((initialState, scan2.initialState)) { (i1i2, stateBeforeProcessingI1I2) =>
val (o1, state1AfterProcesingI1) =
presentAndNextState(i1i2._1, stateBeforeProcessingI1I2._1)
val (o2, state2AfterProcesingI2) =
scan2.presentAndNextState(i1i2._2, stateBeforeProcessingI1I2._2)
((o1, o2), (state1AfterProcesingI1, state2AfterProcesingI2))
}
/**
* Given a scan that takes compatible input to this one, pairwise compose the state and outputs of each scan
* on a common input stream.
* @param scan2
* @tparam I2
* @tparam O2
* @return
* If this Scan's apply method is given inputs [a_1, ..., a_n] resulting in outputs of the form `[o_1,
* ..., o_n]`, and `scan2.apply([a_1, ..., a_n]) = [p_1, ..., p_n]` then `join` will return a scan whose
* apply method returns `[(o_1, p_1), ..., (o_n, p_n)]`. In other words: `scan.join(scan2)(foo) ==
* scan(foo).zip(scan2(foo))`
*/
def join[I2 <: I, O2](scan2: Scan[I2, O2]): Aux[I2, (State, scan2.State), (O, O2)] =
from((initialState, scan2.initialState)) { (i, stateBeforeProcessingI) =>
val (o1, state1AfterProcesingI1) = presentAndNextState(i, stateBeforeProcessingI._1)
val (o2, state2AfterProcesingI2) = scan2.presentAndNextState(i, stateBeforeProcessingI._2)
((o1, o2), (state1AfterProcesingI1, state2AfterProcesingI2))
}
/**
* Takes the output of this scan and feeds as input into scan2.
* @param scan2
* @tparam P
* @return
* If this Scan's apply method is given inputs `[a_1, ..., a_n]` resulting in outputs of the form `[o_1,
* ..., o_n]`, and `scan2.apply([o_1, ..., o_n]) = [p_1, ..., p_n]` then `compose` will return a scan which
* returns `[p_1, ..., p_n]`.
*/
def compose[P](scan2: Scan[O, P]): Aux[I, (State, scan2.State), P] =
from((initialState, scan2.initialState)) { (i, stateBeforeProcessingI) =>
val (o, state1AfterProcesingI) = presentAndNextState(i, stateBeforeProcessingI._1)
val (p, state2AfterProcesingO) = scan2.presentAndNextState(o, stateBeforeProcessingI._2)
(p, (state1AfterProcesingI, state2AfterProcesingO))
}
}
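// Editor's illustrative sketch (not part of the original file): `join` runs two scans
// over the same inputs and pairs their outputs, so a running sum joined with
// `Scan.index` should map List(1, 2, 3) to List((1, 0L), (3, 1L), (6, 2L)).
private object ScanJoinSketch {
  val sumWithIndex: Scan[Int, (Int, Long)] =
    Scan.from(0)((i: Int, s: Int) => (s + i, s + i)).join[Int, Long](Scan.index)
}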
class ScanApplicative[I] extends Applicative[Scan[I, *]] {
override def map[T, U](mt: Scan[I, T])(fn: T => U): Scan[I, U] =
mt.andThenPresent(fn)
override def apply[T](v: T): Scan[I, T] =
Scan.const(v)
override def join[T, U](mt: Scan[I, T], mu: Scan[I, U]): Scan[I, (T, U)] =
mt.join(mu)
}
|
twitter/algebird
|
algebird-core/src/main/scala/com/twitter/algebird/Scan.scala
|
Scala
|
apache-2.0
| 13,856
|
package net.sansa_stack.rdf.spark.qualityassessment.metrics.conciseness
import net.sansa_stack.rdf.common.qualityassessment.utils.NodeUtils._
import net.sansa_stack.rdf.common.qualityassessment.utils.vocabularies.DQV
import org.apache.jena.graph.{ Node, Triple }
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
/**
* @author Gezim Sejdiu
*/
object ExtensionalConciseness {
/**
* The extensional conciseness
* This metric checks for redundant resources in the assessed dataset,
* and thus measures the number of unique instances found in the dataset.
* @return No. of unique subjects / Total No. of subjects
*/
def assessExtensionalConciseness(dataset: RDD[Triple]): Double = {
val mapSubjects = dataset.map(_.getSubject)
val mapSubjectsWithPredicates = dataset.filter(triple => triple.getSubject.isURI() && triple.getPredicate.isURI())
.map(f => (f.getSubject, f.getPredicate))
.map((_, 1L))
.reduceByKey(_ + _)
.map { case ((k, v), cnt) => (k, (v, cnt)) }
.groupByKey()
val duplicateSubjects = dataset.filter(triple => triple.getSubject.isURI() && triple.getPredicate.isURI())
.map(f => (f.getSubject, f.getPredicate.getURI.toString() + " " + f.getObject.toString() + " "))
.map(f => (f._2, 1L))
.reduceByKey(_ + _)
.filter(_._2 > 1)
.values.sum()
// val duplicates = mapSubjectsWithPredicatesValue.map(x => (x._1, x._2.groupBy(_._1).map(y => (y._1, y._2.size))))
val totalSubjects = mapSubjects.count().toDouble
if (totalSubjects > 0) (totalSubjects - duplicateSubjects) / totalSubjects else 0
}
}
|
SANSA-Stack/Spark-RDF
|
sansa-rdf-spark/src/main/scala/net/sansa_stack/rdf/spark/qualityassessment/metrics/conciseness/ExtensionalConciseness.scala
|
Scala
|
gpl-3.0
| 1,639
|
import scala.language.postfixOps
import scala.util._
import scala.util.control.NonFatal
import scala.concurrent._
import scala.concurrent.duration._
import ExecutionContext.Implicits.global
import scala.async.Async.{async, await}
/** Contains basic data types, data structures and `Future` extensions.
*/
package object nodescala {
/** Adds extensions methods to the `Future` companion object.
*/
implicit class FutureCompanionOps[T](val f: Future.type) extends AnyVal {
/** Returns a future that is always completed with `value`.
*/
def always[T](value: T): Future[T] = {
val p = Promise[T]()
p success value
p.future
}
/** Returns a future that is never completed.
*
* This future may be useful when testing if timeout logic works correctly.
*/
def never[T]: Future[T] = {
val p = Promise[T]()
p.future
}
def failure[T]: Future[T] = {
val p = Promise[T]()
p failure new Exception
p.future
}
/** Given a list of futures `fs`, returns the future holding the list of values of all the futures from `fs`.
* The returned future is completed only once all of the futures in `fs` have been completed.
* The values in the list are in the same order as corresponding futures `fs`.
* If any of the futures `fs` fails, the resulting future also fails.
*/
def all[T](fs: List[Future[T]]): Future[List[T]] = {
val emptyList = Future.always(List[T]())
fs.foldRight(emptyList) {
(f, acc) => for { x <- f; xs <- acc } yield x :: xs
}
}
/** Given a list of futures `fs`, returns the future holding the value of the future from `fs` that completed first.
* If the first completing future in `fs` fails, then the result is failed as well.
*
* E.g.:
*
* Future.any(List(Future { 1 }, Future { 2 }, Future { throw new Exception }))
*
* may return a `Future` succeeded with `1`, `2` or failed with an `Exception`.
*/
def any[T](fs: List[Future[T]]): Future[T] = {
val p = Promise[T]()
fs.foreach { f => f onComplete { p.tryComplete(_) } }
p.future
}
/** Returns a future with a unit value that is completed after time `t`.
*/
def delay(t: Duration): Future[Unit] = async {
blocking {
try {
Await.ready(never[Unit], t)
} catch {
case e: Exception =>
}
}
}
/** Completes this future with user input.
*/
def userInput(message: String): Future[String] = Future {
readLine(message)
}
/** Creates a cancellable context for an execution and runs it.
*/
def run()(f: CancellationToken => Future[Unit]): Subscription = {
val cancelSource = CancellationTokenSource()
f(cancelSource.cancellationToken)
cancelSource
}
}
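  // Editor's illustrative sketch (not part of the original exercise file): the `always`
  // and `all` extensions above compose as follows; the resulting future completes with
  // List(1, 2), preserving the order of the input futures.
  private def allUsageSketch: Future[List[Int]] =
    Future.all(List(Future.always(1), Future.always(2)))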
/** Adds extension methods to future objects.
*/
implicit class FutureOps[T](val f: Future[T]) extends AnyVal {
/** Returns the result of this future if it is completed now.
* Otherwise, throws a `NoSuchElementException`.
*
* Note: This method does not wait for the result.
* It is thus non-blocking.
* However, it is also non-deterministic -- it may throw or return a value
* depending on the current state of the `Future`.
*/
def now: T = {
f.value match {
case Some(v) => v match {
case Success(v) => v
case Failure(e) => throw e
}
case None => throw new NoSuchElementException
}
}
/** Continues the computation of this future by taking the current future
* and mapping it into another future.
*
* The function `cont` is called only after the current future completes.
* The resulting future contains a value returned by `cont`.
*/
def continueWith[S](cont: Future[T] => S): Future[S] = {
val p = Promise[S]()
f onComplete { t =>
try { p success cont(f) } catch { case NonFatal(t) => p failure t }
}
p.future
}
/** Continues the computation of this future by taking the result
* of the current future and mapping it into another future.
*
* The function `cont` is called only after the current future completes.
* The resulting future contains a value returned by `cont`.
*/
def continue[S](cont: Try[T] => S): Future[S] = {
val p = Promise[S]()
f onComplete { t =>
try { p success cont(t) } catch { case NonFatal(t) => p failure t }
}
p.future
}
}
/** Subscription objects are used to be able to unsubscribe
* from some event source.
*/
trait Subscription {
def unsubscribe(): Unit
}
object Subscription {
/** Given two subscriptions `s1` and `s2` returns a new composite subscription
* such that the new composite subscription cancels both `s1` and `s2`
* when `unsubscribe` is called.
*/
def apply(s1: Subscription, s2: Subscription) = new Subscription {
def unsubscribe() {
s1.unsubscribe()
s2.unsubscribe()
}
}
}
/** Used to check if cancellation was requested.
*/
trait CancellationToken {
def isCancelled: Boolean
def nonCancelled = !isCancelled
}
/** The `CancellationTokenSource` is a special kind of `Subscription` that
* returns a `cancellationToken` which is cancelled by calling `unsubscribe`.
*
* After calling `unsubscribe` once, the associated `cancellationToken` will
* forever remain cancelled -- its `isCancelled` will return `true`.
*/
trait CancellationTokenSource extends Subscription {
def cancellationToken: CancellationToken
}
/** Creates cancellation token sources.
*/
object CancellationTokenSource {
/** Creates a new `CancellationTokenSource`.
*/
def apply(): CancellationTokenSource = new CancellationTokenSource {
val p = Promise[Unit]()
val cancellationToken = new CancellationToken {
def isCancelled = p.future.value != None
}
def unsubscribe() {
p.trySuccess(())
}
}
}
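  // Editor's illustrative sketch (not part of the original exercise file): the intended
  // cancellation pattern -- `Future.run` hands the running computation a token to poll,
  // and unsubscribing the returned Subscription flips that token to cancelled.
  private def runUsageSketch(): Unit = {
    val subscription = Future.run() { ct =>
      Future.delay(1.second).map { _ =>
        if (ct.nonCancelled) () // real work would only proceed while non-cancelled
      }
    }
    subscription.unsubscribe()
  }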
}
|
albertpastrana/reactive
|
nodescala/src/main/scala/nodescala/package.scala
|
Scala
|
mit
| 6,172
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.sclearn.dataset.spark.util
import java.io._
import java.lang.management.{LockInfo, ManagementFactory, MonitorInfo, ThreadInfo}
import java.math.{MathContext, RoundingMode}
import java.net._
import java.nio.ByteBuffer
import java.nio.channels.{Channels, FileChannel}
import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Paths}
import java.util.{Locale, Properties, Random, UUID}
import java.util.concurrent._
import java.util.concurrent.atomic.AtomicBoolean
import java.util.zip.GZIPInputStream
import javax.net.ssl.HttpsURLConnection
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
import scala.reflect.ClassTag
import scala.util.Try
import scala.util.control.{ControlThrowable, NonFatal}
import scala.util.matching.Regex
/**
* Various utility methods used by Spark.
*/
private[spark] object Utils {
/** Whether we have warned about plan string truncation yet. */
private val truncationWarningPrinted = new AtomicBoolean(false)
/**
* Format a sequence with semantics similar to calling .mkString(). Any elements beyond
* maxNumFields will be dropped and replaced by a "... N more fields" placeholder.
*
* @return the trimmed and formatted string.
*/
def truncatedString[T](
seq: Seq[T],
start: String,
sep: String,
end: String,
maxNumFields: Int = 25): String = {
if (seq.length > maxNumFields) {
if (truncationWarningPrinted.compareAndSet(false, true)) {
}
val numFields = math.max(0, maxNumFields - 1)
seq.take(numFields).mkString(
start, sep, sep + "... " + (seq.length - numFields) + " more fields" + end)
} else {
seq.mkString(start, sep, end)
}
}
/** Shorthand for calling truncatedString() without start or end strings. */
def truncatedString[T](seq: Seq[T], sep: String): String = truncatedString(seq, "", sep, "")
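  // Editor's illustrative sketch (not part of the original file): expected behaviour of
  // truncatedString when the sequence is longer than maxNumFields.
  private def truncatedStringSketch: String =
    truncatedString(Seq(1, 2, 3), "[", ", ", "]", maxNumFields = 2) // "[1, ... 2 more fields]"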
/**
* Get the Context ClassLoader on this thread or, if not present, the ClassLoader that
* loaded Spark.
*
* This should be used whenever passing a ClassLoader to Class.forName or finding the currently
* active loader when setting up ClassLoader delegation chains.
*/
def getContextOrSparkClassLoader: ClassLoader =
Option(Thread.currentThread().getContextClassLoader).getOrElse(getClass.getClassLoader)
/** Determines whether the provided class is loadable in the current thread. */
def classIsLoadable(clazz: String): Boolean = {
// scalastyle:off classforname
Try { Class.forName(clazz, false, getContextOrSparkClassLoader) }.isSuccess
// scalastyle:on classforname
}
// scalastyle:off classforname
/** Preferred alternative to Class.forName(className) */
def classForName(className: String): Class[_] = {
Class.forName(className, true, getContextOrSparkClassLoader)
// scalastyle:on classforname
}
/**
* NaN-safe version of `java.lang.Double.compare()` which allows NaN values to be compared
* according to semantics where NaN == NaN and NaN is greater than any non-NaN double.
*/
def nanSafeCompareDoubles(x: Double, y: Double): Int = {
val xIsNan: Boolean = java.lang.Double.isNaN(x)
val yIsNan: Boolean = java.lang.Double.isNaN(y)
if ((xIsNan && yIsNan) || (x == y)) 0
else if (xIsNan) 1
else if (yIsNan) -1
else if (x > y) 1
else -1
}
/**
* NaN-safe version of `java.lang.Float.compare()` which allows NaN values to be compared
* according to semantics where NaN == NaN and NaN is greater than any non-NaN float.
*/
def nanSafeCompareFloats(x: Float, y: Float): Int = {
val xIsNan: Boolean = java.lang.Float.isNaN(x)
val yIsNan: Boolean = java.lang.Float.isNaN(y)
if ((xIsNan && yIsNan) || (x == y)) 0
else if (xIsNan) 1
else if (yIsNan) -1
else if (x > y) 1
else -1
}
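  // Editor's illustrative sketch (not part of the original file): under the NaN-total
  // ordering implemented above, NaN compares greater than any non-NaN value and equal
  // to itself.
  private def nanSafeCompareSketch: (Int, Int) =
    (nanSafeCompareDoubles(Double.NaN, 1.0), nanSafeCompareDoubles(Double.NaN, Double.NaN)) // (1, 0)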
}
|
sclearn/sclearn
|
sc/src/main/scala/io/github/sclearn/dataset/spark/util/Utils.scala
|
Scala
|
apache-2.0
| 4,737
|
package eu.pepot.eu.spark.inputsplitter.common.file.matcher
import java.io.{File, FileNotFoundException}
import eu.pepot.eu.spark.inputsplitter.helper.TestsHelper._
import eu.pepot.eu.spark.inputsplitter.common.file.{FileDetails, FileDetailsSet}
import org.apache.hadoop.fs.Path
import org.specs2.mutable.Specification
class FilesMatcherSpec extends Specification {
"FilesMatcher" should {
"match files based on their size" in {
val smallFile = fileDetails(resourcesBaseDir("scenario-000/input/small1.txt"))
val bigFile = fileDetails(resourcesBaseDir("scenario-000/input/big.txt"))
val files = FileDetailsSet(Set(bigFile, smallFile))
val FileDetailsSet(matches) = FilesMatcher.matches(files, Condition(biggerThan = Some(50)))
matches.size mustEqual 1
matches.head mustEqual bigFile
}
"match files based on their name" in {
val smallFile = fileDetails(resourcesBaseDir("scenario-000/input/small1.txt"))
val bigFile = fileDetails(resourcesBaseDir("scenario-000/input/big.txt"))
val files = FileDetailsSet(Set(bigFile, smallFile))
val FileDetailsSet(matches) = FilesMatcher.matches(files, Condition(namePattern = Some(".*all.*")))
matches.size mustEqual 1
matches.head mustEqual smallFile
}
"match files based on a given path condition" in {
val smallFile = fileDetails(resourcesBaseDir("scenario-000/input/small1.txt"))
val bigFile = fileDetails(resourcesBaseDir("scenario-000/input/big.txt"))
val files = FileDetailsSet(Set(bigFile, smallFile))
val FileDetailsSet(matches) = FilesMatcher.matches(files, Condition(pathCondition = Some((p: String) => p.contains("big"))))
matches.size mustEqual 1
matches.head mustEqual bigFile
}
}
private def fileDetails(path: String): FileDetails = {
val p = new Path(path)
val f = new File(path)
if (!f.exists()) {
throw new FileNotFoundException(f.getAbsolutePath)
}
FileDetails(p.toString, f.length())
}
}
|
mauriciojost/spark-input-splitter
|
src/test/scala/eu/pepot/eu/spark/inputsplitter/common/file/matcher/FilesMatcherSpec.scala
|
Scala
|
apache-2.0
| 2,025
|
package dotty.tools.scaladoc
sealed trait EngineQuery
case class EngineMatchersQuery(matchers: List[Matchers]) extends EngineQuery
case class BySignature(signature: String) extends EngineQuery
sealed trait Matchers extends Function1[PageEntry, Int]
case class ByName(query: String) extends Matchers:
val tokens = StringUtils.createCamelCaseTokens(query)
def apply(p: PageEntry): Int = {
val nameOption = Option(p.shortName.toLowerCase)
//Edge case for empty query string
if query == "" then 1
else {
val results = List(
nameOption.filter(_.contains(query.toLowerCase)).fold(-1)(_.size - query.size),
if p.tokens.size >= tokens.size && p.tokens.zip(tokens).forall( (token, query) => token.startsWith(query))
then p.tokens.size - tokens.size + 1
else -1
//acronym.filter(_.contains(query)).fold(-1)(_.size - query.size + 1)
)
if results.forall(_ == -1) then -1 else results.filter(_ != -1).min
}
}
case class ByKind(kind: String) extends Matchers:
def apply(p: PageEntry): Int = p.fullName.split(" ").headOption.filter(_.equalsIgnoreCase(kind)).fold(-1)(_ => 1)
|
dotty-staging/dotty
|
scaladoc-js/main/src/searchbar/engine/Matchers.scala
|
Scala
|
apache-2.0
| 1,154
|
/*
* Copyright (c) 2014-2016
* nonblocking.at gmbh [http://www.nonblocking.at]
*
* This file is part of Cliwix.
*
* Cliwix is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package at.nonblocking.cliwix.core.validation
import at.nonblocking.cliwix.core.util.ListTypeUtils
import at.nonblocking.cliwix.model.{IMPORT_POLICY, LiferayConfig}
import com.liferay.portal.kernel.util.{PropsUtil, PropsKeys}
import com.typesafe.scalalogging.slf4j.LazyLogging
import scala.collection.mutable
import scala.collection.JavaConversions._
private[core] class DefaultCompanyLiferayConfigValidator extends LiferayConfigValidator with ListTypeUtils with LazyLogging {
val DEFAULT_VIRTUAL_HOST = "localhost"
override def validate(liferayConfig: LiferayConfig): List[ValidationError] = {
assert(liferayConfig != null, "liferayConfig != null")
if (!DefaultCompanyLiferayConfigValidator.disabled) {
val messages = new mutable.MutableList[ValidationError]()
val defaultCompanyWebId = PropsUtil.get(PropsKeys.COMPANY_DEFAULT_WEB_ID)
if (liferayConfig.getCompanies != null && liferayConfig.getCompanies.getImportPolicy == IMPORT_POLICY.ENFORCE) {
if (liferayConfig.getCompanies.getList != null && !liferayConfig.getCompanies.getList.exists(_.getWebId == defaultCompanyWebId)) {
messages += new ValidationError(s" Default company (webId='$defaultCompanyWebId') is required if import policy is ENFORCE!", "", null)
}
}
safeForeach(liferayConfig.getCompanies) { company =>
if (company.getWebId == defaultCompanyWebId
&& company.getCompanyConfiguration != null && company.getCompanyConfiguration.getVirtualHost != DEFAULT_VIRTUAL_HOST) {
messages += new ValidationError(s"Virtual host of the default company (webId='$defaultCompanyWebId') must be 'localhost'", s"Company:${company.getWebId}", null)
} else if (company.getWebId != defaultCompanyWebId
&& company.getCompanyConfiguration != null && company.getCompanyConfiguration.getVirtualHost == DEFAULT_VIRTUAL_HOST) {
messages += new ValidationError(s"Virtual host 'localhost' is only allowed for the default company (webId='$defaultCompanyWebId')!", s"Company:${company.getWebId}", null)
}
}
messages.toList
} else {
logger.warn("DefaultCompanyLiferayConfigValidator has been disabled.")
Nil
}
}
}
object DefaultCompanyLiferayConfigValidator {
private [core] var disabled = false
def disable() = disabled = true
}
|
nonblocking/cliwix
|
cliwix-core/src/main/scala/at/nonblocking/cliwix/core/validation/DefaultCompanyLiferayConfigValidator.scala
|
Scala
|
agpl-3.0
| 3,130
|
package org.openurp.edu.eams.teach.service
import org.beangle.commons.lang.time.YearWeekTime
import org.openurp.edu.base.Student
trait StudentTimeService {
def isOccupied(time: YearWeekTime, stds: Student*): Boolean
}
|
openurp/edu-eams-webapp
|
core/src/main/scala/org/openurp/edu/eams/teach/service/StudentTimeService.scala
|
Scala
|
gpl-3.0
| 225
|
package org.apache.spark.sql.ext
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql._
import scala.language.implicitConversions
// scalastyle:off
object functions {
// scalastyle:on
private[this] implicit def toColumn(expr: Expression): Column = Column(expr)
// TODO: Workaround for https://issues.apache.org/jira/browse/SPARK-9301
def collectArray(expr: Column): Column = CollectArray(expr.expr)
}
|
mr1azl/spark-ext
|
sparkext-sql/src/main/scala/org/apache/spark/sql/ext/functions.scala
|
Scala
|
apache-2.0
| 441
|
package mesosphere.marathon
package core.deployment
import akka.Done
import akka.actor.{ ActorRef, Props }
import akka.event.EventStream
import akka.stream.Materializer
import mesosphere.marathon.core.deployment.impl.{ DeploymentActor, DeploymentManagerActor, DeploymentManagerDelegate }
import mesosphere.marathon.core.health.HealthCheckManager
import mesosphere.marathon.core.launchqueue.LaunchQueue
import mesosphere.marathon.core.leadership.LeadershipModule
import mesosphere.marathon.core.readiness.ReadinessCheckExecutor
import mesosphere.marathon.core.task.termination.KillService
import mesosphere.marathon.core.task.tracker.InstanceTracker
import mesosphere.marathon.storage.repository.DeploymentRepository
import scala.concurrent.Promise
/**
* Provides a [[DeploymentManager]] implementation that can be used to start and cancel a deployment and
* to list currently running deployments.
*/
class DeploymentModule(
config: DeploymentConfig,
leadershipModule: LeadershipModule,
taskTracker: InstanceTracker,
killService: KillService,
launchQueue: LaunchQueue,
scheduler: SchedulerActions,
healthCheckManager: HealthCheckManager,
eventBus: EventStream,
readinessCheckExecutor: ReadinessCheckExecutor,
deploymentRepository: DeploymentRepository,
deploymentActorProps: (ActorRef, KillService, SchedulerActions, DeploymentPlan, InstanceTracker, LaunchQueue, HealthCheckManager, EventStream, ReadinessCheckExecutor) => Props = DeploymentActor.props)(implicit val mat: Materializer) {
private[this] val deploymentManagerActorRef: ActorRef = {
val props = DeploymentManagerActor.props(
taskTracker: InstanceTracker,
killService,
launchQueue,
scheduler,
healthCheckManager,
eventBus,
readinessCheckExecutor,
deploymentRepository,
deploymentActorProps)
leadershipModule.startWhenLeader(props, "deploymentManager")
}
val deploymentManager: DeploymentManager = new DeploymentManagerDelegate(config, deploymentManagerActorRef)
}
|
janisz/marathon
|
src/main/scala/mesosphere/marathon/core/deployment/DeploymentModule.scala
|
Scala
|
apache-2.0
| 2,049
|
package es.udc.graph.utils
import breeze.linalg._
import breeze.numerics.{sqrt, pow, exp}
import breeze.stats.distributions.{ThreadLocalRandomGenerator, RandBasis}
import org.apache.commons.math3.random.MersenneTwister
import scala.math.Pi
/**
* Multivariate Gaussian
*
* Created by David Martinez Rego on 16/11/15.
*/
case class MultivariateGaussian(m: DenseVector[Double], v: DenseMatrix[Double], randSeed: Int) {
val R : DenseMatrix[Double] = cholesky(v)
val stdGauss = new RandBasis(new ThreadLocalRandomGenerator(new MersenneTwister(randSeed))).gaussian(0, 1)
val nConstant = normConstant()
val iR = invchol(cholesky(v).t)
/** Draws a new sample from the Multivariate Gaussian */
def draw(): DenseVector[Double] = R * DenseVector.rand(m.length, stdGauss) += m
/** Draws a new matrix of samples from the Multivariate Gaussian */
def draw(n : Int): DenseMatrix[Double] = {
val S : DenseMatrix[Double] = R * DenseMatrix.rand(m.length, n, stdGauss)
for(i <- 0 until m.length; j <- 0 until n)
S.update(i, j, S(i,j) + m(i))
return S
}
/**
* Returns the value of probability density function for a given value of x.
*/
def pdf(x: Double): Double = pdf(DenseVector(x))
/**
* Returns the value of probability density function for a given value of vector x.
*/
def pdf(x: DenseVector[Double]): Double = nConstant * exp(-0.5 * ((x - m).t * (iR * (x - m))))
def invchol(R:DenseMatrix[Double]): DenseMatrix[Double] = {
val Rinv = inv(R)
Rinv*Rinv.t
}
def normConstant(): Double = 1d / (pow(2 * Pi, m.size.toDouble / 2) * sqrt(det(v)))
}
object MultivariateGaussian {
/**
* Creates a Gaussian sampler for scalars
*
* @param m Mean
* @param v Variance
*/
def apply(m: Double, v: Double, randSeed: Int): MultivariateGaussian =
new MultivariateGaussian(DenseVector(m), DenseMatrix(v), randSeed)
/**
* Creates a Standard Multivariate Gaussian sampler for a specific dimension
*
* @param dim dimension
* @param randSeed seed
*/
def apply(dim : Int, randSeed: Int): MultivariateGaussian =
new MultivariateGaussian(DenseVector.zeros(dim), DenseMatrix.eye(dim), randSeed)
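  // Editor's illustrative sketch (not part of the original file): for a 1-D standard
  // Gaussian, the density at the mean is the normalisation constant 1 / sqrt(2 * Pi),
  // roughly 0.3989.
  private def pdfAtMeanSketch: Double =
    MultivariateGaussian(dim = 1, randSeed = 42).pdf(0.0)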
}
|
eirasf/KNiNe
|
src/main/scala/es/udc/graph/utils/MultivariateGaussian.scala
|
Scala
|
gpl-2.0
| 2,185
|
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.qsu
import slamdata.Predef._
import quasar.{IdStatus, RenderTree, RenderTreeT}
import quasar.common.{JoinType, SortDir}
import quasar.contrib.iota._
import quasar.contrib.matryoshka._
import quasar.contrib.pathy.AFile
import quasar.contrib.std.errorNotImplemented
import quasar.ejson.{EJson, Fixed}
import quasar.fp.ski.{ι, κ}
import quasar.fp._
import quasar.qscript._
import quasar.qscript.RecFreeS._
import quasar.qsu.mra.AutoJoin
import matryoshka.{Hole => _, birecursiveIso => _, _} // {delayEqual, equalTEqual, delayShow, showTShow, BirecursiveT, Delay, Embed, EqualT, ShowT}
import matryoshka.data._
import matryoshka.patterns.{CoEnv, EnvT}
import monocle.{Iso, PTraversal, Prism}
import pathy.Path
import scalaz.{Applicative, Bitraverse, Cofree, Enum, Equal, Forall, Free, Functor, Id, Order, Scalaz, Show, Traverse, \\/-, NonEmptyList => NEL}
import scalaz.std.anyVal._
import scalaz.std.list._
import scalaz.std.tuple._
import scalaz.syntax.show._
import shims.{showToCats, showToScalaz}
sealed trait QScriptUniform[T[_[_]], A] extends Product with Serializable
object QScriptUniform {
implicit def traverse[T[_[_]]]: Traverse[QScriptUniform[T, ?]] = new Traverse[QScriptUniform[T, ?]] {
// we need both apply and traverse syntax, which conflict
import Scalaz._
def traverseImpl[G[_]: Applicative, A, B](qsu: QScriptUniform[T, A])(f: A => G[B])
: G[QScriptUniform[T, B]] = qsu match {
case AutoJoin2(left, right, combiner) =>
(f(left) |@| f(right))(AutoJoin2(_, _, combiner))
case AutoJoin3(left, center, right, combiner) =>
(f(left) |@| f(center) |@| f(right))(AutoJoin3(_, _, _, combiner))
case QSAutoJoin(left, right, keys, combiner) =>
(f(left) |@| f(right))(QSAutoJoin(_, _, keys, combiner))
case GroupBy(left, right) =>
(f(left) |@| f(right))(GroupBy(_, _))
case DimEdit(source, dtrans) =>
f(source).map(DimEdit(_, dtrans))
case LPJoin(left, right, condition, joinType, leftRef, rightRef) =>
(f(left) |@| f(right) |@| f(condition))(LPJoin(_, _, _, joinType, leftRef, rightRef))
case ThetaJoin(left, right, condition, joinType, combiner) =>
(f(left) |@| f(right))(ThetaJoin(_, _, condition, joinType, combiner))
case Unary(source, mf) =>
f(source).map(Unary(_, mf))
case Map(source, fm) =>
f(source).map(Map(_, fm))
case Read(path, idStatus) =>
(Read(path, idStatus): QScriptUniform[T, B]).point[G]
case Transpose(source, retain, rotations) =>
f(source).map(Transpose(_, retain, rotations))
case LeftShift(source, struct, idStatus, onUndefined, repair, rot) =>
f(source).map(LeftShift(_, struct, idStatus, onUndefined, repair, rot))
case LPReduce(source, reduce) =>
f(source).map(LPReduce(_, reduce))
case QSReduce(source, buckets, reducers, repair) =>
f(source).map(QSReduce(_, buckets, reducers, repair))
case Distinct(source) =>
f(source).map(Distinct(_))
case LPSort(source, order) =>
val T = Bitraverse[(?, ?)].leftTraverse[SortDir]
val source2G = f(source)
val orders2G = order.traverse(p => T.traverse(p)(f))
(source2G |@| orders2G)(LPSort(_, _))
case QSSort(source, buckets, order) =>
f(source).map(QSSort(_, buckets, order))
case Union(left, right) =>
(f(left) |@| f(right))(Union(_, _))
case Subset(from, op, count) =>
(f(from) |@| f(count))(Subset(_, op, _))
case LPFilter(source, predicate) =>
(f(source) |@| f(predicate))(LPFilter(_, _))
case QSFilter(source, predicate) =>
f(source).map(QSFilter(_, predicate))
case JoinSideRef(id) => (JoinSideRef(id): QScriptUniform[T, B]).point[G]
case Unreferenced() => (Unreferenced(): QScriptUniform[T, B]).point[G]
}
}
implicit def show[T[_[_]]: ShowT]: Delay[Show, QScriptUniform[T, ?]] =
new Delay[Show, QScriptUniform[T, ?]] {
def apply[A](a: Show[A]) = {
implicit val showA = a
Show shows {
case AutoJoin2(left, right, combiner) =>
s"AutoJoin2(${left.shows}, ${right.shows}, ${combiner.shows})"
case AutoJoin3(left, center, right, combiner) =>
s"AutoJoin3(${left.shows}, ${center.shows}, ${right.shows}, ${combiner.shows})"
case QSAutoJoin(left, right, keys, combiner) =>
s"QSAutoJoin(${left.shows}, ${right.shows}, ${keys.shows}, ${combiner.shows})"
case GroupBy(left, right) =>
s"GroupBy(${left.shows}, ${right.shows})"
case DimEdit(source, dtrans) =>
s"DimEdit(${source.shows}, ${dtrans.shows})"
case LPJoin(left, right, condition, joinType, leftRef, rightRef) =>
s"LPJoin(${left.shows}, ${right.shows}, ${condition.shows}, ${joinType.shows}, ${leftRef.shows}, ${rightRef.shows})"
case ThetaJoin(left, right, condition, joinType, combiner) =>
s"ThetaJoin(${left.shows}, ${right.shows}, ${condition.shows}, ${joinType.shows}, ${combiner.shows})"
case Unary(source, mf) =>
s"Unary(${source.shows}, ${mf.shows})"
case Map(source, fm) =>
s"Map(${source.shows}, ${fm.shows})"
case Read(path, idStatus) =>
s"Read(${Path.posixCodec.printPath(path)}, ${idStatus.shows})"
case Transpose(source, retain, rotations) =>
s"Transpose(${source.shows}, ${retain.shows}, ${rotations.shows})"
case LeftShift(source, struct, idStatus, onUndefined, repair, rot) =>
s"LeftShift(${source.shows}, ${struct.linearize.shows}, ${idStatus.shows}, ${onUndefined.shows}, ${repair.shows}, ${rot.shows})"
case LPReduce(source, reduce) =>
s"LPReduce(${source.shows}, ${reduce.shows})"
case QSReduce(source, buckets, reducers, repair) =>
s"QSReduce(${source.shows}, ${buckets.shows}, ${reducers.shows}, ${repair.shows})"
case Distinct(source) =>
s"Distinct(${source.shows})"
case LPSort(source, order) =>
s"LPSort(${source.shows}, ${order.shows})"
case QSSort(source, buckets, order) =>
s"QSSort(${source.shows}, ${buckets.shows}, ${order.shows})"
case Union(left, right) =>
s"Union(${left.shows}, ${right.shows})"
case Subset(from, op, count) =>
s"Subset(${from.shows}, ${op.shows}, ${count.shows})"
case LPFilter(source, predicate) =>
s"LPFilter(${source.shows}, ${predicate.shows})"
case QSFilter(source, predicate) =>
s"QSFilter(${source.shows}, ${predicate.shows})"
case JoinSideRef(id) =>
s"JoinSideRef(${id.shows})"
case Unreferenced() =>
"⊥"
}
}
}
@SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements"))
implicit def renderTree[T[_[_]]: RenderTreeT: ShowT]
: Delay[RenderTree, QScriptUniform[T, ?]] = errorNotImplemented
@SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements"))
implicit def equal[T[_[_]]: BirecursiveT: EqualT]
: Delay[Equal, QScriptUniform[T, ?]] = errorNotImplemented
final case class AutoJoin2[T[_[_]], A](
left: A,
right: A,
combiner: FreeMapA[T, JoinSide]) extends QScriptUniform[T, A]
final case class AutoJoin3[T[_[_]], A](
left: A,
center: A,
right: A,
combiner: FreeMapA[T, JoinSide3]) extends QScriptUniform[T, A]
final case class QSAutoJoin[T[_[_]], A](
left: A,
right: A,
keys: AutoJoin[T[EJson], IdAccess],
combiner: JoinFunc[T]) extends QScriptUniform[T, A]
final case class GroupBy[T[_[_]], A](
left: A,
right: A) extends QScriptUniform[T, A]
final case class DimEdit[T[_[_]], A](
source: A,
trans: DTrans[T]) extends QScriptUniform[T, A]
sealed trait DTrans[T[_[_]]] extends Product with Serializable
object DTrans {
final case class Squash[T[_[_]]]() extends DTrans[T]
final case class Group[T[_[_]]](getKey: FreeMap[T]) extends DTrans[T]
implicit def show[T[_[_]]: ShowT]: Show[DTrans[T]] =
Show.shows[DTrans[T]] {
case Squash() => "Squash"
case Group(k) => s"Group(${k.shows})"
}
}
// LPish
final case class LPJoin[T[_[_]], A](
left: A,
right: A,
condition: A,
joinType: JoinType,
leftRef: Symbol,
rightRef: Symbol) extends QScriptUniform[T, A]
// QScriptish
final case class ThetaJoin[T[_[_]], A](
left: A,
right: A,
condition: JoinFunc[T],
joinType: JoinType,
combiner: JoinFunc[T]) extends QScriptUniform[T, A]
/**
* This is a non-free (as in monad) variant of Map. We need it
* in ReadLP so that graph compaction is defined, which is required
* because compaction utilizes an `SMap[QSU[Symbol], Symbol]`, which
* isn't valid when the `QSU`s inside the keys are libre.
*/
final case class Unary[T[_[_]], A](
source: A,
mf: MapFunc[T, Hole]) extends QScriptUniform[T, A]
final case class Map[T[_[_]], A](
source: A,
fm: RecFreeMap[T]) extends QScriptUniform[T, A]
final case class Read[T[_[_]], A](
path: AFile,
idStatus: IdStatus) extends QScriptUniform[T, A]
// LPish
final case class Transpose[T[_[_]], A](
source: A,
retain: Retain,
rotations: Rotation) extends QScriptUniform[T, A]
sealed trait Retain extends Product with Serializable {
def fold[A](ids: => A, vals: => A): A = this match {
case Retain.Identities => ids
case Retain.Values => vals
}
}
object Retain {
case object Identities extends Retain
case object Values extends Retain
implicit val enum: Enum[Retain] =
new Enum[Retain] {
def succ(r: Retain) =
r match {
case Identities => Values
case Values => Identities
}
def pred(r: Retain) =
r match {
case Identities => Values
case Values => Identities
}
override val min = Some(Identities)
override val max = Some(Values)
def order(x: Retain, y: Retain) =
Order[Int].order(toInt(x), toInt(y))
val toInt: Retain => Int = {
case Identities => 0
case Values => 1
}
}
implicit val show: Show[Retain] =
Show.showFromToString
}
sealed trait Rotation extends Product with Serializable
object Rotation {
case object FlattenArray extends Rotation
case object ShiftArray extends Rotation
case object FlattenMap extends Rotation
case object ShiftMap extends Rotation
implicit val enum: Enum[Rotation] =
new Enum[Rotation] {
def succ(r: Rotation) =
r match {
case FlattenArray => ShiftArray
case ShiftArray => FlattenMap
case FlattenMap => ShiftMap
case ShiftMap => FlattenArray
}
def pred(r: Rotation) =
r match {
case FlattenArray => ShiftMap
case ShiftArray => FlattenArray
case FlattenMap => ShiftArray
case ShiftMap => FlattenMap
}
override val min = Some(FlattenArray)
override val max = Some(ShiftMap)
def order(x: Rotation, y: Rotation) =
Order[Int].order(toInt(x), toInt(y))
val toInt: Rotation => Int = {
case FlattenArray => 0
case ShiftArray => 1
case FlattenMap => 2
case ShiftMap => 3
}
}
implicit val show: Show[Rotation] =
Show.showFromToString
}
// QScriptish
final case class LeftShift[T[_[_]], A](
source: A,
struct: RecFreeMap[T],
idStatus: IdStatus,
onUndefined: OnUndefined,
repair: JoinFunc[T],
rot: Rotation) extends QScriptUniform[T, A]
// LPish
final case class LPReduce[T[_[_]], A](
source: A,
reduce: ReduceFunc[Unit]) extends QScriptUniform[T, A]
// QScriptish
final case class QSReduce[T[_[_]], A](
source: A,
buckets: List[FreeMapA[T, Access[Hole]]],
// TODO: NEL
reducers: List[ReduceFunc[FreeMap[T]]],
repair: FreeMapA[T, ReduceIndex]) extends QScriptUniform[T, A]
final case class Distinct[T[_[_]], A](source: A) extends QScriptUniform[T, A]
// LPish
final case class LPSort[T[_[_]], A](
source: A,
order: NEL[(A, SortDir)]) extends QScriptUniform[T, A]
// QScriptish
final case class QSSort[T[_[_]], A](
source: A,
buckets: List[FreeMapA[T, Access[Hole]]],
order: NEL[(FreeMap[T], SortDir)]) extends QScriptUniform[T, A]
final case class Union[T[_[_]], A](left: A, right: A) extends QScriptUniform[T, A]
final case class Subset[T[_[_]], A](
from: A,
op: SelectionOp,
count: A) extends QScriptUniform[T, A]
// LPish
final case class LPFilter[T[_[_]], A](
source: A,
predicate: A) extends QScriptUniform[T, A]
// QScriptish
final case class QSFilter[T[_[_]], A](
source: A,
predicate: RecFreeMap[T]) extends QScriptUniform[T, A]
final case class Unreferenced[T[_[_]], A]() extends QScriptUniform[T, A]
final case class JoinSideRef[T[_[_]], A](id: Symbol) extends QScriptUniform[T, A]
final class Optics[T[_[_]]] private () extends QSUTTypes[T] {
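    // One Prism per QScriptUniform constructor: the partial function pattern-matches a node
    // into its fields, and the second argument rebuilds the node from those fields.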
def autojoin2[A]: Prism[QScriptUniform[A], (A, A, FreeMapA[JoinSide])] =
Prism.partial[QScriptUniform[A], (A, A, FreeMapA[JoinSide])] {
case AutoJoin2(left, right, func) => (left, right, func)
} { case (left, right, func) => AutoJoin2(left, right, func) }
def autojoin3[A]: Prism[QScriptUniform[A], (A, A, A, FreeMapA[JoinSide3])] =
Prism.partial[QScriptUniform[A], (A, A, A, FreeMapA[JoinSide3])] {
case AutoJoin3(left, center, right, func) => (left, center, right, func)
} { case (left, center, right, func) => AutoJoin3(left, center, right, func) }
def dimEdit[A]: Prism[QScriptUniform[A], (A, DTrans[T])] =
Prism.partial[QScriptUniform[A], (A, DTrans[T])] {
case DimEdit(a, dt) => (a, dt)
} { case (a, dt) => DimEdit(a, dt) }
def distinct[A]: Prism[QScriptUniform[A], A] =
Prism.partial[QScriptUniform[A], A] {
case Distinct(a) => a
} (Distinct(_))
def groupBy[A]: Prism[QScriptUniform[A], (A, A)] =
Prism.partial[QScriptUniform[A], (A, A)] {
case GroupBy(l, r) => (l, r)
} { case (l, r) => GroupBy(l, r) }
def joinSideRef[A]: Prism[QScriptUniform[A], Symbol] =
Prism.partial[QScriptUniform[A], Symbol] {
case JoinSideRef(s) => s
} (JoinSideRef(_))
def leftShift[A]: Prism[QScriptUniform[A], (A, RecFreeMap, IdStatus, OnUndefined, JoinFunc, Rotation)] =
Prism.partial[QScriptUniform[A], (A, RecFreeMap, IdStatus, OnUndefined, JoinFunc, Rotation)] {
case LeftShift(s, fm, ids, ou, jf, rot) => (s, fm, ids, ou, jf, rot)
} { case (s, fm, ids, ou, jf, rot) => LeftShift(s, fm, ids, ou, jf, rot) }
def lpFilter[A]: Prism[QScriptUniform[A], (A, A)] =
Prism.partial[QScriptUniform[A], (A, A)] {
case LPFilter(s, p) => (s, p)
} { case (s, p) => LPFilter(s, p) }
def lpJoin[A]: Prism[QScriptUniform[A], (A, A, A, JoinType, Symbol, Symbol)] =
Prism.partial[QScriptUniform[A], (A, A, A, JoinType, Symbol, Symbol)] {
case LPJoin(l, r, c, t, lr, rr) => (l, r, c, t, lr, rr)
} { case (l, r, c, t, lr, rr) => LPJoin(l, r, c, t, lr, rr) }
def lpReduce[A]: Prism[QScriptUniform[A], (A, ReduceFunc[Unit])] =
Prism.partial[QScriptUniform[A], (A, ReduceFunc[Unit])] {
case LPReduce(a, rf) => (a, rf)
} { case (a, rf) => LPReduce(a, rf) }
def lpSort[A]: Prism[QScriptUniform[A], (A, NEL[(A, SortDir)])] =
Prism.partial[QScriptUniform[A], (A, NEL[(A, SortDir)])] {
case LPSort(a, keys) => (a, keys)
} { case (a, keys) => LPSort(a, keys) }
def unary[A]: Prism[QScriptUniform[A], (A, MapFunc[Hole])] =
Prism.partial[QScriptUniform[A], (A, MapFunc[Hole])] {
case Unary(a, mf) => (a, mf)
} { case (a, mf) => Unary(a, mf) }
def map[A]: Prism[QScriptUniform[A], (A, RecFreeMap)] =
Prism.partial[QScriptUniform[A], (A, RecFreeMap)] {
case Map(a, fm) => (a, fm)
} { case (a, fm) => Map(a, fm) }
def qsAutoJoin[A]: Prism[QScriptUniform[A], (A, A, AutoJoin[T[EJson], IdAccess], JoinFunc)] =
Prism.partial[QScriptUniform[A], (A, A, AutoJoin[T[EJson], IdAccess], JoinFunc)] {
case QSAutoJoin(l, r, ks, c) => (l, r, ks, c)
} { case (l, r, ks, c) => QSAutoJoin(l, r, ks, c) }
def qsFilter[A]: Prism[QScriptUniform[A], (A, RecFreeMap)] =
Prism.partial[QScriptUniform[A], (A, RecFreeMap)] {
case QSFilter(a, p) => (a, p)
} { case (a, p) => QSFilter(a, p) }
def qsReduce[A]: Prism[QScriptUniform[A], (A, List[FreeAccess[Hole]], List[ReduceFunc[FreeMap]], FreeMapA[ReduceIndex])] =
Prism.partial[QScriptUniform[A], (A, List[FreeAccess[Hole]], List[ReduceFunc[FreeMap]], FreeMapA[ReduceIndex])] {
case QSReduce(a, bs, rfs, rep) => (a, bs, rfs, rep)
} { case (a, bs, rfs, rep) => QSReduce(a, bs, rfs, rep) }
def qsSort[A]: Prism[QScriptUniform[A], (A, List[FreeAccess[Hole]], NEL[(FreeMap, SortDir)])] =
Prism.partial[QScriptUniform[A], (A, List[FreeAccess[Hole]], NEL[(FreeMap, SortDir)])] {
case QSSort(a, buckets, keys) => (a, buckets, keys)
} { case (a, buckets, keys) => QSSort(a, buckets, keys) }
def read[A]: Prism[QScriptUniform[A], (AFile, IdStatus)] =
Prism.partial[QScriptUniform[A], (AFile, IdStatus)] {
case Read(f, s) => (f, s)
} { case (f, s) => Read(f, s) }
def subset[A]: Prism[QScriptUniform[A], (A, SelectionOp, A)] =
Prism.partial[QScriptUniform[A], (A, SelectionOp, A)] {
case Subset(f, op, c) => (f, op, c)
} { case (f, op, c) => Subset(f, op, c) }
def thetaJoin[A]: Prism[QScriptUniform[A], (A, A, JoinFunc, JoinType, JoinFunc)] =
Prism.partial[QScriptUniform[A], (A, A, JoinFunc, JoinType, JoinFunc)] {
case ThetaJoin(l, r, c, t, b) => (l, r, c, t, b)
} { case (l, r, c, t, b) => ThetaJoin(l, r, c, t, b) }
def transpose[A]: Prism[QScriptUniform[A], (A, Retain, Rotation)] =
Prism.partial[QScriptUniform[A], (A, Retain, Rotation)] {
case Transpose(a, ret, rot) => (a, ret, rot)
} { case (a, ret, rot) => Transpose(a, ret, rot) }
def union[A]: Prism[QScriptUniform[A], (A, A)] =
Prism.partial[QScriptUniform[A], (A, A)] {
case Union(l, r) => (l, r)
} { case (l, r) => Union(l, r) }
def unreferenced[A]: Prism[QScriptUniform[A], Unit] =
Prism.partial[QScriptUniform[A], Unit] {
case Unreferenced() => ()
} (κ(Unreferenced()))
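    // Traverses every recursive (A) position of a node, delegating to the Traverse instance.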
def holes[A, B]: PTraversal[QScriptUniform[A], QScriptUniform[B], A, B] =
PTraversal.fromTraverse[QScriptUniform, A, B]
}
object Optics {
def apply[T[_[_]]]: Optics[T] = new Optics[T]
}
sealed abstract class Dsl[T[_[_]]: BirecursiveT, F[_]: Functor, A] extends QSUTTypes[T] {
import Scalaz._
val iso: Iso[A, F[QScriptUniform[A]]]
def lifting[S, A]: Prism[S, A] => Prism[F[S], F[A]]
val recFunc = construction.RecFunc[T]
type Bin[A] = (A, A) => Binary[T, A]
type Tri[A] = (A, A, A) => Ternary[T, A]
private val O = Optics[T]
def mfc[A] = PrismNT.injectCopK[MapFuncCore, MapFunc].asPrism[A]
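    // Lifts a constructor-level Prism through the functor F (via `lifting`) and then through
    // the recursive wrapper (via `iso`), yielding a Prism from A to the constructor's fields in F.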
private def composeLifting[G[_]](optic: Prism[QScriptUniform[A], G[A]]) =
iso composePrism lifting[QScriptUniform[A], G[A]](optic)
def _autojoin2: Prism[A, F[(A, A, FreeMapA[JoinSide])]] = {
type G[A] = (A, A, FreeMapA[JoinSide])
composeLifting[G](O.autojoin2[A])
}
def _autojoin3: Prism[A, F[(A, A, A, FreeMapA[JoinSide3])]] = {
type G[A] = (A, A, A, FreeMapA[JoinSide3])
composeLifting[G](O.autojoin3[A])
}
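    // Convenience constructors: apply the rank-2 combiner to the join sides and lift the
    // resulting MapFunc into the FreeMap expected by _autojoin2/_autojoin3.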
def autojoin2(input: F[(A, A, Forall.CPS[Bin])]): A =
_autojoin2(input.map {
case (left, right, combiner) =>
(left, right,
Free.liftF(mfc(Forall[Bin](combiner)[JoinSide](LeftSide, RightSide))))
})
def autojoin3(input: F[(A, A, A, Forall.CPS[Tri])]): A =
_autojoin3(input.map {
case (left, center, right, combiner) =>
(left, center, right,
Free.liftF(mfc(Forall[Tri](combiner)[JoinSide3](LeftSide3, Center, RightSide3))))
})
def dimEdit: Prism[A, F[(A, DTrans[T])]] =
composeLifting[(?, DTrans[T])](O.dimEdit[A])
def distinct: Prism[A, F[A]] =
composeLifting[Id](O.distinct[A])
def groupBy: Prism[A, F[(A, A)]] = {
type G[A] = (A, A)
composeLifting[G](O.groupBy[A])
}
def joinSideRef: Prism[A, F[Symbol]] = {
type G[A] = Symbol
composeLifting[G](O.joinSideRef[A])
}
def leftShift: Prism[A, F[(A, RecFreeMap, IdStatus, OnUndefined, JoinFunc, Rotation)]] = {
composeLifting[(?, RecFreeMap, IdStatus, OnUndefined, JoinFunc, Rotation)](O.leftShift[A])
}
def lpFilter: Prism[A, F[(A, A)]] = {
type G[A] = (A, A)
composeLifting[G](O.lpFilter[A])
}
def lpJoin: Prism[A, F[(A, A, A, JoinType, Symbol, Symbol)]] = {
type G[A] = (A, A, A, JoinType, Symbol, Symbol)
composeLifting[G](O.lpJoin[A])
}
def lpReduce: Prism[A, F[(A, ReduceFunc[Unit])]] =
composeLifting[(?, ReduceFunc[Unit])](O.lpReduce[A])
def lpSort: Prism[A, F[(A, NEL[(A, SortDir)])]] = {
type G[A] = (A, NEL[(A, SortDir)])
composeLifting[G](O.lpSort[A])
}
def unary: Prism[A, F[(A, MapFunc[Hole])]] =
composeLifting[(?, MapFunc[Hole])](O.unary[A])
def map: Prism[A, F[(A, RecFreeMap)]] =
composeLifting[(?, RecFreeMap)](O.map[A])
def map1(pair: F[(A, MapFuncCore[Hole])]): A =
map(pair.map {
case(src, f) => (src, RecFreeS.roll(mfc(f.as(recFunc.Hole))))
})
def qsAutoJoin: Prism[A, F[(A, A, AutoJoin[T[EJson], IdAccess], JoinFunc)]] = {
type G[A] = (A, A, AutoJoin[T[EJson], IdAccess], JoinFunc)
composeLifting[G](O.qsAutoJoin[A])
}
def qsFilter: Prism[A, F[(A, RecFreeMap)]] =
composeLifting[(?, RecFreeMap)](O.qsFilter[A])
def qsReduce: Prism[A, F[(A, List[FreeAccess[Hole]], List[ReduceFunc[FreeMap]], FreeMapA[ReduceIndex])]] =
composeLifting[(?, List[FreeAccess[Hole]], List[ReduceFunc[FreeMap]], FreeMapA[ReduceIndex])](O.qsReduce[A])
def qsSort: Prism[A, F[(A, List[FreeAccess[Hole]], NEL[(FreeMap, SortDir)])]] =
composeLifting[(?, List[FreeAccess[Hole]], NEL[(FreeMap, SortDir)])](O.qsSort[A])
def read: Prism[A, F[(AFile, IdStatus)]] = {
type G[_] = (AFile, IdStatus)
composeLifting[G](O.read[A])
}
def subset: Prism[A, F[(A, SelectionOp, A)]] = {
type G[A] = (A, SelectionOp, A)
composeLifting[G](O.subset[A])
}
def thetaJoin: Prism[A, F[(A, A, JoinFunc, JoinType, JoinFunc)]] = {
type G[A] = (A, A, JoinFunc, JoinType, JoinFunc)
composeLifting[G](O.thetaJoin[A])
}
def transpose: Prism[A, F[(A, Retain, Rotation)]] =
composeLifting[(?, Retain, Rotation)](O.transpose[A])
def union: Prism[A, F[(A, A)]] = {
type G[A] = (A, A)
composeLifting[G](O.union[A])
}
def unreferenced: Prism[A, F[Unit]] = {
type G[_] = Unit
composeLifting[G](O.unreferenced[A])
}
}
sealed abstract class DslT[T[_[_]]: BirecursiveT] private () extends Dsl[T, Id.Id, T[QScriptUniform[T, ?]]] {
type QSU[A] = QScriptUniform[A]
private val J = Fixed[T[EJson]]
// read
def tread(file: AFile): T[QSU] =
read((file, IdStatus.ExcludeId))
def tread1(name: String): T[QSU] =
tread(Path.rootDir </> Path.file(name))
// undefined
val undefined: Prism[T[QSU], Unit] =
Prism[T[QSU], Unit](map.getOption(_) collect {
case (Unreferenced(), Embed(CoEnv(\\/-(Suspend(MFC(MapFuncsCore.Undefined())))))) => ()
})(_ => map(unreferenced(), recFunc.Undefined[Hole]))
// constants
val constant: Prism[T[QSU], T[EJson]] =
Prism[T[QSU], T[EJson]](map.getOption(_) collect {
case (Unreferenced(), Embed(CoEnv(\\/-(Suspend(MFC(MapFuncsCore.Constant(ejs))))))) => ejs
})(ejs => map(unreferenced(), recFunc.Constant[Hole](ejs)))
val carr: Prism[T[QSU], List[T[EJson]]] =
constant composePrism J.arr
val cbool: Prism[T[QSU], Boolean] =
constant composePrism J.bool
val cchar: Prism[T[QSU], Char] =
constant composePrism J.char
val cdec: Prism[T[QSU], BigDecimal] =
constant composePrism J.dec
val cint: Prism[T[QSU], BigInt] =
constant composePrism J.int
val cmap: Prism[T[QSU], List[(T[EJson], T[EJson])]] =
constant composePrism J.map
val cmeta: Prism[T[QSU], (T[EJson], T[EJson])] =
constant composePrism J.meta
val cnull: Prism[T[QSU], Unit] =
constant composePrism J.nul
val cstr: Prism[T[QSU], String] =
constant composePrism J.str
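    // Usage sketch: `cstr("foo")` builds the constant node Map(Unreferenced(), Constant(<EJson "foo">)),
    // and `cstr.getOption(qsu)` recovers the string only when a node has exactly that shape.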
}
object DslT {
def apply[T[_[_]]: BirecursiveT]: DslT[T] =
new DslT {
val iso: Iso[T[QSU], QSU[T[QSU]]] = birecursiveIso[T[QSU], QSU]
def lifting[S, A]: Prism[S, A] => Prism[S, A] = ι
}
}
object AnnotatedDsl {
import Scalaz._
def apply[T[_[_]]: BirecursiveT, A]
: Dsl[T, (A, ?), Cofree[QScriptUniform[T, ?], A]] = {
type QSU[B] = QScriptUniform[T, B]
type CoQSU = Cofree[QSU, A]
new Dsl[T, (A, ?), CoQSU] {
val iso: Iso[CoQSU, (A, QSU[CoQSU])] =
birecursiveIso[CoQSU, EnvT[A, QSU, ?]]
.composeIso(envTIso[A, QSU, CoQSU])
def lifting[S, B]: Prism[S, B] => Prism[(A, S), (A, B)] =
_.second[A]
}
}
}
}
|
slamdata/quasar
|
qsu/src/main/scala/quasar/qsu/QScriptUniform.scala
|
Scala
|
apache-2.0
| 26,516
|
package net.debasishg.codemesh.domain.trade
package model
import scalaz._
import Scalaz._
import \\/._
trait ContractNoteModel {this: TradeModel =>
val isAddressDefined = true
case class ContractNote(trade: Trade)
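  // Validation via scalaz's disjunction: the contract note on the right when the address
  // is defined, an error message on the left otherwise.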
def makeContractNote(trade: Trade): \\/[String, ContractNote] =
if (isAddressDefined) right(ContractNote(trade)) else left("Address not defined")
}
|
debasishg/codemesh14
|
src/main/scala/net/debasishg/codemesh/contractnotemodel.scala
|
Scala
|
apache-2.0
| 373
|
package com.mesosphere.universe.v3.model
case class Images(
iconSmall: String,
iconMedium: String,
iconLarge: String,
screenshots: Option[List[String]]
)
|
movicha/cosmos
|
cosmos-model/src/main/scala/com/mesosphere/universe/v3/model/Images.scala
|
Scala
|
apache-2.0
| 163
|
package org.greenrd.seraphim
import java.io.File
import com.typesafe.scalalogging.LazyLogging
import scala.sys.process.{Process, ProcessBuilder}
class GitRepo (val path: File) extends LazyLogging {
if (!path.exists()) {
path.mkdir()
}
require(path.isDirectory)
private def runProcess(processBuilder: ProcessBuilder): String =
processBuilder.lineStream.mkString("\\n")
def gitCommand(command: String*): String =
runProcess(Process("git" +: command, path))
}
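// Usage sketch (hypothetical path): new GitRepo(new File("/tmp/seraphim-repo")).gitCommand("status")
// runs `git status` with the repository directory as the working directory and returns its output.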
|
greenrd/project-seraphim
|
src/main/scala/org/greenrd/seraphim/GitRepo.scala
|
Scala
|
mit
| 484
|
import scala.language.experimental.macros
import scala.reflect.macros.blackbox.Context
trait Iso[T, U] {
def to(t : T) : U
// def from(u : U) : T
}
object Iso {
implicit def materializeIso[T, U]: Iso[T, U] = macro impl[T, U]
def impl[T: c.WeakTypeTag, U: c.WeakTypeTag](c: Context): c.Expr[Iso[T, U]] = {
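    // Materializes an anonymous Iso[T, (F1, ..., Fn)] for a case class T: the generated `to`
    // projects an instance onto the tuple of its case accessors (no `from` is generated).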
import c.universe._
import definitions._
import Flag._
val sym = c.weakTypeOf[T].typeSymbol
if (!sym.isClass || !sym.asClass.isCaseClass) c.abort(c.enclosingPosition, s"$sym is not a case class")
val fields = sym.info.decls.toList.collect{ case x: TermSymbol if x.isVal && x.isCaseAccessor => x }
def mkTpt() = {
val core = Ident(TupleClass(fields.length) orElse UnitClass)
if (fields.length == 0) core
else AppliedTypeTree(core, fields map (f => TypeTree(f.info)))
}
def mkFrom() = {
if (fields.length == 0) Literal(Constant(Unit))
else Apply(Ident(newTermName("Tuple" + fields.length)), fields map (f => Select(Ident(newTermName("f")), newTermName(f.name.toString.trim))))
}
val evidenceClass = ClassDef(Modifiers(FINAL), newTypeName("$anon"), List(), Template(
List(AppliedTypeTree(Ident(newTypeName("Iso")), List(Ident(sym), mkTpt()))),
emptyValDef,
List(
DefDef(Modifiers(), termNames.CONSTRUCTOR, List(), List(List()), TypeTree(), Block(List(Apply(Select(Super(This(typeNames.EMPTY), typeNames.EMPTY), termNames.CONSTRUCTOR), List())), Literal(Constant(())))),
DefDef(Modifiers(), newTermName("to"), List(), List(List(ValDef(Modifiers(PARAM), newTermName("f"), Ident(sym), EmptyTree))), TypeTree(), mkFrom()))))
c.Expr[Iso[T, U]](Block(List(evidenceClass), Apply(Select(New(Ident(newTypeName("$anon"))), termNames.CONSTRUCTOR), List())))
}
}
|
folone/dotty
|
tests/untried/neg-with-implicits/macro-blackbox-fundep-materialization/Macros_1.scala
|
Scala
|
bsd-3-clause
| 1,781
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.{ByteArrayOutputStream, File}
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import java.util.UUID
import java.util.concurrent.atomic.AtomicLong
import scala.reflect.runtime.universe.TypeTag
import scala.util.Random
import org.scalatest.matchers.must.Matchers
import org.scalatest.matchers.should.Matchers._
import org.apache.spark.SparkException
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.encoders.{ExpressionEncoder, RowEncoder}
import org.apache.spark.sql.catalyst.expressions.Uuid
import org.apache.spark.sql.catalyst.optimizer.ConvertToLocalRelation
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, OneRowRelation}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.{FilterExec, QueryExecution, WholeStageCodegenExec}
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
import org.apache.spark.sql.execution.aggregate.HashAggregateExec
import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ReusedExchangeExec, ShuffleExchangeExec}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.{ExamplePoint, ExamplePointUDT, SharedSparkSession}
import org.apache.spark.sql.test.SQLTestData.{DecimalData, TestData2}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
import org.apache.spark.util.Utils
import org.apache.spark.util.random.XORShiftRandom
class DataFrameSuite extends QueryTest
with SharedSparkSession
with AdaptiveSparkPlanHelper {
import testImplicits._
test("analysis error should be eagerly reported") {
intercept[Exception] { testData.select("nonExistentName") }
intercept[Exception] {
testData.groupBy("key").agg(Map("nonExistentName" -> "sum"))
}
intercept[Exception] {
testData.groupBy("nonExistentName").agg(Map("key" -> "sum"))
}
intercept[Exception] {
testData.groupBy($"abcd").agg(Map("key" -> "sum"))
}
}
test("dataframe toString") {
assert(testData.toString === "[key: int, value: string]")
assert(testData("key").toString === "key")
assert($"test".toString === "test")
}
test("rename nested groupby") {
val df = Seq((1, (1, 1))).toDF()
checkAnswer(
df.groupBy("_1").agg(sum("_2._1")).toDF("key", "total"),
Row(1, 1) :: Nil)
}
test("access complex data") {
assert(complexData.filter(complexData("a").getItem(0) === 2).count() == 1)
assert(complexData.filter(complexData("m").getItem("1") === 1).count() == 1)
assert(complexData.filter(complexData("s").getField("key") === 1).count() == 1)
}
test("table scan") {
checkAnswer(
testData,
testData.collect().toSeq)
}
test("empty data frame") {
assert(spark.emptyDataFrame.columns.toSeq === Seq.empty[String])
assert(spark.emptyDataFrame.count() === 0)
}
test("head, take and tail") {
assert(testData.take(2) === testData.collect().take(2))
assert(testData.head(2) === testData.collect().take(2))
assert(testData.tail(2) === testData.collect().takeRight(2))
assert(testData.head(2).head.schema === testData.schema)
}
test("dataframe alias") {
val df = Seq(Tuple1(1)).toDF("c").as("t")
val dfAlias = df.alias("t2")
df.col("t.c")
dfAlias.col("t2.c")
}
test("simple explode") {
val df = Seq(Tuple1("a b c"), Tuple1("d e")).toDF("words")
checkAnswer(
df.explode("words", "word") { word: String => word.split(" ").toSeq }.select('word),
Row("a") :: Row("b") :: Row("c") :: Row("d") ::Row("e") :: Nil
)
}
test("explode") {
val df = Seq((1, "a b c"), (2, "a b"), (3, "a")).toDF("number", "letters")
val df2 =
df.explode('letters) {
case Row(letters: String) => letters.split(" ").map(Tuple1(_)).toSeq
}
checkAnswer(
df2
.select('_1 as 'letter, 'number)
.groupBy('letter)
.agg(countDistinct('number)),
Row("a", 3) :: Row("b", 2) :: Row("c", 1) :: Nil
)
}
test("Star Expansion - CreateStruct and CreateArray") {
val structDf = testData2.select("a", "b").as("record")
// CreateStruct and CreateArray in aggregateExpressions
assert(structDf.groupBy($"a").agg(min(struct($"record.*"))).
sort("a").first() == Row(1, Row(1, 1)))
assert(structDf.groupBy($"a").agg(min(array($"record.*"))).
sort("a").first() == Row(1, Seq(1, 1)))
// CreateStruct and CreateArray in project list (unresolved alias)
assert(structDf.select(struct($"record.*")).first() == Row(Row(1, 1)))
assert(structDf.select(array($"record.*")).first().getAs[Seq[Int]](0) === Seq(1, 1))
// CreateStruct and CreateArray in project list (alias)
assert(structDf.select(struct($"record.*").as("a")).first() == Row(Row(1, 1)))
assert(structDf.select(array($"record.*").as("a")).first().getAs[Seq[Int]](0) === Seq(1, 1))
}
test("Star Expansion - hash") {
val structDf = testData2.select("a", "b").as("record")
checkAnswer(
structDf.groupBy($"a", $"b").agg(min(hash($"a", $"*"))),
structDf.groupBy($"a", $"b").agg(min(hash($"a", $"a", $"b"))))
checkAnswer(
structDf.groupBy($"a", $"b").agg(hash($"a", $"*")),
structDf.groupBy($"a", $"b").agg(hash($"a", $"a", $"b")))
checkAnswer(
structDf.select(hash($"*")),
structDf.select(hash($"record.*")))
checkAnswer(
structDf.select(hash($"a", $"*")),
structDf.select(hash($"a", $"record.*")))
}
test("Star Expansion - xxhash64") {
val structDf = testData2.select("a", "b").as("record")
checkAnswer(
structDf.groupBy($"a", $"b").agg(min(xxhash64($"a", $"*"))),
structDf.groupBy($"a", $"b").agg(min(xxhash64($"a", $"a", $"b"))))
checkAnswer(
structDf.groupBy($"a", $"b").agg(xxhash64($"a", $"*")),
structDf.groupBy($"a", $"b").agg(xxhash64($"a", $"a", $"b")))
checkAnswer(
structDf.select(xxhash64($"*")),
structDf.select(xxhash64($"record.*")))
checkAnswer(
structDf.select(xxhash64($"a", $"*")),
structDf.select(xxhash64($"a", $"record.*")))
}
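  // With ANSI mode off, decimal sum overflow is expected to yield the (null-containing) answer;
  // with ANSI mode on, it should surface as an ArithmeticException wrapped in a SparkException.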
private def assertDecimalSumOverflow(
df: DataFrame, ansiEnabled: Boolean, expectedAnswer: Row): Unit = {
if (!ansiEnabled) {
checkAnswer(df, expectedAnswer)
} else {
val e = intercept[SparkException] {
df.collect()
}
assert(e.getCause.isInstanceOf[ArithmeticException])
assert(e.getCause.getMessage.contains("cannot be represented as Decimal") ||
e.getCause.getMessage.contains("Overflow in sum of decimals"))
}
}
test("SPARK-28224: Aggregate sum big decimal overflow") {
val largeDecimals = spark.sparkContext.parallelize(
DecimalData(BigDecimal("1"* 20 + ".123"), BigDecimal("1"* 20 + ".123")) ::
DecimalData(BigDecimal("9"* 20 + ".123"), BigDecimal("9"* 20 + ".123")) :: Nil).toDF()
Seq(true, false).foreach { ansiEnabled =>
withSQLConf((SQLConf.ANSI_ENABLED.key, ansiEnabled.toString)) {
val structDf = largeDecimals.select("a").agg(sum("a"))
assertDecimalSumOverflow(structDf, ansiEnabled, Row(null))
}
}
}
test("SPARK-28067: sum of null decimal values") {
Seq("true", "false").foreach { wholeStageEnabled =>
withSQLConf((SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, wholeStageEnabled)) {
Seq("true", "false").foreach { ansiEnabled =>
withSQLConf((SQLConf.ANSI_ENABLED.key, ansiEnabled)) {
val df = spark.range(1, 4, 1).select(expr(s"cast(null as decimal(38,18)) as d"))
checkAnswer(df.agg(sum($"d")), Row(null))
}
}
}
}
}
test("SPARK-28067: Aggregate sum should not return wrong results for decimal overflow") {
Seq("true", "false").foreach { wholeStageEnabled =>
withSQLConf((SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, wholeStageEnabled)) {
Seq(true, false).foreach { ansiEnabled =>
withSQLConf((SQLConf.ANSI_ENABLED.key, ansiEnabled.toString)) {
val df0 = Seq(
(BigDecimal("10000000000000000000"), 1),
(BigDecimal("10000000000000000000"), 1),
(BigDecimal("10000000000000000000"), 2)).toDF("decNum", "intNum")
val df1 = Seq(
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2)).toDF("decNum", "intNum")
val df = df0.union(df1)
val df2 = df.withColumnRenamed("decNum", "decNum2").
join(df, "intNum").agg(sum("decNum"))
val expectedAnswer = Row(null)
assertDecimalSumOverflow(df2, ansiEnabled, expectedAnswer)
val decStr = "1" + "0" * 19
val d1 = spark.range(0, 12, 1, 1)
val d2 = d1.select(expr(s"cast('$decStr' as decimal (38, 18)) as d")).agg(sum($"d"))
assertDecimalSumOverflow(d2, ansiEnabled, expectedAnswer)
val d3 = spark.range(0, 1, 1, 1).union(spark.range(0, 11, 1, 1))
val d4 = d3.select(expr(s"cast('$decStr' as decimal (38, 18)) as d")).agg(sum($"d"))
assertDecimalSumOverflow(d4, ansiEnabled, expectedAnswer)
val d5 = d3.select(expr(s"cast('$decStr' as decimal (38, 18)) as d"),
lit(1).as("key")).groupBy("key").agg(sum($"d").alias("sumd")).select($"sumd")
assertDecimalSumOverflow(d5, ansiEnabled, expectedAnswer)
val nullsDf = spark.range(1, 4, 1).select(expr(s"cast(null as decimal(38,18)) as d"))
val largeDecimals = Seq(BigDecimal("1"* 20 + ".123"), BigDecimal("9"* 20 + ".123")).
toDF("d")
assertDecimalSumOverflow(
nullsDf.union(largeDecimals).agg(sum($"d")), ansiEnabled, expectedAnswer)
val df3 = Seq(
(BigDecimal("10000000000000000000"), 1),
(BigDecimal("50000000000000000000"), 1),
(BigDecimal("10000000000000000000"), 2)).toDF("decNum", "intNum")
val df4 = Seq(
(BigDecimal("10000000000000000000"), 1),
(BigDecimal("10000000000000000000"), 1),
(BigDecimal("10000000000000000000"), 2)).toDF("decNum", "intNum")
val df5 = Seq(
(BigDecimal("10000000000000000000"), 1),
(BigDecimal("10000000000000000000"), 1),
(BigDecimal("20000000000000000000"), 2)).toDF("decNum", "intNum")
val df6 = df3.union(df4).union(df5)
val df7 = df6.groupBy("intNum").agg(sum("decNum"), countDistinct("decNum")).
filter("intNum == 1")
assertDecimalSumOverflow(df7, ansiEnabled, Row(1, null, 2))
}
}
}
}
}
test("Star Expansion - ds.explode should fail with a meaningful message if it takes a star") {
val df = Seq(("1", "1,2"), ("2", "4"), ("3", "7,8,9")).toDF("prefix", "csv")
val e = intercept[AnalysisException] {
df.explode($"*") { case Row(prefix: String, csv: String) =>
csv.split(",").map(v => Tuple1(prefix + ":" + v)).toSeq
}.queryExecution.assertAnalyzed()
}
assert(e.getMessage.contains("Invalid usage of '*' in explode/json_tuple/UDTF"))
checkAnswer(
df.explode('prefix, 'csv) { case Row(prefix: String, csv: String) =>
csv.split(",").map(v => Tuple1(prefix + ":" + v)).toSeq
},
Row("1", "1,2", "1:1") ::
Row("1", "1,2", "1:2") ::
Row("2", "4", "2:4") ::
Row("3", "7,8,9", "3:7") ::
Row("3", "7,8,9", "3:8") ::
Row("3", "7,8,9", "3:9") :: Nil)
}
test("Star Expansion - explode should fail with a meaningful message if it takes a star") {
val df = Seq(("1,2"), ("4"), ("7,8,9")).toDF("csv")
val e = intercept[AnalysisException] {
df.select(explode($"*"))
}
assert(e.getMessage.contains("Invalid usage of '*' in expression 'explode'"))
}
test("explode on output of array-valued function") {
val df = Seq(("1,2"), ("4"), ("7,8,9")).toDF("csv")
checkAnswer(
df.select(explode(split($"csv", pattern = ","))),
Row("1") :: Row("2") :: Row("4") :: Row("7") :: Row("8") :: Row("9") :: Nil)
}
test("Star Expansion - explode alias and star") {
val df = Seq((Array("a"), 1)).toDF("a", "b")
checkAnswer(
df.select(explode($"a").as("a"), $"*"),
Row("a", Seq("a"), 1) :: Nil)
}
test("sort after generate with join=true") {
val df = Seq((Array("a"), 1)).toDF("a", "b")
checkAnswer(
df.select($"*", explode($"a").as("c")).sortWithinPartitions("b", "c"),
Row(Seq("a"), 1, "a") :: Nil)
}
test("selectExpr") {
checkAnswer(
testData.selectExpr("abs(key)", "value"),
testData.collect().map(row => Row(math.abs(row.getInt(0)), row.getString(1))).toSeq)
}
test("selectExpr with alias") {
checkAnswer(
testData.selectExpr("key as k").select("k"),
testData.select("key").collect().toSeq)
}
test("selectExpr with udtf") {
val df = Seq((Map("1" -> 1), 1)).toDF("a", "b")
checkAnswer(
df.selectExpr("explode(a)"),
Row("1", 1) :: Nil)
}
test("filterExpr") {
val res = testData.collect().filter(_.getInt(0) > 90).toSeq
checkAnswer(testData.filter("key > 90"), res)
checkAnswer(testData.filter("key > 9.0e1"), res)
checkAnswer(testData.filter("key > .9e+2"), res)
checkAnswer(testData.filter("key > 0.9e+2"), res)
checkAnswer(testData.filter("key > 900e-1"), res)
checkAnswer(testData.filter("key > 900.0E-1"), res)
checkAnswer(testData.filter("key > 9.e+1"), res)
}
test("filterExpr using where") {
checkAnswer(
testData.where("key > 50"),
testData.collect().filter(_.getInt(0) > 50).toSeq)
}
test("repartition") {
intercept[IllegalArgumentException] {
testData.select("key").repartition(0)
}
checkAnswer(
testData.select("key").repartition(10).select("key"),
testData.select("key").collect().toSeq)
}
test("repartition with SortOrder") {
// passing SortOrder expressions to .repartition() should result in an informative error
def checkSortOrderErrorMsg[T](data: => Dataset[T]): Unit = {
val ex = intercept[IllegalArgumentException](data)
assert(ex.getMessage.contains("repartitionByRange"))
}
checkSortOrderErrorMsg {
Seq(0).toDF("a").repartition(2, $"a".asc)
}
checkSortOrderErrorMsg {
Seq((0, 0)).toDF("a", "b").repartition(2, $"a".asc, $"b")
}
}
test("repartitionByRange") {
val data1d = Random.shuffle(0.to(9))
val data2d = data1d.map(i => (i, data1d.size - i))
checkAnswer(
data1d.toDF("val").repartitionByRange(data1d.size, $"val".asc)
.select(spark_partition_id().as("id"), $"val"),
data1d.map(i => Row(i, i)))
checkAnswer(
data1d.toDF("val").repartitionByRange(data1d.size, $"val".desc)
.select(spark_partition_id().as("id"), $"val"),
data1d.map(i => Row(i, data1d.size - 1 - i)))
checkAnswer(
data1d.toDF("val").repartitionByRange(data1d.size, lit(42))
.select(spark_partition_id().as("id"), $"val"),
data1d.map(i => Row(0, i)))
checkAnswer(
data1d.toDF("val").repartitionByRange(data1d.size, lit(null), $"val".asc, rand())
.select(spark_partition_id().as("id"), $"val"),
data1d.map(i => Row(i, i)))
// .repartitionByRange() assumes .asc by default if no explicit sort order is specified
checkAnswer(
data2d.toDF("a", "b").repartitionByRange(data2d.size, $"a".desc, $"b")
.select(spark_partition_id().as("id"), $"a", $"b"),
data2d.toDF("a", "b").repartitionByRange(data2d.size, $"a".desc, $"b".asc)
.select(spark_partition_id().as("id"), $"a", $"b"))
// at least one partition-by expression must be specified
intercept[IllegalArgumentException] {
data1d.toDF("val").repartitionByRange(data1d.size)
}
intercept[IllegalArgumentException] {
data1d.toDF("val").repartitionByRange(data1d.size, Seq.empty: _*)
}
}
test("coalesce") {
intercept[IllegalArgumentException] {
testData.select("key").coalesce(0)
}
assert(testData.select("key").coalesce(1).rdd.partitions.size === 1)
checkAnswer(
testData.select("key").coalesce(1).select("key"),
testData.select("key").collect().toSeq)
assert(spark.emptyDataFrame.coalesce(1).rdd.partitions.size === 0)
}
test("convert $\\"attribute name\\" into unresolved attribute") {
checkAnswer(
testData.where($"key" === lit(1)).select($"value"),
Row("1"))
}
test("convert Scala Symbol 'attrname into unresolved attribute") {
checkAnswer(
testData.where($"key" === lit(1)).select("value"),
Row("1"))
}
test("select *") {
checkAnswer(
testData.select($"*"),
testData.collect().toSeq)
}
test("simple select") {
checkAnswer(
testData.where($"key" === lit(1)).select("value"),
Row("1"))
}
test("select with functions") {
checkAnswer(
testData.select(sum("value"), avg("value"), count(lit(1))),
Row(5050.0, 50.5, 100))
checkAnswer(
testData2.select($"a" + $"b", $"a" < $"b"),
Seq(
Row(2, false),
Row(3, true),
Row(3, false),
Row(4, false),
Row(4, false),
Row(5, false)))
checkAnswer(
testData2.select(sumDistinct($"a")),
Row(6))
}
test("sorting with null ordering") {
val data = Seq[java.lang.Integer](2, 1, null).toDF("key")
checkAnswer(data.orderBy($"key".asc), Row(null) :: Row(1) :: Row(2) :: Nil)
checkAnswer(data.orderBy(asc("key")), Row(null) :: Row(1) :: Row(2) :: Nil)
checkAnswer(data.orderBy($"key".asc_nulls_first), Row(null) :: Row(1) :: Row(2) :: Nil)
checkAnswer(data.orderBy(asc_nulls_first("key")), Row(null) :: Row(1) :: Row(2) :: Nil)
checkAnswer(data.orderBy($"key".asc_nulls_last), Row(1) :: Row(2) :: Row(null) :: Nil)
checkAnswer(data.orderBy(asc_nulls_last("key")), Row(1) :: Row(2) :: Row(null) :: Nil)
checkAnswer(data.orderBy($"key".desc), Row(2) :: Row(1) :: Row(null) :: Nil)
checkAnswer(data.orderBy(desc("key")), Row(2) :: Row(1) :: Row(null) :: Nil)
checkAnswer(data.orderBy($"key".desc_nulls_first), Row(null) :: Row(2) :: Row(1) :: Nil)
checkAnswer(data.orderBy(desc_nulls_first("key")), Row(null) :: Row(2) :: Row(1) :: Nil)
checkAnswer(data.orderBy($"key".desc_nulls_last), Row(2) :: Row(1) :: Row(null) :: Nil)
checkAnswer(data.orderBy(desc_nulls_last("key")), Row(2) :: Row(1) :: Row(null) :: Nil)
}
test("global sorting") {
checkAnswer(
testData2.orderBy($"a".asc, $"b".asc),
Seq(Row(1, 1), Row(1, 2), Row(2, 1), Row(2, 2), Row(3, 1), Row(3, 2)))
checkAnswer(
testData2.orderBy(asc("a"), desc("b")),
Seq(Row(1, 2), Row(1, 1), Row(2, 2), Row(2, 1), Row(3, 2), Row(3, 1)))
checkAnswer(
testData2.orderBy($"a".asc, $"b".desc),
Seq(Row(1, 2), Row(1, 1), Row(2, 2), Row(2, 1), Row(3, 2), Row(3, 1)))
checkAnswer(
testData2.orderBy($"a".desc, $"b".desc),
Seq(Row(3, 2), Row(3, 1), Row(2, 2), Row(2, 1), Row(1, 2), Row(1, 1)))
checkAnswer(
testData2.orderBy($"a".desc, $"b".asc),
Seq(Row(3, 1), Row(3, 2), Row(2, 1), Row(2, 2), Row(1, 1), Row(1, 2)))
checkAnswer(
arrayData.toDF().orderBy($"data".getItem(0).asc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(0)).toSeq)
checkAnswer(
arrayData.toDF().orderBy($"data".getItem(0).desc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(0)).reverse.toSeq)
checkAnswer(
arrayData.toDF().orderBy($"data".getItem(1).asc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(1)).toSeq)
checkAnswer(
arrayData.toDF().orderBy($"data".getItem(1).desc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(1)).reverse.toSeq)
}
test("limit") {
checkAnswer(
testData.limit(10),
testData.take(10).toSeq)
checkAnswer(
arrayData.toDF().limit(1),
arrayData.take(1).map(r => Row.fromSeq(r.productIterator.toSeq)))
checkAnswer(
mapData.toDF().limit(1),
mapData.take(1).map(r => Row.fromSeq(r.productIterator.toSeq)))
// SPARK-12340: overstep the bounds of Int in SparkPlan.executeTake
checkAnswer(
spark.range(2).toDF().limit(2147483638),
Row(0) :: Row(1) :: Nil
)
}
test("udf") {
val foo = udf((a: Int, b: String) => a.toString + b)
checkAnswer(
// SELECT *, foo(key, value) FROM testData
testData.select($"*", foo($"key", $"value")).limit(3),
Row(1, "1", "11") :: Row(2, "2", "22") :: Row(3, "3", "33") :: Nil
)
}
test("callUDF without Hive Support") {
val df = Seq(("id1", 1), ("id2", 4), ("id3", 5)).toDF("id", "value")
df.sparkSession.udf.register("simpleUDF", (v: Int) => v * v)
checkAnswer(
df.select($"id", callUDF("simpleUDF", $"value")),
Row("id1", 1) :: Row("id2", 16) :: Row("id3", 25) :: Nil)
}
test("withColumn") {
val df = testData.toDF().withColumn("newCol", col("key") + 1)
checkAnswer(
df,
testData.collect().map { case Row(key: Int, value: String) =>
Row(key, value, key + 1)
}.toSeq)
assert(df.schema.map(_.name) === Seq("key", "value", "newCol"))
}
test("withColumns") {
val df = testData.toDF().withColumns(Seq("newCol1", "newCol2"),
Seq(col("key") + 1, col("key") + 2))
checkAnswer(
df,
testData.collect().map { case Row(key: Int, value: String) =>
Row(key, value, key + 1, key + 2)
}.toSeq)
assert(df.schema.map(_.name) === Seq("key", "value", "newCol1", "newCol2"))
val err = intercept[IllegalArgumentException] {
testData.toDF().withColumns(Seq("newCol1"),
Seq(col("key") + 1, col("key") + 2))
}
assert(
err.getMessage.contains("The size of column names: 1 isn't equal to the size of columns: 2"))
val err2 = intercept[AnalysisException] {
testData.toDF().withColumns(Seq("newCol1", "newCOL1"),
Seq(col("key") + 1, col("key") + 2))
}
assert(err2.getMessage.contains("Found duplicate column(s)"))
}
test("withColumns: case sensitive") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
val df = testData.toDF().withColumns(Seq("newCol1", "newCOL1"),
Seq(col("key") + 1, col("key") + 2))
checkAnswer(
df,
testData.collect().map { case Row(key: Int, value: String) =>
Row(key, value, key + 1, key + 2)
}.toSeq)
assert(df.schema.map(_.name) === Seq("key", "value", "newCol1", "newCOL1"))
val err = intercept[AnalysisException] {
testData.toDF().withColumns(Seq("newCol1", "newCol1"),
Seq(col("key") + 1, col("key") + 2))
}
assert(err.getMessage.contains("Found duplicate column(s)"))
}
}
test("withColumns: given metadata") {
def buildMetadata(num: Int): Seq[Metadata] = {
(0 until num).map { n =>
val builder = new MetadataBuilder
builder.putLong("key", n.toLong)
builder.build()
}
}
val df = testData.toDF().withColumns(
Seq("newCol1", "newCol2"),
Seq(col("key") + 1, col("key") + 2),
buildMetadata(2))
df.select("newCol1", "newCol2").schema.zipWithIndex.foreach { case (col, idx) =>
assert(col.metadata.getLong("key").toInt === idx)
}
val err = intercept[IllegalArgumentException] {
testData.toDF().withColumns(
Seq("newCol1", "newCol2"),
Seq(col("key") + 1, col("key") + 2),
buildMetadata(1))
}
assert(err.getMessage.contains(
"The size of column names: 2 isn't equal to the size of metadata elements: 1"))
}
test("replace column using withColumn") {
val df2 = sparkContext.parallelize(Array(1, 2, 3)).toDF("x")
val df3 = df2.withColumn("x", df2("x") + 1)
checkAnswer(
df3.select("x"),
Row(2) :: Row(3) :: Row(4) :: Nil)
}
test("replace column using withColumns") {
val df2 = sparkContext.parallelize(Seq((1, 2), (2, 3), (3, 4))).toDF("x", "y")
val df3 = df2.withColumns(Seq("x", "newCol1", "newCol2"),
Seq(df2("x") + 1, df2("y"), df2("y") + 1))
checkAnswer(
df3.select("x", "newCol1", "newCol2"),
Row(2, 2, 3) :: Row(3, 3, 4) :: Row(4, 4, 5) :: Nil)
}
test("drop column using drop") {
val df = testData.drop("key")
checkAnswer(
df,
testData.collect().map(x => Row(x.getString(1))).toSeq)
assert(df.schema.map(_.name) === Seq("value"))
}
test("drop columns using drop") {
val src = Seq((0, 2, 3)).toDF("a", "b", "c")
val df = src.drop("a", "b")
checkAnswer(df, Row(3))
assert(df.schema.map(_.name) === Seq("c"))
}
test("drop unknown column (no-op)") {
val df = testData.drop("random")
checkAnswer(
df,
testData.collect().toSeq)
assert(df.schema.map(_.name) === Seq("key", "value"))
}
test("drop column using drop with column reference") {
val col = testData("key")
val df = testData.drop(col)
checkAnswer(
df,
testData.collect().map(x => Row(x.getString(1))).toSeq)
assert(df.schema.map(_.name) === Seq("value"))
}
test("SPARK-28189 drop column using drop with column reference with case-insensitive names") {
    // With the SQL config caseSensitive OFF, case-insensitive column names should work
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val col1 = testData("KEY")
val df1 = testData.drop(col1)
checkAnswer(df1, testData.selectExpr("value"))
assert(df1.schema.map(_.name) === Seq("value"))
val col2 = testData("Key")
val df2 = testData.drop(col2)
checkAnswer(df2, testData.selectExpr("value"))
assert(df2.schema.map(_.name) === Seq("value"))
}
}
test("drop unknown column (no-op) with column reference") {
val col = Column("random")
val df = testData.drop(col)
checkAnswer(
df,
testData.collect().toSeq)
assert(df.schema.map(_.name) === Seq("key", "value"))
}
test("drop unknown column with same name with column reference") {
val col = Column("key")
val df = testData.drop(col)
checkAnswer(
df,
testData.collect().map(x => Row(x.getString(1))).toSeq)
assert(df.schema.map(_.name) === Seq("value"))
}
test("drop column after join with duplicate columns using column reference") {
val newSalary = salary.withColumnRenamed("personId", "id")
val col = newSalary("id")
// this join will result in duplicate "id" columns
val joinedDf = person.join(newSalary,
person("id") === newSalary("id"), "inner")
// remove only the "id" column that was associated with newSalary
val df = joinedDf.drop(col)
checkAnswer(
df,
joinedDf.collect().map {
case Row(id: Int, name: String, age: Int, idToDrop: Int, salary: Double) =>
Row(id, name, age, salary)
}.toSeq)
assert(df.schema.map(_.name) === Seq("id", "name", "age", "salary"))
assert(df("id") == person("id"))
}
test("drop top level columns that contains dot") {
val df1 = Seq((1, 2)).toDF("a.b", "a.c")
checkAnswer(df1.drop("a.b"), Row(2))
// Creates data set: {"a.b": 1, "a": {"b": 3}}
val df2 = Seq((1)).toDF("a.b").withColumn("a", struct(lit(3) as "b"))
    // Unlike select(), drop() treats the column name "a.b" literally, without interpreting "." as a field accessor
checkAnswer(df2.drop("a.b").select("a.b"), Row(3))
// "`" is treated as a normal char here with no interpreting, "`a`b" is a valid column name.
assert(df2.drop("`a.b`").columns.size == 2)
}
test("drop(name: String) search and drop all top level columns that matchs the name") {
val df1 = Seq((1, 2)).toDF("a", "b")
val df2 = Seq((3, 4)).toDF("a", "b")
checkAnswer(df1.crossJoin(df2), Row(1, 2, 3, 4))
// Finds and drops all columns that match the name (case insensitive).
checkAnswer(df1.crossJoin(df2).drop("A"), Row(2, 4))
}
test("withColumnRenamed") {
val df = testData.toDF().withColumn("newCol", col("key") + 1)
.withColumnRenamed("value", "valueRenamed")
checkAnswer(
df,
testData.collect().map { case Row(key: Int, value: String) =>
Row(key, value, key + 1)
}.toSeq)
assert(df.schema.map(_.name) === Seq("key", "valueRenamed", "newCol"))
}
private lazy val person2: DataFrame = Seq(
("Bob", 16, 176),
("Alice", 32, 164),
("David", 60, 192),
("Amy", 24, 180)).toDF("name", "age", "height")
test("describe") {
val describeResult = Seq(
Row("count", "4", "4", "4"),
Row("mean", null, "33.0", "178.0"),
Row("stddev", null, "19.148542155126762", "11.547005383792516"),
Row("min", "Alice", "16", "164"),
Row("max", "David", "60", "192"))
val emptyDescribeResult = Seq(
Row("count", "0", "0", "0"),
Row("mean", null, null, null),
Row("stddev", null, null, null),
Row("min", null, null, null),
Row("max", null, null, null))
def getSchemaAsSeq(df: DataFrame): Seq[String] = df.schema.map(_.name)
val describeAllCols = person2.describe()
assert(getSchemaAsSeq(describeAllCols) === Seq("summary", "name", "age", "height"))
checkAnswer(describeAllCols, describeResult)
    // All aggregate values should have been cast to strings
describeAllCols.collect().foreach { row =>
row.toSeq.foreach { value =>
if (value != null) {
assert(value.isInstanceOf[String], "expected string but found " + value.getClass)
}
}
}
val describeOneCol = person2.describe("age")
assert(getSchemaAsSeq(describeOneCol) === Seq("summary", "age"))
checkAnswer(describeOneCol, describeResult.map { case Row(s, _, d, _) => Row(s, d)} )
val describeNoCol = person2.select().describe()
assert(getSchemaAsSeq(describeNoCol) === Seq("summary"))
checkAnswer(describeNoCol, describeResult.map { case Row(s, _, _, _) => Row(s)} )
val emptyDescription = person2.limit(0).describe()
assert(getSchemaAsSeq(emptyDescription) === Seq("summary", "name", "age", "height"))
checkAnswer(emptyDescription, emptyDescribeResult)
}
test("summary") {
val summaryResult = Seq(
Row("count", "4", "4", "4"),
Row("mean", null, "33.0", "178.0"),
Row("stddev", null, "19.148542155126762", "11.547005383792516"),
Row("min", "Alice", "16", "164"),
Row("25%", null, "16", "164"),
Row("50%", null, "24", "176"),
Row("75%", null, "32", "180"),
Row("max", "David", "60", "192"))
val emptySummaryResult = Seq(
Row("count", "0", "0", "0"),
Row("mean", null, null, null),
Row("stddev", null, null, null),
Row("min", null, null, null),
Row("25%", null, null, null),
Row("50%", null, null, null),
Row("75%", null, null, null),
Row("max", null, null, null))
def getSchemaAsSeq(df: DataFrame): Seq[String] = df.schema.map(_.name)
val summaryAllCols = person2.summary()
assert(getSchemaAsSeq(summaryAllCols) === Seq("summary", "name", "age", "height"))
checkAnswer(summaryAllCols, summaryResult)
    // All aggregate values should have been cast to strings
summaryAllCols.collect().foreach { row =>
row.toSeq.foreach { value =>
if (value != null) {
assert(value.isInstanceOf[String], "expected string but found " + value.getClass)
}
}
}
val summaryOneCol = person2.select("age").summary()
assert(getSchemaAsSeq(summaryOneCol) === Seq("summary", "age"))
checkAnswer(summaryOneCol, summaryResult.map { case Row(s, _, d, _) => Row(s, d)} )
val summaryNoCol = person2.select().summary()
assert(getSchemaAsSeq(summaryNoCol) === Seq("summary"))
checkAnswer(summaryNoCol, summaryResult.map { case Row(s, _, _, _) => Row(s)} )
val emptyDescription = person2.limit(0).summary()
assert(getSchemaAsSeq(emptyDescription) === Seq("summary", "name", "age", "height"))
checkAnswer(emptyDescription, emptySummaryResult)
}
test("summary advanced") {
val stats = Array("count", "50.01%", "max", "mean", "min", "25%")
val orderMatters = person2.summary(stats: _*)
assert(orderMatters.collect().map(_.getString(0)) === stats)
val onlyPercentiles = person2.summary("0.1%", "99.9%")
assert(onlyPercentiles.count() === 2)
val fooE = intercept[IllegalArgumentException] {
person2.summary("foo")
}
assert(fooE.getMessage === "foo is not a recognised statistic")
val parseE = intercept[IllegalArgumentException] {
person2.summary("foo%")
}
assert(parseE.getMessage === "Unable to parse foo% as a percentile")
}
test("apply on query results (SPARK-5462)") {
val df = testData.sparkSession.sql("select key from testData")
checkAnswer(df.select(df("key")), testData.select("key").collect().toSeq)
}
test("inputFiles") {
Seq("csv", "").foreach { useV1List =>
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1List) {
withTempDir { dir =>
val df = Seq((1, 22)).toDF("a", "b")
val parquetDir = new File(dir, "parquet").getCanonicalPath
df.write.parquet(parquetDir)
val parquetDF = spark.read.parquet(parquetDir)
assert(parquetDF.inputFiles.nonEmpty)
val csvDir = new File(dir, "csv").getCanonicalPath
df.write.json(csvDir)
val csvDF = spark.read.json(csvDir)
assert(csvDF.inputFiles.nonEmpty)
val unioned = csvDF.union(parquetDF).inputFiles.sorted
val allFiles = (csvDF.inputFiles ++ parquetDF.inputFiles).distinct.sorted
assert(unioned === allFiles)
}
}
}
}
ignore("show") {
    // This test case is intentionally ignored, but kept to make sure it compiles correctly
testData.select($"*").show()
testData.select($"*").show(1000)
}
test("getRows: truncate = [0, 20]") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = Seq(
Seq("value"),
Seq("1"),
Seq("111111111111111111111"))
assert(df.getRows(10, 0) === expectedAnswerForFalse)
val expectedAnswerForTrue = Seq(
Seq("value"),
Seq("1"),
Seq("11111111111111111..."))
assert(df.getRows(10, 20) === expectedAnswerForTrue)
}
test("getRows: truncate = [3, 17]") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = Seq(
Seq("value"),
Seq("1"),
Seq("111"))
assert(df.getRows(10, 3) === expectedAnswerForFalse)
val expectedAnswerForTrue = Seq(
Seq("value"),
Seq("1"),
Seq("11111111111111..."))
assert(df.getRows(10, 17) === expectedAnswerForTrue)
}
test("getRows: numRows = 0") {
val expectedAnswer = Seq(Seq("key", "value"), Seq("1", "1"))
assert(testData.select($"*").getRows(0, 20) === expectedAnswer)
}
test("getRows: array") {
val df = Seq(
(Array(1, 2, 3), Array(1, 2, 3)),
(Array(2, 3, 4), Array(2, 3, 4))
).toDF()
val expectedAnswer = Seq(
Seq("_1", "_2"),
Seq("[1, 2, 3]", "[1, 2, 3]"),
Seq("[2, 3, 4]", "[2, 3, 4]"))
assert(df.getRows(10, 20) === expectedAnswer)
}
test("getRows: binary") {
val df = Seq(
("12".getBytes(StandardCharsets.UTF_8), "ABC.".getBytes(StandardCharsets.UTF_8)),
("34".getBytes(StandardCharsets.UTF_8), "12346".getBytes(StandardCharsets.UTF_8))
).toDF()
val expectedAnswer = Seq(
Seq("_1", "_2"),
Seq("[31 32]", "[41 42 43 2E]"),
Seq("[33 34]", "[31 32 33 34 36]"))
assert(df.getRows(10, 20) === expectedAnswer)
}
test("showString: truncate = [0, 20]") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = """+---------------------+
||value |
|+---------------------+
||1 |
||111111111111111111111|
|+---------------------+
|""".stripMargin
assert(df.showString(10, truncate = 0) === expectedAnswerForFalse)
val expectedAnswerForTrue = """+--------------------+
|| value|
|+--------------------+
|| 1|
||11111111111111111...|
|+--------------------+
|""".stripMargin
assert(df.showString(10, truncate = 20) === expectedAnswerForTrue)
}
test("showString: truncate = [0, 20], vertical = true") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = "-RECORD 0----------------------\\n" +
" value | 1 \\n" +
"-RECORD 1----------------------\\n" +
" value | 111111111111111111111 \\n"
assert(df.showString(10, truncate = 0, vertical = true) === expectedAnswerForFalse)
val expectedAnswerForTrue = "-RECORD 0---------------------\\n" +
" value | 1 \\n" +
"-RECORD 1---------------------\\n" +
" value | 11111111111111111... \\n"
assert(df.showString(10, truncate = 20, vertical = true) === expectedAnswerForTrue)
}
test("showString: truncate = [3, 17]") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = """+-----+
||value|
|+-----+
|| 1|
|| 111|
|+-----+
|""".stripMargin
assert(df.showString(10, truncate = 3) === expectedAnswerForFalse)
val expectedAnswerForTrue = """+-----------------+
|| value|
|+-----------------+
|| 1|
||11111111111111...|
|+-----------------+
|""".stripMargin
assert(df.showString(10, truncate = 17) === expectedAnswerForTrue)
}
test("showString: truncate = [3, 17], vertical = true") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = "-RECORD 0----\\n" +
" value | 1 \\n" +
"-RECORD 1----\\n" +
" value | 111 \\n"
assert(df.showString(10, truncate = 3, vertical = true) === expectedAnswerForFalse)
val expectedAnswerForTrue = "-RECORD 0------------------\\n" +
" value | 1 \\n" +
"-RECORD 1------------------\\n" +
" value | 11111111111111... \\n"
assert(df.showString(10, truncate = 17, vertical = true) === expectedAnswerForTrue)
}
test("showString(negative)") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|+---+-----+
|only showing top 0 rows
|""".stripMargin
assert(testData.select($"*").showString(-1) === expectedAnswer)
}
test("showString(negative), vertical = true") {
val expectedAnswer = "(0 rows)\\n"
assert(testData.select($"*").showString(-1, vertical = true) === expectedAnswer)
}
test("showString(0)") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|+---+-----+
|only showing top 0 rows
|""".stripMargin
assert(testData.select($"*").showString(0) === expectedAnswer)
}
test("showString(Int.MaxValue)") {
val df = Seq((1, 2), (3, 4)).toDF("a", "b")
val expectedAnswer = """+---+---+
|| a| b|
|+---+---+
|| 1| 2|
|| 3| 4|
|+---+---+
|""".stripMargin
assert(df.showString(Int.MaxValue) === expectedAnswer)
}
test("showString(0), vertical = true") {
val expectedAnswer = "(0 rows)\\n"
assert(testData.select($"*").showString(0, vertical = true) === expectedAnswer)
}
test("showString: array") {
val df = Seq(
(Array(1, 2, 3), Array(1, 2, 3)),
(Array(2, 3, 4), Array(2, 3, 4))
).toDF()
val expectedAnswer = """+---------+---------+
|| _1| _2|
|+---------+---------+
||[1, 2, 3]|[1, 2, 3]|
||[2, 3, 4]|[2, 3, 4]|
|+---------+---------+
|""".stripMargin
assert(df.showString(10) === expectedAnswer)
}
test("showString: array, vertical = true") {
val df = Seq(
(Array(1, 2, 3), Array(1, 2, 3)),
(Array(2, 3, 4), Array(2, 3, 4))
).toDF()
val expectedAnswer = "-RECORD 0--------\\n" +
" _1 | [1, 2, 3] \\n" +
" _2 | [1, 2, 3] \\n" +
"-RECORD 1--------\\n" +
" _1 | [2, 3, 4] \\n" +
" _2 | [2, 3, 4] \\n"
assert(df.showString(10, vertical = true) === expectedAnswer)
}
test("showString: binary") {
val df = Seq(
("12".getBytes(StandardCharsets.UTF_8), "ABC.".getBytes(StandardCharsets.UTF_8)),
("34".getBytes(StandardCharsets.UTF_8), "12346".getBytes(StandardCharsets.UTF_8))
).toDF()
val expectedAnswer = """+-------+----------------+
|| _1| _2|
|+-------+----------------+
||[31 32]| [41 42 43 2E]|
||[33 34]|[31 32 33 34 36]|
|+-------+----------------+
|""".stripMargin
assert(df.showString(10) === expectedAnswer)
}
test("showString: binary, vertical = true") {
val df = Seq(
("12".getBytes(StandardCharsets.UTF_8), "ABC.".getBytes(StandardCharsets.UTF_8)),
("34".getBytes(StandardCharsets.UTF_8), "12346".getBytes(StandardCharsets.UTF_8))
).toDF()
val expectedAnswer = "-RECORD 0---------------\\n" +
" _1 | [31 32] \\n" +
" _2 | [41 42 43 2E] \\n" +
"-RECORD 1---------------\\n" +
" _1 | [33 34] \\n" +
" _2 | [31 32 33 34 36] \\n"
assert(df.showString(10, vertical = true) === expectedAnswer)
}
test("showString: minimum column width") {
val df = Seq(
(1, 1),
(2, 2)
).toDF()
val expectedAnswer = """+---+---+
|| _1| _2|
|+---+---+
|| 1| 1|
|| 2| 2|
|+---+---+
|""".stripMargin
assert(df.showString(10) === expectedAnswer)
}
test("showString: minimum column width, vertical = true") {
val df = Seq(
(1, 1),
(2, 2)
).toDF()
val expectedAnswer = "-RECORD 0--\\n" +
" _1 | 1 \\n" +
" _2 | 1 \\n" +
"-RECORD 1--\\n" +
" _1 | 2 \\n" +
" _2 | 2 \\n"
assert(df.showString(10, vertical = true) === expectedAnswer)
}
test("SPARK-7319 showString") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|""".stripMargin
assert(testData.select($"*").showString(1) === expectedAnswer)
}
test("SPARK-7319 showString, vertical = true") {
val expectedAnswer = "-RECORD 0----\\n" +
" key | 1 \\n" +
" value | 1 \\n" +
"only showing top 1 row\\n"
assert(testData.select($"*").showString(1, vertical = true) === expectedAnswer)
}
test("SPARK-23023 Cast rows to strings in showString") {
val df1 = Seq(Seq(1, 2, 3, 4)).toDF("a")
assert(df1.showString(10) ===
s"""+------------+
|| a|
|+------------+
||[1, 2, 3, 4]|
|+------------+
|""".stripMargin)
val df2 = Seq(Map(1 -> "a", 2 -> "b")).toDF("a")
assert(df2.showString(10) ===
s"""+----------------+
|| a|
|+----------------+
||[1 -> a, 2 -> b]|
|+----------------+
|""".stripMargin)
val df3 = Seq(((1, "a"), 0), ((2, "b"), 0)).toDF("a", "b")
assert(df3.showString(10) ===
s"""+------+---+
|| a| b|
|+------+---+
||[1, a]| 0|
||[2, b]| 0|
|+------+---+
|""".stripMargin)
}
test("SPARK-7327 show with empty dataFrame") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|+---+-----+
|""".stripMargin
assert(testData.select($"*").filter($"key" < 0).showString(1) === expectedAnswer)
}
test("SPARK-7327 show with empty dataFrame, vertical = true") {
assert(testData.select($"*").filter($"key" < 0).showString(1, vertical = true) === "(0 rows)\\n")
}
test("SPARK-18350 show with session local timezone") {
val d = Date.valueOf("2016-12-01")
val ts = Timestamp.valueOf("2016-12-01 00:00:00")
val df = Seq((d, ts)).toDF("d", "ts")
val expectedAnswer = """+----------+-------------------+
||d |ts |
|+----------+-------------------+
||2016-12-01|2016-12-01 00:00:00|
|+----------+-------------------+
|""".stripMargin
assert(df.showString(1, truncate = 0) === expectedAnswer)
withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> "UTC") {
val expectedAnswer = """+----------+-------------------+
||d |ts |
|+----------+-------------------+
||2016-12-01|2016-12-01 08:00:00|
|+----------+-------------------+
|""".stripMargin
assert(df.showString(1, truncate = 0) === expectedAnswer)
}
}
test("SPARK-18350 show with session local timezone, vertical = true") {
val d = Date.valueOf("2016-12-01")
val ts = Timestamp.valueOf("2016-12-01 00:00:00")
val df = Seq((d, ts)).toDF("d", "ts")
val expectedAnswer = "-RECORD 0------------------\\n" +
" d | 2016-12-01 \\n" +
" ts | 2016-12-01 00:00:00 \\n"
assert(df.showString(1, truncate = 0, vertical = true) === expectedAnswer)
withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> "UTC") {
val expectedAnswer = "-RECORD 0------------------\\n" +
" d | 2016-12-01 \\n" +
" ts | 2016-12-01 08:00:00 \\n"
assert(df.showString(1, truncate = 0, vertical = true) === expectedAnswer)
}
}
test("createDataFrame(RDD[Row], StructType) should convert UDTs (SPARK-6672)") {
val rowRDD = sparkContext.parallelize(Seq(Row(new ExamplePoint(1.0, 2.0))))
val schema = StructType(Array(StructField("point", new ExamplePointUDT(), false)))
val df = spark.createDataFrame(rowRDD, schema)
df.rdd.collect()
}
test("SPARK-6899: type should match when using codegen") {
checkAnswer(decimalData.agg(avg("a")), Row(new java.math.BigDecimal(2)))
}
test("SPARK-7133: Implement struct, array, and map field accessor") {
assert(complexData.filter(complexData("a")(0) === 2).count() == 1)
assert(complexData.filter(complexData("m")("1") === 1).count() == 1)
assert(complexData.filter(complexData("s")("key") === 1).count() == 1)
assert(complexData.filter(complexData("m")(complexData("s")("value")) === 1).count() == 1)
assert(complexData.filter(complexData("a")(complexData("s")("key")) === 1).count() == 1)
}
test("SPARK-7551: support backticks for DataFrame attribute resolution") {
withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
val df = spark.read.json(Seq("""{"a.b": {"c": {"d..e": {"f": 1}}}}""").toDS())
checkAnswer(
df.select(df("`a.b`.c.`d..e`.`f`")),
Row(1)
)
val df2 = spark.read.json(Seq("""{"a b": {"c": {"d e": {"f": 1}}}}""").toDS())
checkAnswer(
df2.select(df2("`a b`.c.d e.f")),
Row(1)
)
def checkError(testFun: => Unit): Unit = {
val e = intercept[org.apache.spark.sql.AnalysisException] {
testFun
}
assert(e.getMessage.contains("syntax error in attribute name:"))
}
checkError(df("`abc.`c`"))
checkError(df("`abc`..d"))
checkError(df("`a`.b."))
checkError(df("`a.b`.c.`d"))
}
}
test("SPARK-7324 dropDuplicates") {
val testData = sparkContext.parallelize(
(2, 1, 2) :: (1, 1, 1) ::
(1, 2, 1) :: (2, 1, 2) ::
(2, 2, 2) :: (2, 2, 1) ::
(2, 1, 1) :: (1, 1, 2) ::
(1, 2, 2) :: (1, 2, 1) :: Nil).toDF("key", "value1", "value2")
checkAnswer(
testData.dropDuplicates(),
Seq(Row(2, 1, 2), Row(1, 1, 1), Row(1, 2, 1),
Row(2, 2, 2), Row(2, 1, 1), Row(2, 2, 1),
Row(1, 1, 2), Row(1, 2, 2)))
checkAnswer(
testData.dropDuplicates(Seq("key", "value1")),
Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))
checkAnswer(
testData.dropDuplicates(Seq("value1", "value2")),
Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))
checkAnswer(
testData.dropDuplicates(Seq("key")),
Seq(Row(2, 1, 2), Row(1, 1, 1)))
checkAnswer(
testData.dropDuplicates(Seq("value1")),
Seq(Row(2, 1, 2), Row(1, 2, 1)))
checkAnswer(
testData.dropDuplicates(Seq("value2")),
Seq(Row(2, 1, 2), Row(1, 1, 1)))
checkAnswer(
testData.dropDuplicates("key", "value1"),
Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))
}
test("SPARK-8621: support empty string column name") {
val df = Seq(Tuple1(1)).toDF("").as("t")
// We should allow empty string as column name
df.col("")
df.col("t.``")
}
test("SPARK-8797: sort by float column containing NaN should not crash") {
val inputData = Seq.fill(10)(Tuple1(Float.NaN)) ++ (1 to 1000).map(x => Tuple1(x.toFloat))
val df = Random.shuffle(inputData).toDF("a")
df.orderBy("a").collect()
}
test("SPARK-8797: sort by double column containing NaN should not crash") {
val inputData = Seq.fill(10)(Tuple1(Double.NaN)) ++ (1 to 1000).map(x => Tuple1(x.toDouble))
val df = Random.shuffle(inputData).toDF("a")
df.orderBy("a").collect()
}
test("NaN is greater than all other non-NaN numeric values") {
val maxDouble = Seq(Double.NaN, Double.PositiveInfinity, Double.MaxValue)
.map(Tuple1.apply).toDF("a").selectExpr("max(a)").first()
assert(java.lang.Double.isNaN(maxDouble.getDouble(0)))
val maxFloat = Seq(Float.NaN, Float.PositiveInfinity, Float.MaxValue)
.map(Tuple1.apply).toDF("a").selectExpr("max(a)").first()
assert(java.lang.Float.isNaN(maxFloat.getFloat(0)))
}
test("SPARK-8072: Better Exception for Duplicate Columns") {
// only one duplicate column present
val e = intercept[org.apache.spark.sql.AnalysisException] {
Seq((1, 2, 3), (2, 3, 4), (3, 4, 5)).toDF("column1", "column2", "column1")
.write.format("parquet").save("temp")
}
assert(e.getMessage.contains("Found duplicate column(s) when inserting into"))
assert(e.getMessage.contains("column1"))
assert(!e.getMessage.contains("column2"))
// multiple duplicate columns present
val f = intercept[org.apache.spark.sql.AnalysisException] {
Seq((1, 2, 3, 4, 5), (2, 3, 4, 5, 6), (3, 4, 5, 6, 7))
.toDF("column1", "column2", "column3", "column1", "column3")
.write.format("json").save("temp")
}
assert(f.getMessage.contains("Found duplicate column(s) when inserting into"))
assert(f.getMessage.contains("column1"))
assert(f.getMessage.contains("column3"))
assert(!f.getMessage.contains("column2"))
}
test("SPARK-6941: Better error message for inserting into RDD-based Table") {
withTempDir { dir =>
withTempView("parquet_base", "json_base", "rdd_base", "indirect_ds", "one_row") {
val tempParquetFile = new File(dir, "tmp_parquet")
val tempJsonFile = new File(dir, "tmp_json")
val df = Seq(Tuple1(1)).toDF()
val insertion = Seq(Tuple1(2)).toDF("col")
// pass case: parquet table (HadoopFsRelation)
df.write.mode(SaveMode.Overwrite).parquet(tempParquetFile.getCanonicalPath)
val pdf = spark.read.parquet(tempParquetFile.getCanonicalPath)
pdf.createOrReplaceTempView("parquet_base")
insertion.write.insertInto("parquet_base")
// pass case: json table (InsertableRelation)
df.write.mode(SaveMode.Overwrite).json(tempJsonFile.getCanonicalPath)
val jdf = spark.read.json(tempJsonFile.getCanonicalPath)
jdf.createOrReplaceTempView("json_base")
insertion.write.mode(SaveMode.Overwrite).insertInto("json_base")
// error cases: insert into an RDD
df.createOrReplaceTempView("rdd_base")
val e1 = intercept[AnalysisException] {
insertion.write.insertInto("rdd_base")
}
assert(e1.getMessage.contains("Inserting into an RDD-based table is not allowed."))
// error case: insert into a logical plan that is not a LeafNode
val indirectDS = pdf.select("_1").filter($"_1" > 5)
indirectDS.createOrReplaceTempView("indirect_ds")
val e2 = intercept[AnalysisException] {
insertion.write.insertInto("indirect_ds")
}
assert(e2.getMessage.contains("Inserting into an RDD-based table is not allowed."))
// error case: insert into an OneRowRelation
Dataset.ofRows(spark, OneRowRelation()).createOrReplaceTempView("one_row")
val e3 = intercept[AnalysisException] {
insertion.write.insertInto("one_row")
}
assert(e3.getMessage.contains("Inserting into an RDD-based table is not allowed."))
}
}
}
test("SPARK-8608: call `show` on local DataFrame with random columns should return same value") {
val df = testData.select(rand(33))
assert(df.showString(5) == df.showString(5))
// We will reuse the same Expression object for LocalRelation.
val df1 = (1 to 10).map(Tuple1.apply).toDF().select(rand(33))
assert(df1.showString(5) == df1.showString(5))
}
test("SPARK-8609: local DataFrame with random columns should return same value after sort") {
checkAnswer(testData.sort(rand(33)), testData.sort(rand(33)))
// We will reuse the same Expression object for LocalRelation.
val df = (1 to 10).map(Tuple1.apply).toDF()
checkAnswer(df.sort(rand(33)), df.sort(rand(33)))
}
test("SPARK-9083: sort with non-deterministic expressions") {
val seed = 33
val df = (1 to 100).map(Tuple1.apply).toDF("i").repartition(1)
val random = new XORShiftRandom(seed)
val expected = (1 to 100).map(_ -> random.nextDouble()).sortBy(_._2).map(_._1)
val actual = df.sort(rand(seed)).collect().map(_.getInt(0))
assert(expected === actual)
}
test("Sorting columns are not in Filter and Project") {
checkAnswer(
upperCaseData.filter($"N" > 1).select("N").filter($"N" < 6).orderBy($"L".asc),
Row(2) :: Row(3) :: Row(4) :: Row(5) :: Nil)
}
test("SPARK-9323: DataFrame.orderBy should support nested column name") {
val df = spark.read.json(Seq("""{"a": {"b": 1}}""").toDS())
checkAnswer(df.orderBy("a.b"), Row(Row(1)))
}
test("SPARK-9950: correctly analyze grouping/aggregating on struct fields") {
val df = Seq(("x", (1, 1)), ("y", (2, 2))).toDF("a", "b")
checkAnswer(df.groupBy("b._1").agg(sum("b._2")), Row(1, 1) :: Row(2, 2) :: Nil)
}
test("SPARK-10093: Avoid transformations on executors") {
val df = Seq((1, 1)).toDF("a", "b")
df.where($"a" === 1)
.select($"a", $"b", struct($"b"))
.orderBy("a")
.select(struct($"b"))
.collect()
}
test("SPARK-10185: Read multiple Hadoop Filesystem paths and paths with a comma in it") {
withTempDir { dir =>
val df1 = Seq((1, 22)).toDF("a", "b")
val dir1 = new File(dir, "dir,1").getCanonicalPath
df1.write.format("json").save(dir1)
val df2 = Seq((2, 23)).toDF("a", "b")
val dir2 = new File(dir, "dir2").getCanonicalPath
df2.write.format("json").save(dir2)
checkAnswer(spark.read.format("json").load(dir1, dir2),
Row(1, 22) :: Row(2, 23) :: Nil)
checkAnswer(spark.read.format("json").load(dir1),
Row(1, 22) :: Nil)
}
}
test("Alias uses internally generated names 'aggOrder' and 'havingCondition'") {
val df = Seq(1 -> 2).toDF("i", "j")
val query1 = df.groupBy("i")
.agg(max("j").as("aggOrder"))
.orderBy(sum("j"))
checkAnswer(query1, Row(1, 2))
// In the plan, there are two attributes having the same name 'havingCondition'
// One is a user-provided alias name; another is an internally generated one.
val query2 = df.groupBy("i")
.agg(max("j").as("havingCondition"))
.where(sum("j") > 0)
.orderBy($"havingCondition".asc)
checkAnswer(query2, Row(1, 2))
}
test("SPARK-10316: respect non-deterministic expressions in PhysicalOperation") {
withTempDir { dir =>
(1 to 10).toDF("id").write.mode(SaveMode.Overwrite).json(dir.getCanonicalPath)
val input = spark.read.json(dir.getCanonicalPath)
val df = input.select($"id", rand(0).as("r"))
df.as("a").join(df.filter($"r" < 0.5).as("b"), $"a.id" === $"b.id").collect().foreach { row =>
assert(row.getDouble(1) - row.getDouble(3) === 0.0 +- 0.001)
}
}
}
test("SPARK-10743: keep the name of expression if possible when do cast") {
val df = (1 to 10).map(Tuple1.apply).toDF("i").as("src")
assert(df.select($"src.i".cast(StringType)).columns.head === "i")
assert(df.select($"src.i".cast(StringType).cast(IntegerType)).columns.head === "i")
}
test("SPARK-11301: fix case sensitivity for filter on partitioned columns") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
withTempPath { path =>
Seq(2012 -> "a").toDF("year", "val").write.partitionBy("year").parquet(path.getAbsolutePath)
val df = spark.read.parquet(path.getAbsolutePath)
checkAnswer(df.filter($"yEAr" > 2000).select($"val"), Row("a"))
}
}
}
/**
* Verifies that there is no Exchange between the Aggregations for `df`
*/
private def verifyNonExchangingAgg(df: DataFrame) = {
var atFirstAgg: Boolean = false
df.queryExecution.executedPlan.foreach {
case agg: HashAggregateExec =>
atFirstAgg = !atFirstAgg
case _ =>
if (atFirstAgg) {
fail("Should not have operators between the two aggregations")
}
}
}
/**
* Verifies that there is an Exchange between the Aggregations for `df`
*/
private def verifyExchangingAgg(df: DataFrame) = {
var atFirstAgg: Boolean = false
df.queryExecution.executedPlan.foreach {
case agg: HashAggregateExec =>
if (atFirstAgg) {
fail("Should not have back to back Aggregates")
}
atFirstAgg = true
case e: ShuffleExchangeExec => atFirstAgg = false
case _ =>
}
}
test("distributeBy and localSort") {
val original = testData.repartition(1)
assert(original.rdd.partitions.length == 1)
val df = original.repartition(5, $"key")
assert(df.rdd.partitions.length == 5)
checkAnswer(original.select(), df.select())
val df2 = original.repartition(10, $"key")
assert(df2.rdd.partitions.length == 10)
checkAnswer(original.select(), df2.select())
// Group by the column we are distributed by. This should generate a plan with no exchange
// between the aggregates
val df3 = testData.repartition($"key").groupBy("key").count()
verifyNonExchangingAgg(df3)
verifyNonExchangingAgg(testData.repartition($"key", $"value")
.groupBy("key", "value").count())
// Grouping by just the first distributeBy expr, need to exchange.
verifyExchangingAgg(testData.repartition($"key", $"value")
.groupBy("key").count())
val data = spark.sparkContext.parallelize(
(1 to 100).map(i => TestData2(i % 10, i))).toDF()
// Distribute and order by.
val df4 = data.repartition($"a").sortWithinPartitions($"b".desc)
// Walk each partition and verify that it is sorted descending and does not contain all
// the values.
df4.rdd.foreachPartition { p =>
// Skip empty partition
if (p.hasNext) {
var previousValue: Int = -1
var allSequential: Boolean = true
p.foreach { r =>
val v: Int = r.getInt(1)
if (previousValue != -1) {
if (previousValue < v) throw new SparkException("Partition is not ordered.")
if (v + 1 != previousValue) allSequential = false
}
previousValue = v
}
if (allSequential) throw new SparkException("Partition should not be globally ordered")
}
}
// Distribute and order by with multiple order bys
val df5 = data.repartition(2, $"a").sortWithinPartitions($"b".asc, $"a".asc)
// Walk each partition and verify that it is sorted ascending
df5.rdd.foreachPartition { p =>
var previousValue: Int = -1
var allSequential: Boolean = true
p.foreach { r =>
val v: Int = r.getInt(1)
if (previousValue != -1) {
if (previousValue > v) throw new SparkException("Partition is not ordered.")
if (v - 1 != previousValue) allSequential = false
}
previousValue = v
}
if (allSequential) throw new SparkException("Partition should not be all sequential")
}
// Distribute into one partition and order by. This partition should contain all the values.
val df6 = data.repartition(1, $"a").sortWithinPartitions("b")
// Walk each partition and verify that it is sorted ascending and not globally sorted.
df6.rdd.foreachPartition { p =>
var previousValue: Int = -1
var allSequential: Boolean = true
p.foreach { r =>
val v: Int = r.getInt(1)
if (previousValue != -1) {
if (previousValue > v) throw new SparkException("Partition is not ordered.")
if (v - 1 != previousValue) allSequential = false
}
previousValue = v
}
if (!allSequential) throw new SparkException("Partition should contain all sequential values")
}
}
test("fix case sensitivity of partition by") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
withTempPath { path =>
val p = path.getAbsolutePath
Seq(2012 -> "a").toDF("year", "val").write.partitionBy("yEAr").parquet(p)
checkAnswer(spark.read.parquet(p).select("YeaR"), Row(2012))
}
}
}
// This test case is to verify a bug when making a new instance of LogicalRDD.
test("SPARK-11633: LogicalRDD throws TreeNode Exception: Failed to Copy Node") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val rdd = sparkContext.makeRDD(Seq(Row(1, 3), Row(2, 1)))
val df = spark.createDataFrame(
rdd,
new StructType().add("f1", IntegerType).add("f2", IntegerType))
.select($"F1", $"f2".as("f2"))
val df1 = df.as("a")
val df2 = df.as("b")
checkAnswer(df1.join(df2, $"a.f2" === $"b.f2"), Row(1, 3, 1, 3) :: Row(2, 1, 2, 1) :: Nil)
}
}
test("SPARK-10656: completely support special chars") {
val df = Seq(1 -> "a").toDF("i_$.a", "d^'a.")
checkAnswer(df.select(df("*")), Row(1, "a"))
checkAnswer(df.withColumnRenamed("d^'a.", "a"), Row(1, "a"))
}
test("SPARK-11725: correctly handle null inputs for ScalaUDF") {
val df = sparkContext.parallelize(Seq(
java.lang.Integer.valueOf(22) -> "John",
null.asInstanceOf[java.lang.Integer] -> "Lucy")).toDF("age", "name")
// passing null into the UDF that could handle it
val boxedUDF = udf[java.lang.Integer, java.lang.Integer] {
(i: java.lang.Integer) => if (i == null) -10 else null
}
checkAnswer(df.select(boxedUDF($"age")), Row(null) :: Row(-10) :: Nil)
spark.udf.register("boxedUDF",
(i: java.lang.Integer) => (if (i == null) -10 else null): java.lang.Integer)
checkAnswer(sql("select boxedUDF(null), boxedUDF(-1)"), Row(-10, null) :: Nil)
val primitiveUDF = udf((i: Int) => i * 2)
checkAnswer(df.select(primitiveUDF($"age")), Row(44) :: Row(null) :: Nil)
}
test("SPARK-12398 truncated toString") {
val df1 = Seq((1L, "row1")).toDF("id", "name")
assert(df1.toString() === "[id: bigint, name: string]")
val df2 = Seq((1L, "c2", false)).toDF("c1", "c2", "c3")
assert(df2.toString === "[c1: bigint, c2: string ... 1 more field]")
val df3 = Seq((1L, "c2", false, 10)).toDF("c1", "c2", "c3", "c4")
assert(df3.toString === "[c1: bigint, c2: string ... 2 more fields]")
val df4 = Seq((1L, Tuple2(1L, "val"))).toDF("c1", "c2")
assert(df4.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string>]")
val df5 = Seq((1L, Tuple2(1L, "val"), 20.0)).toDF("c1", "c2", "c3")
assert(df5.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string> ... 1 more field]")
val df6 = Seq((1L, Tuple2(1L, "val"), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(df6.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string> ... 2 more fields]")
val df7 = Seq((1L, Tuple3(1L, "val", 2), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(
df7.toString ===
"[c1: bigint, c2: struct<_1: bigint, _2: string ... 1 more field> ... 2 more fields]")
val df8 = Seq((1L, Tuple7(1L, "val", 2, 3, 4, 5, 6), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(
df8.toString ===
"[c1: bigint, c2: struct<_1: bigint, _2: string ... 5 more fields> ... 2 more fields]")
val df9 =
Seq((1L, Tuple4(1L, Tuple4(1L, 2L, 3L, 4L), 2L, 3L), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(
df9.toString ===
"[c1: bigint, c2: struct<_1: bigint," +
" _2: struct<_1: bigint," +
" _2: bigint ... 2 more fields> ... 2 more fields> ... 2 more fields]")
}
test("reuse exchange") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "2") {
val df = spark.range(100).toDF()
val join = df.join(df, "id")
val plan = join.queryExecution.executedPlan
checkAnswer(join, df)
assert(
collect(join.queryExecution.executedPlan) {
case e: ShuffleExchangeExec => true }.size === 1)
assert(
collect(join.queryExecution.executedPlan) { case e: ReusedExchangeExec => true }.size === 1)
val broadcasted = broadcast(join)
val join2 = join.join(broadcasted, "id").join(broadcasted, "id")
checkAnswer(join2, df)
assert(
collect(join2.queryExecution.executedPlan) {
case e: ShuffleExchangeExec => true }.size == 1)
assert(
collect(join2.queryExecution.executedPlan) {
case e: BroadcastExchangeExec => true }.size === 1)
assert(
collect(join2.queryExecution.executedPlan) { case e: ReusedExchangeExec => true }.size == 4)
}
}
test("sameResult() on aggregate") {
val df = spark.range(100)
val agg1 = df.groupBy().count()
val agg2 = df.groupBy().count()
// two aggregates with different ExprId within them should have same result
assert(agg1.queryExecution.executedPlan.sameResult(agg2.queryExecution.executedPlan))
val agg3 = df.groupBy().sum()
assert(!agg1.queryExecution.executedPlan.sameResult(agg3.queryExecution.executedPlan))
val df2 = spark.range(101)
val agg4 = df2.groupBy().count()
assert(!agg1.queryExecution.executedPlan.sameResult(agg4.queryExecution.executedPlan))
}
test("SPARK-12512: support `.` in column name for withColumn()") {
val df = Seq("a" -> "b").toDF("col.a", "col.b")
checkAnswer(df.select(df("*")), Row("a", "b"))
checkAnswer(df.withColumn("col.a", lit("c")), Row("c", "b"))
checkAnswer(df.withColumn("col.c", lit("c")), Row("a", "b", "c"))
}
test("SPARK-12841: cast in filter") {
checkAnswer(
Seq(1 -> "a").toDF("i", "j").filter($"i".cast(StringType) === "1"),
Row(1, "a"))
}
test("SPARK-12982: Add table name validation in temp table registration") {
val df = Seq("foo", "bar").map(Tuple1.apply).toDF("col")
// invalid table names
Seq("11111", "t~", "#$@sum", "table!#").foreach { name =>
withTempView(name) {
val m = intercept[AnalysisException](df.createOrReplaceTempView(name)).getMessage
assert(m.contains(s"Invalid view name: $name"))
}
}
// valid table names
Seq("table1", "`11111`", "`t~`", "`#$@sum`", "`table!#`").foreach { name =>
withTempView(name) {
df.createOrReplaceTempView(name)
}
}
}
test("assertAnalyzed shouldn't replace original stack trace") {
val e = intercept[AnalysisException] {
spark.range(1).select($"id" as "a", $"id" as "b").groupBy("a").agg($"b")
}
assert(e.getStackTrace.head.getClassName != classOf[QueryExecution].getName)
}
test("SPARK-13774: Check error message for non existent path without globbed paths") {
val uuid = UUID.randomUUID().toString
val baseDir = Utils.createTempDir()
try {
val e = intercept[AnalysisException] {
spark.read.format("csv").load(
new File(baseDir, "file").getAbsolutePath,
new File(baseDir, "file2").getAbsolutePath,
new File(uuid, "file3").getAbsolutePath,
uuid).rdd
}
assert(e.getMessage.startsWith("Path does not exist"))
    } finally {
      Utils.deleteRecursively(baseDir)
    }
}
test("SPARK-13774: Check error message for not existent globbed paths") {
// Non-existent initial path component:
val nonExistentBasePath = "/" + UUID.randomUUID().toString
assert(!new File(nonExistentBasePath).exists())
val e = intercept[AnalysisException] {
spark.read.format("text").load(s"$nonExistentBasePath/*")
}
assert(e.getMessage.startsWith("Path does not exist"))
// Existent initial path component, but no matching files:
val baseDir = Utils.createTempDir()
val childDir = Utils.createTempDir(baseDir.getAbsolutePath)
assert(childDir.exists())
try {
val e1 = intercept[AnalysisException] {
spark.read.json(s"${baseDir.getAbsolutePath}/*/*-xyz.json").rdd
}
assert(e1.getMessage.startsWith("Path does not exist"))
} finally {
Utils.deleteRecursively(baseDir)
}
}
test("SPARK-15230: distinct() does not handle column name with dot properly") {
val df = Seq(1, 1, 2).toDF("column.with.dot")
checkAnswer(df.distinct(), Row(1) :: Row(2) :: Nil)
}
test("SPARK-16181: outer join with isNull filter") {
val left = Seq("x").toDF("col")
val right = Seq("y").toDF("col").withColumn("new", lit(true))
val joined = left.join(right, left("col") === right("col"), "left_outer")
checkAnswer(joined, Row("x", null, null))
checkAnswer(joined.filter($"new".isNull), Row("x", null, null))
}
test("SPARK-16664: persist with more than 200 columns") {
val size = 201L
val rdd = sparkContext.makeRDD(Seq(Row.fromSeq(Seq.range(0, size))))
val schemas = List.range(0, size).map(a => StructField("name" + a, LongType, true))
val df = spark.createDataFrame(rdd, StructType(schemas))
assert(df.persist.take(1).apply(0).toSeq(100).asInstanceOf[Long] == 100)
}
test("SPARK-17409: Do Not Optimize Query in CTAS (Data source tables) More Than Once") {
withTable("bar") {
withTempView("foo") {
withSQLConf(SQLConf.DEFAULT_DATA_SOURCE_NAME.key -> "json") {
sql("select 0 as id").createOrReplaceTempView("foo")
val df = sql("select * from foo group by id")
// If we optimize the query in CTAS more than once, the following saveAsTable will fail
// with the error: `GROUP BY position 0 is not in select list (valid range is [1, 1])`
df.write.mode("overwrite").saveAsTable("bar")
checkAnswer(spark.table("bar"), Row(0) :: Nil)
val tableMetadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier("bar"))
assert(tableMetadata.provider == Some("json"),
"the expected table is a data source table using json")
}
}
}
}
test("copy results for sampling with replacement") {
val df = Seq((1, 0), (2, 0), (3, 0)).toDF("a", "b")
val sampleDf = df.sample(true, 2.00)
val d = sampleDf.withColumn("c", monotonically_increasing_id).select($"c").collect
assert(d.size == d.distinct.size)
}
private def verifyNullabilityInFilterExec(
df: DataFrame,
expr: String,
expectedNonNullableColumns: Seq[String]): Unit = {
val dfWithFilter = df.where(s"isnotnull($expr)").selectExpr(expr)
dfWithFilter.queryExecution.executedPlan.collect {
// When the child expression in isnotnull is null-intolerant (i.e. any null input will
// result in null output), the involved columns are converted to not nullable;
// otherwise, no change should be made.
case e: FilterExec =>
assert(e.output.forall { o =>
if (expectedNonNullableColumns.contains(o.name)) !o.nullable else o.nullable
})
}
}
test("SPARK-17957: no change on nullability in FilterExec output") {
val df = sparkContext.parallelize(Seq(
null.asInstanceOf[java.lang.Integer] -> java.lang.Integer.valueOf(3),
java.lang.Integer.valueOf(1) -> null.asInstanceOf[java.lang.Integer],
java.lang.Integer.valueOf(2) -> java.lang.Integer.valueOf(4))).toDF()
verifyNullabilityInFilterExec(df,
expr = "Rand()", expectedNonNullableColumns = Seq.empty[String])
verifyNullabilityInFilterExec(df,
expr = "coalesce(_1, _2)", expectedNonNullableColumns = Seq.empty[String])
verifyNullabilityInFilterExec(df,
expr = "coalesce(_1, 0) + Rand()", expectedNonNullableColumns = Seq.empty[String])
verifyNullabilityInFilterExec(df,
expr = "cast(coalesce(cast(coalesce(_1, _2) as double), 0.0) as int)",
expectedNonNullableColumns = Seq.empty[String])
}
test("SPARK-17957: set nullability to false in FilterExec output") {
val df = sparkContext.parallelize(Seq(
null.asInstanceOf[java.lang.Integer] -> java.lang.Integer.valueOf(3),
java.lang.Integer.valueOf(1) -> null.asInstanceOf[java.lang.Integer],
java.lang.Integer.valueOf(2) -> java.lang.Integer.valueOf(4))).toDF()
verifyNullabilityInFilterExec(df,
expr = "_1 + _2 * 3", expectedNonNullableColumns = Seq("_1", "_2"))
verifyNullabilityInFilterExec(df,
expr = "_1 + _2", expectedNonNullableColumns = Seq("_1", "_2"))
verifyNullabilityInFilterExec(df,
expr = "_1", expectedNonNullableColumns = Seq("_1"))
// `constructIsNotNullConstraints` infers the IsNotNull(_2) from IsNotNull(_2 + Rand())
// Thus, we are able to set nullability of _2 to false.
// If IsNotNull(_2) is not given from `constructIsNotNullConstraints`, the impl of
// isNullIntolerant in `FilterExec` needs an update for more advanced inference.
verifyNullabilityInFilterExec(df,
expr = "_2 + Rand()", expectedNonNullableColumns = Seq("_2"))
verifyNullabilityInFilterExec(df,
expr = "_2 * 3 + coalesce(_1, 0)", expectedNonNullableColumns = Seq("_2"))
verifyNullabilityInFilterExec(df,
expr = "cast((_1 + _2) as boolean)", expectedNonNullableColumns = Seq("_1", "_2"))
}
test("SPARK-17897: Fixed IsNotNull Constraint Inference Rule") {
val data = Seq[java.lang.Integer](1, null).toDF("key")
checkAnswer(data.filter(!$"key".isNotNull), Row(null))
checkAnswer(data.filter(!(- $"key").isNotNull), Row(null))
}
test("SPARK-17957: outer join + na.fill") {
withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
val df1 = Seq((1, 2), (2, 3)).toDF("a", "b")
val df2 = Seq((2, 5), (3, 4)).toDF("a", "c")
val joinedDf = df1.join(df2, Seq("a"), "outer").na.fill(0)
val df3 = Seq((3, 1)).toDF("a", "d")
checkAnswer(joinedDf.join(df3, "a"), Row(3, 0, 4, 1))
}
}
test("SPARK-18070 binary operator should not consider nullability when comparing input types") {
val rows = Seq(Row(Seq(1), Seq(1)))
val schema = new StructType()
.add("array1", ArrayType(IntegerType))
.add("array2", ArrayType(IntegerType, containsNull = false))
val df = spark.createDataFrame(spark.sparkContext.makeRDD(rows), schema)
assert(df.filter($"array1" === $"array2").count() == 1)
}
test("SPARK-17913: compare long and string type column may return confusing result") {
val df = Seq(123L -> "123", 19157170390056973L -> "19157170390056971").toDF("i", "j")
checkAnswer(df.select($"i" === $"j"), Row(true) :: Row(false) :: Nil)
}
test("SPARK-19691 Calculating percentile of decimal column fails with ClassCastException") {
val df = spark.range(1).selectExpr("CAST(id as DECIMAL) as x").selectExpr("percentile(x, 0.5)")
checkAnswer(df, Row(BigDecimal(0)) :: Nil)
}
test("SPARK-20359: catalyst outer join optimization should not throw npe") {
val df1 = Seq("a", "b", "c").toDF("x")
.withColumn("y", udf{ (x: String) => x.substring(0, 1) + "!" }.apply($"x"))
val df2 = Seq("a", "b").toDF("x1")
df1
.join(df2, df1("x") === df2("x1"), "left_outer")
.filter($"x1".isNotNull || !$"y".isin("a!"))
.count
}
  // The fix for SPARK-21720 avoids an exception regarding the JVM code size limit.
  // TODO: When the threshold for splitting statements (1024) becomes configurable,
  // re-enable this test with the maximum threshold so that the exception is triggered.
// See https://github.com/apache/spark/pull/18972/files#r150223463
ignore("SPARK-19372: Filter can be executed w/o generated code due to JVM code size limit") {
val N = 400
val rows = Seq(Row.fromSeq(Seq.fill(N)("string")))
val schema = StructType(Seq.tabulate(N)(i => StructField(s"_c$i", StringType)))
val df = spark.createDataFrame(spark.sparkContext.makeRDD(rows), schema)
val filter = (0 until N)
.foldLeft(lit(false))((e, index) => e.or(df.col(df.columns(index)) =!= "string"))
withSQLConf(SQLConf.CODEGEN_FALLBACK.key -> "true") {
df.filter(filter).count()
}
withSQLConf(SQLConf.CODEGEN_FALLBACK.key -> "false") {
val e = intercept[SparkException] {
df.filter(filter).count()
}.getMessage
assert(e.contains("grows beyond 64 KiB"))
}
}
test("SPARK-20897: cached self-join should not fail") {
// force to plan sort merge join
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "0") {
val df = Seq(1 -> "a").toDF("i", "j")
val df1 = df.as("t1")
val df2 = df.as("t2")
assert(df1.join(df2, $"t1.i" === $"t2.i").cache().count() == 1)
}
}
test("order-by ordinal.") {
checkAnswer(
testData2.select(lit(7), $"a", $"b").orderBy(lit(1), lit(2), lit(3)),
Seq(Row(7, 1, 1), Row(7, 1, 2), Row(7, 2, 1), Row(7, 2, 2), Row(7, 3, 1), Row(7, 3, 2)))
}
test("SPARK-22271: mean overflows and returns null for some decimal variables") {
val d = 0.034567890
val df = Seq(d, d, d, d, d, d, d, d, d, d).toDF("DecimalCol")
val result = df.select($"DecimalCol" cast DecimalType(38, 33))
.select(col("DecimalCol")).describe()
val mean = result.select("DecimalCol").where($"summary" === "mean")
assert(mean.collect().toSet === Set(Row("0.0345678900000000000000000000000000000")))
}
test("SPARK-22520: support code generation for large CaseWhen") {
val N = 30
var expr1 = when($"id" === lit(0), 0)
var expr2 = when($"id" === lit(0), 10)
(1 to N).foreach { i =>
expr1 = expr1.when($"id" === lit(i), -i)
expr2 = expr2.when($"id" === lit(i + 10), i)
}
val df = spark.range(1).select(expr1, expr2.otherwise(0))
checkAnswer(df, Row(0, 10) :: Nil)
assert(df.queryExecution.executedPlan.isInstanceOf[WholeStageCodegenExec])
}
test("SPARK-24165: CaseWhen/If - nullability of nested types") {
val rows = new java.util.ArrayList[Row]()
rows.add(Row(true, ("x", 1), Seq("x", "y"), Map(0 -> "x")))
rows.add(Row(false, (null, 2), Seq(null, "z"), Map(0 -> null)))
val schema = StructType(Seq(
StructField("cond", BooleanType, true),
StructField("s", StructType(Seq(
StructField("val1", StringType, true),
StructField("val2", IntegerType, false)
)), false),
StructField("a", ArrayType(StringType, true)),
StructField("m", MapType(IntegerType, StringType, true))
))
val sourceDF = spark.createDataFrame(rows, schema)
def structWhenDF: DataFrame = sourceDF
.select(when($"cond",
struct(lit("a").as("val1"), lit(10).as("val2"))).otherwise($"s") as "res")
.select($"res".getField("val1"))
def arrayWhenDF: DataFrame = sourceDF
.select(when($"cond", array(lit("a"), lit("b"))).otherwise($"a") as "res")
.select($"res".getItem(0))
def mapWhenDF: DataFrame = sourceDF
.select(when($"cond", map(lit(0), lit("a"))).otherwise($"m") as "res")
.select($"res".getItem(0))
def structIfDF: DataFrame = sourceDF
.select(expr("if(cond, struct('a' as val1, 10 as val2), s)") as "res")
.select($"res".getField("val1"))
def arrayIfDF: DataFrame = sourceDF
.select(expr("if(cond, array('a', 'b'), a)") as "res")
.select($"res".getItem(0))
def mapIfDF: DataFrame = sourceDF
.select(expr("if(cond, map(0, 'a'), m)") as "res")
.select($"res".getItem(0))
def checkResult(): Unit = {
checkAnswer(structWhenDF, Seq(Row("a"), Row(null)))
checkAnswer(arrayWhenDF, Seq(Row("a"), Row(null)))
checkAnswer(mapWhenDF, Seq(Row("a"), Row(null)))
checkAnswer(structIfDF, Seq(Row("a"), Row(null)))
checkAnswer(arrayIfDF, Seq(Row("a"), Row(null)))
checkAnswer(mapIfDF, Seq(Row("a"), Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
checkResult()
// Test with cached relation, the Project will be evaluated with codegen
sourceDF.cache()
checkResult()
}
test("Uuid expressions should produce same results at retries in the same DataFrame") {
val df = spark.range(1).select($"id", new Column(Uuid()))
checkAnswer(df, df.collect())
}
test("SPARK-24313: access map with binary keys") {
val mapWithBinaryKey = map(lit(Array[Byte](1.toByte)), lit(1))
checkAnswer(spark.range(1).select(mapWithBinaryKey.getItem(Array[Byte](1.toByte))), Row(1))
}
test("SPARK-24781: Using a reference from Dataset in Filter/Sort") {
val df = Seq(("test1", 0), ("test2", 1)).toDF("name", "id")
val filter1 = df.select(df("name")).filter(df("id") === 0)
val filter2 = df.select(col("name")).filter(col("id") === 0)
checkAnswer(filter1, filter2.collect())
val sort1 = df.select(df("name")).orderBy(df("id"))
val sort2 = df.select(col("name")).orderBy(col("id"))
checkAnswer(sort1, sort2.collect())
}
test("SPARK-24781: Using a reference not in aggregation in Filter/Sort") {
withSQLConf(SQLConf.DATAFRAME_RETAIN_GROUP_COLUMNS.key -> "false") {
val df = Seq(("test1", 0), ("test2", 1)).toDF("name", "id")
val aggPlusSort1 = df.groupBy(df("name")).agg(count(df("name"))).orderBy(df("name"))
val aggPlusSort2 = df.groupBy(col("name")).agg(count(col("name"))).orderBy(col("name"))
checkAnswer(aggPlusSort1, aggPlusSort2.collect())
val aggPlusFilter1 = df.groupBy(df("name")).agg(count(df("name"))).filter(df("name") === 0)
val aggPlusFilter2 = df.groupBy(col("name")).agg(count(col("name"))).filter(col("name") === 0)
checkAnswer(aggPlusFilter1, aggPlusFilter2.collect())
}
}
test("SPARK-25159: json schema inference should only trigger one job") {
withTempPath { path =>
// This test is to prove that the `JsonInferSchema` does not use `RDD#toLocalIterator` which
// triggers one Spark job per RDD partition.
Seq(1 -> "a", 2 -> "b").toDF("i", "p")
// The data set has 2 partitions, so Spark will write at least 2 json files.
// Use a non-splittable compression (gzip), to make sure the json scan RDD has at least 2
// partitions.
.write.partitionBy("p").option("compression", "gzip").json(path.getCanonicalPath)
val numJobs = new AtomicLong(0)
sparkContext.addSparkListener(new SparkListener {
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
numJobs.incrementAndGet()
}
})
val df = spark.read.json(path.getCanonicalPath)
assert(df.columns === Array("i", "p"))
spark.sparkContext.listenerBus.waitUntilEmpty()
assert(numJobs.get() == 1L)
}
}
test("SPARK-25402 Null handling in BooleanSimplification") {
val schema = StructType.fromDDL("a boolean, b int")
val rows = Seq(Row(null, 1))
val rdd = sparkContext.parallelize(rows)
val df = spark.createDataFrame(rdd, schema)
checkAnswer(df.where("(NOT a) OR a"), Seq.empty)
}
test("SPARK-25714 Null handling in BooleanSimplification") {
withSQLConf(SQLConf.OPTIMIZER_EXCLUDED_RULES.key -> ConvertToLocalRelation.ruleName) {
val df = Seq(("abc", 1), (null, 3)).toDF("col1", "col2")
checkAnswer(
df.filter("col1 = 'abc' OR (col1 != 'abc' AND col2 == 3)"),
Row ("abc", 1))
}
}
test("SPARK-25816 ResolveReferences works with nested extractors") {
val df = Seq((1, Map(1 -> "a")), (2, Map(2 -> "b"))).toDF("key", "map")
val swappedDf = df.select($"key".as("map"), $"map".as("key"))
checkAnswer(swappedDf.filter($"key"($"map") > "a"), Row(2, Map(2 -> "b")))
}
test("SPARK-26057: attribute deduplication on already analyzed plans") {
withTempView("a", "b", "v") {
val df1 = Seq(("1-1", 6)).toDF("id", "n")
df1.createOrReplaceTempView("a")
val df3 = Seq("1-1").toDF("id")
df3.createOrReplaceTempView("b")
spark.sql(
"""
|SELECT a.id, n as m
|FROM a
|WHERE EXISTS(
| SELECT 1
| FROM b
| WHERE b.id = a.id)
""".stripMargin).createOrReplaceTempView("v")
val res = spark.sql(
"""
|SELECT a.id, n, m
| FROM a
| LEFT OUTER JOIN v ON v.id = a.id
""".stripMargin)
checkAnswer(res, Row("1-1", 6, 6))
}
}
test("SPARK-27671: Fix analysis exception when casting null in nested field in struct") {
val df = sql("SELECT * FROM VALUES (('a', (10, null))), (('b', (10, 50))), " +
"(('c', null)) AS tab(x, y)")
checkAnswer(df, Row("a", Row(10, null)) :: Row("b", Row(10, 50)) :: Row("c", null) :: Nil)
val cast = sql("SELECT cast(struct(1, null) AS struct<a:int,b:int>)")
checkAnswer(cast, Row(Row(1, null)) :: Nil)
}
test("SPARK-27439: Explain result should match collected result after view change") {
withTempView("test", "test2", "tmp") {
spark.range(10).createOrReplaceTempView("test")
spark.range(5).createOrReplaceTempView("test2")
spark.sql("select * from test").createOrReplaceTempView("tmp")
val df = spark.sql("select * from tmp")
spark.sql("select * from test2").createOrReplaceTempView("tmp")
val captured = new ByteArrayOutputStream()
Console.withOut(captured) {
df.explain(extended = true)
}
checkAnswer(df, spark.range(10).toDF)
val output = captured.toString
assert(output.contains(
"""== Parsed Logical Plan ==
|'Project [*]
|+- 'UnresolvedRelation [tmp]""".stripMargin))
assert(output.contains(
"""== Physical Plan ==
|*(1) Range (0, 10, step=1, splits=2)""".stripMargin))
}
}
test("SPARK-29442 Set `default` mode should override the existing mode") {
val df = Seq(Tuple1(1)).toDF()
val writer = df.write.mode("overwrite").mode("default")
val modeField = classOf[DataFrameWriter[Tuple1[Int]]].getDeclaredField("mode")
modeField.setAccessible(true)
assert(SaveMode.ErrorIfExists === modeField.get(writer).asInstanceOf[SaveMode])
}
test("sample should not duplicated the input data") {
val df1 = spark.range(10).select($"id" as "id1", $"id" % 5 as "key1")
val df2 = spark.range(10).select($"id" as "id2", $"id" % 5 as "key2")
val sampled = df1.join(df2, $"key1" === $"key2")
.sample(0.5, 42)
.select("id1", "id2")
val idTuples = sampled.collect().map(row => row.getLong(0) -> row.getLong(1))
assert(idTuples.length == idTuples.toSet.size)
}
test("groupBy.as") {
val df1 = Seq((1, 2, 3), (2, 3, 4)).toDF("a", "b", "c")
.repartition($"a", $"b").sortWithinPartitions("a", "b")
val df2 = Seq((1, 2, 4), (2, 3, 5)).toDF("a", "b", "c")
.repartition($"a", $"b").sortWithinPartitions("a", "b")
implicit val valueEncoder = RowEncoder(df1.schema)
val df3 = df1.groupBy("a", "b").as[GroupByKey, Row]
.cogroup(df2.groupBy("a", "b").as[GroupByKey, Row]) { case (_, data1, data2) =>
data1.zip(data2).map { p =>
p._1.getInt(2) + p._2.getInt(2)
}
}.toDF
checkAnswer(df3.sort("value"), Row(7) :: Row(9) :: Nil)
// Assert that no extra shuffle introduced by cogroup.
val exchanges = collect(df3.queryExecution.executedPlan) {
case h: ShuffleExchangeExec => h
}
assert(exchanges.size == 2)
}
test("groupBy.as: custom grouping expressions") {
val df1 = Seq((1, 2, 3), (2, 3, 4)).toDF("a1", "b", "c")
.repartition($"a1", $"b").sortWithinPartitions("a1", "b")
val df2 = Seq((1, 2, 4), (2, 3, 5)).toDF("a1", "b", "c")
.repartition($"a1", $"b").sortWithinPartitions("a1", "b")
implicit val valueEncoder = RowEncoder(df1.schema)
val groupedDataset1 = df1.groupBy(($"a1" + 1).as("a"), $"b").as[GroupByKey, Row]
val groupedDataset2 = df2.groupBy(($"a1" + 1).as("a"), $"b").as[GroupByKey, Row]
val df3 = groupedDataset1
.cogroup(groupedDataset2) { case (_, data1, data2) =>
data1.zip(data2).map { p =>
p._1.getInt(2) + p._2.getInt(2)
}
}.toDF
checkAnswer(df3.sort("value"), Row(7) :: Row(9) :: Nil)
}
test("groupBy.as: throw AnalysisException for unresolved grouping expr") {
val df = Seq((1, 2, 3), (2, 3, 4)).toDF("a", "b", "c")
implicit val valueEncoder = RowEncoder(df.schema)
val err = intercept[AnalysisException] {
df.groupBy($"d", $"b").as[GroupByKey, Row]
}
assert(err.getMessage.contains("cannot resolve '`d`'"))
}
test("emptyDataFrame should be foldable") {
val emptyDf = spark.emptyDataFrame.withColumn("id", lit(1L))
val joined = spark.range(10).join(emptyDf, "id")
joined.queryExecution.optimizedPlan match {
case LocalRelation(Seq(id), Nil, _) =>
assert(id.name == "id")
case _ =>
fail("emptyDataFrame should be foldable")
}
}
test("SPARK-30811: CTE should not cause stack overflow when " +
"it refers to non-existent table with same name") {
val e = intercept[AnalysisException] {
sql("WITH t AS (SELECT 1 FROM nonexist.t) SELECT * FROM t")
}
assert(e.getMessage.contains("Table or view not found:"))
}
test("CalendarInterval reflection support") {
val df = Seq((1, new CalendarInterval(1, 2, 3))).toDF("a", "b")
checkAnswer(df.selectExpr("b"), Row(new CalendarInterval(1, 2, 3)))
}
test("SPARK-31552: array encoder with different types") {
// primitives
val booleans = Array(true, false)
checkAnswer(Seq(booleans).toDF(), Row(booleans))
val bytes = Array(1.toByte, 2.toByte)
checkAnswer(Seq(bytes).toDF(), Row(bytes))
val shorts = Array(1.toShort, 2.toShort)
checkAnswer(Seq(shorts).toDF(), Row(shorts))
val ints = Array(1, 2)
checkAnswer(Seq(ints).toDF(), Row(ints))
val longs = Array(1L, 2L)
checkAnswer(Seq(longs).toDF(), Row(longs))
val floats = Array(1.0F, 2.0F)
checkAnswer(Seq(floats).toDF(), Row(floats))
val doubles = Array(1.0D, 2.0D)
checkAnswer(Seq(doubles).toDF(), Row(doubles))
val strings = Array("2020-04-24", "2020-04-25")
checkAnswer(Seq(strings).toDF(), Row(strings))
// tuples
val decOne = Decimal(1, 38, 18)
val decTwo = Decimal(2, 38, 18)
val tuple1 = (1, 2.2, "3.33", decOne, Date.valueOf("2012-11-22"))
val tuple2 = (2, 3.3, "4.44", decTwo, Date.valueOf("2022-11-22"))
checkAnswer(Seq(Array(tuple1, tuple2)).toDF(), Seq(Seq(tuple1, tuple2)).toDF())
// case classes
val gbks = Array(GroupByKey(1, 2), GroupByKey(4, 5))
checkAnswer(Seq(gbks).toDF(), Row(Array(Row(1, 2), Row(4, 5))))
// We can move this implicit def to [[SQLImplicits]] when we eventually make fully
// support for array encoder like Seq and Set
// For now cases below, decimal/datetime/interval/binary/nested types, etc,
// are not supported by array
implicit def newArrayEncoder[T <: Array[_] : TypeTag]: Encoder[T] = ExpressionEncoder()
// decimals
val decSpark = Array(decOne, decTwo)
val decScala = decSpark.map(_.toBigDecimal)
val decJava = decSpark.map(_.toJavaBigDecimal)
checkAnswer(Seq(decSpark).toDF(), Row(decJava))
checkAnswer(Seq(decScala).toDF(), Row(decJava))
checkAnswer(Seq(decJava).toDF(), Row(decJava))
// datetimes and intervals
val dates = strings.map(Date.valueOf)
checkAnswer(Seq(dates).toDF(), Row(dates))
val localDates = dates.map(d => DateTimeUtils.daysToLocalDate(DateTimeUtils.fromJavaDate(d)))
checkAnswer(Seq(localDates).toDF(), Row(dates))
val timestamps =
Array(Timestamp.valueOf("2020-04-24 12:34:56"), Timestamp.valueOf("2020-04-24 11:22:33"))
checkAnswer(Seq(timestamps).toDF(), Row(timestamps))
val instants =
timestamps.map(t => DateTimeUtils.microsToInstant(DateTimeUtils.fromJavaTimestamp(t)))
checkAnswer(Seq(instants).toDF(), Row(timestamps))
val intervals = Array(new CalendarInterval(1, 2, 3), new CalendarInterval(4, 5, 6))
checkAnswer(Seq(intervals).toDF(), Row(intervals))
// binary
val bins = Array(Array(1.toByte), Array(2.toByte), Array(3.toByte), Array(4.toByte))
checkAnswer(Seq(bins).toDF(), Row(bins))
// nested
val nestedIntArray = Array(Array(1), Array(2))
checkAnswer(Seq(nestedIntArray).toDF(), Row(nestedIntArray.map(wrapIntArray)))
val nestedDecArray = Array(decSpark)
checkAnswer(Seq(nestedDecArray).toDF(), Row(Array(wrapRefArray(decJava))))
}
test("SPARK-31750: eliminate UpCast if child's dataType is DecimalType") {
withTempPath { f =>
sql("select cast(1 as decimal(38, 0)) as d")
.write.mode("overwrite")
.parquet(f.getAbsolutePath)
val df = spark.read.parquet(f.getAbsolutePath).as[BigDecimal]
assert(df.schema === new StructType().add(StructField("d", DecimalType(38, 0))))
}
}
}
case class GroupByKey(a: Int, b: Int)
|
dbtsai/spark
|
sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
|
Scala
|
apache-2.0
| 98,005
|
package io.peregrine
// Records the failure on the request and adapts the controllers' error handler output
// into a response.
object ErrorHandler {
def apply(request: Request, e: Throwable, controllers: ControllerCollection) = {
request.error = Some(e)
ResponseAdapter(request, controllers.errorHandler(request))
}
}
|
dvarelap/stilt
|
src/main/scala/io/peregrine/ErrorHandler.scala
|
Scala
|
apache-2.0
| 225
|
/*
* Copyright 2012 Eike Kettner
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eknet.publet.vfs
import java.io.{File, ByteArrayInputStream}
import org.eknet.publet.vfs.util.{ClasspathResource, UrlResource}
import org.eknet.publet.vfs.fs.FileResource
import java.net.URL
/**
* A resource is an abstract named content, like a file on a
* local or remote file system. It may also be a directory or
* any other container like resource.
*
 * This resource abstraction may point to a non-existing
* resource.
*
* @author <a href="mailto:eike.kettner@gmail.com">Eike Kettner</a>
* @since 01.04.12 13:59
*/
trait Resource {
/**
 * If available, returns the last modification timestamp
* of this resource.
*
* @return
*/
def lastModification: Option[Long]
/**
* The name of this resource.
*
* @return
*/
def name: ResourceName
/**
* Tells, whether this resource exists.
*
* @return
*/
def exists: Boolean
/**
* Applies the specified function to this resource, if `exists`
* returns `true`. Otherwise returns `None`
*
* @param f
* @tparam A
* @return
*/
def map[A](f:Resource.this.type=>Option[A]):Option[A] = {
if (exists) f(this)
else None
}
}
trait ContentResource extends Resource with Content
trait ContainerResource extends Resource with Container
object Resource {
val resourceComparator = (r1: Resource, r2: Resource) => {
if (isContainer(r1) && !isContainer(r2)) true
else if (isContainer(r2) && !isContainer(r1)) false
else r1.name.compareTo(r2.name) < 0
}
def classpath(uri: String, loader: Option[ClassLoader] = None, name: Option[ResourceName] = None): ContentResource = {
new ClasspathResource(uri, loader, name)
}
def file(path: String): ContentResource = new UrlResource(new File(path).toURI.toURL)
def isContainer(r:Resource):Boolean = r match {
case r:Container => true
case _ => false
}
def isContent(r:Resource): Boolean = r match {
case r:Content => true
case _ => false
}
def toModifyable(r: Resource): Option[Modifyable] = {
r match {
case m:Modifyable=> Some(m)
case _ => None
}
}
def emptyContainer(name: ResourceName):ContainerResource = new EmptyContainer(name)
def emptyContent(name: ResourceName, ct: ContentType = ContentType.unknown): ContentResource = new EmptyContent(name, ct)
private class EmptyContainer(val name: ResourceName) extends ContainerResource {
import ResourceName._
def exists = false
def children = List()
def content(name: String) = emptyContent(name.rn)
def container(name: String) = emptyContainer(name.rn)
def child(name: String) = None
def lastModification = None
lazy val isWriteable = false
}
private class EmptyContent(val name: ResourceName, val contentType: ContentType) extends ContentResource {
def exists = false
def inputStream = new ByteArrayInputStream(Array[Byte]())
}
}
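// --- Editor-added sketch (not part of the original file): a minimal, hedged illustration
// of the Resource API defined above. The object name, the file path and the printed
// message are assumptions made purely for this example.
object ResourceUsageSketch {
  def main(args: Array[String]): Unit = {
    // Resource.file wraps a local file as a ContentResource via UrlResource
    val res: ContentResource = Resource.file("/tmp/example.txt")
    // `map` applies the function only when the resource exists; otherwise it yields None
    val nameIfPresent: Option[String] = res.map(r => Some(r.name.toString))
    println(s"exists=${res.exists}, isContent=${Resource.isContent(res)}, name=$nameIfPresent")
  }
}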
|
eikek/publet
|
publet/src/main/scala/org/eknet/publet/vfs/Resource.scala
|
Scala
|
apache-2.0
| 3,499
|
/*
* Copyright (C) 2013 Alcatel-Lucent.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Licensed to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package molecule
package stream
package ichan
/**
* IChan that contains some state S
*
*/
trait StatefulIChan[+S, +B] extends IChan[B] {
def state: S
}
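// --- Editor-added sketch (not part of the original file): a small, hedged example of how
// client code might branch on whether an IChan exposes state through this trait. The
// object and method names are assumptions for illustration only.
object StatefulIChanSketch {
  def describe(ichan: IChan[_]): String = ichan match {
    case s: StatefulIChan[_, _] => "stateful ichan, current state: " + s.state
    case _ => "ichan without exposed state"
  }
}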
|
molecule-labs/molecule
|
molecule-core/src/main/scala/molecule/stream/ichan/StatefulIChan.scala
|
Scala
|
apache-2.0
| 881
|
package scorex.network.message
import java.net.{InetAddress, InetSocketAddress}
import java.util
import com.google.common.primitives.{Bytes, Ints}
case class PeersMessage(peers: Seq[InetSocketAddress]) extends Message {
import scorex.network.message.PeersMessage._
override val messageType = Message.PeersType
override lazy val dataBytes = {
val length = peers.size
val lengthBytes = Bytes.ensureCapacity(Ints.toByteArray(length), DataLength, 0)
peers.foldLeft(lengthBytes) { case (bs, peer) =>
Bytes.concat(bs,
peer.getAddress.getAddress, Bytes.ensureCapacity(Ints.toByteArray(peer.getPort), 4, 0))
}
}
}
object PeersMessage {
private val AddressLength = 4
private val PortLength = 4
private val DataLength = 4
def apply(data: Array[Byte]): PeersMessage = {
//READ LENGTH
val lengthBytes = util.Arrays.copyOfRange(data, 0, DataLength)
val length = Ints.fromByteArray(lengthBytes)
//CHECK IF DATA MATCHES LENGTH
if (data.length != DataLength + (length * (AddressLength + PortLength)))
throw new Exception("Data does not match length")
val peers = (0 to length - 1).map { i =>
val position = lengthBytes.length + (i * (AddressLength + PortLength))
val addressBytes = util.Arrays.copyOfRange(data, position, position + AddressLength)
val address = InetAddress.getByAddress(addressBytes)
val portBytes = util.Arrays.copyOfRange(data, position + AddressLength, position + AddressLength + PortLength)
new InetSocketAddress(address, Ints.fromByteArray(portBytes))
}
new PeersMessage(peers)
}
}
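// --- Editor-added sketch (not part of the original file): a hedged round-trip check of
// the wire format implemented above (a 4-byte peer count followed by a 4-byte IPv4
// address and a 4-byte port per peer). The sample addresses are assumptions.
object PeersMessageRoundTripSketch {
  def main(args: Array[String]): Unit = {
    val original = PeersMessage(Seq(
      new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9084),
      new InetSocketAddress(InetAddress.getByName("10.0.0.2"), 9085)))
    // Serialize the payload and parse it back through the companion's apply
    val restored = PeersMessage(original.dataBytes)
    require(restored.peers == original.peers, "round trip should preserve the peer list")
  }
}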
|
beni55/Scorex-Lagonaki
|
src/main/scala/scorex/network/message/PeersMessage.scala
|
Scala
|
cc0-1.0
| 1,617
|
/**********************************************************************************************************************
* This file is part of Scrupal, a Scalable Reactive Web Application Framework for Content Management *
* *
* Copyright (c) 2015, Reactific Software LLC. All Rights Reserved. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed *
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for *
* the specific language governing permissions and limitations under the License. *
**********************************************************************************************************************/
package scrupal.utils
import java.io.File
import com.typesafe.config._
import play.api.{ Environment, Configuration }
import scala.collection.immutable.TreeMap
import scala.util.matching.Regex
/** This object provides a set of operations to create `Configuration` values.
*
* For example, to load a `Configuration` in a running application:
* {{{
* val config = Configuration.load()
* val foo = config.getString("foo").getOrElse("boo")
* }}}
*
* The underlying implementation is provided by https://github.com/typesafehub/config.
*/
object ConfigHelpers extends ScrupalComponent {
// The configuration key that says where to get the database configuration data.
val scrupal_storage_config_file_key = "scrupal.storage.config.file"
/** Pimp The Play Configuration class
* @param under The underlying Configuration implementation
*/
implicit class ConfigurationPimps(under : Configuration) extends ScrupalComponent {
type FlatConfig = TreeMap[String, ConfigValue]
def flatConfig : FlatConfig = { TreeMap[String, ConfigValue](under.entrySet.toSeq : _*) }
def interestingFlatConfig : FlatConfig = {
val elide : Regex = "^(akka|java|sun|user|awt|os|path|line).*".r
val entries = under.entrySet.toSeq.filter { case (x, y) ⇒ elide.findPrefixOf(x).isEmpty }
TreeMap[String, ConfigValue](entries.toSeq : _*)
}
}
def from(env : Environment) = {
Configuration.load(env)
}
def from(fileName : String) : Option[Configuration] = {
val env = Environment.simple()
env.getExistingFile(fileName) map { file : File ⇒
Configuration(ConfigFactory.parseFileAnySyntax(file))
}
}
def from(underlying : Config) = {
new Configuration(underlying)
}
def default() = {
Configuration.load(Environment.simple())
}
}
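// --- Editor-added sketch (not part of the original file): a hedged example of the pimped
// Configuration API above. The object name and the inline HOCON snippet are assumptions
// made only for illustration.
object ConfigHelpersSketch {
  def main(args: Array[String]): Unit = {
    import ConfigHelpers._
    // Build a Configuration from an in-memory Config and dump the non-elided entries
    val conf = ConfigHelpers.from(ConfigFactory.parseString("scrupal.storage.config.file = storage.conf"))
    conf.interestingFlatConfig.foreach { case (key, value) => println(s"$key = ${value.unwrapped()}") }
  }
}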
|
scrupal/scrupal
|
scrupal-utils/src/main/scala/scrupal/utils/ConfigHelpers.scala
|
Scala
|
apache-2.0
| 3,500
|
package org.apache.predictionio.examples.experimental.cleanupapp
import org.apache.predictionio.controller.IEngineFactory
import org.apache.predictionio.controller.Engine
import org.apache.predictionio.controller._
case class Query(q: String) extends Serializable
case class PredictedResult(p: String) extends Serializable
object VanillaEngine extends IEngineFactory {
def apply() = {
new Engine(
classOf[DataSource],
PIdentityPreparator(classOf[DataSource]),
Map("" -> classOf[Algorithm]),
classOf[Serving])
}
}
|
alex9311/PredictionIO
|
examples/experimental/scala-cleanup-app/src/main/scala/Engine.scala
|
Scala
|
apache-2.0
| 549
|
/*
* Copyright (c) 2014 Paul Bernard
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Spectrum Finance is based in part on:
* QuantLib. http://quantlib.org/
*
*/
package org.quantintel.ql.instruments.options
/**
* Swaption (also known as a Swap Option)
*
* The option to enter into an interest rate swap. In exchange for an option premium,
* the buyer gains the right but not the obligation to enter into a specified swap
* agreemenet with the issuer on a specified futur date
* - source Investopedia
*
* @author Paul Bernard
*/
class Swaption extends Option {
}
|
quantintel/spectrum
|
financial/src/main/scala/org/quantintel/ql/instruments/options/Swaption.scala
|
Scala
|
apache-2.0
| 1,099
|
package org.elasticmq.rest.sqs.directives
import akka.http.scaladsl.server.{Directives, Route}
import org.elasticmq.rest.sqs.{ActorSystemModule, QueueManagerActorModule}
import org.elasticmq.util.Logging
trait ElasticMQDirectives
extends Directives
with RespondDirectives
with FutureDirectives
with ExceptionDirectives
with QueueDirectives
with QueueManagerActorModule
with ActorSystemModule
with AnyParamDirectives
with RejectionDirectives
with Logging {
/** A valid FIFO parameter value is at most 128 characters and can contain
* - alphanumeric characters (a-z , A-Z , 0-9 ) and
    * - punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ ).
*/
  private val validFifoParameterValueCharsRe = """^[a-zA-Z0-9!"#\$%&'\(\)\*\+,-\./:;<=>?@\[\\\]\^_`\{|\}~]{1,128}$""".r
def rootPath(body: Route): Route = {
path("") {
body
}
}
  /** Valid values are alphanumeric characters and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). The maximum length is
* 128 characters
*
* @param propValue
* The string to validate
* @return
* `true` if the string is valid, false otherwise
*/
protected def isValidFifoPropertyValue(propValue: String): Boolean =
validFifoParameterValueCharsRe.findFirstIn(propValue).isDefined
}
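// --- Editor-added sketch (not part of the original file): isValidFifoPropertyValue is
// protected inside the trait above, so this standalone object restates the same regex
// purely to illustrate the validation rule; the object and value names are assumptions.
object FifoParameterValueSketch {
  private val fifoValueRe =
    """^[a-zA-Z0-9!"#\$%&'\(\)\*\+,-\./:;<=>?@\[\\\]\^_`\{|\}~]{1,128}$""".r
  def isValid(value: String): Boolean = fifoValueRe.findFirstIn(value).isDefined
  def main(args: Array[String]): Unit = {
    println(isValid("order-group_1"))    // true: alphanumerics and allowed punctuation
    println(isValid("contains a space")) // false: whitespace is not allowed
    println(isValid("x" * 129))          // false: longer than 128 characters
  }
}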
|
adamw/elasticmq
|
rest/rest-sqs/src/main/scala/org/elasticmq/rest/sqs/directives/ElasticMQDirectives.scala
|
Scala
|
apache-2.0
| 1,312
|
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.nlp
import org.apache.spark.ml.param.{BooleanParam, Param}
import org.apache.spark.ml.util.{DefaultParamsReadable, Identifiable}
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.functions.{col, udf}
import org.apache.spark.sql.types.{ArrayType, MetadataBuilder, StringType, StructType}
import org.slf4j.LoggerFactory
/**
* Converts `DOCUMENT` type annotations into `CHUNK` type with the contents of a `chunkCol`.
* Chunk text must be contained within input `DOCUMENT`. May be either `StringType` or `ArrayType[StringType]`
* (using [[setIsArray]]). Useful for annotators that require a CHUNK type input.
*
* For more extended examples on document pre-processing see the
* [[https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Public/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb Spark NLP Workshop]].
*
* ==Example==
* {{{
* import spark.implicits._
* import com.johnsnowlabs.nlp.{Doc2Chunk, DocumentAssembler}
* import org.apache.spark.ml.Pipeline
*
* val documentAssembler = new DocumentAssembler().setInputCol("text").setOutputCol("document")
* val chunkAssembler = new Doc2Chunk()
* .setInputCols("document")
* .setChunkCol("target")
* .setOutputCol("chunk")
* .setIsArray(true)
*
* val data = Seq(
* ("Spark NLP is an open-source text processing library for advanced natural language processing.",
* Seq("Spark NLP", "text processing library", "natural language processing"))
* ).toDF("text", "target")
*
* val pipeline = new Pipeline().setStages(Array(documentAssembler, chunkAssembler)).fit(data)
* val result = pipeline.transform(data)
*
* result.selectExpr("chunk.result", "chunk.annotatorType").show(false)
* +-----------------------------------------------------------------+---------------------+
* |result |annotatorType |
* +-----------------------------------------------------------------+---------------------+
* |[Spark NLP, text processing library, natural language processing]|[chunk, chunk, chunk]|
* +-----------------------------------------------------------------+---------------------+
* }}}
*
* @see [[Chunk2Doc]] for converting `CHUNK` annotations to `DOCUMENT`
* @param uid required uid for storing annotator to disk
* @groupname anno Annotator types
* @groupdesc anno Required input and expected output annotator types
* @groupname Ungrouped Members
* @groupname param Parameters
* @groupname setParam Parameter setters
* @groupname getParam Parameter getters
* @groupname Ungrouped Members
* @groupprio param 1
* @groupprio anno 2
* @groupprio Ungrouped 3
* @groupprio setParam 4
* @groupprio getParam 5
* @groupdesc param A list of (hyper-)parameter keys this annotator can take. Users can set and get the parameter values through setters and getters, respectively.
*/
class Doc2Chunk(override val uid: String) extends RawAnnotator[Doc2Chunk] {
import com.johnsnowlabs.nlp.AnnotatorType._
/**
* Output annotator types: CHUNK
*
* @group anno
*/
override val outputAnnotatorType: AnnotatorType = CHUNK
/**
* Input annotator types: DOCUMENT
*
* @group anno
*/
override val inputAnnotatorTypes: Array[String] = Array(DOCUMENT)
private val logger = LoggerFactory.getLogger("ChunkAssembler")
/**
* Column that contains string. Must be part of DOCUMENT
*
* @group param
*/
val chunkCol = new Param[String](this, "chunkCol", "Column that contains string. Must be part of DOCUMENT")
/**
* Column that has a reference of where the chunk begins
*
* @group param
*/
val startCol = new Param[String](this, "startCol", "Column that has a reference of where the chunk begins")
/**
* Whether start col is by whitespace tokens (Default: `false`)
*
* @group param
*/
val startColByTokenIndex = new BooleanParam(this, "startColByTokenIndex", "Whether start col is by whitespace tokens (Default: `false`)")
/**
* Whether the chunkCol is an array of strings (Default: `false`)
*
* @group param
*/
  val isArray = new BooleanParam(this, "isArray", "Whether the chunkCol is an array of strings (Default: `false`)")
/**
* Whether to fail the job if a chunk is not found within document, return empty otherwise (Default: `false`)
*
* @group param
*/
val failOnMissing = new BooleanParam(this, "failOnMissing", "Whether to fail the job if a chunk is not found within document, return empty otherwise (Default: `false`)")
/**
* Whether to lower case for matching case (Default: `true`)
*
* @group param
*/
  val lowerCase = new BooleanParam(this, "lowerCase", "Whether to lower case for matching case (Default: `true`)")
setDefault(
startColByTokenIndex -> false,
isArray -> false,
failOnMissing -> false,
lowerCase -> true
)
/**
* Column that contains string. Must be part of DOCUMENT
*
* @group setParam
*/
def setChunkCol(value: String): this.type = set(chunkCol, value)
/**
* Column that contains string. Must be part of DOCUMENT
*
* @group getParam
*/
def getChunkCol: String = $(chunkCol)
/**
* Column that has a reference of where the chunk begins
*
* @group setParam
*/
def setStartCol(value: String): this.type = set(startCol, value)
/**
* Column that has a reference of where the chunk begins
*
* @group getParam
*/
def getStartCol: String = $(startCol)
/**
* Whether start col is by whitespace tokens (Default: `false`)
*
* @group setParam
*/
def setStartColByTokenIndex(value: Boolean): this.type = set(startColByTokenIndex, value)
/**
* Whether start col is by whitespace tokens (Default: `false`)
*
* @group getParam
*/
def getStartColByTokenIndex: Boolean = $(startColByTokenIndex)
/**
* Whether the chunkCol is an array of strings (Default: `false`)
*
* @group setParam
*/
def setIsArray(value: Boolean): this.type = set(isArray, value)
/**
* Whether the chunkCol is an array of strings (Default: `false`)
*
* @group getParam
*/
def getIsArray: Boolean = $(isArray)
/**
* Whether to fail the job if a chunk is not found within document, return empty otherwise (Default: `false`)
*
* @group setParam
*/
def setFailOnMissing(value: Boolean): this.type = set(failOnMissing, value)
/**
* Whether to fail the job if a chunk is not found within document, return empty otherwise (Default: `false`)
*
* @group getParam
*/
def getFailOnMissing: Boolean = $(failOnMissing)
/**
* Whether to lower case for matching case (Default: `true`)
*
* @group setParam
*/
def setLowerCase(value: Boolean): this.type = set(lowerCase, value)
/**
* Whether to lower case for matching case (Default: `true`)
*
* @group getParam
*/
def getLowerCase: Boolean = $(lowerCase)
def this() = this(Identifiable.randomUID("DOC2CHUNK"))
override protected def extraValidate(structType: StructType): Boolean = {
if (get(chunkCol).isEmpty)
true
else if ($(isArray))
structType.fields.find(_.name == $(chunkCol)).exists(_.dataType == ArrayType(StringType, containsNull = true))
else
structType.fields.find(_.name == $(chunkCol)).exists(_.dataType == StringType)
}
override protected def extraValidateMsg: AnnotatorType =
if ($(isArray)) s"${$(chunkCol)} must be ArrayType(StringType)"
else s"${$(chunkCol)} must be StringType"
private def buildFromChunk(annotation: Annotation, chunk: String, startIndex: Int, chunkIdx: Int) = {
/** This will break if there are two identical chunks */
val beginning = get(lowerCase) match {
case Some(true) => annotation.result.toLowerCase.indexOf(chunk, startIndex)
case _ => annotation.result.indexOf(chunk, startIndex)
}
val ending = beginning + chunk.length - 1
if (chunk.trim.isEmpty || beginning == -1) {
val message = s"Cannot proceed to assemble CHUNK, because could not find: `$chunk` within: `${annotation.result}`"
if ($(failOnMissing))
throw new Exception(message)
else
logger.warn(message)
None
} else {
Some(Annotation(
outputAnnotatorType,
beginning,
ending,
chunk,
annotation.metadata ++ Map("chunk" -> chunkIdx.toString)
))
}
}
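  // Editor-added note: tokenIndexToCharIndex maps a whitespace-token index to the character
  // offset at which that token starts. For example, for "a bb ccc" the token offsets are
  // 0 ("a"), 2 ("bb") and 5 ("ccc"), so tokenIndexToCharIndex("a bb ccc", 2) returns 5.
  // It is used when startColByTokenIndex is enabled.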
def tokenIndexToCharIndex(text: String, tokenIndex: Int): Int = {
var i = 0
text.split(" ").map(token => {
val o = (token, i)
i += token.length + 1
o
}).apply(tokenIndex)._2
}
private def convertDocumentToChunk = udf {
document: Seq[Row] =>
val annotations = document.map(Annotation(_))
annotations.map { annotation =>
Annotation(
AnnotatorType.CHUNK,
annotation.begin,
annotation.end,
annotation.result,
annotation.metadata ++ Map("chunk" -> "0")
)
}
}
private def assembleChunks = udf {
(annotationProperties: Seq[Row], chunks: Seq[String]) =>
val annotations = annotationProperties.map(Annotation(_))
annotations.flatMap(annotation => {
chunks.zipWithIndex.flatMap { case (chunk, idx) => buildFromChunk(annotation, chunk, 0, idx) }
})
}
private def assembleChunk = udf {
(annotationProperties: Seq[Row], chunk: String) =>
val annotations = annotationProperties.map(Annotation(_))
annotations.flatMap(annotation => {
buildFromChunk(annotation, chunk, 0, 0)
})
}
private def assembleChunkWithStart = udf {
(annotationProperties: Seq[Row], chunk: String, start: Int) =>
val annotations = annotationProperties.map(Annotation(_))
annotations.flatMap(annotation => {
if ($(startColByTokenIndex))
buildFromChunk(annotation, chunk, tokenIndexToCharIndex(annotation.result, start), 0)
else
buildFromChunk(annotation, chunk, start, 0)
})
}
override def transform(dataset: Dataset[_]): DataFrame = {
if (get(chunkCol).isEmpty)
dataset.withColumn($(outputCol), wrapColumnMetadata(convertDocumentToChunk(col(getInputCols.head))))
else if ($(isArray))
dataset.withColumn($(outputCol), wrapColumnMetadata(assembleChunks(col(getInputCols.head), col($(chunkCol)))))
else if (get(startCol).isDefined)
dataset.withColumn($(outputCol), wrapColumnMetadata(assembleChunkWithStart(
col($(inputCols).head),
col($(chunkCol)),
col($(startCol))
)))
else
dataset.withColumn($(outputCol), wrapColumnMetadata(assembleChunk(col(getInputCols.head), col($(chunkCol)))))
}
}
/**
* This is the companion object of [[Doc2Chunk]]. Please refer to that class for the documentation.
*/
object Doc2Chunk extends DefaultParamsReadable[Doc2Chunk]
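// Editor-added illustration (not part of the original file): a minimal sketch of the
// non-array configuration, where the chunk text and an optional start offset come from plain
// columns. The column names ("document", "chunk", "target", "begin") are hypothetical.
object Doc2ChunkUsageSketch {
  def singleChunkWithStart(): Doc2Chunk =
    new Doc2Chunk()
      .setInputCols("document")
      .setOutputCol("chunk")
      .setChunkCol("target") // StringType column holding the chunk text
      .setStartCol("begin") // character offset at which matching begins
  // With .setStartColByTokenIndex(true), "begin" would instead be read as a whitespace-token
  // index and converted to a character offset internally.
}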
|
JohnSnowLabs/spark-nlp
|
src/main/scala/com/johnsnowlabs/nlp/Doc2Chunk.scala
|
Scala
|
apache-2.0
| 11,516
|
package se.culvertsoft.mgen.cpppack.generator.impl.classh
import scala.collection.JavaConversions.asScalaBuffer
import se.culvertsoft.mgen.api.model.ClassType
import se.culvertsoft.mgen.compiler.util.SourceCodeBuffer
object MkUsingStatements {
def apply(t: ClassType)(implicit txtBuffer: SourceCodeBuffer) {
if (t.fields.exists(_.isPolymorphic)) {
txtBuffer.tabs(0).textln(s"using mgen::Polymorphic;")
txtBuffer.endl()
}
}
}
|
culvertsoft/mgen
|
mgen-cppgenerator/src/main/scala/se/culvertsoft/mgen/cpppack/generator/impl/classh/MkUsingStatememts.scala
|
Scala
|
mit
| 455
|
package graphics
import java.awt.Color
import java.awt.Font
import java.awt.Graphics
import java.util.ArrayList
import game._
import structures._
/**
* Class for the Menu Screen functions.
* @author Sean Lewis
*/
object MenuScreen {
private def instructionStrings = Array(
"P: pause",
"R: reset",
"Right arrow: speed up",
"Left arrow: slow down",
// break
"V: show gravity vectors",
"D: show gravity resultant",
"T: show ball trail",
"E: show special effects")
private var menuLevel: Level = null
/**
* Returns the start level that the game uses.
* @return the menu Level
*/
def getMenuLevel(): Level = {
if (menuLevel == null) {
val b = new Ball(340, 335, 3, Color.red)
val bod = new ArrayList[Body]()
bod.add(new Body(495, 335, 100, Color.magenta))
val ws = new ArrayList[WarpPoint]()
val gs = new ArrayList[GoalPost]()
val bs = new ArrayList[Blockage]()
b.setLaunched(true)
b.accelerate(new Vector2d(0.0, 1.8))
menuLevel = new Level(b, bod, ws, gs, bs, 0, 3.5)
menuLevel.generateLevelData()
}
menuLevel
}
/**
* Draws the menu screen and its information.
* @param menuLevel the level that is displayed
* @param settings the current settings in the game
* @param g the Graphics component to draw with
*/
def draw(menuLevel: Level, settings: Array[Boolean], g: Graphics) = {
if (settings(GamePanel.VectorsNum))
GravityVectorsEffect.draw(menuLevel, g)
if (settings(GamePanel.ResultantNum))
ResultantDrawer.draw(menuLevel, g)
g.setColor(Color.blue)
g.setFont(new Font("Tahoma", Font.ITALIC, 80))
g.drawString("Gravity Golf", 275, 100)
g.setFont(new Font("Times new Roman", Font.ITALIC, 25))
g.setColor(Color.blue)
for (i <- 0 to 3)
g.drawString(instructionStrings(i), 50, 60 * i + 235)
for (i <- 4 to 7)
g.drawString(instructionStrings(i), 700, 60 * (i - 4) + 235)
g.setFont(new Font("Times new Roman", Font.ITALIC, 20))
g.setColor(Color.green)
g.drawString("Your goal is to give the ball an initial velocity that "
+ "allows it reach the white goal.", 140, 550)
g.setColor(Color.white)
g.drawString("Chose an option below to begin", 345, 590)
}
}
|
splewis/GravityGolf
|
src/main/scala/graphics/MenuScreen.scala
|
Scala
|
mit
| 2,293
|
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.stream
import akka.Done
import akka.stream.scaladsl.Source
import scala.concurrent.Promise
import scala.util.Success
/**
* Reference to a source and a promise that can be completed to stop the
* source.
*/
private[stream] case class SourceRef[T, M](source: Source[T, M], promise: Promise[Done]) {
def stop(): Unit = promise.complete(Success(Done))
}
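// Editor-added illustration (not part of the original file): a minimal sketch of pairing an
// arbitrary source with a fresh stop promise. The object and method names are hypothetical.
private[stream] object SourceRefSketch {
  def wrap[T, M](source: Source[T, M]): SourceRef[T, M] = {
    val stopSignal = Promise[Done]()
    // Downstream logic can observe stopSignal.future and tear the source down once
    // stop() completes the promise.
    SourceRef(source, stopSignal)
  }
}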
|
brharrington/atlas
|
atlas-eval/src/main/scala/com/netflix/atlas/eval/stream/SourceRef.scala
|
Scala
|
apache-2.0
| 995
|
package org.http4s
package headers
object `Content-Location` extends HeaderKey.Default
|
ZizhengTai/http4s
|
core/src/main/scala/org/http4s/headers/Content-Location.scala
|
Scala
|
apache-2.0
| 89
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types._
/**
* A Scala extractor that builds a [[org.apache.spark.sql.types.StructField]] from a Catalyst
* complex type extractor. For example, consider a relation with the following schema:
*
* {{{
* root
* |-- name: struct (nullable = true)
* | |-- first: string (nullable = true)
* | |-- last: string (nullable = true)
* }}}
*
* Further, suppose we take the select expression `name.first`. This will parse into an
* `Alias(child, "first")`. Ignoring the alias, `child` matches the following pattern:
*
* {{{
* GetStructFieldObject(
* AttributeReference("name", StructType(_), _, _),
* StructField("first", StringType, _, _))
* }}}
*
* [[SelectedField]] converts that expression into
*
* {{{
* StructField("name", StructType(Array(StructField("first", StringType))))
* }}}
*
* by mapping each complex type extractor to a [[org.apache.spark.sql.types.StructField]] with the
* same name as its child (or "parent" going right to left in the select expression) and a data
* type appropriate to the complex type extractor. In our example, the name of the child expression
* is "name" and its data type is a [[org.apache.spark.sql.types.StructType]] with a single string
* field named "first".
*
* @param expr the top-level complex type extractor
*/
private[execution] object SelectedField {
def unapply(expr: Expression): Option[StructField] = {
// If this expression is an alias, work on its child instead
val unaliased = expr match {
case Alias(child, _) => child
case expr => expr
}
selectField(unaliased, None)
}
private def selectField(expr: Expression, fieldOpt: Option[StructField]): Option[StructField] = {
expr match {
// No children. Returns a StructField with the attribute name or None if fieldOpt is None.
case AttributeReference(name, dataType, nullable, metadata) =>
fieldOpt.map(field =>
StructField(name, wrapStructType(dataType, field), nullable, metadata))
// Handles case "expr0.field[n]", where "expr0" is of struct type and "expr0.field" is of
// array type.
case GetArrayItem(x @ GetStructFieldObject(child, field @ StructField(name,
dataType, nullable, metadata)), _) =>
val childField = fieldOpt.map(field => StructField(name,
wrapStructType(dataType, field), nullable, metadata)).getOrElse(field)
selectField(child, Some(childField))
// Handles case "expr0.field[n]", where "expr0.field" is of array type.
case GetArrayItem(child, _) =>
selectField(child, fieldOpt)
// Handles case "expr0.field.subfield", where "expr0" and "expr0.field" are of array type.
case GetArrayStructFields(child: GetArrayStructFields,
field @ StructField(name, dataType, nullable, metadata), _, _, _) =>
val childField = fieldOpt.map(field => StructField(name,
wrapStructType(dataType, field),
nullable, metadata)).orElse(Some(field))
selectField(child, childField)
// Handles case "expr0.field", where "expr0" is of array type.
case GetArrayStructFields(child,
field @ StructField(name, dataType, nullable, metadata), _, _, _) =>
val childField =
fieldOpt.map(field => StructField(name,
wrapStructType(dataType, field),
nullable, metadata)).orElse(Some(field))
selectField(child, childField)
// Handles case "expr0.field[key]", where "expr0" is of struct type and "expr0.field" is of
// map type.
case GetMapValue(x @ GetStructFieldObject(child, field @ StructField(name,
dataType,
nullable, metadata)), _) =>
val childField = fieldOpt.map(field => StructField(name,
wrapStructType(dataType, field),
nullable, metadata)).orElse(Some(field))
selectField(child, childField)
// Handles case "expr0.field[key]", where "expr0.field" is of map type.
case GetMapValue(child, _) =>
selectField(child, fieldOpt)
// Handles case "expr0.field", where expr0 is of struct type.
case GetStructFieldObject(child,
field @ StructField(name, dataType, nullable, metadata)) =>
val childField = fieldOpt.map(field => StructField(name,
wrapStructType(dataType, field),
nullable, metadata)).orElse(Some(field))
selectField(child, childField)
case _ =>
None
}
}
// Constructs a composition of complex types with a StructType(Array(field)) at its core. Returns
// a StructType for a StructType, an ArrayType for an ArrayType and a MapType for a MapType.
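  // Editor-added note: for example, wrapping StructField("first", StringType) inside
  // ArrayType(StructType(...), containsNull) yields
  // ArrayType(StructType(Array(StructField("first", StringType))), containsNull),
  // i.e. the outer array/map wrappers are preserved around the narrowed struct.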
private def wrapStructType(dataType: DataType, field: StructField): DataType = {
dataType match {
case _: StructType =>
StructType(Array(field))
case ArrayType(elementType, containsNull) =>
ArrayType(wrapStructType(elementType, field), containsNull)
case MapType(keyType, valueType, valueContainsNull) =>
MapType(keyType, wrapStructType(valueType, field), valueContainsNull)
}
}
}
|
michalsenkyr/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/SelectedField.scala
|
Scala
|
apache-2.0
| 5,983
|
/*
* Copyright (C) 2013 FURYU CORPORATION
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package jp.furyu.play.c3p0
import play.api.db.DBPlugin
import play.api.db.DBApi
import com.mchange.v2.c3p0.{ DataSources, ComboPooledDataSource }
import play.api._
import play.api.libs._
import java.sql._
import javax.sql._
/**
* the implementation of DBPlugin for c3p0.
*
* @param app
*/
class C3p0Plugin(app: Application) extends DBPlugin {
lazy val dbConfig = app.configuration.getConfig("db").getOrElse(Configuration.empty)
private def dbURL(conn: Connection): String = {
val u = conn.getMetaData.getURL
conn.close()
u
}
// should be accessed in onStart first
private lazy val dbApi: DBApi = new C3p0Api(dbConfig, app.classloader)
/**
* Is this plugin enabled.
*/
override def enabled = true
/**
* Retrieves the underlying `DBApi` managing the data sources.
*/
def api: DBApi = dbApi
/**
* Reads the configuration and connects to every data source.
*/
override def onStart() {
// Try to connect to each, this should be the first access to dbApi
dbApi.datasources.map { ds =>
try {
ds._1.getConnection.close()
app.mode match {
case Mode.Test =>
case mode => play.api.Logger.info("database [" + ds._2 + "] connected at " + dbURL(ds._1.getConnection))
}
} catch {
case t: Throwable => {
throw dbConfig.reportError(ds._2 + ".url", "Cannot connect to database [" + ds._2 + "]", Some(t.getCause))
}
}
}
}
/**
* Closes all data sources.
*/
override def onStop() {
dbApi.datasources.foreach {
case (ds, _) => try {
dbApi.shutdownPool(ds)
} catch { case t: Throwable => }
}
val drivers = DriverManager.getDrivers()
while (drivers.hasMoreElements) {
val driver = drivers.nextElement
DriverManager.deregisterDriver(driver)
}
}
}
private class C3p0Api(configuration: Configuration, classloader: ClassLoader) extends DBApi {
private def error(db: String, message: String = "") = throw configuration.reportError(db, message)
private val dbNames = configuration.subKeys
private def register(driver: String, c: Configuration) {
try {
DriverManager.registerDriver(new play.utils.ProxyDriver(Class.forName(driver, true, classloader).newInstance.asInstanceOf[Driver]))
} catch {
case t: Throwable => throw c.reportError("driver", "Driver not found: [" + driver + "]", Some(t))
}
}
private def createDataSource(dbName: String, url: String, driver: String, conf: Configuration): DataSource = {
val datasource = new ComboPooledDataSource
// Try to load the driver
conf.getString("driver").map { driver =>
try {
DriverManager.registerDriver(new play.utils.ProxyDriver(Class.forName(driver, true, classloader).newInstance.asInstanceOf[Driver]))
} catch {
case t: Throwable => throw conf.reportError("driver", "Driver not found: [" + driver + "]", Some(t))
}
}
val PostgresFullUrl = "^postgres://([a-zA-Z0-9_]+):([^@]+)@([^/]+)/([^\\s]+)$".r
val MysqlFullUrl = "^mysql://([a-zA-Z0-9_]+):([^@]+)@([^/]+)/([^\\s]+)$".r
val MysqlCustomProperties = ".*\\?(.*)".r
val H2DefaultUrl = "^jdbc:h2:mem:.+".r
conf.getString("url") match {
case Some(PostgresFullUrl(username, password, host, dbname)) =>
datasource.setJdbcUrl("jdbc:postgresql://%s/%s".format(host, dbname))
datasource.setUser(username)
datasource.setPassword(password)
case Some(url @ MysqlFullUrl(username, password, host, dbname)) =>
val defaultProperties = """?useUnicode=yes&characterEncoding=UTF-8&connectionCollation=utf8_general_ci"""
val addDefaultPropertiesIfNeeded = MysqlCustomProperties.findFirstMatchIn(url).map(_ => "").getOrElse(defaultProperties)
datasource.setJdbcUrl("jdbc:mysql://%s/%s".format(host, dbname + addDefaultPropertiesIfNeeded))
datasource.setUser(username)
datasource.setPassword(password)
case Some(url @ H2DefaultUrl()) if !url.contains("DB_CLOSE_DELAY") =>
if (Play.maybeApplication.exists(_.mode == Mode.Dev)) {
datasource.setJdbcUrl(url + ";DB_CLOSE_DELAY=-1")
} else {
datasource.setJdbcUrl(url)
}
case Some(s: String) =>
datasource.setJdbcUrl(s)
case _ =>
throw conf.globalError("Missing url configuration for database [%s]".format(conf))
}
conf.getString("user").foreach(datasource.setUser(_))
conf.getString("pass").foreach(datasource.setPassword(_))
conf.getString("password").foreach(datasource.setPassword(_))
datasource.setDriverClass(driver)
// Pool configuration
conf.getInt("maxPoolSize").foreach(datasource.setMaxPoolSize(_))
conf.getInt("minPoolSize").foreach(datasource.setMinPoolSize(_))
conf.getInt("initialPoolSize").foreach(datasource.setInitialPoolSize(_))
conf.getInt("acquireIncrement").foreach(datasource.setAcquireIncrement(_))
conf.getInt("acquireRetryAttempts").foreach(datasource.setAcquireRetryAttempts(_))
conf.getMilliseconds("acquireRetryDelay").foreach(v => datasource.setAcquireRetryDelay(v.toInt)) // ms
conf.getMilliseconds("maxIdleTime").foreach(v => datasource.setMaxIdleTime((v / 1000L).toInt)) // s
conf.getMilliseconds("maxConnectionAge").foreach(v => datasource.setMaxConnectionAge((v / 1000L).toInt)) // s
conf.getMilliseconds("idleConnectionTestPeriod").foreach(v => datasource.setIdleConnectionTestPeriod((v / 1000L).toInt)) // s
conf.getString("preferredTestQuery").foreach(datasource.setPreferredTestQuery(_))
conf.getMilliseconds("checkoutTimeout").foreach(v => datasource.setCheckoutTimeout(v.toInt)) // ms
// Bind in JNDI
conf.getString("jndiName").map { name =>
JNDI.initialContext.rebind(name, datasource)
play.api.Logger.info("datasource [" + conf.getString("url").get + "] bound to JNDI as " + name)
}
datasource
}
val datasources: List[(DataSource, String)] = dbNames.map { dbName =>
val url = configuration.getString(dbName + ".url").getOrElse(error(dbName, "Missing configuration [db." + dbName + ".url]"))
val driver = configuration.getString(dbName + ".driver").getOrElse(error(dbName, "Missing configuration [db." + dbName + ".driver]"))
val extraConfig = configuration.getConfig(dbName).getOrElse(error(dbName, "Missing configuration [db." + dbName + "]"))
register(driver, extraConfig)
createDataSource(dbName, url, driver, extraConfig) -> dbName
}.toList
def shutdownPool(ds: DataSource) = {
ds match {
case ds: ComboPooledDataSource => DataSources.destroy(ds)
case _ => error(" - could not recognize DataSource, therefore unable to shutdown this pool")
}
}
  /**
   * Retrieves the data source registered under the given name.
   *
   * Connections obtained from the returned data source should be released by calling close().
   *
   * @param name the data source name
   * @return the JDBC data source
   * @throws an error if the required data source is not registered
   */
def getDataSource(name: String): DataSource = {
datasources.filter(_._2 == name).headOption.map(e => e._1).getOrElse(error(" - could not find datasource for " + name))
}
}
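// Editor-added illustration (not part of the original file): a standalone sketch of how the
// Heroku-style URL patterns in createDataSource decompose a connection string. The regex is
// restated here because the original is local to that method; the sample URL, user and
// database names are made up.
object UrlPatternSketch {
  private val MysqlFullUrl = "^mysql://([a-zA-Z0-9_]+):([^@]+)@([^/]+)/([^\\s]+)$".r

  def describe(url: String): Option[String] = url match {
    case MysqlFullUrl(user, _, host, dbname) => Some(s"jdbc:mysql://$host/$dbname (user=$user)")
    case _ => None
  }

  // describe("mysql://alice:secret@db.example.com/app_db")
  //   => Some("jdbc:mysql://db.example.com/app_db (user=alice)")
}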
|
Furyu/play-c3p0-plugin
|
src/main/scala/jp/furyu/play/c3p0/C3p0Plugin.scala
|
Scala
|
lgpl-2.1
| 8,008
|
package services
import scala.concurrent.Future
import javax.inject.Singleton
import play.api.Play.current
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.modules.reactivemongo.ReactiveMongoPlugin
import play.modules.reactivemongo.json.collection.JSONCollection
import reactivemongo.bson.BSONDocument
import reactivemongo.core.commands.{FindAndModify, Update}
/**
 * A counter implementation that uses an atomically incremented value in a document in Mongo.
 *
 * The benefit of this counter is that it generates a completely ordered sequence across all application server
 * nodes, starting with small numbers, which translates to shorter URL hashes. The drawback is that it is
 * relatively slow (compared to generating the hash locally).
 */
@Singleton
class MongoCounterService extends CounterService {
private def collection = ReactiveMongoPlugin.db.collection[JSONCollection]("counters")
def nextValue: Future[BigInt] = {
// Atomically increment the counter using Mongo's findAndModify command
val selector = BSONDocument("_id" -> "counter")
val modifier = BSONDocument("$inc" -> BSONDocument("count" -> 1L))
val command = FindAndModify(
collection.name,
selector,
Update(modifier, fetchNewObject = true),
upsert = true
)
collection.db.command(command).map { maybeCount =>
// Since we're upserting, the counter should never be null, but in case it is, just return 1
BigInt(maybeCount.flatMap(_.getAs[Long]("count")).getOrElse(1L))
}
}
}
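// Editor-added illustration (not part of the original file): a minimal sketch of how a caller
// might consume the counter, assuming CounterService exposes `nextValue: Future[BigInt]` as
// implemented above. The base-36 encoding is purely illustrative and not part of this service.
object MongoCounterSketch {
  // Relies on the execution context already imported at the top of this file.
  def nextShortId(counter: CounterService): Future[String] =
    counter.nextValue.map(n => n.toString(36))
}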
|
jriecken/shorty
|
app/services/MongoCounterService.scala
|
Scala
|
mit
| 1,563
|
package models
import anorm._
import scala.language.postfixOps
import java.sql.Connection
object PasswordDictionary {
def isNaivePassword(password: String)(implicit conn: Connection) =
SQL(
"select count(*) from password_dict where password = {password}"
).on(
'password -> password
).as(SqlParser.scalar[Long].single) != 0
}
|
ruimo/store2
|
app/models/PasswordDictionary.scala
|
Scala
|
apache-2.0
| 355
|
package debop4s.core.parallels
import java.util.concurrent.{Callable, ThreadLocalRandom}
import debop4s.core.{JAction1, JFunction}
import scala.collection.Seq
/**
 * Helper object for running tasks in parallel.
 *
 * @author 배성혁 sunghyouk.bae@gmail.com
 * @since 2013. 12. 11. 1:18 PM
*/
object Parallels {
private[this] lazy val random: ThreadLocalRandom = ThreadLocalRandom.current()
private[this] lazy val processCount: Int = Runtime.getRuntime.availableProcessors()
private[this] lazy val workerCount: Int = processCount * 2
def mapAsOrdered[@miniboxed T <: Ordered[T], @miniboxed V](items: Iterable[T], mapper: T => V): Seq[V] = {
items.par
.map(x => (x, mapper(x)))
.toList
.sortBy(_._1)
.map(_._2)
}
def mapAsParallel[@miniboxed T, @miniboxed V](items: Iterable[T], mapper: T => V): Iterable[V] =
items.par.map(item => mapper(item)).seq.toList
def run(count: Int)(r: Runnable): Unit = {
require(r != null)
run(Range(0, count))(r)
}
def run(start: Int, end: Int, step: Int = 1)(r: Runnable): Unit = {
require(r != null)
run(Range(start, end, step))(r)
}
def run(range: Seq[Int])(r: Runnable): Unit = {
require(range != null)
require(r != null)
range.par.foreach(_ => r.run())
}
def run(count: Int, action1: JAction1[java.lang.Integer]): Unit = {
runAction1(Range(0, count)) { i =>
action1.perform(i)
}
}
def run(start: Int, end: Int, action1: JAction1[java.lang.Integer]): Unit = {
runAction1(Range(start, end)) { i =>
action1.perform(i)
}
}
def runAction(count: Int)(block: => Unit): Unit = {
runAction(Range(0, count))(block)
}
def runAction(range: Seq[Int])(block: => Unit): Unit = {
require(range != null)
range.par.foreach(_ => block)
}
def runAction1(count: Int)(block: Int => Unit): Unit = {
runAction1(Range(0, count))(block)
}
def runAction1(count: Int, action1: JAction1[java.lang.Integer]): Unit = {
runAction1(Range(0, count)) { i =>
action1.perform(i)
}
}
def runAction1(start: Int, end: Int, action1: JAction1[java.lang.Integer]): Unit = {
runAction1(Range(start, end)) { i =>
action1.perform(i)
}
}
def runAction1(start: Int, end: Int, step: Int, action1: JAction1[java.lang.Integer]): Unit = {
runAction1(Range(start, end, step)) { i =>
action1.perform(i)
}
}
def runAction1(range: Seq[Int])(block: Int => Unit): Unit = {
require(range != null)
range.par.foreach {
i => block(i)
}
}
def runEach[@miniboxed V](elements: Iterable[V])(block: V => Unit): Unit = {
require(elements != null)
elements.par.foreach(block)
}
/**
   * Splits the collection into groups of the given size and processes each group.
*/
def runEach[@miniboxed V](elements: Iterable[V], size: Int = workerCount)(block: V => Unit) {
require(elements != null)
elements.grouped(size).foreach(_.foreach(block))
}
def call[@miniboxed V](count: Int)(callable: Callable[V]): Seq[V] = {
require(callable != null)
call[V](Range(0, count))(callable)
}
def call[@miniboxed V](start: Int, end: Int, step: Int)(callable: Callable[V]): Seq[V] = {
require(callable != null)
call[V](Range(start, end, step))(callable)
}
def call[@miniboxed V](range: Seq[Int])(callable: Callable[V]): Seq[V] = {
require(range != null)
range.par.map(_ => callable.call()).seq
}
def call[@miniboxed V](count: Int, func: JFunction[V]): Seq[V] = {
require(func != null)
call[V](Range(0, count), func)
}
def call[@miniboxed V](start: Int, end: Int, step: Int, func: JFunction[V]): Seq[V] = {
require(func != null)
call[V](Range(start, end, step), func)
}
def call[@miniboxed V](range: Seq[Int], func: JFunction[V]): Seq[V] = {
require(range != null)
range.par.map(_ => func.execute()).seq
}
def callFunction[@miniboxed V](count: Int)(func: () => V): Seq[V] = {
require(func != null)
callFunction(Range(0, count))(func)
}
def callFunction[@miniboxed V](start: Int, end: Int, step: Int)(func: () => V): Seq[V] = {
require(func != null)
callFunction(Range(start, end, step))(func)
}
def callFunction[@miniboxed V](range: Seq[Int])(func: () => V): Seq[V] = {
require(func != null)
range.par.map(_ => func()).seq
}
def callFunction1[@miniboxed V](count: Int)(func: Int => V): Seq[V] = {
require(func != null)
callFunction1(Range(0, count))(func).toSeq
}
def callFunction1[@miniboxed V](start: Int, end: Int, step: Int)(func: Int => V): Seq[V] = {
require(func != null)
callFunction1(Range(start, end, step))(func).toSeq
}
def callFunction1[@miniboxed V](range: Seq[Int])(func: Int => V): Seq[V] = {
require(func != null)
range.par.map(i => func(i)).seq.toSeq
}
def callEach[@miniboxed S, @miniboxed T](elements: Iterable[S])(func: S => T): Seq[T] = {
require(func != null)
elements.par.map(x => func(x)).seq.toSeq
}
def callEach[@miniboxed S, @miniboxed T](elements: Iterable[S], size: Int)(func: S => T): Seq[(S, T)] = {
elements.grouped(size).map(_.map(s => (s, func(s)))).flatten.toSeq
}
}
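// Editor-added illustration (not part of the original file): a minimal sketch of the
// Scala-facing helpers above. The values are arbitrary.
object ParallelsSketch {
  // Evaluates i * i for i = 0 until 10, using parallel collections under the hood.
  def squaresOfFirstTen(): Seq[Int] =
    Parallels.callFunction1(10)(i => i * i)

  // Applies the mapping to every element in parallel and returns the results.
  def shoutAll(words: Iterable[String]): Seq[String] =
    Parallels.callEach(words)(_.toUpperCase)
}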
|
debop/debop4s
|
debop4s-core/src/main/scala/debop4s/core/parallels/Parallels.scala
|
Scala
|
apache-2.0
| 5,206
|
package se.culvertsoft.mgen.visualdesigner.view
import java.awt.Color
import java.awt.Graphics
import java.awt.Graphics2D
import java.awt.event.ActionEvent
import java.awt.event.FocusAdapter
import java.awt.event.FocusEvent
import java.awt.event.KeyEvent
import java.awt.event.MouseAdapter
import java.awt.event.MouseEvent
import Graphics2DOps.RichGraphics2D
import javax.swing.AbstractAction
import javax.swing.JPanel
import javax.swing.JTextField
import se.culvertsoft.mgen.visualdesigner.HotKey
import se.culvertsoft.mgen.visualdesigner.HotKey.toStroke
object Labeled {
val BG_COLOR = new Color(240, 240, 240)
val DEFAULT_PADDING = 10
val DEFAULT_BORDER_COLOR = Bordered.DEFAULT_COLOR
val DEFAULT_TEXT_COLOR = Color.DARK_GRAY
val LABEL_PADDING = 10
val LABEL_HEIGHT = 22
val OVERFLOW_TEXT_ENDING = "..."
}
trait Labeled {
self: AbstractView =>
import Graphics2DOps._
import Labeled._
var labelTextWidth = 0
var overflowTextEndingWidth = 0
val labelPanel = new JPanel() {
override def paintComponent(g: Graphics) {
drawLabel(g.asInstanceOf[Graphics2D])
}
}
addOverlayOffsetBounds(labelPanel, 0, -labelHeight(), labelWidth(), labelHeight())
val acceptAction = new AbstractAction() {
override def actionPerformed(e: ActionEvent) {
acceptRename()
}
}
val cancelAction = new AbstractAction() {
override def actionPerformed(e: ActionEvent) {
cancelRename()
}
}
val renameTextField = new JTextField
renameTextField.getInputMap().put(HotKey(KeyEvent.VK_ENTER), "accept")
renameTextField.getInputMap().put(HotKey(KeyEvent.VK_ESCAPE), "cancel")
renameTextField.getActionMap().put("accept", acceptAction)
renameTextField.getActionMap().put("cancel", cancelAction)
val renameDblClickMouseListener = new MouseAdapter() {
override def mouseClicked(e: MouseEvent) {
if (e.getClickCount() == 2 && renameTextField.getParent() == null) {
initRename()
}
}
}
renameTextField.addFocusListener(new FocusAdapter() {
override def focusLost(e: FocusEvent) {
acceptRename()
}
})
def initRename() {
renameTextField.setText(self.entity.getName())
renameTextField.setSelectionStart(0)
renameTextField.setSelectionEnd(self.entity.getName().length())
renameTextField.setPreferredSize(labelPanel.getSize())
labelPanel.add(renameTextField)
labelPanel.validate()
labelPanel.repaint()
renameTextField.requestFocusInWindow()
}
def cancelRename() {
labelPanel.remove(renameTextField)
labelPanel.repaint()
}
def acceptRename() {
if (renameTextField.getParent() != null) {
labelPanel.remove(renameTextField)
labelPanel.repaint()
controller.rename(entity, renameTextField.getText())
}
}
labelPanel.addMouseListener(renameDblClickMouseListener)
def getLabelTextWidth(g: Graphics2D, label: String = labelText): Int = g.getFontMetrics().stringWidth(label)
def getLabelTextHeight(g: Graphics2D): Int = g.getFontMetrics().getHeight()
def labelHeight(): Int = LABEL_HEIGHT
  def labelPadding(): Int = LABEL_PADDING
def labelText(): String
def labelTextColor(): Color = { DEFAULT_TEXT_COLOR }
def labelBackgroundColor(): Color = { BG_COLOR }
def labelBorderColor(): Color = {
this match {
case _this: Selectable if (_this.isSelected()) => Selectable.DEFAULT_SELECTED_BORDER_COLOR
case _this: Selectable if (_this.isHovered()) => Selectable.DEFAULT_HOVERED_BORDER_COLOR
case _ => Labeled.DEFAULT_BORDER_COLOR
}
}
def labelWidth(s: Int = labelTextWidth): Int = {
return Math.min(s + DEFAULT_PADDING, self.width())
}
def drawLabel(g: Graphics2D) {
if (labelTextWidth != getLabelTextWidth(g, labelText)) {
labelTextWidth = getLabelTextWidth(g, labelText)
labelPanel.setSize(labelWidth(), labelPanel.getHeight())
overflowTextEndingWidth = getLabelTextWidth(g, OVERFLOW_TEXT_ENDING)
}
val width = labelWidth()
val height = labelHeight()
g.color(labelBackgroundColor()) {
g.fillRoundRect(0, 0, width - 1, height, 5, 5);
}
g.color(labelBorderColor()) {
g.drawRoundRect(0, 0, width - 1, height, 5, 5);
}
drawLabelText(g)
}
def drawLabelText(g: Graphics2D) {
val width = labelWidth()
val height = labelHeight()
g.color(labelTextColor()) {
if (labelTextWidth + DEFAULT_PADDING <= width) {
g.drawString(labelText, (width - labelTextWidth) / 2, height - (height - getLabelTextHeight(g)) / 2 - 2)
} else {
var textWidth = labelTextWidth
var text = labelText
var removedLetters = 1
while (textWidth > self.width() && removedLetters != labelText.size) {
text = labelText.substring(0, labelText.size - removedLetters) + OVERFLOW_TEXT_ENDING
textWidth = getLabelTextWidth(g, text)
removedLetters += 1
}
g.drawString(text, DEFAULT_PADDING / 2, height - (height - getLabelTextHeight(g)) / 2 - 2)
}
}
}
}
|
culvertsoft/mgen-visualdesigner
|
src/main/scala/se/culvertsoft/mgen/visualdesigner/view/Labeled.scala
|
Scala
|
gpl-2.0
| 5,267
|
package club.diybio.bank.domain
case class DNASeq(seq: String, features: Set[Feature] = Set.empty)
|
denigma/plasmid-bank
|
shared/src/main/scala/club/diybio/bank/domain/DNASeq.scala
|
Scala
|
mpl-2.0
| 100
|
package glasskey.model.fetchers
import glasskey.model.ProtectedResourceRequest
import glasskey.resource.OIDCTokenData
import glasskey.util.{JWK, JWTTokenDecoder}
/**
* Created by loande on 3/4/15.
*/
trait IDTokenRequestParameter extends AccessTokenFetcher[Option[OIDCTokenData]] {
def decoder : JWTTokenDecoder
def fetch(paramValue: String): Option[OIDCTokenData] = {
getOIDCToken(paramValue)
}
override def matches(request: ProtectedResourceRequest): Boolean = request.idToken.isDefined
override def fetch(request: ProtectedResourceRequest): Option[OIDCTokenData] =
request.idToken match {
case Some(idTokenStr) => Some(new OIDCTokenData(idTokenStr, decoder.verify(idTokenStr)))
case None => None
}
private def getOIDCToken(token: String): Option[OIDCTokenData] = Some(new OIDCTokenData(token, decoder.verify(token)))
}
object IDTokenRequestParameter {
class Default(jwksUri: String) extends IDTokenRequestParameter {
override val decoder = JWTTokenDecoder(jwksUri, JWK)
}
}
|
MonsantoCo/glass-key
|
glass-key-common/src/main/scala/glasskey/model/fetchers/IDTokenRequestParameter.scala
|
Scala
|
bsd-3-clause
| 1,032
|
package sample.clustered
import language.postfixOps
import akka.actor._
import com.typesafe.config.ConfigFactory
import sample.clustered.ClusteredMessages.BackendRegistration
import akka.actor.Terminated
import sample.clustered.ClusteredMessages.StartVideo
//#frontend
class ClientShowVideo extends Actor {
var backends = IndexedSeq.empty[ActorRef]
def receive = {
case BackendRegistration if !backends.contains(sender()) =>
context watch sender()
backends = backends :+ sender()
val consumerActorRef = video.Display.createActorRef(context.system)
sender() ! StartVideo(consumerActorRef.path.name)
case Terminated(a) =>
backends = backends.filterNot(_ == a)
}
}
//#frontend
object ClientShowVideo {
/**
*
* run:
* ./activator -Dconfig.resource=clustering.conf 'runMain sample.clustered.ClientShowVideo 2551'
*/
def main(args: Array[String]): Unit = {
// Override the configuration of the port when specified as program argument
val port = if (args.isEmpty) "0" else args(0)
val config = ConfigFactory.parseString(s"akka.remote.netty.tcp.port=$port").
withFallback(ConfigFactory.parseString("akka.cluster.roles = [frontend]")).
withFallback(ConfigFactory.load())
val system = ActorSystem("ClusterSystem", config)
val frontend = system.actorOf(Props[ClientShowVideo], name = "frontend")
}
}
|
retroryan/streams-workshop
|
src/examples/sample/clustered/ClientShowVideo.scala
|
Scala
|
cc0-1.0
| 1,398
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.nio.charset.StandardCharsets
import scala.util.Random
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
/**
* Test suite for functions in [[org.apache.spark.sql.functions]].
*/
class DataFrameFunctionsSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("array with column name") {
val df = Seq((0, 1)).toDF("a", "b")
val row = df.select(array("a", "b")).first()
val expectedType = ArrayType(IntegerType, containsNull = false)
assert(row.schema(0).dataType === expectedType)
assert(row.getAs[Seq[Int]](0) === Seq(0, 1))
}
test("array with column expression") {
val df = Seq((0, 1)).toDF("a", "b")
val row = df.select(array(col("a"), col("b") + col("b"))).first()
val expectedType = ArrayType(IntegerType, containsNull = false)
assert(row.schema(0).dataType === expectedType)
assert(row.getSeq[Int](0) === Seq(0, 2))
}
test("map with column expressions") {
val df = Seq(1 -> "a").toDF("a", "b")
val row = df.select(map($"a" + 1, $"b")).first()
val expectedType = MapType(IntegerType, StringType, valueContainsNull = true)
assert(row.schema(0).dataType === expectedType)
assert(row.getMap[Int, String](0) === Map(2 -> "a"))
}
test("map with arrays") {
val df1 = Seq((Seq(1, 2), Seq("a", "b"))).toDF("k", "v")
val expectedType = MapType(IntegerType, StringType, valueContainsNull = true)
val row = df1.select(map_from_arrays($"k", $"v")).first()
assert(row.schema(0).dataType === expectedType)
assert(row.getMap[Int, String](0) === Map(1 -> "a", 2 -> "b"))
checkAnswer(df1.select(map_from_arrays($"k", $"v")), Seq(Row(Map(1 -> "a", 2 -> "b"))))
val df2 = Seq((Seq(1, 2), Seq(null, "b"))).toDF("k", "v")
checkAnswer(df2.select(map_from_arrays($"k", $"v")), Seq(Row(Map(1 -> null, 2 -> "b"))))
val df3 = Seq((null, null)).toDF("k", "v")
checkAnswer(df3.select(map_from_arrays($"k", $"v")), Seq(Row(null)))
val df4 = Seq((1, "a")).toDF("k", "v")
intercept[AnalysisException] {
df4.select(map_from_arrays($"k", $"v"))
}
val df5 = Seq((Seq("a", null), Seq(1, 2))).toDF("k", "v")
intercept[RuntimeException] {
df5.select(map_from_arrays($"k", $"v")).collect
}
val df6 = Seq((Seq(1, 2), Seq("a"))).toDF("k", "v")
intercept[RuntimeException] {
df6.select(map_from_arrays($"k", $"v")).collect
}
}
test("struct with column name") {
val df = Seq((1, "str")).toDF("a", "b")
val row = df.select(struct("a", "b")).first()
val expectedType = StructType(Seq(
StructField("a", IntegerType, nullable = false),
StructField("b", StringType)
))
assert(row.schema(0).dataType === expectedType)
assert(row.getAs[Row](0) === Row(1, "str"))
}
test("struct with column expression") {
val df = Seq((1, "str")).toDF("a", "b")
val row = df.select(struct((col("a") * 2).as("c"), col("b"))).first()
val expectedType = StructType(Seq(
StructField("c", IntegerType, nullable = false),
StructField("b", StringType)
))
assert(row.schema(0).dataType === expectedType)
assert(row.getAs[Row](0) === Row(2, "str"))
}
test("struct with column expression to be automatically named") {
val df = Seq((1, "str")).toDF("a", "b")
val result = df.select(struct((col("a") * 2), col("b")))
val expectedType = StructType(Seq(
StructField("col1", IntegerType, nullable = false),
StructField("b", StringType)
))
assert(result.first.schema(0).dataType === expectedType)
checkAnswer(result, Row(Row(2, "str")))
}
test("struct with literal columns") {
val df = Seq((1, "str1"), (2, "str2")).toDF("a", "b")
val result = df.select(struct((col("a") * 2), lit(5.0)))
val expectedType = StructType(Seq(
StructField("col1", IntegerType, nullable = false),
StructField("col2", DoubleType, nullable = false)
))
assert(result.first.schema(0).dataType === expectedType)
checkAnswer(result, Seq(Row(Row(2, 5.0)), Row(Row(4, 5.0))))
}
test("struct with all literal columns") {
val df = Seq((1, "str1"), (2, "str2")).toDF("a", "b")
val result = df.select(struct(lit("v"), lit(5.0)))
val expectedType = StructType(Seq(
StructField("col1", StringType, nullable = false),
StructField("col2", DoubleType, nullable = false)
))
assert(result.first.schema(0).dataType === expectedType)
checkAnswer(result, Seq(Row(Row("v", 5.0)), Row(Row("v", 5.0))))
}
test("constant functions") {
checkAnswer(
sql("SELECT E()"),
Row(scala.math.E)
)
checkAnswer(
sql("SELECT PI()"),
Row(scala.math.Pi)
)
}
test("bitwiseNOT") {
checkAnswer(
testData2.select(bitwiseNOT($"a")),
testData2.collect().toSeq.map(r => Row(~r.getInt(0))))
}
test("bin") {
val df = Seq[(Integer, Integer)]((12, null)).toDF("a", "b")
checkAnswer(
df.select(bin("a"), bin("b")),
Row("1100", null))
checkAnswer(
df.selectExpr("bin(a)", "bin(b)"),
Row("1100", null))
}
test("if function") {
val df = Seq((1, 2)).toDF("a", "b")
checkAnswer(
df.selectExpr("if(a = 1, 'one', 'not_one')", "if(b = 1, 'one', 'not_one')"),
Row("one", "not_one"))
}
test("misc md5 function") {
val df = Seq(("ABC", Array[Byte](1, 2, 3, 4, 5, 6))).toDF("a", "b")
checkAnswer(
df.select(md5($"a"), md5($"b")),
Row("902fbdd2b1df0c4f70b4a5d23525e932", "6ac1e56bc78f031059be7be854522c4c"))
checkAnswer(
df.selectExpr("md5(a)", "md5(b)"),
Row("902fbdd2b1df0c4f70b4a5d23525e932", "6ac1e56bc78f031059be7be854522c4c"))
}
test("misc sha1 function") {
val df = Seq(("ABC", "ABC".getBytes(StandardCharsets.UTF_8))).toDF("a", "b")
checkAnswer(
df.select(sha1($"a"), sha1($"b")),
Row("3c01bdbb26f358bab27f267924aa2c9a03fcfdb8", "3c01bdbb26f358bab27f267924aa2c9a03fcfdb8"))
val dfEmpty = Seq(("", "".getBytes(StandardCharsets.UTF_8))).toDF("a", "b")
checkAnswer(
dfEmpty.selectExpr("sha1(a)", "sha1(b)"),
Row("da39a3ee5e6b4b0d3255bfef95601890afd80709", "da39a3ee5e6b4b0d3255bfef95601890afd80709"))
}
test("misc sha2 function") {
val df = Seq(("ABC", Array[Byte](1, 2, 3, 4, 5, 6))).toDF("a", "b")
checkAnswer(
df.select(sha2($"a", 256), sha2($"b", 256)),
Row("b5d4045c3f466fa91fe2cc6abe79232a1a57cdf104f7a26e716e0a1e2789df78",
"7192385c3c0605de55bb9476ce1d90748190ecb32a8eed7f5207b30cf6a1fe89"))
checkAnswer(
df.selectExpr("sha2(a, 256)", "sha2(b, 256)"),
Row("b5d4045c3f466fa91fe2cc6abe79232a1a57cdf104f7a26e716e0a1e2789df78",
"7192385c3c0605de55bb9476ce1d90748190ecb32a8eed7f5207b30cf6a1fe89"))
intercept[IllegalArgumentException] {
df.select(sha2($"a", 1024))
}
}
test("misc crc32 function") {
val df = Seq(("ABC", Array[Byte](1, 2, 3, 4, 5, 6))).toDF("a", "b")
checkAnswer(
df.select(crc32($"a"), crc32($"b")),
Row(2743272264L, 2180413220L))
checkAnswer(
df.selectExpr("crc32(a)", "crc32(b)"),
Row(2743272264L, 2180413220L))
}
test("string function find_in_set") {
val df = Seq(("abc,b,ab,c,def", "abc,b,ab,c,def")).toDF("a", "b")
checkAnswer(
df.selectExpr("find_in_set('ab', a)", "find_in_set('x', b)"),
Row(3, 0))
}
test("conditional function: least") {
checkAnswer(
testData2.select(least(lit(-1), lit(0), col("a"), col("b"))).limit(1),
Row(-1)
)
checkAnswer(
sql("SELECT least(a, 2) as l from testData2 order by l"),
Seq(Row(1), Row(1), Row(2), Row(2), Row(2), Row(2))
)
}
test("conditional function: greatest") {
checkAnswer(
testData2.select(greatest(lit(2), lit(3), col("a"), col("b"))).limit(1),
Row(3)
)
checkAnswer(
sql("SELECT greatest(a, 2) as g from testData2 order by g"),
Seq(Row(2), Row(2), Row(2), Row(2), Row(3), Row(3))
)
}
test("pmod") {
val intData = Seq((7, 3), (-7, 3)).toDF("a", "b")
checkAnswer(
intData.select(pmod('a, 'b)),
Seq(Row(1), Row(2))
)
checkAnswer(
intData.select(pmod('a, lit(3))),
Seq(Row(1), Row(2))
)
checkAnswer(
intData.select(pmod(lit(-7), 'b)),
Seq(Row(2), Row(2))
)
checkAnswer(
intData.selectExpr("pmod(a, b)"),
Seq(Row(1), Row(2))
)
checkAnswer(
intData.selectExpr("pmod(a, 3)"),
Seq(Row(1), Row(2))
)
checkAnswer(
intData.selectExpr("pmod(-7, b)"),
Seq(Row(2), Row(2))
)
val doubleData = Seq((7.2, 4.1)).toDF("a", "b")
checkAnswer(
doubleData.select(pmod('a, 'b)),
Seq(Row(3.1000000000000005)) // same as hive
)
checkAnswer(
doubleData.select(pmod(lit(2), lit(Int.MaxValue))),
Seq(Row(2))
)
}
test("mask functions") {
val df = Seq("TestString-123", "", null).toDF("a")
checkAnswer(df.select(mask($"a")), Seq(Row("XxxxXxxxxx-nnn"), Row(""), Row(null)))
checkAnswer(df.select(mask_first_n($"a", 4)), Seq(Row("XxxxString-123"), Row(""), Row(null)))
checkAnswer(df.select(mask_last_n($"a", 4)), Seq(Row("TestString-nnn"), Row(""), Row(null)))
checkAnswer(df.select(mask_show_first_n($"a", 4)),
Seq(Row("TestXxxxxx-nnn"), Row(""), Row(null)))
checkAnswer(df.select(mask_show_last_n($"a", 4)),
Seq(Row("XxxxXxxxxx-123"), Row(""), Row(null)))
checkAnswer(df.select(mask_hash($"a")),
Seq(Row("dd78d68ad1b23bde126812482dd70ac6"),
Row("d41d8cd98f00b204e9800998ecf8427e"),
Row(null)))
checkAnswer(df.select(mask($"a", "U", "l", "#")),
Seq(Row("UlllUlllll-###"), Row(""), Row(null)))
checkAnswer(df.select(mask_first_n($"a", 4, "U", "l", "#")),
Seq(Row("UlllString-123"), Row(""), Row(null)))
checkAnswer(df.select(mask_last_n($"a", 4, "U", "l", "#")),
Seq(Row("TestString-###"), Row(""), Row(null)))
checkAnswer(df.select(mask_show_first_n($"a", 4, "U", "l", "#")),
Seq(Row("TestUlllll-###"), Row(""), Row(null)))
checkAnswer(df.select(mask_show_last_n($"a", 4, "U", "l", "#")),
Seq(Row("UlllUlllll-123"), Row(""), Row(null)))
checkAnswer(
df.selectExpr("mask(a)", "mask(a, 'U')", "mask(a, 'U', 'l')", "mask(a, 'U', 'l', '#')"),
Seq(Row("XxxxXxxxxx-nnn", "UxxxUxxxxx-nnn", "UlllUlllll-nnn", "UlllUlllll-###"),
Row("", "", "", ""),
Row(null, null, null, null)))
checkAnswer(sql("select mask(null)"), Row(null))
checkAnswer(sql("select mask('AAaa11', null, null, null)"), Row("XXxxnn"))
intercept[AnalysisException] {
checkAnswer(df.selectExpr("mask(a, a)"), Seq(Row("XxxxXxxxxx-nnn"), Row(""), Row(null)))
}
checkAnswer(
df.selectExpr(
"mask_first_n(a)",
"mask_first_n(a, 6)",
"mask_first_n(a, 6, 'U')",
"mask_first_n(a, 6, 'U', 'l')",
"mask_first_n(a, 6, 'U', 'l', '#')"),
Seq(Row("XxxxString-123", "XxxxXxring-123", "UxxxUxring-123", "UlllUlring-123",
"UlllUlring-123"),
Row("", "", "", "", ""),
Row(null, null, null, null, null)))
checkAnswer(sql("select mask_first_n(null)"), Row(null))
checkAnswer(sql("select mask_first_n('A1aA1a', null, null, null, null)"), Row("XnxX1a"))
intercept[AnalysisException] {
checkAnswer(spark.range(1).selectExpr("mask_first_n('A1aA1a', id)"), Row("XnxX1a"))
}
checkAnswer(
df.selectExpr(
"mask_last_n(a)",
"mask_last_n(a, 6)",
"mask_last_n(a, 6, 'U')",
"mask_last_n(a, 6, 'U', 'l')",
"mask_last_n(a, 6, 'U', 'l', '#')"),
Seq(Row("TestString-nnn", "TestStrixx-nnn", "TestStrixx-nnn", "TestStrill-nnn",
"TestStrill-###"),
Row("", "", "", "", ""),
Row(null, null, null, null, null)))
checkAnswer(sql("select mask_last_n(null)"), Row(null))
checkAnswer(sql("select mask_last_n('A1aA1a', null, null, null, null)"), Row("A1xXnx"))
intercept[AnalysisException] {
checkAnswer(spark.range(1).selectExpr("mask_last_n('A1aA1a', id)"), Row("A1xXnx"))
}
checkAnswer(
df.selectExpr(
"mask_show_first_n(a)",
"mask_show_first_n(a, 6)",
"mask_show_first_n(a, 6, 'U')",
"mask_show_first_n(a, 6, 'U', 'l')",
"mask_show_first_n(a, 6, 'U', 'l', '#')"),
Seq(Row("TestXxxxxx-nnn", "TestStxxxx-nnn", "TestStxxxx-nnn", "TestStllll-nnn",
"TestStllll-###"),
Row("", "", "", "", ""),
Row(null, null, null, null, null)))
checkAnswer(sql("select mask_show_first_n(null)"), Row(null))
checkAnswer(sql("select mask_show_first_n('A1aA1a', null, null, null, null)"), Row("A1aAnx"))
intercept[AnalysisException] {
checkAnswer(spark.range(1).selectExpr("mask_show_first_n('A1aA1a', id)"), Row("A1aAnx"))
}
checkAnswer(
df.selectExpr(
"mask_show_last_n(a)",
"mask_show_last_n(a, 6)",
"mask_show_last_n(a, 6, 'U')",
"mask_show_last_n(a, 6, 'U', 'l')",
"mask_show_last_n(a, 6, 'U', 'l', '#')"),
Seq(Row("XxxxXxxxxx-123", "XxxxXxxxng-123", "UxxxUxxxng-123", "UlllUlllng-123",
"UlllUlllng-123"),
Row("", "", "", "", ""),
Row(null, null, null, null, null)))
checkAnswer(sql("select mask_show_last_n(null)"), Row(null))
checkAnswer(sql("select mask_show_last_n('A1aA1a', null, null, null, null)"), Row("XnaA1a"))
intercept[AnalysisException] {
checkAnswer(spark.range(1).selectExpr("mask_show_last_n('A1aA1a', id)"), Row("XnaA1a"))
}
checkAnswer(sql("select mask_hash(null)"), Row(null))
}
test("sort_array/array_sort functions") {
val df = Seq(
(Array[Int](2, 1, 3), Array("b", "c", "a")),
(Array.empty[Int], Array.empty[String]),
(null, null)
).toDF("a", "b")
checkAnswer(
df.select(sort_array($"a"), sort_array($"b")),
Seq(
Row(Seq(1, 2, 3), Seq("a", "b", "c")),
Row(Seq.empty[Int], Seq.empty[String]),
Row(null, null))
)
checkAnswer(
df.select(sort_array($"a", false), sort_array($"b", false)),
Seq(
Row(Seq(3, 2, 1), Seq("c", "b", "a")),
Row(Seq.empty[Int], Seq.empty[String]),
Row(null, null))
)
checkAnswer(
df.selectExpr("sort_array(a)", "sort_array(b)"),
Seq(
Row(Seq(1, 2, 3), Seq("a", "b", "c")),
Row(Seq.empty[Int], Seq.empty[String]),
Row(null, null))
)
checkAnswer(
df.selectExpr("sort_array(a, true)", "sort_array(b, false)"),
Seq(
Row(Seq(1, 2, 3), Seq("c", "b", "a")),
Row(Seq.empty[Int], Seq.empty[String]),
Row(null, null))
)
val df2 = Seq((Array[Array[Int]](Array(2), Array(1), Array(2, 4), null), "x")).toDF("a", "b")
checkAnswer(
df2.selectExpr("sort_array(a, true)", "sort_array(a, false)"),
Seq(
Row(
Seq[Seq[Int]](null, Seq(1), Seq(2), Seq(2, 4)),
Seq[Seq[Int]](Seq(2, 4), Seq(2), Seq(1), null)))
)
val df3 = Seq(("xxx", "x")).toDF("a", "b")
assert(intercept[AnalysisException] {
df3.selectExpr("sort_array(a)").collect()
}.getMessage().contains("only supports array input"))
checkAnswer(
df.select(array_sort($"a"), array_sort($"b")),
Seq(
Row(Seq(1, 2, 3), Seq("a", "b", "c")),
Row(Seq.empty[Int], Seq.empty[String]),
Row(null, null))
)
checkAnswer(
df.selectExpr("array_sort(a)", "array_sort(b)"),
Seq(
Row(Seq(1, 2, 3), Seq("a", "b", "c")),
Row(Seq.empty[Int], Seq.empty[String]),
Row(null, null))
)
checkAnswer(
df2.selectExpr("array_sort(a)"),
Seq(Row(Seq[Seq[Int]](Seq(1), Seq(2), Seq(2, 4), null)))
)
assert(intercept[AnalysisException] {
df3.selectExpr("array_sort(a)").collect()
}.getMessage().contains("only supports array input"))
}
test("array size function") {
val df = Seq(
(Seq[Int](1, 2), "x"),
(Seq[Int](), "y"),
(Seq[Int](1, 2, 3), "z"),
(null, "empty")
).toDF("a", "b")
checkAnswer(
df.select(size($"a")),
Seq(Row(2), Row(0), Row(3), Row(-1))
)
checkAnswer(
df.selectExpr("size(a)"),
Seq(Row(2), Row(0), Row(3), Row(-1))
)
checkAnswer(
df.selectExpr("cardinality(a)"),
Seq(Row(2L), Row(0L), Row(3L), Row(-1L))
)
}
test("dataframe arrays_zip function") {
val df1 = Seq((Seq(9001, 9002, 9003), Seq(4, 5, 6))).toDF("val1", "val2")
val df2 = Seq((Seq("a", "b"), Seq(true, false), Seq(10, 11))).toDF("val1", "val2", "val3")
val df3 = Seq((Seq("a", "b"), Seq(4, 5, 6))).toDF("val1", "val2")
val df4 = Seq((Seq("a", "b", null), Seq(4L))).toDF("val1", "val2")
val df5 = Seq((Seq(-1), Seq(null), Seq(), Seq(null, null))).toDF("val1", "val2", "val3", "val4")
val df6 = Seq((Seq(192.toByte, 256.toByte), Seq(1.1), Seq(), Seq(null, null)))
.toDF("v1", "v2", "v3", "v4")
val df7 = Seq((Seq(Seq(1, 2, 3), Seq(4, 5)), Seq(1.1, 2.2))).toDF("v1", "v2")
val df8 = Seq((Seq(Array[Byte](1.toByte, 5.toByte)), Seq(null))).toDF("v1", "v2")
val expectedValue1 = Row(Seq(Row(9001, 4), Row(9002, 5), Row(9003, 6)))
checkAnswer(df1.select(arrays_zip($"val1", $"val2")), expectedValue1)
checkAnswer(df1.selectExpr("arrays_zip(val1, val2)"), expectedValue1)
val expectedValue2 = Row(Seq(Row("a", true, 10), Row("b", false, 11)))
checkAnswer(df2.select(arrays_zip($"val1", $"val2", $"val3")), expectedValue2)
checkAnswer(df2.selectExpr("arrays_zip(val1, val2, val3)"), expectedValue2)
val expectedValue3 = Row(Seq(Row("a", 4), Row("b", 5), Row(null, 6)))
checkAnswer(df3.select(arrays_zip($"val1", $"val2")), expectedValue3)
checkAnswer(df3.selectExpr("arrays_zip(val1, val2)"), expectedValue3)
val expectedValue4 = Row(Seq(Row("a", 4L), Row("b", null), Row(null, null)))
checkAnswer(df4.select(arrays_zip($"val1", $"val2")), expectedValue4)
checkAnswer(df4.selectExpr("arrays_zip(val1, val2)"), expectedValue4)
val expectedValue5 = Row(Seq(Row(-1, null, null, null), Row(null, null, null, null)))
checkAnswer(df5.select(arrays_zip($"val1", $"val2", $"val3", $"val4")), expectedValue5)
checkAnswer(df5.selectExpr("arrays_zip(val1, val2, val3, val4)"), expectedValue5)
val expectedValue6 = Row(Seq(
Row(192.toByte, 1.1, null, null), Row(256.toByte, null, null, null)))
checkAnswer(df6.select(arrays_zip($"v1", $"v2", $"v3", $"v4")), expectedValue6)
checkAnswer(df6.selectExpr("arrays_zip(v1, v2, v3, v4)"), expectedValue6)
val expectedValue7 = Row(Seq(
Row(Seq(1, 2, 3), 1.1), Row(Seq(4, 5), 2.2)))
checkAnswer(df7.select(arrays_zip($"v1", $"v2")), expectedValue7)
checkAnswer(df7.selectExpr("arrays_zip(v1, v2)"), expectedValue7)
val expectedValue8 = Row(Seq(
Row(Array[Byte](1.toByte, 5.toByte), null)))
checkAnswer(df8.select(arrays_zip($"v1", $"v2")), expectedValue8)
checkAnswer(df8.selectExpr("arrays_zip(v1, v2)"), expectedValue8)
}
test("map size function") {
val df = Seq(
(Map[Int, Int](1 -> 1, 2 -> 2), "x"),
(Map[Int, Int](), "y"),
(Map[Int, Int](1 -> 1, 2 -> 2, 3 -> 3), "z"),
(null, "empty")
).toDF("a", "b")
checkAnswer(
df.select(size($"a")),
Seq(Row(2), Row(0), Row(3), Row(-1))
)
checkAnswer(
df.selectExpr("size(a)"),
Seq(Row(2), Row(0), Row(3), Row(-1))
)
}
test("map_keys/map_values function") {
val df = Seq(
(Map[Int, Int](1 -> 100, 2 -> 200), "x"),
(Map[Int, Int](), "y"),
(Map[Int, Int](1 -> 100, 2 -> 200, 3 -> 300), "z")
).toDF("a", "b")
checkAnswer(
df.selectExpr("map_keys(a)"),
Seq(Row(Seq(1, 2)), Row(Seq.empty), Row(Seq(1, 2, 3)))
)
checkAnswer(
df.selectExpr("map_values(a)"),
Seq(Row(Seq(100, 200)), Row(Seq.empty), Row(Seq(100, 200, 300)))
)
}
test("map_entries") {
val dummyFilter = (c: Column) => c.isNotNull || c.isNull
// Primitive-type elements
val idf = Seq(
Map[Int, Int](1 -> 100, 2 -> 200, 3 -> 300),
Map[Int, Int](),
null
).toDF("m")
val iExpected = Seq(
Row(Seq(Row(1, 100), Row(2, 200), Row(3, 300))),
Row(Seq.empty),
Row(null)
)
checkAnswer(idf.select(map_entries('m)), iExpected)
checkAnswer(idf.selectExpr("map_entries(m)"), iExpected)
checkAnswer(idf.filter(dummyFilter('m)).select(map_entries('m)), iExpected)
checkAnswer(
spark.range(1).selectExpr("map_entries(map(1, null, 2, null))"),
Seq(Row(Seq(Row(1, null), Row(2, null)))))
checkAnswer(
spark.range(1).filter(dummyFilter('id)).selectExpr("map_entries(map(1, null, 2, null))"),
Seq(Row(Seq(Row(1, null), Row(2, null)))))
// Non-primitive-type elements
val sdf = Seq(
Map[String, String]("a" -> "f", "b" -> "o", "c" -> "o"),
Map[String, String]("a" -> null, "b" -> null),
Map[String, String](),
null
).toDF("m")
val sExpected = Seq(
Row(Seq(Row("a", "f"), Row("b", "o"), Row("c", "o"))),
Row(Seq(Row("a", null), Row("b", null))),
Row(Seq.empty),
Row(null)
)
checkAnswer(sdf.select(map_entries('m)), sExpected)
checkAnswer(sdf.selectExpr("map_entries(m)"), sExpected)
checkAnswer(sdf.filter(dummyFilter('m)).select(map_entries('m)), sExpected)
}
test("array contains function") {
val df = Seq(
(Seq[Int](1, 2), "x"),
(Seq[Int](), "x")
).toDF("a", "b")
// Simple test cases
checkAnswer(
df.select(array_contains(df("a"), 1)),
Seq(Row(true), Row(false))
)
checkAnswer(
df.selectExpr("array_contains(a, 1)"),
Seq(Row(true), Row(false))
)
    // In Hive, this errors because null has no type information
intercept[AnalysisException] {
df.select(array_contains(df("a"), null))
}
intercept[AnalysisException] {
df.selectExpr("array_contains(a, null)")
}
intercept[AnalysisException] {
df.selectExpr("array_contains(null, 1)")
}
checkAnswer(
df.selectExpr("array_contains(array(array(1), null)[0], 1)"),
Seq(Row(true), Row(true))
)
checkAnswer(
df.selectExpr("array_contains(array(1, null), array(1, null)[0])"),
Seq(Row(true), Row(true))
)
}
test("arrays_overlap function") {
val df = Seq(
(Seq[Option[Int]](Some(1), Some(2)), Seq[Option[Int]](Some(-1), Some(10))),
(Seq[Option[Int]](Some(1), Some(2)), Seq[Option[Int]](Some(-1), None)),
(Seq[Option[Int]](Some(3), Some(2)), Seq[Option[Int]](Some(1), Some(2)))
).toDF("a", "b")
val answer = Seq(Row(false), Row(null), Row(true))
checkAnswer(df.select(arrays_overlap(df("a"), df("b"))), answer)
checkAnswer(df.selectExpr("arrays_overlap(a, b)"), answer)
checkAnswer(
Seq((Seq(1, 2, 3), Seq(2.0, 2.5))).toDF("a", "b").selectExpr("arrays_overlap(a, b)"),
Row(true))
intercept[AnalysisException] {
sql("select arrays_overlap(array(1, 2, 3), array('a', 'b', 'c'))")
}
intercept[AnalysisException] {
sql("select arrays_overlap(null, null)")
}
intercept[AnalysisException] {
sql("select arrays_overlap(map(1, 2), map(3, 4))")
}
}
test("slice function") {
val df = Seq(
Seq(1, 2, 3),
Seq(4, 5)
).toDF("x")
val answer = Seq(Row(Seq(2, 3)), Row(Seq(5)))
checkAnswer(df.select(slice(df("x"), 2, 2)), answer)
checkAnswer(df.selectExpr("slice(x, 2, 2)"), answer)
val answerNegative = Seq(Row(Seq(3)), Row(Seq(5)))
checkAnswer(df.select(slice(df("x"), -1, 1)), answerNegative)
checkAnswer(df.selectExpr("slice(x, -1, 1)"), answerNegative)
}
test("array_join function") {
val df = Seq(
(Seq[String]("a", "b"), ","),
(Seq[String]("a", null, "b"), ","),
(Seq.empty[String], ",")
).toDF("x", "delimiter")
checkAnswer(
df.select(array_join(df("x"), ";")),
Seq(Row("a;b"), Row("a;b"), Row(""))
)
checkAnswer(
df.select(array_join(df("x"), ";", "NULL")),
Seq(Row("a;b"), Row("a;NULL;b"), Row(""))
)
checkAnswer(
df.selectExpr("array_join(x, delimiter)"),
Seq(Row("a,b"), Row("a,b"), Row("")))
checkAnswer(
df.selectExpr("array_join(x, delimiter, 'NULL')"),
Seq(Row("a,b"), Row("a,NULL,b"), Row("")))
}
test("array_min function") {
val df = Seq(
Seq[Option[Int]](Some(1), Some(3), Some(2)),
Seq.empty[Option[Int]],
Seq[Option[Int]](None),
Seq[Option[Int]](None, Some(1), Some(-100))
).toDF("a")
val answer = Seq(Row(1), Row(null), Row(null), Row(-100))
checkAnswer(df.select(array_min(df("a"))), answer)
checkAnswer(df.selectExpr("array_min(a)"), answer)
}
test("array_max function") {
val df = Seq(
Seq[Option[Int]](Some(1), Some(3), Some(2)),
Seq.empty[Option[Int]],
Seq[Option[Int]](None),
Seq[Option[Int]](None, Some(1), Some(-100))
).toDF("a")
val answer = Seq(Row(3), Row(null), Row(null), Row(1))
checkAnswer(df.select(array_max(df("a"))), answer)
checkAnswer(df.selectExpr("array_max(a)"), answer)
}
test("reverse function") {
val dummyFilter = (c: Column) => c.isNull || c.isNotNull // switch codegen on
// String test cases
val oneRowDF = Seq(("Spark", 3215)).toDF("s", "i")
checkAnswer(
oneRowDF.select(reverse('s)),
Seq(Row("krapS"))
)
checkAnswer(
oneRowDF.selectExpr("reverse(s)"),
Seq(Row("krapS"))
)
checkAnswer(
oneRowDF.select(reverse('i)),
Seq(Row("5123"))
)
checkAnswer(
oneRowDF.selectExpr("reverse(i)"),
Seq(Row("5123"))
)
checkAnswer(
oneRowDF.selectExpr("reverse(null)"),
Seq(Row(null))
)
// Array test cases (primitive-type elements)
val idf = Seq(
Seq(1, 9, 8, 7),
Seq(5, 8, 9, 7, 2),
Seq.empty,
null
).toDF("i")
checkAnswer(
idf.select(reverse('i)),
Seq(Row(Seq(7, 8, 9, 1)), Row(Seq(2, 7, 9, 8, 5)), Row(Seq.empty), Row(null))
)
checkAnswer(
idf.filter(dummyFilter('i)).select(reverse('i)),
Seq(Row(Seq(7, 8, 9, 1)), Row(Seq(2, 7, 9, 8, 5)), Row(Seq.empty), Row(null))
)
checkAnswer(
idf.selectExpr("reverse(i)"),
Seq(Row(Seq(7, 8, 9, 1)), Row(Seq(2, 7, 9, 8, 5)), Row(Seq.empty), Row(null))
)
checkAnswer(
oneRowDF.selectExpr("reverse(array(1, null, 2, null))"),
Seq(Row(Seq(null, 2, null, 1)))
)
checkAnswer(
oneRowDF.filter(dummyFilter('i)).selectExpr("reverse(array(1, null, 2, null))"),
Seq(Row(Seq(null, 2, null, 1)))
)
// Array test cases (non-primitive-type elements)
val sdf = Seq(
Seq("c", "a", "b"),
Seq("b", null, "c", null),
Seq.empty,
null
).toDF("s")
checkAnswer(
sdf.select(reverse('s)),
Seq(Row(Seq("b", "a", "c")), Row(Seq(null, "c", null, "b")), Row(Seq.empty), Row(null))
)
checkAnswer(
sdf.filter(dummyFilter('s)).select(reverse('s)),
Seq(Row(Seq("b", "a", "c")), Row(Seq(null, "c", null, "b")), Row(Seq.empty), Row(null))
)
checkAnswer(
sdf.selectExpr("reverse(s)"),
Seq(Row(Seq("b", "a", "c")), Row(Seq(null, "c", null, "b")), Row(Seq.empty), Row(null))
)
checkAnswer(
oneRowDF.selectExpr("reverse(array(array(1, 2), array(3, 4)))"),
Seq(Row(Seq(Seq(3, 4), Seq(1, 2))))
)
checkAnswer(
oneRowDF.filter(dummyFilter('s)).selectExpr("reverse(array(array(1, 2), array(3, 4)))"),
Seq(Row(Seq(Seq(3, 4), Seq(1, 2))))
)
// Error test cases
intercept[AnalysisException] {
oneRowDF.selectExpr("reverse(struct(1, 'a'))")
}
intercept[AnalysisException] {
oneRowDF.selectExpr("reverse(map(1, 'a'))")
}
}
test("array position function") {
val df = Seq(
(Seq[Int](1, 2), "x"),
(Seq[Int](), "x")
).toDF("a", "b")
checkAnswer(
df.select(array_position(df("a"), 1)),
Seq(Row(1L), Row(0L))
)
checkAnswer(
df.selectExpr("array_position(a, 1)"),
Seq(Row(1L), Row(0L))
)
checkAnswer(
df.select(array_position(df("a"), null)),
Seq(Row(null), Row(null))
)
checkAnswer(
df.selectExpr("array_position(a, null)"),
Seq(Row(null), Row(null))
)
checkAnswer(
df.selectExpr("array_position(array(array(1), null)[0], 1)"),
Seq(Row(1L), Row(1L))
)
checkAnswer(
df.selectExpr("array_position(array(1, null), array(1, null)[0])"),
Seq(Row(1L), Row(1L))
)
val e = intercept[AnalysisException] {
Seq(("a string element", "a")).toDF().selectExpr("array_position(_1, _2)")
}
assert(e.message.contains("argument 1 requires array type, however, '`_1`' is of string type"))
}
test("element_at function") {
val df = Seq(
(Seq[String]("1", "2", "3")),
(Seq[String](null, "")),
(Seq[String]())
).toDF("a")
    assert(intercept[Exception] {
      checkAnswer(
        df.select(element_at(df("a"), 0)),
        Seq(Row(null), Row(null), Row(null))
      )
    }.getMessage.contains("SQL array indices start at 1"))
intercept[Exception] {
checkAnswer(
df.select(element_at(df("a"), 1.1)),
Seq(Row(null), Row(null), Row(null))
)
}
checkAnswer(
df.select(element_at(df("a"), 4)),
Seq(Row(null), Row(null), Row(null))
)
checkAnswer(
df.select(element_at(df("a"), 1)),
Seq(Row("1"), Row(null), Row(null))
)
checkAnswer(
df.select(element_at(df("a"), -1)),
Seq(Row("3"), Row(""), Row(null))
)
checkAnswer(
df.selectExpr("element_at(a, 4)"),
Seq(Row(null), Row(null), Row(null))
)
checkAnswer(
df.selectExpr("element_at(a, 1)"),
Seq(Row("1"), Row(null), Row(null))
)
checkAnswer(
df.selectExpr("element_at(a, -1)"),
Seq(Row("3"), Row(""), Row(null))
)
val e = intercept[AnalysisException] {
Seq(("a string element", 1)).toDF().selectExpr("element_at(_1, _2)")
}
assert(e.message.contains(
"argument 1 requires (array or map) type, however, '`_1`' is of string type"))
}
test("concat function - arrays") {
val nseqi : Seq[Int] = null
val nseqs : Seq[String] = null
val df = Seq(
(Seq(1), Seq(2, 3), Seq(5L, 6L), nseqi, Seq("a", "b", "c"), Seq("d", "e"), Seq("f"), nseqs),
(Seq(1, 0), Seq.empty[Int], Seq(2L), nseqi, Seq("a"), Seq.empty[String], Seq(null), nseqs)
).toDF("i1", "i2", "i3", "in", "s1", "s2", "s3", "sn")
val dummyFilter = (c: Column) => c.isNull || c.isNotNull // switch codeGen on
// Simple test cases
checkAnswer(
df.selectExpr("array(1, 2, 3L)"),
Seq(Row(Seq(1L, 2L, 3L)), Row(Seq(1L, 2L, 3L)))
)
checkAnswer (
df.select(concat($"i1", $"s1")),
Seq(Row(Seq("1", "a", "b", "c")), Row(Seq("1", "0", "a")))
)
checkAnswer(
df.select(concat($"i1", $"i2", $"i3")),
Seq(Row(Seq(1, 2, 3, 5, 6)), Row(Seq(1, 0, 2)))
)
checkAnswer(
df.filter(dummyFilter($"i1")).select(concat($"i1", $"i2", $"i3")),
Seq(Row(Seq(1, 2, 3, 5, 6)), Row(Seq(1, 0, 2)))
)
checkAnswer(
df.selectExpr("concat(array(1, null), i2, i3)"),
Seq(Row(Seq(1, null, 2, 3, 5, 6)), Row(Seq(1, null, 2)))
)
checkAnswer(
df.select(concat($"s1", $"s2", $"s3")),
Seq(Row(Seq("a", "b", "c", "d", "e", "f")), Row(Seq("a", null)))
)
checkAnswer(
df.selectExpr("concat(s1, s2, s3)"),
Seq(Row(Seq("a", "b", "c", "d", "e", "f")), Row(Seq("a", null)))
)
checkAnswer(
df.filter(dummyFilter($"s1"))select(concat($"s1", $"s2", $"s3")),
Seq(Row(Seq("a", "b", "c", "d", "e", "f")), Row(Seq("a", null)))
)
// Null test cases
checkAnswer(
df.select(concat($"i1", $"in")),
Seq(Row(null), Row(null))
)
checkAnswer(
df.select(concat($"in", $"i1")),
Seq(Row(null), Row(null))
)
checkAnswer(
df.select(concat($"s1", $"sn")),
Seq(Row(null), Row(null))
)
checkAnswer(
df.select(concat($"sn", $"s1")),
Seq(Row(null), Row(null))
)
// Type error test cases
intercept[AnalysisException] {
df.selectExpr("concat(i1, i2, null)")
}
intercept[AnalysisException] {
df.selectExpr("concat(i1, array(i1, i2))")
}
val e = intercept[AnalysisException] {
df.selectExpr("concat(map(1, 2), map(3, 4))")
}
assert(e.getMessage.contains("string, binary or array"))
}
test("flatten function") {
val dummyFilter = (c: Column) => c.isNull || c.isNotNull // to switch codeGen on
val oneRowDF = Seq((1, "a", Seq(1, 2, 3))).toDF("i", "s", "arr")
// Test cases with a primitive type
val intDF = Seq(
(Seq(Seq(1, 2, 3), Seq(4, 5), Seq(6))),
(Seq(Seq(1, 2))),
(Seq(Seq(1), Seq.empty)),
(Seq(Seq.empty, Seq(1))),
(Seq(Seq.empty, Seq.empty)),
(Seq(Seq(1), null)),
(Seq(null, Seq(1))),
(Seq(null, null))
).toDF("i")
val intDFResult = Seq(
Row(Seq(1, 2, 3, 4, 5, 6)),
Row(Seq(1, 2)),
Row(Seq(1)),
Row(Seq(1)),
Row(Seq.empty),
Row(null),
Row(null),
Row(null))
checkAnswer(intDF.select(flatten($"i")), intDFResult)
checkAnswer(intDF.filter(dummyFilter($"i"))select(flatten($"i")), intDFResult)
checkAnswer(intDF.selectExpr("flatten(i)"), intDFResult)
checkAnswer(
oneRowDF.selectExpr("flatten(array(arr, array(null, 5), array(6, null)))"),
Seq(Row(Seq(1, 2, 3, null, 5, 6, null))))
// Test cases with non-primitive types
val strDF = Seq(
(Seq(Seq("a", "b"), Seq("c"), Seq("d", "e", "f"))),
(Seq(Seq("a", "b"))),
(Seq(Seq("a", null), Seq(null, "b"), Seq(null, null))),
(Seq(Seq("a"), Seq.empty)),
(Seq(Seq.empty, Seq("a"))),
(Seq(Seq.empty, Seq.empty)),
(Seq(Seq("a"), null)),
(Seq(null, Seq("a"))),
(Seq(null, null))
).toDF("s")
val strDFResult = Seq(
Row(Seq("a", "b", "c", "d", "e", "f")),
Row(Seq("a", "b")),
Row(Seq("a", null, null, "b", null, null)),
Row(Seq("a")),
Row(Seq("a")),
Row(Seq.empty),
Row(null),
Row(null),
Row(null))
checkAnswer(strDF.select(flatten($"s")), strDFResult)
checkAnswer(strDF.filter(dummyFilter($"s")).select(flatten($"s")), strDFResult)
checkAnswer(strDF.selectExpr("flatten(s)"), strDFResult)
checkAnswer(
oneRowDF.selectExpr("flatten(array(array(arr, arr), array(arr)))"),
Seq(Row(Seq(Seq(1, 2, 3), Seq(1, 2, 3), Seq(1, 2, 3)))))
// Error test cases
intercept[AnalysisException] {
oneRowDF.select(flatten($"arr"))
}
intercept[AnalysisException] {
oneRowDF.select(flatten($"i"))
}
intercept[AnalysisException] {
oneRowDF.select(flatten($"s"))
}
intercept[AnalysisException] {
oneRowDF.selectExpr("flatten(null)")
}
}
test("array_repeat function") {
val dummyFilter = (c: Column) => c.isNull || c.isNotNull // to switch codeGen on
val strDF = Seq(
("hi", 2),
(null, 2)
).toDF("a", "b")
val strDFTwiceResult = Seq(
Row(Seq("hi", "hi")),
Row(Seq(null, null))
)
checkAnswer(strDF.select(array_repeat($"a", 2)), strDFTwiceResult)
checkAnswer(strDF.filter(dummyFilter($"a")).select(array_repeat($"a", 2)), strDFTwiceResult)
checkAnswer(strDF.select(array_repeat($"a", $"b")), strDFTwiceResult)
checkAnswer(strDF.filter(dummyFilter($"a")).select(array_repeat($"a", $"b")), strDFTwiceResult)
checkAnswer(strDF.selectExpr("array_repeat(a, 2)"), strDFTwiceResult)
checkAnswer(strDF.selectExpr("array_repeat(a, b)"), strDFTwiceResult)
val intDF = {
val schema = StructType(Seq(
StructField("a", IntegerType),
StructField("b", IntegerType)))
val data = Seq(
Row(3, 2),
Row(null, 2)
)
spark.createDataFrame(spark.sparkContext.parallelize(data), schema)
}
val intDFTwiceResult = Seq(
Row(Seq(3, 3)),
Row(Seq(null, null))
)
checkAnswer(intDF.select(array_repeat($"a", 2)), intDFTwiceResult)
checkAnswer(intDF.filter(dummyFilter($"a")).select(array_repeat($"a", 2)), intDFTwiceResult)
checkAnswer(intDF.select(array_repeat($"a", $"b")), intDFTwiceResult)
checkAnswer(intDF.filter(dummyFilter($"a")).select(array_repeat($"a", $"b")), intDFTwiceResult)
checkAnswer(intDF.selectExpr("array_repeat(a, 2)"), intDFTwiceResult)
checkAnswer(intDF.selectExpr("array_repeat(a, b)"), intDFTwiceResult)
val nullCountDF = {
val schema = StructType(Seq(
StructField("a", StringType),
StructField("b", IntegerType)))
val data = Seq(
Row("hi", null),
Row(null, null)
)
spark.createDataFrame(spark.sparkContext.parallelize(data), schema)
}
checkAnswer(
nullCountDF.select(array_repeat($"a", $"b")),
Seq(
Row(null),
Row(null)
)
)
// Error test cases
val invalidTypeDF = Seq(("hi", "1")).toDF("a", "b")
intercept[AnalysisException] {
invalidTypeDF.select(array_repeat($"a", $"b"))
}
intercept[AnalysisException] {
invalidTypeDF.select(array_repeat($"a", lit("1")))
}
intercept[AnalysisException] {
invalidTypeDF.selectExpr("array_repeat(a, 1.0)")
}
}
test("array remove") {
val df = Seq(
(Array[Int](2, 1, 2, 3), Array("a", "b", "c", "a"), Array("", "")),
(Array.empty[Int], Array.empty[String], Array.empty[String]),
(null, null, null)
).toDF("a", "b", "c")
checkAnswer(
df.select(array_remove($"a", 2), array_remove($"b", "a"), array_remove($"c", "")),
Seq(
Row(Seq(1, 3), Seq("b", "c"), Seq.empty[String]),
Row(Seq.empty[Int], Seq.empty[String], Seq.empty[String]),
Row(null, null, null))
)
checkAnswer(
df.selectExpr("array_remove(a, 2)", "array_remove(b, \\"a\\")",
"array_remove(c, \\"\\")"),
Seq(
Row(Seq(1, 3), Seq("b", "c"), Seq.empty[String]),
Row(Seq.empty[Int], Seq.empty[String], Seq.empty[String]),
Row(null, null, null))
)
val e = intercept[AnalysisException] {
Seq(("a string element", "a")).toDF().selectExpr("array_remove(_1, _2)")
}
assert(e.message.contains("argument 1 requires array type, however, '`_1`' is of string type"))
}
private def assertValuesDoNotChangeAfterCoalesceOrUnion(v: Column): Unit = {
import DataFrameFunctionsSuite.CodegenFallbackExpr
for ((codegenFallback, wholeStage) <- Seq((true, false), (false, false), (false, true))) {
val c = if (codegenFallback) {
Column(CodegenFallbackExpr(v.expr))
} else {
v
}
withSQLConf(
(SQLConf.CODEGEN_FALLBACK.key, codegenFallback.toString),
(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, wholeStage.toString)) {
val df = spark.range(0, 4, 1, 4).withColumn("c", c)
val rows = df.collect()
val rowsAfterCoalesce = df.coalesce(2).collect()
assert(rows === rowsAfterCoalesce, "Values changed after coalesce when " +
s"codegenFallback=$codegenFallback and wholeStage=$wholeStage.")
val df1 = spark.range(0, 2, 1, 2).withColumn("c", c)
val rows1 = df1.collect()
val df2 = spark.range(2, 4, 1, 2).withColumn("c", c)
val rows2 = df2.collect()
val rowsAfterUnion = df1.union(df2).collect()
assert(rowsAfterUnion === rows1 ++ rows2, "Values changed after union when " +
s"codegenFallback=$codegenFallback and wholeStage=$wholeStage.")
}
}
}
test("SPARK-14393: values generated by non-deterministic functions shouldn't change after " +
"coalesce or union") {
Seq(
monotonically_increasing_id(), spark_partition_id(),
rand(Random.nextLong()), randn(Random.nextLong())
).foreach(assertValuesDoNotChangeAfterCoalesceOrUnion(_))
}
test("SPARK-21281 use string types by default if array and map have no argument") {
val ds = spark.range(1)
var expectedSchema = new StructType()
.add("x", ArrayType(StringType, containsNull = false), nullable = false)
assert(ds.select(array().as("x")).schema == expectedSchema)
expectedSchema = new StructType()
.add("x", MapType(StringType, StringType, valueContainsNull = false), nullable = false)
assert(ds.select(map().as("x")).schema == expectedSchema)
}
test("SPARK-21281 fails if functions have no argument") {
val df = Seq(1).toDF("a")
val funcsMustHaveAtLeastOneArg =
("coalesce", (df: DataFrame) => df.select(coalesce())) ::
("coalesce", (df: DataFrame) => df.selectExpr("coalesce()")) ::
("named_struct", (df: DataFrame) => df.select(struct())) ::
("named_struct", (df: DataFrame) => df.selectExpr("named_struct()")) ::
("hash", (df: DataFrame) => df.select(hash())) ::
("hash", (df: DataFrame) => df.selectExpr("hash()")) :: Nil
funcsMustHaveAtLeastOneArg.foreach { case (name, func) =>
val errMsg = intercept[AnalysisException] { func(df) }.getMessage
assert(errMsg.contains(s"input to function $name requires at least one argument"))
}
val funcsMustHaveAtLeastTwoArgs =
("greatest", (df: DataFrame) => df.select(greatest())) ::
("greatest", (df: DataFrame) => df.selectExpr("greatest()")) ::
("least", (df: DataFrame) => df.select(least())) ::
("least", (df: DataFrame) => df.selectExpr("least()")) :: Nil
funcsMustHaveAtLeastTwoArgs.foreach { case (name, func) =>
val errMsg = intercept[AnalysisException] { func(df) }.getMessage
assert(errMsg.contains(s"input to function $name requires at least two arguments"))
}
}
}
object DataFrameFunctionsSuite {
case class CodegenFallbackExpr(child: Expression) extends Expression with CodegenFallback {
override def children: Seq[Expression] = Seq(child)
override def nullable: Boolean = child.nullable
override def dataType: DataType = child.dataType
override lazy val resolved = true
override def eval(input: InternalRow): Any = child.eval(input)
}
}
|
lxsmnv/spark
|
sql/core/src/test/scala/org/apache/spark/sql/DataFrameFunctionsSuite.scala
|
Scala
|
apache-2.0
| 43,430
|
package com.chaos.pingplusplus.exception
/**
* Created by zcfrank1st on 11/14/14.
*/
class APIConnectionException(message: String, cause: Throwable = null) extends PingappException(message, cause){}
|
zcfrank1st/chaos-pingapp-scala
|
src/main/scala/com/chaos/pingplusplus/exception/APIConnectionException.scala
|
Scala
|
mit
| 202
|
// Copyright (c) 2014 The omegaUp Contributors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import com.omegaup.libinteractive.target._
import com.omegaup.libinteractive.idl.Parser
import java.io.IOException
import java.io.BufferedReader
import java.io.InputStreamReader
import java.nio.charset.Charset
import java.nio.file.attribute.BasicFileAttributes
import java.nio.file.Files
import java.nio.file.FileVisitResult
import java.nio.file.Path
import java.nio.file.Paths
import java.nio.file.SimpleFileVisitor
import scala.io.Source
import scala.collection.mutable.ListBuffer
import scala.collection.JavaConversions.asJavaIterable
import org.scalatest._
object Transact extends Tag("com.omegaup.libinteractive.Transact")
class TargetSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
val testRoot = Paths.get(".tests")
val parentLanguages = List("cpp", "java", "py")
val childLanguages =
if (System.getProperty("os.name").toLowerCase.startsWith("mac"))
List("c", "cpp", "java", "py")
else
List("c", "cpp", "java", "py", "pas", "cs")
val transactSupportedLanguages =
Set("c", "cpp", "java", "py", "cs")
override def beforeAll() = {
if (Files.exists(testRoot)) {
Files.walkFileTree(testRoot, new SimpleFileVisitor[Path] {
override def visitFile(file: Path, attrs: BasicFileAttributes):
FileVisitResult = {
Files.delete(file)
FileVisitResult.CONTINUE
}
override def postVisitDirectory(dir: Path, exc: IOException):
FileVisitResult = {
Files.delete(dir)
FileVisitResult.CONTINUE
}
})
}
Files.createDirectory(testRoot)
}
def deploy(path: Path) = {
val resource = getClass.getResource(path.toString)
val deployPath = testRoot.resolve(path)
if (resource != null) {
val source = Source.fromURL(resource)
if (!Files.exists(deployPath.getParent)) {
Files.createDirectories(deployPath.getParent)
}
Files.write(deployPath, source.getLines.toIterable, Charset.forName("UTF-8"))
}
deployPath
}
def run(parentLang: String, childLang: String, path: Path, output: String,
optionsTemplate: Options = Options()) = {
val moduleName = path.getName(path.getNameCount - 1).toString
val idlFile = deploy(path.resolve(s"$moduleName.idl"))
val root = testRoot.resolve(path).resolve(s"${parentLang}_${childLang}")
val options = optionsTemplate.copy(
parentLang = parentLang,
childLang = childLang,
idlFile = idlFile,
makefile = true,
moduleName = moduleName,
root = root.toAbsolutePath,
quiet = true
)
val parser = new Parser
val idl = parser.parse(Source.fromFile(idlFile.toFile).mkString)
val installer = new InstallVisitor
val problemsetter = deploy(path.resolve(
s"${idl.main.name}.${options.parentLang}")).toAbsolutePath
val contestant = deploy(path.resolve(
s"${options.moduleName}.${options.childLang}")).toAbsolutePath
installer.apply(new OutputDirectory(Paths.get(".")))
Generator.generate(idl, options, problemsetter, contestant).foreach(
installer.apply)
Files.copy(problemsetter, root.resolve(problemsetter.getFileName))
Files.copy(contestant, root.resolve(contestant.getFileName))
val process = Runtime.getRuntime.exec(Array(
"/usr/bin/make", "-s", "run", "-C", root.toString
))
val reader = new BufferedReader(new InputStreamReader(process.getInputStream))
val lines = ListBuffer.empty[String]
var line: String = null
while ( { line = reader.readLine ; line != null } ) {
lines += line
}
withClue(root.toString) {
lines.mkString("\\n") should equal (output)
process.waitFor should be (0)
}
}
def runDirectory(directory: Path) = {
val output = Source.fromFile(deploy(directory.resolve("output")).toFile).mkString.trim
for (lang <- parentLanguages) {
run(lang, "c", directory, output)
}
for (lang <- childLanguages) {
run("c", lang, directory, output)
}
}
"libinteractive" should "support multi-process targets" in {
runDirectory(Paths.get("mega"))
}
"libinteractive" should "produce working templates" in {
val directory = Paths.get("templates")
val output = Source.fromFile(deploy(directory.resolve("output")).toFile).mkString.trim
for (lang <- childLanguages) {
run("c", lang, directory, output, Options(generateTemplate = true, verbose = true))
}
}
"libinteractive" should "support transact" taggedAs(Transact) in {
assume(Files.isDirectory(Paths.get("/sys/module/transact")),
"The 'transact' module is not loaded")
val directory = Paths.get("templates_transact")
val output = Source.fromFile(deploy(directory.resolve("output")).toFile).mkString.trim
for (lang <- childLanguages.filter(transactSupportedLanguages)) {
run("c", lang, directory, output, Options(generateTemplate = true, verbose = true, transact = true))
}
}
}
/* vim: set noexpandtab: */
|
omegaup/libinteractive
|
src/test/scala/TargetSpec.scala
|
Scala
|
bsd-3-clause
| 4,925
|
package com.cloudera.hue.livy.server
import com.cloudera.hue.livy.msgs.ExecuteRequest
import org.json4s.JValue
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}
import scala.util.{Failure, Success}
class Statement(val id: Int, val request: ExecuteRequest, val output: Future[JValue]) {
sealed trait State
case class Running() extends State
case class Available() extends State
case class Error() extends State
protected implicit def executor: ExecutionContextExecutor = ExecutionContext.global
private[this] var _state: State = Running()
def state = _state
output.onComplete {
case Success(_) => _state = Available()
case Failure(_) => _state = Error()
}
}
|
erickt/hue
|
apps/spark/java/livy-server/src/main/scala/com/cloudera/hue/livy/server/Statement.scala
|
Scala
|
apache-2.0
| 717
|
/*
Copyright (c) 2009, 2010 Hanno Braun <mail@hannobraun.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.hannobraun.sd.math
object Scalar {
implicit def doubleToScalar( d: Double ) = new Scalar( d )
implicit def intToScalar( i: Int ) = new Scalar( i )
}
/**
* Represents a scalar number.
* This class is not meant to be used directly, as Double is used to represent scalar values. The sole
* purpose of this class is to allow - in combination with the implicit conversions defined in the companion
 * object - to write expressions like "2 * vec", in addition to "vec * 2".
*/
class Scalar( value: Double ) {
def * ( vector: Vector2 ) = vector * value
}
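// Illustrative usage (a sketch only; it assumes Vector2 can be built from two Doubles and defines
// `*(Double)`, which this file already relies on via `vector * value`):
//
//   import com.hannobraun.sd.math.Scalar._
//
//   val v = new Vector2(1.0, 2.0)   // hypothetical constructor
//   val a = v * 2.0                 // handled directly by Vector2.*
//   val b = 2.0 * v                 // 2.0 is lifted to Scalar via doubleToScalar, then Scalar.* applies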
|
hannobraun/ScalableDynamics
|
src/main/scala/com/hannobraun/sd/math/Scalar.scala
|
Scala
|
apache-2.0
| 1,172
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import java.util.Locale
import java.util.function.Supplier
import scala.collection.mutable
import scala.util.control.NonFatal
import org.apache.spark.broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.catalyst.plans.physical.Partitioning
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.aggregate.HashAggregateExec
import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec}
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
* An interface for those physical operators that support codegen.
*/
trait CodegenSupport extends SparkPlan {
/** Prefix used in the current operator's variable names. */
private def variablePrefix: String = this match {
case _: HashAggregateExec => "agg"
case _: BroadcastHashJoinExec => "bhj"
case _: SortMergeJoinExec => "smj"
case _: RDDScanExec => "rdd"
case _: DataSourceScanExec => "scan"
case _ => nodeName.toLowerCase(Locale.ROOT)
}
/**
* Creates a metric using the specified name.
*
* @return name of the variable representing the metric
*/
def metricTerm(ctx: CodegenContext, name: String): String = {
ctx.addReferenceObj(name, longMetric(name))
}
/**
* Whether this SparkPlan supports whole stage codegen or not.
*/
def supportCodegen: Boolean = true
/**
* Which SparkPlan is calling produce() of this one. It's itself for the first SparkPlan.
*/
protected var parent: CodegenSupport = null
/**
   * Returns all the RDDs of InternalRow which generate the input rows.
*
* @note Right now we support up to two RDDs
*/
def inputRDDs(): Seq[RDD[InternalRow]]
/**
* Returns Java source code to process the rows from input RDD.
*/
final def produce(ctx: CodegenContext, parent: CodegenSupport): String = executeQuery {
this.parent = parent
ctx.freshNamePrefix = variablePrefix
s"""
|${ctx.registerComment(s"PRODUCE: ${this.simpleString}")}
|${doProduce(ctx)}
""".stripMargin
}
/**
   * Generates the Java source code to process rows; this should be overridden by subclasses to
   * support codegen.
*
   * doProduce() usually generates the framework; for example, aggregation could generate this:
*
* if (!initialized) {
* # create a hash map, then build the aggregation hash map
* # call child.produce()
* initialized = true;
* }
* while (hashmap.hasNext()) {
* row = hashmap.next();
* # build the aggregation results
* # create variables for results
* # call consume(), which will call parent.doConsume()
* if (shouldStop()) return;
* }
*/
protected def doProduce(ctx: CodegenContext): String
private def prepareRowVar(ctx: CodegenContext, row: String, colVars: Seq[ExprCode]): ExprCode = {
if (row != null) {
ExprCode.forNonNullValue(JavaCode.variable(row, classOf[UnsafeRow]))
} else {
if (colVars.nonEmpty) {
val colExprs = output.zipWithIndex.map { case (attr, i) =>
BoundReference(i, attr.dataType, attr.nullable)
}
val evaluateInputs = evaluateVariables(colVars)
// generate the code to create a UnsafeRow
ctx.INPUT_ROW = row
ctx.currentVars = colVars
val ev = GenerateUnsafeProjection.createCode(ctx, colExprs, false)
val code = code"""
|$evaluateInputs
|${ev.code}
""".stripMargin
ExprCode(code, FalseLiteral, ev.value)
} else {
// There are no columns
ExprCode.forNonNullValue(JavaCode.variable("unsafeRow", classOf[UnsafeRow]))
}
}
}
/**
   * Consume the generated columns or row from the current SparkPlan and call its parent's
   * `doConsume()`.
*
* Note that `outputVars` and `row` can't both be null.
*/
final def consume(ctx: CodegenContext, outputVars: Seq[ExprCode], row: String = null): String = {
val inputVars =
if (outputVars != null) {
assert(outputVars.length == output.length)
// outputVars will be used to generate the code for UnsafeRow, so we should copy them
outputVars.map(_.copy())
} else {
assert(row != null, "outputVars and row cannot both be null.")
ctx.currentVars = null
ctx.INPUT_ROW = row
output.zipWithIndex.map { case (attr, i) =>
BoundReference(i, attr.dataType, attr.nullable).genCode(ctx)
}
}
val rowVar = prepareRowVar(ctx, row, outputVars)
// Set up the `currentVars` in the codegen context, as we generate the code of `inputVars`
// before calling `parent.doConsume`. We can't set up `INPUT_ROW`, because parent needs to
// generate code of `rowVar` manually.
ctx.currentVars = inputVars
ctx.INPUT_ROW = null
ctx.freshNamePrefix = parent.variablePrefix
val evaluated = evaluateRequiredVariables(output, inputVars, parent.usedInputs)
    // Under certain conditions, we can put the logic to consume the rows of this operator into
    // another function, so that we can prevent a generated function from growing too long to be
    // optimized by the JIT.
// The conditions:
// 1. The config "spark.sql.codegen.splitConsumeFuncByOperator" is enabled.
// 2. `inputVars` are all materialized. That is guaranteed to be true if the parent plan uses
// all variables in output (see `requireAllOutput`).
    // 3. The number of output variables must be less than the maximum number of parameters in a
    //    Java method declaration.
val confEnabled = SQLConf.get.wholeStageSplitConsumeFuncByOperator
val requireAllOutput = output.forall(parent.usedInputs.contains(_))
val paramLength = CodeGenerator.calculateParamLength(output) + (if (row != null) 1 else 0)
val consumeFunc = if (confEnabled && requireAllOutput
&& CodeGenerator.isValidParamLength(paramLength)) {
constructDoConsumeFunction(ctx, inputVars, row)
} else {
parent.doConsume(ctx, inputVars, rowVar)
}
s"""
|${ctx.registerComment(s"CONSUME: ${parent.simpleString}")}
|$evaluated
|$consumeFunc
""".stripMargin
}
/**
   * To prevent the concatenated function from growing too long to be optimized by the JIT, we can
   * separate the parent's `doConsume` code for a `CodegenSupport` operator into a function to call.
*/
private def constructDoConsumeFunction(
ctx: CodegenContext,
inputVars: Seq[ExprCode],
row: String): String = {
val (args, params, inputVarsInFunc) = constructConsumeParameters(ctx, output, inputVars, row)
val rowVar = prepareRowVar(ctx, row, inputVarsInFunc)
val doConsume = ctx.freshName("doConsume")
ctx.currentVars = inputVarsInFunc
ctx.INPUT_ROW = null
val doConsumeFuncName = ctx.addNewFunction(doConsume,
s"""
| private void $doConsume(${params.mkString(", ")}) throws java.io.IOException {
| ${parent.doConsume(ctx, inputVarsInFunc, rowVar)}
| }
""".stripMargin)
s"""
| $doConsumeFuncName(${args.mkString(", ")});
""".stripMargin
}
/**
   * Returns the arguments for calling the consume function and the parameters of its method
   * definition, along with the list of `ExprCode` for the parameters.
*/
private def constructConsumeParameters(
ctx: CodegenContext,
attributes: Seq[Attribute],
variables: Seq[ExprCode],
row: String): (Seq[String], Seq[String], Seq[ExprCode]) = {
val arguments = mutable.ArrayBuffer[String]()
val parameters = mutable.ArrayBuffer[String]()
val paramVars = mutable.ArrayBuffer[ExprCode]()
if (row != null) {
arguments += row
parameters += s"InternalRow $row"
}
variables.zipWithIndex.foreach { case (ev, i) =>
val paramName = ctx.freshName(s"expr_$i")
val paramType = CodeGenerator.javaType(attributes(i).dataType)
arguments += ev.value
parameters += s"$paramType $paramName"
val paramIsNull = if (!attributes(i).nullable) {
// Use constant `false` without passing `isNull` for non-nullable variable.
FalseLiteral
} else {
val isNull = ctx.freshName(s"exprIsNull_$i")
arguments += ev.isNull
parameters += s"boolean $isNull"
JavaCode.isNullVariable(isNull)
}
paramVars += ExprCode(paramIsNull, JavaCode.variable(paramName, attributes(i).dataType))
}
(arguments, parameters, paramVars)
}
/**
   * Returns source code to evaluate all the variables and clear their code, to prevent
   * them from being evaluated twice.
*/
protected def evaluateVariables(variables: Seq[ExprCode]): String = {
    val evaluate = variables.filter(_.code.nonEmpty).map(_.code.toString).mkString("\n")
variables.foreach(_.code = EmptyBlock)
evaluate
}
/**
   * Returns source code to evaluate the variables for the required attributes, and clear the code
   * of the evaluated variables, to prevent them from being evaluated twice.
*/
protected def evaluateRequiredVariables(
attributes: Seq[Attribute],
variables: Seq[ExprCode],
required: AttributeSet): String = {
val evaluateVars = new StringBuilder
variables.zipWithIndex.foreach { case (ev, i) =>
if (ev.code.nonEmpty && required.contains(attributes(i))) {
        evaluateVars.append(ev.code.toString + "\n")
ev.code = EmptyBlock
}
}
evaluateVars.toString()
}
/**
   * The subset of inputSet that should be evaluated before this plan.
   *
   * We will use this to insert some code to access those columns that are actually used by the
   * current plan before calling doConsume().
*/
def usedInputs: AttributeSet = references
/**
* Generate the Java source code to process the rows from child SparkPlan. This should only be
* called from `consume`.
*
   * This should be overridden by subclasses to support codegen.
*
* Note: The operator should not assume the existence of an outer processing loop,
* which it can jump from with "continue;"!
*
* For example, filter could generate this:
* # code to evaluate the predicate expression, result is isNull1 and value2
* if (!isNull1 && value2) {
* # call consume(), which will call parent.doConsume()
* }
*
* Note: A plan can either consume the rows as UnsafeRow (row), or a list of variables (input).
   * When consuming as a list of variables, the code to produce the input is already
* generated and `CodegenContext.currentVars` is already set. When consuming as UnsafeRow,
* implementations need to put `row.code` in the generated code and set
* `CodegenContext.INPUT_ROW` manually. Some plans may need more tweaks as they have
* different inputs(join build side, aggregate buffer, etc.), or other special cases.
*/
def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: ExprCode): String = {
throw new UnsupportedOperationException
}
/**
* Whether or not the result rows of this operator should be copied before putting into a buffer.
*
   * If any operator inside WholeStageCodegen generates multiple rows from a single row (for
* example, Join), this should be true.
*
* If an operator starts a new pipeline, this should be false.
*/
def needCopyResult: Boolean = {
if (children.isEmpty) {
false
} else if (children.length == 1) {
children.head.asInstanceOf[CodegenSupport].needCopyResult
} else {
throw new UnsupportedOperationException
}
}
/**
* Whether or not the children of this operator should generate a stop check when consuming input
* rows. This is used to suppress shouldStop() in a loop of WholeStageCodegen.
*
* This should be false if an operator starts a new pipeline, which means it consumes all rows
   * produced by children but doesn't output rows to the buffer by calling append(), so the children
* don't require shouldStop() in the loop of producing rows.
*/
def needStopCheck: Boolean = parent.needStopCheck
}
/**
* InputAdapter is used to hide a SparkPlan from a subtree that supports codegen.
*
* This is the leaf node of a tree with WholeStageCodegen that is used to generate code
* that consumes an RDD iterator of InternalRow.
*/
case class InputAdapter(child: SparkPlan) extends UnaryExecNode with CodegenSupport {
override def output: Seq[Attribute] = child.output
override def outputPartitioning: Partitioning = child.outputPartitioning
override def outputOrdering: Seq[SortOrder] = child.outputOrdering
override def doExecute(): RDD[InternalRow] = {
child.execute()
}
override def doExecuteBroadcast[T](): broadcast.Broadcast[T] = {
child.doExecuteBroadcast()
}
override def inputRDDs(): Seq[RDD[InternalRow]] = {
child.execute() :: Nil
}
override def doProduce(ctx: CodegenContext): String = {
// Right now, InputAdapter is only used when there is one input RDD.
// Inline mutable state since an InputAdapter is used once in a task for WholeStageCodegen
val input = ctx.addMutableState("scala.collection.Iterator", "input", v => s"$v = inputs[0];",
forceInline = true)
val row = ctx.freshName("row")
s"""
| while ($input.hasNext() && !stopEarly()) {
| InternalRow $row = (InternalRow) $input.next();
| ${consume(ctx, null, row).trim}
| if (shouldStop()) return;
| }
""".stripMargin
}
override def generateTreeString(
depth: Int,
lastChildren: Seq[Boolean],
builder: StringBuilder,
verbose: Boolean,
prefix: String = "",
addSuffix: Boolean = false): StringBuilder = {
child.generateTreeString(depth, lastChildren, builder, verbose, "")
}
override def needCopyResult: Boolean = false
}
object WholeStageCodegenExec {
val PIPELINE_DURATION_METRIC = "duration"
private def numOfNestedFields(dataType: DataType): Int = dataType match {
case dt: StructType => dt.fields.map(f => numOfNestedFields(f.dataType)).sum
case m: MapType => numOfNestedFields(m.keyType) + numOfNestedFields(m.valueType)
case a: ArrayType => numOfNestedFields(a.elementType)
case u: UserDefinedType[_] => numOfNestedFields(u.sqlType)
case _ => 1
}
def isTooManyFields(conf: SQLConf, dataType: DataType): Boolean = {
numOfNestedFields(dataType) > conf.wholeStageMaxNumFields
}
}
object WholeStageCodegenId {
// codegenStageId: ID for codegen stages within a query plan.
// It does not affect equality, nor does it participate in destructuring pattern matching
// of WholeStageCodegenExec.
//
// This ID is used to help differentiate between codegen stages. It is included as a part
// of the explain output for physical plans, e.g.
//
// == Physical Plan ==
// *(5) SortMergeJoin [x#3L], [y#9L], Inner
// :- *(2) Sort [x#3L ASC NULLS FIRST], false, 0
// : +- Exchange hashpartitioning(x#3L, 200)
// : +- *(1) Project [(id#0L % 2) AS x#3L]
// : +- *(1) Filter isnotnull((id#0L % 2))
// : +- *(1) Range (0, 5, step=1, splits=8)
// +- *(4) Sort [y#9L ASC NULLS FIRST], false, 0
// +- Exchange hashpartitioning(y#9L, 200)
// +- *(3) Project [(id#6L % 2) AS y#9L]
// +- *(3) Filter isnotnull((id#6L % 2))
// +- *(3) Range (0, 5, step=1, splits=8)
//
// where the ID makes it obvious that not all adjacent codegen'd plan operators are of the
// same codegen stage.
//
// The codegen stage ID is also optionally included in the name of the generated classes as
// a suffix, so that it's easier to associate a generated class back to the physical operator.
// This is controlled by SQLConf: spark.sql.codegen.useIdInClassName
//
// The ID is also included in various log messages.
//
// Within a query, a codegen stage in a plan starts counting from 1, in "insertion order".
// WholeStageCodegenExec operators are inserted into a plan in depth-first post-order.
// See CollapseCodegenStages.insertWholeStageCodegen for the definition of insertion order.
//
// 0 is reserved as a special ID value to indicate a temporary WholeStageCodegenExec object
// is created, e.g. for special fallback handling when an existing WholeStageCodegenExec
// failed to generate/compile code.
private val codegenStageCounter = ThreadLocal.withInitial(new Supplier[Integer] {
override def get() = 1 // TODO: change to Scala lambda syntax when upgraded to Scala 2.12+
})
def resetPerQuery(): Unit = codegenStageCounter.set(1)
def getNextStageId(): Int = {
val counter = codegenStageCounter
val id = counter.get()
counter.set(id + 1)
id
}
}
/**
 * WholeStageCodegen compiles a subtree of plans that support codegen together into a single Java
* function.
*
 * Here is the call graph for generating Java source (plan A supports codegen, but plan B does not):
*
* WholeStageCodegen Plan A FakeInput Plan B
* =========================================================================
*
* -> execute()
* |
* doExecute() ---------> inputRDDs() -------> inputRDDs() ------> execute()
* |
* +-----------------> produce()
* |
* doProduce() -------> produce()
* |
* doProduce()
* |
* doConsume() <--------- consume()
* |
* doConsume() <-------- consume()
*
* SparkPlan A should override `doProduce()` and `doConsume()`.
*
* `doCodeGen()` will create a `CodeGenContext`, which will hold a list of variables for input,
 * used to generate code for [[BoundReference]].
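 *
 * For intuition, here is an illustrative sketch (not the exact generated source; `passesPredicate`
 * is only a placeholder) of the fused code that produce()/consume() end up emitting for a scan
 * feeding a filter:
 *
 *   while (input.hasNext()) {
 *     InternalRow row = (InternalRow) input.next();   // emitted by InputAdapter.doProduce()
 *     // code from the filter's doConsume(): evaluate its predicate on `row`
 *     if (passesPredicate) {
 *       append(row);                                  // emitted by WholeStageCodegenExec.doConsume()
 *     }
 *     if (shouldStop()) return;
 *   }
 *
 * The real generated source is assembled by doCodeGen() below; the snippet above only names the
 * pieces each operator contributes.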
*/
case class WholeStageCodegenExec(child: SparkPlan)(val codegenStageId: Int)
extends UnaryExecNode with CodegenSupport {
override def output: Seq[Attribute] = child.output
override def outputPartitioning: Partitioning = child.outputPartitioning
override def outputOrdering: Seq[SortOrder] = child.outputOrdering
override lazy val metrics = Map(
"pipelineTime" -> SQLMetrics.createTimingMetric(sparkContext,
WholeStageCodegenExec.PIPELINE_DURATION_METRIC))
def generatedClassName(): String = if (conf.wholeStageUseIdInClassName) {
s"GeneratedIteratorForCodegenStage$codegenStageId"
} else {
"GeneratedIterator"
}
/**
* Generates code for this subtree.
*
* @return the tuple of the codegen context and the actual generated source.
*/
def doCodeGen(): (CodegenContext, CodeAndComment) = {
val ctx = new CodegenContext
val code = child.asInstanceOf[CodegenSupport].produce(ctx, this)
// main next function.
ctx.addNewFunction("processNext",
s"""
protected void processNext() throws java.io.IOException {
${code.trim}
}
""", inlineToOuterClass = true)
val className = generatedClassName()
val source = s"""
public Object generate(Object[] references) {
return new $className(references);
}
${ctx.registerComment(
s"""Codegend pipeline for stage (id=$codegenStageId)
|${this.treeString.trim}""".stripMargin,
"wsc_codegenPipeline")}
${ctx.registerComment(s"codegenStageId=$codegenStageId", "wsc_codegenStageId", true)}
final class $className extends ${classOf[BufferedRowIterator].getName} {
private Object[] references;
private scala.collection.Iterator[] inputs;
${ctx.declareMutableStates()}
public $className(Object[] references) {
this.references = references;
}
public void init(int index, scala.collection.Iterator[] inputs) {
partitionIndex = index;
this.inputs = inputs;
${ctx.initMutableStates()}
${ctx.initPartition()}
}
${ctx.emitExtraCode()}
${ctx.declareAddedFunctions()}
}
""".trim
// try to compile, helpful for debug
val cleanedSource = CodeFormatter.stripOverlappingComments(
new CodeAndComment(CodeFormatter.stripExtraNewLines(source), ctx.getPlaceHolderToComments()))
logDebug(s"\\n${CodeFormatter.format(cleanedSource)}")
(ctx, cleanedSource)
}
override def doExecute(): RDD[InternalRow] = {
val (ctx, cleanedSource) = doCodeGen()
// try to compile and fallback if it failed
val (_, maxCodeSize) = try {
CodeGenerator.compile(cleanedSource)
} catch {
case NonFatal(_) if !Utils.isTesting && sqlContext.conf.codegenFallback =>
        // We should have already seen the error message
logWarning(s"Whole-stage codegen disabled for plan (id=$codegenStageId):\\n $treeString")
return child.execute()
}
    // Check if the compiled code has a function that is too large
if (maxCodeSize > sqlContext.conf.hugeMethodLimit) {
logInfo(s"Found too long generated codes and JIT optimization might not work: " +
s"the bytecode size ($maxCodeSize) is above the limit " +
s"${sqlContext.conf.hugeMethodLimit}, and the whole-stage codegen was disabled " +
s"for this plan (id=$codegenStageId). To avoid this, you can raise the limit " +
s"`${SQLConf.WHOLESTAGE_HUGE_METHOD_LIMIT.key}`:\\n$treeString")
child match {
// The fallback solution of batch file source scan still uses WholeStageCodegenExec
case f: FileSourceScanExec if f.supportsBatch => // do nothing
case _ => return child.execute()
}
}
val references = ctx.references.toArray
val durationMs = longMetric("pipelineTime")
val rdds = child.asInstanceOf[CodegenSupport].inputRDDs()
assert(rdds.size <= 2, "Up to two input RDDs can be supported")
if (rdds.length == 1) {
rdds.head.mapPartitionsWithIndex { (index, iter) =>
val (clazz, _) = CodeGenerator.compile(cleanedSource)
val buffer = clazz.generate(references).asInstanceOf[BufferedRowIterator]
buffer.init(index, Array(iter))
new Iterator[InternalRow] {
override def hasNext: Boolean = {
val v = buffer.hasNext
if (!v) durationMs += buffer.durationMs()
v
}
override def next: InternalRow = buffer.next()
}
}
} else {
// Right now, we support up to two input RDDs.
rdds.head.zipPartitions(rdds(1)) { (leftIter, rightIter) =>
Iterator((leftIter, rightIter))
// a small hack to obtain the correct partition index
}.mapPartitionsWithIndex { (index, zippedIter) =>
val (leftIter, rightIter) = zippedIter.next()
val (clazz, _) = CodeGenerator.compile(cleanedSource)
val buffer = clazz.generate(references).asInstanceOf[BufferedRowIterator]
buffer.init(index, Array(leftIter, rightIter))
new Iterator[InternalRow] {
override def hasNext: Boolean = {
val v = buffer.hasNext
if (!v) durationMs += buffer.durationMs()
v
}
override def next: InternalRow = buffer.next()
}
}
}
}
override def inputRDDs(): Seq[RDD[InternalRow]] = {
throw new UnsupportedOperationException
}
override def doProduce(ctx: CodegenContext): String = {
throw new UnsupportedOperationException
}
override def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: ExprCode): String = {
val doCopy = if (needCopyResult) {
".copy()"
} else {
""
}
s"""
|${row.code}
|append(${row.value}$doCopy);
""".stripMargin.trim
}
override def generateTreeString(
depth: Int,
lastChildren: Seq[Boolean],
builder: StringBuilder,
verbose: Boolean,
prefix: String = "",
addSuffix: Boolean = false): StringBuilder = {
child.generateTreeString(depth, lastChildren, builder, verbose, s"*($codegenStageId) ")
}
override def needStopCheck: Boolean = true
override protected def otherCopyArgs: Seq[AnyRef] = Seq(codegenStageId.asInstanceOf[Integer])
}
/**
* Find the chained plans that support codegen, collapse them together as WholeStageCodegen.
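 *
 * For example (an illustrative sketch of the rewrite, with operator names chosen for clarity),
 * a chain such as
 *
 *   ProjectExec -> FilterExec -> (a scan that does not support codegen)
 *
 * becomes
 *
 *   WholeStageCodegenExec(ProjectExec -> FilterExec -> InputAdapter(scan))
 *
 * so the codegen-capable operators are fused into one generated function, while the unsupported
 * scan is reached through an InputAdapter row iterator.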
*/
case class CollapseCodegenStages(conf: SQLConf) extends Rule[SparkPlan] {
private def supportCodegen(e: Expression): Boolean = e match {
case e: LeafExpression => true
// CodegenFallback requires the input to be an InternalRow
case e: CodegenFallback => false
case _ => true
}
private def supportCodegen(plan: SparkPlan): Boolean = plan match {
case plan: CodegenSupport if plan.supportCodegen =>
val willFallback = plan.expressions.exists(_.find(e => !supportCodegen(e)).isDefined)
// the generated code will be huge if there are too many columns
val hasTooManyOutputFields =
WholeStageCodegenExec.isTooManyFields(conf, plan.schema)
val hasTooManyInputFields =
plan.children.exists(p => WholeStageCodegenExec.isTooManyFields(conf, p.schema))
!willFallback && !hasTooManyOutputFields && !hasTooManyInputFields
case _ => false
}
/**
* Inserts an InputAdapter on top of those that do not support codegen.
*/
private def insertInputAdapter(plan: SparkPlan): SparkPlan = plan match {
case p if !supportCodegen(p) =>
// collapse them recursively
InputAdapter(insertWholeStageCodegen(p))
case j: SortMergeJoinExec =>
// The children of SortMergeJoin should do codegen separately.
j.withNewChildren(j.children.map(child => InputAdapter(insertWholeStageCodegen(child))))
case p =>
p.withNewChildren(p.children.map(insertInputAdapter))
}
/**
* Inserts a WholeStageCodegen on top of those that support codegen.
*/
private def insertWholeStageCodegen(plan: SparkPlan): SparkPlan = plan match {
    // For operators that will output domain objects, do not insert WholeStageCodegen for them, as
    // domain objects cannot be written into unsafe rows.
case plan if plan.output.length == 1 && plan.output.head.dataType.isInstanceOf[ObjectType] =>
plan.withNewChildren(plan.children.map(insertWholeStageCodegen))
case plan: CodegenSupport if supportCodegen(plan) =>
WholeStageCodegenExec(insertInputAdapter(plan))(WholeStageCodegenId.getNextStageId())
case other =>
other.withNewChildren(other.children.map(insertWholeStageCodegen))
}
def apply(plan: SparkPlan): SparkPlan = {
if (conf.wholeStageEnabled) {
WholeStageCodegenId.resetPerQuery()
insertWholeStageCodegen(plan)
} else {
plan
}
}
}
|
michalsenkyr/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegenExec.scala
|
Scala
|
apache-2.0
| 27,788
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.benchmark
import java.io.File
import scala.collection.JavaConverters._
import scala.util.{Random, Try}
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, DataFrameWriter, Row, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources.parquet.{SpecificParquetRecordReaderBase, VectorizedParquetRecordReader}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.sql.vectorized.ColumnVector
import org.apache.spark.util.{Benchmark, Utils}
/**
* Benchmark to measure data source read performance.
* To run this:
* spark-submit --class <this class> <spark sql test jar>
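 *    e.g. (the class name comes from this file; the test jar name below is only illustrative):
 *    spark-submit --class org.apache.spark.sql.execution.benchmark.DataSourceReadBenchmark \
 *      spark-sql-tests.jar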
*/
object DataSourceReadBenchmark {
val conf = new SparkConf()
.setAppName("DataSourceReadBenchmark")
    // Since `spark.master` always exists, override this value
.set("spark.master", "local[1]")
.setIfMissing("spark.driver.memory", "3g")
.setIfMissing("spark.executor.memory", "3g")
.setIfMissing("spark.ui.enabled", "false")
val spark = SparkSession.builder.config(conf).getOrCreate()
// Set default configs. Individual cases will change them if necessary.
spark.conf.set(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key, "true")
spark.conf.set(SQLConf.ORC_COPY_BATCH_TO_SPARK.key, "false")
spark.conf.set(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key, "true")
spark.conf.set(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, "true")
def withTempPath(f: File => Unit): Unit = {
val path = Utils.createTempDir()
path.delete()
try f(path) finally Utils.deleteRecursively(path)
}
def withTempTable(tableNames: String*)(f: => Unit): Unit = {
try f finally tableNames.foreach(spark.catalog.dropTempView)
}
def withSQLConf(pairs: (String, String)*)(f: => Unit): Unit = {
val (keys, values) = pairs.unzip
val currentValues = keys.map(key => Try(spark.conf.get(key)).toOption)
(keys, values).zipped.foreach(spark.conf.set)
try f finally {
keys.zip(currentValues).foreach {
case (key, Some(value)) => spark.conf.set(key, value)
case (key, None) => spark.conf.unset(key)
}
}
}
private def prepareTable(dir: File, df: DataFrame, partition: Option[String] = None): Unit = {
val testDf = if (partition.isDefined) {
df.write.partitionBy(partition.get)
} else {
df.write
}
saveAsCsvTable(testDf, dir.getCanonicalPath + "/csv")
saveAsJsonTable(testDf, dir.getCanonicalPath + "/json")
saveAsParquetTable(testDf, dir.getCanonicalPath + "/parquet")
saveAsOrcTable(testDf, dir.getCanonicalPath + "/orc")
}
private def saveAsCsvTable(df: DataFrameWriter[Row], dir: String): Unit = {
df.mode("overwrite").option("compression", "gzip").option("header", true).csv(dir)
spark.read.option("header", true).csv(dir).createOrReplaceTempView("csvTable")
}
private def saveAsJsonTable(df: DataFrameWriter[Row], dir: String): Unit = {
df.mode("overwrite").option("compression", "gzip").json(dir)
spark.read.json(dir).createOrReplaceTempView("jsonTable")
}
private def saveAsParquetTable(df: DataFrameWriter[Row], dir: String): Unit = {
df.mode("overwrite").option("compression", "snappy").parquet(dir)
spark.read.parquet(dir).createOrReplaceTempView("parquetTable")
}
private def saveAsOrcTable(df: DataFrameWriter[Row], dir: String): Unit = {
df.mode("overwrite").option("compression", "snappy").orc(dir)
spark.read.orc(dir).createOrReplaceTempView("orcTable")
}
def numericScanBenchmark(values: Int, dataType: DataType): Unit = {
    // Benchmarks running through Spark SQL.
val sqlBenchmark = new Benchmark(s"SQL Single ${dataType.sql} Column Scan", values)
    // Benchmarks driving the reader component directly.
val parquetReaderBenchmark = new Benchmark(
s"Parquet Reader Single ${dataType.sql} Column Scan", values)
withTempPath { dir =>
withTempTable("t1", "csvTable", "jsonTable", "parquetTable", "orcTable") {
import spark.implicits._
spark.range(values).map(_ => Random.nextLong).createOrReplaceTempView("t1")
prepareTable(dir, spark.sql(s"SELECT CAST(value as ${dataType.sql}) id FROM t1"))
sqlBenchmark.addCase("SQL CSV") { _ =>
spark.sql("select sum(id) from csvTable").collect()
}
sqlBenchmark.addCase("SQL Json") { _ =>
spark.sql("select sum(id) from jsonTable").collect()
}
sqlBenchmark.addCase("SQL Parquet Vectorized") { _ =>
spark.sql("select sum(id) from parquetTable").collect()
}
sqlBenchmark.addCase("SQL Parquet MR") { _ =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("select sum(id) from parquetTable").collect()
}
}
sqlBenchmark.addCase("SQL ORC Vectorized") { _ =>
spark.sql("SELECT sum(id) FROM orcTable").collect()
}
sqlBenchmark.addCase("SQL ORC Vectorized with copy") { _ =>
withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
spark.sql("SELECT sum(id) FROM orcTable").collect()
}
}
sqlBenchmark.addCase("SQL ORC MR") { _ =>
withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("SELECT sum(id) FROM orcTable").collect()
}
}
/*
OpenJDK 64-Bit Server VM 1.8.0_171-b10 on Linux 4.14.33-51.37.amzn1.x86_64
Intel(R) Xeon(R) CPU E5-2670 v2 @ 2.50GHz
SQL Single TINYINT Column Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
SQL CSV 22964 / 23096 0.7 1460.0 1.0X
SQL Json 8469 / 8593 1.9 538.4 2.7X
SQL Parquet Vectorized 164 / 177 95.8 10.4 139.9X
SQL Parquet MR 1687 / 1706 9.3 107.2 13.6X
SQL ORC Vectorized 191 / 197 82.3 12.2 120.2X
SQL ORC Vectorized with copy 215 / 219 73.2 13.7 106.9X
SQL ORC MR 1392 / 1412 11.3 88.5 16.5X
SQL Single SMALLINT Column Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
SQL CSV 24090 / 24097 0.7 1531.6 1.0X
SQL Json 8791 / 8813 1.8 558.9 2.7X
SQL Parquet Vectorized 204 / 212 77.0 13.0 117.9X
SQL Parquet MR 1813 / 1850 8.7 115.3 13.3X
SQL ORC Vectorized 226 / 230 69.7 14.4 106.7X
SQL ORC Vectorized with copy 295 / 298 53.3 18.8 81.6X
SQL ORC MR 1526 / 1549 10.3 97.1 15.8X
SQL Single INT Column Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
SQL CSV 25637 / 25791 0.6 1629.9 1.0X
SQL Json 9532 / 9570 1.7 606.0 2.7X
SQL Parquet Vectorized 181 / 191 86.8 11.5 141.5X
SQL Parquet MR 2210 / 2227 7.1 140.5 11.6X
SQL ORC Vectorized 309 / 317 50.9 19.6 83.0X
SQL ORC Vectorized with copy 316 / 322 49.8 20.1 81.2X
SQL ORC MR 1650 / 1680 9.5 104.9 15.5X
SQL Single BIGINT Column Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
SQL CSV 31617 / 31764 0.5 2010.1 1.0X
SQL Json 12440 / 12451 1.3 790.9 2.5X
SQL Parquet Vectorized 284 / 315 55.4 18.0 111.4X
SQL Parquet MR 2382 / 2390 6.6 151.5 13.3X
SQL ORC Vectorized 398 / 403 39.5 25.3 79.5X
SQL ORC Vectorized with copy 410 / 413 38.3 26.1 77.1X
SQL ORC MR 1783 / 1813 8.8 113.4 17.7X
SQL Single FLOAT Column Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
SQL CSV 26679 / 26742 0.6 1696.2 1.0X
SQL Json 12490 / 12541 1.3 794.1 2.1X
SQL Parquet Vectorized 174 / 183 90.4 11.1 153.3X
SQL Parquet MR 2201 / 2223 7.1 140.0 12.1X
SQL ORC Vectorized 415 / 429 37.9 26.4 64.3X
SQL ORC Vectorized with copy 422 / 428 37.2 26.9 63.2X
SQL ORC MR 1767 / 1773 8.9 112.3 15.1X
SQL Single DOUBLE Column Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
SQL CSV 34223 / 34324 0.5 2175.8 1.0X
SQL Json 17784 / 17785 0.9 1130.7 1.9X
SQL Parquet Vectorized 277 / 283 56.7 17.6 123.4X
SQL Parquet MR 2356 / 2386 6.7 149.8 14.5X
SQL ORC Vectorized 533 / 536 29.5 33.9 64.2X
SQL ORC Vectorized with copy 541 / 546 29.1 34.4 63.3X
SQL ORC MR 2166 / 2177 7.3 137.7 15.8X
*/
sqlBenchmark.run()
// Driving the parquet reader in batch mode directly.
val files = SpecificParquetRecordReaderBase.listDirectory(new File(dir, "parquet")).toArray
val enableOffHeapColumnVector = spark.sessionState.conf.offHeapColumnVectorEnabled
val vectorizedReaderBatchSize = spark.sessionState.conf.parquetVectorizedReaderBatchSize
parquetReaderBenchmark.addCase("ParquetReader Vectorized") { _ =>
var longSum = 0L
var doubleSum = 0.0
val aggregateValue: (ColumnVector, Int) => Unit = dataType match {
case ByteType => (col: ColumnVector, i: Int) => longSum += col.getByte(i)
case ShortType => (col: ColumnVector, i: Int) => longSum += col.getShort(i)
case IntegerType => (col: ColumnVector, i: Int) => longSum += col.getInt(i)
case LongType => (col: ColumnVector, i: Int) => longSum += col.getLong(i)
case FloatType => (col: ColumnVector, i: Int) => doubleSum += col.getFloat(i)
case DoubleType => (col: ColumnVector, i: Int) => doubleSum += col.getDouble(i)
}
files.map(_.asInstanceOf[String]).foreach { p =>
val reader = new VectorizedParquetRecordReader(
null, enableOffHeapColumnVector, vectorizedReaderBatchSize)
try {
reader.initialize(p, ("id" :: Nil).asJava)
val batch = reader.resultBatch()
val col = batch.column(0)
while (reader.nextBatch()) {
val numRows = batch.numRows()
var i = 0
while (i < numRows) {
if (!col.isNullAt(i)) aggregateValue(col, i)
i += 1
}
}
} finally {
reader.close()
}
}
}
// Decoding in vectorized but having the reader return rows.
parquetReaderBenchmark.addCase("ParquetReader Vectorized -> Row") { num =>
var longSum = 0L
var doubleSum = 0.0
val aggregateValue: (InternalRow) => Unit = dataType match {
case ByteType => (col: InternalRow) => longSum += col.getByte(0)
case ShortType => (col: InternalRow) => longSum += col.getShort(0)
case IntegerType => (col: InternalRow) => longSum += col.getInt(0)
case LongType => (col: InternalRow) => longSum += col.getLong(0)
case FloatType => (col: InternalRow) => doubleSum += col.getFloat(0)
case DoubleType => (col: InternalRow) => doubleSum += col.getDouble(0)
}
files.map(_.asInstanceOf[String]).foreach { p =>
val reader = new VectorizedParquetRecordReader(
null, enableOffHeapColumnVector, vectorizedReaderBatchSize)
try {
reader.initialize(p, ("id" :: Nil).asJava)
val batch = reader.resultBatch()
while (reader.nextBatch()) {
val it = batch.rowIterator()
while (it.hasNext) {
val record = it.next()
if (!record.isNullAt(0)) aggregateValue(record)
}
}
} finally {
reader.close()
}
}
}
/*
OpenJDK 64-Bit Server VM 1.8.0_171-b10 on Linux 4.14.33-51.37.amzn1.x86_64
Intel(R) Xeon(R) CPU E5-2670 v2 @ 2.50GHz
Single TINYINT Column Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
ParquetReader Vectorized 198 / 202 79.4 12.6 1.0X
ParquetReader Vectorized -> Row 119 / 121 132.3 7.6 1.7X
Single SMALLINT Column Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
ParquetReader Vectorized 282 / 287 55.8 17.9 1.0X
ParquetReader Vectorized -> Row 246 / 247 64.0 15.6 1.1X
Single INT Column Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
ParquetReader Vectorized 258 / 262 60.9 16.4 1.0X
ParquetReader Vectorized -> Row 259 / 260 60.8 16.5 1.0X
Single BIGINT Column Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
ParquetReader Vectorized 361 / 369 43.6 23.0 1.0X
ParquetReader Vectorized -> Row 361 / 371 43.6 22.9 1.0X
Single FLOAT Column Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
ParquetReader Vectorized 253 / 261 62.2 16.1 1.0X
ParquetReader Vectorized -> Row 254 / 256 61.9 16.2 1.0X
Single DOUBLE Column Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
ParquetReader Vectorized 357 / 364 44.0 22.7 1.0X
ParquetReader Vectorized -> Row 358 / 366 44.0 22.7 1.0X
*/
parquetReaderBenchmark.run()
}
}
}
def intStringScanBenchmark(values: Int): Unit = {
val benchmark = new Benchmark("Int and String Scan", values)
withTempPath { dir =>
withTempTable("t1", "csvTable", "jsonTable", "parquetTable", "orcTable") {
import spark.implicits._
spark.range(values).map(_ => Random.nextLong).createOrReplaceTempView("t1")
prepareTable(
dir,
spark.sql("SELECT CAST(value AS INT) AS c1, CAST(value as STRING) AS c2 FROM t1"))
benchmark.addCase("SQL CSV") { _ =>
spark.sql("select sum(c1), sum(length(c2)) from csvTable").collect()
}
benchmark.addCase("SQL Json") { _ =>
spark.sql("select sum(c1), sum(length(c2)) from jsonTable").collect()
}
benchmark.addCase("SQL Parquet Vectorized") { _ =>
spark.sql("select sum(c1), sum(length(c2)) from parquetTable").collect()
}
benchmark.addCase("SQL Parquet MR") { _ =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("select sum(c1), sum(length(c2)) from parquetTable").collect()
}
}
benchmark.addCase("SQL ORC Vectorized") { _ =>
spark.sql("SELECT sum(c1), sum(length(c2)) FROM orcTable").collect()
}
benchmark.addCase("SQL ORC Vectorized with copy") { _ =>
withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
spark.sql("SELECT sum(c1), sum(length(c2)) FROM orcTable").collect()
}
}
benchmark.addCase("SQL ORC MR") { _ =>
withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("SELECT sum(c1), sum(length(c2)) FROM orcTable").collect()
}
}
/*
OpenJDK 64-Bit Server VM 1.8.0_171-b10 on Linux 4.14.33-51.37.amzn1.x86_64
Intel(R) Xeon(R) CPU E5-2670 v2 @ 2.50GHz
Int and String Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
SQL CSV 27145 / 27158 0.4 2588.7 1.0X
SQL Json 12969 / 13337 0.8 1236.8 2.1X
SQL Parquet Vectorized 2419 / 2448 4.3 230.7 11.2X
SQL Parquet MR 4631 / 4633 2.3 441.7 5.9X
SQL ORC Vectorized 2412 / 2465 4.3 230.0 11.3X
SQL ORC Vectorized with copy 2633 / 2675 4.0 251.1 10.3X
SQL ORC MR 4280 / 4350 2.4 408.2 6.3X
*/
benchmark.run()
}
}
}
def repeatedStringScanBenchmark(values: Int): Unit = {
val benchmark = new Benchmark("Repeated String", values)
withTempPath { dir =>
withTempTable("t1", "csvTable", "jsonTable", "parquetTable", "orcTable") {
import spark.implicits._
spark.range(values).map(_ => Random.nextLong).createOrReplaceTempView("t1")
prepareTable(
dir,
spark.sql("select cast((value % 200) + 10000 as STRING) as c1 from t1"))
benchmark.addCase("SQL CSV") { _ =>
spark.sql("select sum(length(c1)) from csvTable").collect()
}
benchmark.addCase("SQL Json") { _ =>
spark.sql("select sum(length(c1)) from jsonTable").collect()
}
benchmark.addCase("SQL Parquet Vectorized") { _ =>
spark.sql("select sum(length(c1)) from parquetTable").collect()
}
benchmark.addCase("SQL Parquet MR") { _ =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("select sum(length(c1)) from parquetTable").collect()
}
}
benchmark.addCase("SQL ORC Vectorized") { _ =>
spark.sql("select sum(length(c1)) from orcTable").collect()
}
benchmark.addCase("SQL ORC Vectorized with copy") { _ =>
withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
spark.sql("select sum(length(c1)) from orcTable").collect()
}
}
benchmark.addCase("SQL ORC MR") { _ =>
withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("select sum(length(c1)) from orcTable").collect()
}
}
/*
OpenJDK 64-Bit Server VM 1.8.0_171-b10 on Linux 4.14.33-51.37.amzn1.x86_64
Intel(R) Xeon(R) CPU E5-2670 v2 @ 2.50GHz
Repeated String: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
SQL CSV 17345 / 17424 0.6 1654.1 1.0X
SQL Json 8639 / 8664 1.2 823.9 2.0X
SQL Parquet Vectorized 839 / 854 12.5 80.0 20.7X
SQL Parquet MR 1771 / 1775 5.9 168.9 9.8X
SQL ORC Vectorized 550 / 569 19.1 52.4 31.6X
SQL ORC Vectorized with copy 785 / 849 13.4 74.9 22.1X
SQL ORC MR 2168 / 2202 4.8 206.7 8.0X
*/
benchmark.run()
}
}
}
def partitionTableScanBenchmark(values: Int): Unit = {
val benchmark = new Benchmark("Partitioned Table", values)
withTempPath { dir =>
withTempTable("t1", "csvTable", "jsonTable", "parquetTable", "orcTable") {
import spark.implicits._
spark.range(values).map(_ => Random.nextLong).createOrReplaceTempView("t1")
prepareTable(dir, spark.sql("SELECT value % 2 AS p, value AS id FROM t1"), Some("p"))
benchmark.addCase("Data column - CSV") { _ =>
spark.sql("select sum(id) from csvTable").collect()
}
benchmark.addCase("Data column - Json") { _ =>
spark.sql("select sum(id) from jsonTable").collect()
}
benchmark.addCase("Data column - Parquet Vectorized") { _ =>
spark.sql("select sum(id) from parquetTable").collect()
}
benchmark.addCase("Data column - Parquet MR") { _ =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("select sum(id) from parquetTable").collect()
}
}
benchmark.addCase("Data column - ORC Vectorized") { _ =>
spark.sql("SELECT sum(id) FROM orcTable").collect()
}
benchmark.addCase("Data column - ORC Vectorized with copy") { _ =>
withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
spark.sql("SELECT sum(id) FROM orcTable").collect()
}
}
benchmark.addCase("Data column - ORC MR") { _ =>
withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("SELECT sum(id) FROM orcTable").collect()
}
}
benchmark.addCase("Partition column - CSV") { _ =>
spark.sql("select sum(p) from csvTable").collect()
}
benchmark.addCase("Partition column - Json") { _ =>
spark.sql("select sum(p) from jsonTable").collect()
}
benchmark.addCase("Partition column - Parquet Vectorized") { _ =>
spark.sql("select sum(p) from parquetTable").collect()
}
benchmark.addCase("Partition column - Parquet MR") { _ =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("select sum(p) from parquetTable").collect()
}
}
benchmark.addCase("Partition column - ORC Vectorized") { _ =>
spark.sql("SELECT sum(p) FROM orcTable").collect()
}
benchmark.addCase("Partition column - ORC Vectorized with copy") { _ =>
withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
spark.sql("SELECT sum(p) FROM orcTable").collect()
}
}
benchmark.addCase("Partition column - ORC MR") { _ =>
withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("SELECT sum(p) FROM orcTable").collect()
}
}
benchmark.addCase("Both columns - CSV") { _ =>
spark.sql("select sum(p), sum(id) from csvTable").collect()
}
benchmark.addCase("Both columns - Json") { _ =>
spark.sql("select sum(p), sum(id) from jsonTable").collect()
}
benchmark.addCase("Both columns - Parquet Vectorized") { _ =>
spark.sql("select sum(p), sum(id) from parquetTable").collect()
}
benchmark.addCase("Both columns - Parquet MR") { _ =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("select sum(p), sum(id) from parquetTable").collect
}
}
benchmark.addCase("Both columns - ORC Vectorized") { _ =>
spark.sql("SELECT sum(p), sum(id) FROM orcTable").collect()
}
benchmark.addCase("Both column - ORC Vectorized with copy") { _ =>
withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
spark.sql("SELECT sum(p), sum(id) FROM orcTable").collect()
}
}
benchmark.addCase("Both columns - ORC MR") { _ =>
withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("SELECT sum(p), sum(id) FROM orcTable").collect()
}
}
/*
OpenJDK 64-Bit Server VM 1.8.0_171-b10 on Linux 4.14.33-51.37.amzn1.x86_64
Intel(R) Xeon(R) CPU E5-2670 v2 @ 2.50GHz
Partitioned Table: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
Data column - CSV 32613 / 32841 0.5 2073.4 1.0X
Data column - Json 13343 / 13469 1.2 848.3 2.4X
Data column - Parquet Vectorized 302 / 318 52.1 19.2 108.0X
Data column - Parquet MR 2908 / 2924 5.4 184.9 11.2X
Data column - ORC Vectorized 412 / 425 38.1 26.2 79.1X
Data column - ORC Vectorized with copy 442 / 446 35.6 28.1 73.8X
Data column - ORC MR 2390 / 2396 6.6 152.0 13.6X
Partition column - CSV 9626 / 9683 1.6 612.0 3.4X
Partition column - Json 10909 / 10923 1.4 693.6 3.0X
Partition column - Parquet Vectorized 69 / 76 228.4 4.4 473.6X
Partition column - Parquet MR 1898 / 1933 8.3 120.7 17.2X
Partition column - ORC Vectorized 67 / 74 236.0 4.2 489.4X
Partition column - ORC Vectorized with copy 65 / 72 241.9 4.1 501.6X
Partition column - ORC MR 1743 / 1749 9.0 110.8 18.7X
Both columns - CSV 35523 / 35552 0.4 2258.5 0.9X
Both columns - Json 13676 / 13681 1.2 869.5 2.4X
Both columns - Parquet Vectorized 317 / 326 49.5 20.2 102.7X
Both columns - Parquet MR 3333 / 3336 4.7 211.9 9.8X
Both columns - ORC Vectorized 441 / 446 35.6 28.1 73.9X
Both column - ORC Vectorized with copy 517 / 524 30.4 32.9 63.1X
Both columns - ORC MR 2574 / 2577 6.1 163.6 12.7X
*/
benchmark.run()
}
}
}
def stringWithNullsScanBenchmark(values: Int, fractionOfNulls: Double): Unit = {
val benchmark = new Benchmark("String with Nulls Scan", values)
withTempPath { dir =>
withTempTable("t1", "csvTable", "jsonTable", "parquetTable", "orcTable") {
spark.range(values).createOrReplaceTempView("t1")
prepareTable(
dir,
spark.sql(
s"SELECT IF(RAND(1) < $fractionOfNulls, NULL, CAST(id as STRING)) AS c1, " +
s"IF(RAND(2) < $fractionOfNulls, NULL, CAST(id as STRING)) AS c2 FROM t1"))
benchmark.addCase("SQL CSV") { _ =>
spark.sql("select sum(length(c2)) from csvTable where c1 is " +
"not NULL and c2 is not NULL").collect()
}
benchmark.addCase("SQL Json") { _ =>
spark.sql("select sum(length(c2)) from jsonTable where c1 is " +
"not NULL and c2 is not NULL").collect()
}
benchmark.addCase("SQL Parquet Vectorized") { _ =>
spark.sql("select sum(length(c2)) from parquetTable where c1 is " +
"not NULL and c2 is not NULL").collect()
}
benchmark.addCase("SQL Parquet MR") { _ =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("select sum(length(c2)) from parquetTable where c1 is " +
"not NULL and c2 is not NULL").collect()
}
}
val files = SpecificParquetRecordReaderBase.listDirectory(new File(dir, "parquet")).toArray
val enableOffHeapColumnVector = spark.sessionState.conf.offHeapColumnVectorEnabled
val vectorizedReaderBatchSize = spark.sessionState.conf.parquetVectorizedReaderBatchSize
benchmark.addCase("ParquetReader Vectorized") { num =>
var sum = 0
files.map(_.asInstanceOf[String]).foreach { p =>
val reader = new VectorizedParquetRecordReader(
null, enableOffHeapColumnVector, vectorizedReaderBatchSize)
try {
reader.initialize(p, ("c1" :: "c2" :: Nil).asJava)
val batch = reader.resultBatch()
while (reader.nextBatch()) {
val rowIterator = batch.rowIterator()
while (rowIterator.hasNext) {
val row = rowIterator.next()
val value = row.getUTF8String(0)
if (!row.isNullAt(0) && !row.isNullAt(1)) sum += value.numBytes()
}
}
} finally {
reader.close()
}
}
}
benchmark.addCase("SQL ORC Vectorized") { _ =>
spark.sql("SELECT SUM(LENGTH(c2)) FROM orcTable " +
"WHERE c1 IS NOT NULL AND c2 IS NOT NULL").collect()
}
benchmark.addCase("SQL ORC Vectorized with copy") { _ =>
withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
spark.sql("SELECT SUM(LENGTH(c2)) FROM orcTable " +
"WHERE c1 IS NOT NULL AND c2 IS NOT NULL").collect()
}
}
benchmark.addCase("SQL ORC MR") { _ =>
withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("SELECT SUM(LENGTH(c2)) FROM orcTable " +
"WHERE c1 IS NOT NULL AND c2 IS NOT NULL").collect()
}
}
/*
OpenJDK 64-Bit Server VM 1.8.0_171-b10 on Linux 4.14.33-51.37.amzn1.x86_64
Intel(R) Xeon(R) CPU E5-2670 v2 @ 2.50GHz
String with Nulls Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
SQL CSV 14875 / 14920 0.7 1418.6 1.0X
SQL Json 10974 / 10992 1.0 1046.5 1.4X
SQL Parquet Vectorized 1711 / 1750 6.1 163.2 8.7X
SQL Parquet MR 3838 / 3884 2.7 366.0 3.9X
ParquetReader Vectorized 1155 / 1168 9.1 110.2 12.9X
SQL ORC Vectorized 1341 / 1380 7.8 127.9 11.1X
SQL ORC Vectorized with copy 1659 / 1716 6.3 158.2 9.0X
SQL ORC MR 3594 / 3634 2.9 342.7 4.1X
String with Nulls Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
SQL CSV 17219 / 17264 0.6 1642.1 1.0X
SQL Json 8843 / 8864 1.2 843.3 1.9X
SQL Parquet Vectorized 1169 / 1178 9.0 111.4 14.7X
SQL Parquet MR 2676 / 2697 3.9 255.2 6.4X
ParquetReader Vectorized 1068 / 1071 9.8 101.8 16.1X
SQL ORC Vectorized 1319 / 1319 7.9 125.8 13.1X
SQL ORC Vectorized with copy 1638 / 1639 6.4 156.2 10.5X
SQL ORC MR 3230 / 3257 3.2 308.1 5.3X
String with Nulls Scan: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
SQL CSV 13976 / 14053 0.8 1332.8 1.0X
SQL Json 5166 / 5176 2.0 492.6 2.7X
SQL Parquet Vectorized 274 / 282 38.2 26.2 50.9X
SQL Parquet MR 1553 / 1555 6.8 148.1 9.0X
ParquetReader Vectorized 241 / 246 43.5 23.0 57.9X
SQL ORC Vectorized 476 / 479 22.0 45.4 29.3X
SQL ORC Vectorized with copy 584 / 588 17.9 55.7 23.9X
SQL ORC MR 1720 / 1734 6.1 164.1 8.1X
*/
benchmark.run()
}
}
}
def columnsBenchmark(values: Int, width: Int): Unit = {
val benchmark = new Benchmark(s"Single Column Scan from $width columns", values)
withTempPath { dir =>
withTempTable("t1", "csvTable", "jsonTable", "parquetTable", "orcTable") {
import spark.implicits._
val middle = width / 2
val selectExpr = (1 to width).map(i => s"value as c$i")
spark.range(values).map(_ => Random.nextLong).toDF()
.selectExpr(selectExpr: _*).createOrReplaceTempView("t1")
prepareTable(dir, spark.sql("SELECT * FROM t1"))
benchmark.addCase("SQL CSV") { _ =>
spark.sql(s"SELECT sum(c$middle) FROM csvTable").collect()
}
benchmark.addCase("SQL Json") { _ =>
spark.sql(s"SELECT sum(c$middle) FROM jsonTable").collect()
}
benchmark.addCase("SQL Parquet Vectorized") { _ =>
spark.sql(s"SELECT sum(c$middle) FROM parquetTable").collect()
}
benchmark.addCase("SQL Parquet MR") { _ =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql(s"SELECT sum(c$middle) FROM parquetTable").collect()
}
}
benchmark.addCase("SQL ORC Vectorized") { _ =>
spark.sql(s"SELECT sum(c$middle) FROM orcTable").collect()
}
benchmark.addCase("SQL ORC Vectorized with copy") { _ =>
withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
spark.sql(s"SELECT sum(c$middle) FROM orcTable").collect()
}
}
benchmark.addCase("SQL ORC MR") { _ =>
withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql(s"SELECT sum(c$middle) FROM orcTable").collect()
}
}
/*
OpenJDK 64-Bit Server VM 1.8.0_171-b10 on Linux 4.14.33-51.37.amzn1.x86_64
Intel(R) Xeon(R) CPU E5-2670 v2 @ 2.50GHz
Single Column Scan from 10 columns: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
SQL CSV 3478 / 3481 0.3 3316.4 1.0X
SQL Json 2646 / 2654 0.4 2523.6 1.3X
SQL Parquet Vectorized 67 / 72 15.8 63.5 52.2X
SQL Parquet MR 207 / 214 5.1 197.6 16.8X
SQL ORC Vectorized 69 / 76 15.2 66.0 50.3X
SQL ORC Vectorized with copy 70 / 76 15.0 66.5 49.9X
SQL ORC MR 299 / 303 3.5 285.1 11.6X
Single Column Scan from 50 columns: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
SQL CSV 9214 / 9236 0.1 8786.7 1.0X
SQL Json 9943 / 9978 0.1 9482.7 0.9X
SQL Parquet Vectorized 77 / 86 13.6 73.3 119.8X
SQL Parquet MR 229 / 235 4.6 218.6 40.2X
SQL ORC Vectorized 84 / 96 12.5 80.0 109.9X
SQL ORC Vectorized with copy 83 / 91 12.6 79.4 110.7X
SQL ORC MR 843 / 854 1.2 804.0 10.9X
Single Column Scan from 100 columns Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
--------------------------------------------------------------------------------------------
SQL CSV 16503 / 16622 0.1 15738.9 1.0X
SQL Json 19109 / 19184 0.1 18224.2 0.9X
SQL Parquet Vectorized 99 / 108 10.6 94.3 166.8X
SQL Parquet MR 253 / 264 4.1 241.6 65.1X
SQL ORC Vectorized 107 / 114 9.8 101.6 154.8X
SQL ORC Vectorized with copy 107 / 118 9.8 102.1 154.1X
SQL ORC MR 1526 / 1529 0.7 1455.3 10.8X
*/
benchmark.run()
}
}
}
def main(args: Array[String]): Unit = {
Seq(ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType).foreach { dataType =>
numericScanBenchmark(1024 * 1024 * 15, dataType)
}
intStringScanBenchmark(1024 * 1024 * 10)
repeatedStringScanBenchmark(1024 * 1024 * 10)
partitionTableScanBenchmark(1024 * 1024 * 15)
for (fractionOfNulls <- List(0.0, 0.50, 0.95)) {
stringWithNullsScanBenchmark(1024 * 1024 * 10, fractionOfNulls)
}
for (columnWidth <- List(10, 50, 100)) {
columnsBenchmark(1024 * 1024 * 1, columnWidth)
}
}
}
|
bravo-zhang/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala
|
Scala
|
apache-2.0
| 41,887
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.datastream
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.{RelNode, RelWriter, SingleRel}
import org.apache.calcite.rex.{RexCall, RexNode}
import org.apache.calcite.sql.SemiJoinType
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.streaming.api.datastream.DataStream
import org.apache.flink.streaming.api.functions.ProcessFunction
import org.apache.flink.table.api.{StreamQueryConfig, StreamTableEnvironment}
import org.apache.flink.table.functions.utils.TableSqlFunction
import org.apache.flink.table.plan.nodes.CommonCorrelate
import org.apache.flink.table.plan.nodes.logical.FlinkLogicalTableFunctionScan
import org.apache.flink.table.plan.schema.RowSchema
import org.apache.flink.table.runtime.CRowCorrelateProcessRunner
import org.apache.flink.table.runtime.types.{CRow, CRowTypeInfo}
/**
* Flink RelNode that joins each input row with the rows produced by a user-defined table function (correlate).
*/
class DataStreamCorrelate(
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputSchema: RowSchema,
input: RelNode,
scan: FlinkLogicalTableFunctionScan,
condition: Option[RexNode],
schema: RowSchema,
joinSchema: RowSchema,
joinType: SemiJoinType,
ruleDescription: String)
extends SingleRel(cluster, traitSet, input)
with CommonCorrelate
with DataStreamRel {
override def deriveRowType() = schema.relDataType
override def copy(traitSet: RelTraitSet, inputs: java.util.List[RelNode]): RelNode = {
new DataStreamCorrelate(
cluster,
traitSet,
inputSchema,
inputs.get(0),
scan,
condition,
schema,
joinSchema,
joinType,
ruleDescription)
}
override def toString: String = {
val rexCall = scan.getCall.asInstanceOf[RexCall]
val sqlFunction = rexCall.getOperator.asInstanceOf[TableSqlFunction]
correlateToString(rexCall, sqlFunction)
}
override def explainTerms(pw: RelWriter): RelWriter = {
val rexCall = scan.getCall.asInstanceOf[RexCall]
val sqlFunction = rexCall.getOperator.asInstanceOf[TableSqlFunction]
super.explainTerms(pw)
.item("invocation", scan.getCall)
.item("function", sqlFunction.getTableFunction.getClass.getCanonicalName)
.item("rowType", schema.relDataType)
.item("joinType", joinType)
.itemIf("condition", condition.orNull, condition.isDefined)
}
override def translateToPlan(
tableEnv: StreamTableEnvironment,
queryConfig: StreamQueryConfig): DataStream[CRow] = {
val config = tableEnv.getConfig
// we do not need to specify input type
val inputDS = getInput.asInstanceOf[DataStreamRel].translateToPlan(tableEnv, queryConfig)
val funcRel = scan.asInstanceOf[FlinkLogicalTableFunctionScan]
val rexCall = funcRel.getCall.asInstanceOf[RexCall]
val sqlFunction = rexCall.getOperator.asInstanceOf[TableSqlFunction]
val pojoFieldMapping = Some(sqlFunction.getPojoFieldMapping)
val udtfTypeInfo = sqlFunction.getRowTypeInfo.asInstanceOf[TypeInformation[Any]]
val process = generateFunction(
config,
inputSchema,
udtfTypeInfo,
schema,
joinType,
rexCall,
pojoFieldMapping,
ruleDescription,
classOf[ProcessFunction[CRow, CRow]])
val collector = generateCollector(
config,
inputSchema,
udtfTypeInfo,
schema,
condition,
pojoFieldMapping)
val processFunc = new CRowCorrelateProcessRunner(
process.name,
process.code,
collector.name,
collector.code,
CRowTypeInfo(process.returnType))
val inputParallelism = inputDS.getParallelism
inputDS
.process(processFunc)
// preserve input parallelism to ensure that acc and retract messages remain in order
.setParallelism(inputParallelism)
.name(correlateOpName(rexCall, sqlFunction, schema.relDataType))
}
}
|
zohar-mizrahi/flink
|
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/datastream/DataStreamCorrelate.scala
|
Scala
|
apache-2.0
| 4,756
|
package net.fwbrasil.smirror
import scala.reflect.runtime.universe._
import java.lang.reflect.Method
import java.lang.reflect.InvocationTargetException
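/**
 * Reflective wrapper around a method or constructor symbol: exposes its name, parameter groups,
 * return type and a `safeInvoke` helper that unwraps InvocationTargetException.
 */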
trait SBehavior[C] extends Visibility[C, MethodSymbol] {
implicit val runtimeMirror: Mirror
val owner: SType[C]
val symbol: MethodSymbol
val name = symbol.name.toString.trim
type SParameterType <: SParameter[C]
val parametersSymbols =
symbol.paramss.map(_.map(_.asTerm))
val parametersGroups = {
var paramIndex = 0
def index = {
val res = paramIndex
paramIndex += 1
res
}
parametersSymbols.map(_.map(sParameter(_, index)))
}
val isAbstract =
symbol.asInstanceOf[scala.reflect.internal.Symbols#Symbol]
.hasFlag(scala.reflect.internal.Flags.DEFERRED)
protected def sParameter(symbol: TermSymbol, index: Int): SParameterType
val parameters = parametersGroups.flatten
val typeSignature = symbol.returnType
lazy val returnType = sClassOf[Any](typeSignature)
private lazy val toStringParameters =
parametersGroups.map(_.mkString(", ")).mkString(")(")
override lazy val toString =
name + "(" + toStringParameters + "): " +
returnType.name.trim
protected def safeInvoke[R](f: => R) =
try f
catch {
case e: InvocationTargetException =>
throw e.getCause
}
}
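/** Reflective view of a constructor; `invoke` handles both inner classes (requiring an outer instance) and static/top-level classes. */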
case class SConstructor[C](owner: SClass[C], symbol: MethodSymbol)(implicit val runtimeMirror: Mirror)
extends SBehavior[C] {
type SParameterType = SConstructorParameter[C]
override protected def sParameter(symbol: TermSymbol, index: Int) =
SConstructorParameter[C](this, symbol, index)
def invoke(params: Any*): C =
params.toList match {
case (outer :: params) if (!owner.symbol.isStatic) =>
val instanceMirror = runtimeMirror.reflect(outer: Any)
val classMirror = instanceMirror.reflectClass(owner.symbol)
val constructor = classMirror.reflectConstructor(symbol)
safeInvoke(constructor.apply(params: _*)).asInstanceOf[C]
case params =>
val classMirror = runtimeMirror.reflectClass(owner.symbol)
val constructor = classMirror.reflectConstructor(symbol)
safeInvoke(constructor.apply(params: _*).asInstanceOf[C])
}
}
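/** Reflective view of a method; `invoke` prefers the underlying java.lang.reflect.Method when it can be resolved, falling back to Scala reflection. */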
case class SMethod[C](owner: SType[C], symbol: MethodSymbol)(implicit val runtimeMirror: Mirror)
extends SBehavior[C] with TypeParameters {
val javaMethodOption = {
runtimeMirror.getClass.getDeclaredMethods.find(_.getName == "methodToJava").flatMap { method =>
try Some(method.invoke(runtimeMirror, symbol).asInstanceOf[Method])
catch {
case e: InvocationTargetException if (e.getCause.isInstanceOf[NoSuchMethodException]) =>
None
case e: InvocationTargetException if (e.getCause.isInstanceOf[ClassNotFoundException]) =>
None
}
}
}
def getAnnotation[A <: java.lang.annotation.Annotation](cls: Class[A]) =
javaMethodOption.flatMap(m => Option(m.getAnnotation(cls)))
def getParameterAnnotations =
parameters.zip(javaMethodOption.get.getParameterAnnotations.map(_.toList)).toMap
type SParameterType = SMethodParameter[C]
override protected def sParameter(symbol: TermSymbol, index: Int) =
SMethodParameter[C](this, symbol, index)
def invoke(obj: C, params: Any*) = {
lazy val instanceMirror = runtimeMirror.reflect(obj: Any)
lazy val method = instanceMirror.reflectMethod(symbol)
safeInvoke(
javaMethodOption
.map(_.invoke(obj, params.asInstanceOf[Seq[Object]]: _*))
.getOrElse(method(params: _*))
)
}
}
|
fwbrasil/smirror
|
src/main/scala/net/fwbrasil/smirror/SBehavior.scala
|
Scala
|
lgpl-2.1
| 3,902
|
package pl.pej.trelloilaro.api.requestBuilder.builder.action
import pl.pej.trelloilaro.api.requestBuilder.RequestBuilder
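// Mix-in builders that add the "entities" / "actions_entities" query parameters to a request builder.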
trait EntitiesBuilder[T] { this: RequestBuilder[T] =>
def withEntities(value: Boolean) = withOnlyParam( "entities", value)
}
trait ActionEntitiesBuilder[T] { this: RequestBuilder[T] =>
def withEntities(value: Boolean) = withOnlyParam( "actions_entities", value)
}
|
tomaszym/trelloilaro
|
src/main/scala/pl/pej/trelloilaro/api/requestBuilder/builder/action/entities.scala
|
Scala
|
mit
| 396
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io._
import java.nio.file.Files
import scala.io.Source
import scala.util.Properties
import scala.collection.JavaConverters._
import scala.collection.mutable.Stack
import sbt._
import sbt.Classpaths.publishTask
import sbt.Keys._
import sbtunidoc.Plugin.UnidocKeys.unidocGenjavadocVersion
import com.etsy.sbt.checkstyle.CheckstylePlugin.autoImport._
import com.simplytyped.Antlr4Plugin._
import com.typesafe.sbt.pom.{PomBuild, SbtPomKeys}
import com.typesafe.tools.mima.plugin.MimaKeys
import org.scalastyle.sbt.ScalastylePlugin.autoImport._
import org.scalastyle.sbt.Tasks
import spray.revolver.RevolverPlugin._
object BuildCommons {
private val buildLocation = file(".").getAbsoluteFile.getParentFile
val sqlProjects@Seq(catalyst, sql, hive, hiveThriftServer, sqlKafka010, avro) = Seq(
"catalyst", "sql", "hive", "hive-thriftserver", "sql-kafka-0-10", "avro"
).map(ProjectRef(buildLocation, _))
val streamingProjects@Seq(streaming, streamingKafka010) =
Seq("streaming", "streaming-kafka-0-10").map(ProjectRef(buildLocation, _))
val allProjects@Seq(
core, graphx, mllib, mllibLocal, repl, networkCommon, networkShuffle, launcher, unsafe, tags, sketch, kvstore, _*
) = Seq(
"core", "graphx", "mllib", "mllib-local", "repl", "network-common", "network-shuffle", "launcher", "unsafe",
"tags", "sketch", "kvstore"
).map(ProjectRef(buildLocation, _)) ++ sqlProjects ++ streamingProjects
val optionallyEnabledProjects@Seq(kubernetes, mesos, yarn,
streamingFlumeSink, streamingFlume,
streamingKafka, sparkGangliaLgpl, streamingKinesisAsl,
dockerIntegrationTests, hadoopCloud, kubernetesIntegrationTests) =
Seq("kubernetes", "mesos", "yarn",
"streaming-flume-sink", "streaming-flume",
"streaming-kafka-0-8", "ganglia-lgpl", "streaming-kinesis-asl",
"docker-integration-tests", "hadoop-cloud", "kubernetes-integration-tests").map(ProjectRef(buildLocation, _))
val assemblyProjects@Seq(networkYarn, streamingFlumeAssembly, streamingKafkaAssembly, streamingKafka010Assembly, streamingKinesisAslAssembly) =
Seq("network-yarn", "streaming-flume-assembly", "streaming-kafka-0-8-assembly", "streaming-kafka-0-10-assembly", "streaming-kinesis-asl-assembly")
.map(ProjectRef(buildLocation, _))
val copyJarsProjects@Seq(assembly, examples) = Seq("assembly", "examples")
.map(ProjectRef(buildLocation, _))
val tools = ProjectRef(buildLocation, "tools")
// Root project.
val spark = ProjectRef(buildLocation, "spark")
val sparkHome = buildLocation
val testTempDir = s"$sparkHome/target/tmp"
val javacJVMVersion = settingKey[String]("source and target JVM version for javac")
val scalacJVMVersion = settingKey[String]("source and target JVM version for scalac")
}
object SparkBuild extends PomBuild {
import BuildCommons._
import scala.collection.mutable.Map
val projectsMap: Map[String, Seq[Setting[_]]] = Map.empty
override val profiles = {
val profiles = Properties.envOrNone("SBT_MAVEN_PROFILES") match {
case None => Seq("sbt")
case Some(v) =>
v.split("(\\s+|,)").filterNot(_.isEmpty).map(_.trim.replaceAll("-P", "")).toSeq
}
if (System.getProperty("scala-2.12") == "") {
// To activate the scala-2.12 profile, replace the empty property value with a non-empty one,
// in the same way Maven treats -Dname as -Dname=true before running the build.
// see: https://github.com/apache/maven/blob/maven-3.0.4/maven-embedder/src/main/java/org/apache/maven/cli/MavenCli.java#L1082
System.setProperty("scala-2.12", "true")
}
profiles
}
Properties.envOrNone("SBT_MAVEN_PROPERTIES") match {
case Some(v) =>
v.split("(\\s+|,)").filterNot(_.isEmpty).map(_.split("=")).foreach(x => System.setProperty(x(0), x(1)))
case _ =>
}
override val userPropertiesMap = System.getProperties.asScala.toMap
lazy val MavenCompile = config("m2r") extend(Compile)
lazy val publishLocalBoth = TaskKey[Unit]("publish-local", "publish local for m2 and ivy")
lazy val sparkGenjavadocSettings: Seq[sbt.Def.Setting[_]] = Seq(
libraryDependencies += compilerPlugin(
"com.typesafe.genjavadoc" %% "genjavadoc-plugin" % unidocGenjavadocVersion.value cross CrossVersion.full),
scalacOptions ++= Seq(
"-P:genjavadoc:out=" + (target.value / "java"),
"-P:genjavadoc:strictVisibility=true" // hide package private types
)
)
lazy val scalaStyleRules = Project("scalaStyleRules", file("scalastyle"))
.settings(
libraryDependencies += "org.scalastyle" %% "scalastyle" % "1.0.0"
)
lazy val scalaStyleOnCompile = taskKey[Unit]("scalaStyleOnCompile")
lazy val scalaStyleOnTest = taskKey[Unit]("scalaStyleOnTest")
// We special case the 'println' lint rule to only be a warning on compile, because adding
// printlns for debugging is a common use case and is easy to remember to remove.
val scalaStyleOnCompileConfig: String = {
val in = "scalastyle-config.xml"
val out = "scalastyle-on-compile.generated.xml"
val replacements = Map(
"""customId="println" level="error"""" -> """customId="println" level="warn""""
)
var contents = Source.fromFile(in).getLines.mkString("\n")
for ((k, v) <- replacements) {
require(contents.contains(k), s"Could not rewrite '$k' in original scalastyle config.")
contents = contents.replace(k, v)
}
new PrintWriter(out) {
write(contents)
close()
}
out
}
// Return a cached scalastyle task for a given configuration (usually Compile or Test)
private def cachedScalaStyle(config: Configuration) = Def.task {
val logger = streams.value.log
// We need a different cache dir per Configuration, otherwise they collide
val cacheDir = target.value / s"scalastyle-cache-${config.name}"
val cachedFun = FileFunction.cached(cacheDir, FilesInfo.lastModified, FilesInfo.exists) {
(inFiles: Set[File]) => {
val args: Seq[String] = Seq.empty
val scalaSourceV = Seq(file(scalaSource.in(config).value.getAbsolutePath))
val configV = (baseDirectory in ThisBuild).value / scalaStyleOnCompileConfig
val configUrlV = scalastyleConfigUrl.in(config).value
val streamsV = streams.in(config).value
val failOnErrorV = true
val failOnWarningV = false
val scalastyleTargetV = scalastyleTarget.in(config).value
val configRefreshHoursV = scalastyleConfigRefreshHours.in(config).value
val targetV = target.in(config).value
val configCacheFileV = scalastyleConfigUrlCacheFile.in(config).value
logger.info(s"Running scalastyle on ${name.value} in ${config.name}")
Tasks.doScalastyle(args, configV, configUrlV, failOnErrorV, failOnWarningV, scalaSourceV,
scalastyleTargetV, streamsV, configRefreshHoursV, targetV, configCacheFileV)
Set.empty
}
}
cachedFun(findFiles(scalaSource.in(config).value))
}
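// Recursively collects every file and directory under the given path; the resulting set feeds the cache check above.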
private def findFiles(file: File): Set[File] = if (file.isDirectory) {
file.listFiles().toSet.flatMap(findFiles) + file
} else {
Set(file)
}
def enableScalaStyle: Seq[sbt.Def.Setting[_]] = Seq(
scalaStyleOnCompile := cachedScalaStyle(Compile).value,
scalaStyleOnTest := cachedScalaStyle(Test).value,
logLevel in scalaStyleOnCompile := Level.Warn,
logLevel in scalaStyleOnTest := Level.Warn,
(compile in Compile) := {
scalaStyleOnCompile.value
(compile in Compile).value
},
(compile in Test) := {
scalaStyleOnTest.value
(compile in Test).value
}
)
lazy val sharedSettings = sparkGenjavadocSettings ++
(if (sys.env.contains("NOLINT_ON_COMPILE")) Nil else enableScalaStyle) ++ Seq(
exportJars in Compile := true,
exportJars in Test := false,
javaHome := sys.env.get("JAVA_HOME")
.orElse(sys.props.get("java.home").map { p => new File(p).getParentFile().getAbsolutePath() })
.map(file),
incOptions := incOptions.value.withNameHashing(true),
publishMavenStyle := true,
unidocGenjavadocVersion := "0.10",
// Override SBT's default resolvers:
resolvers := Seq(
DefaultMavenRepository,
Resolver.mavenLocal,
Resolver.file("local", file(Path.userHome.absolutePath + "/.ivy2/local"))(Resolver.ivyStylePatterns)
),
externalResolvers := resolvers.value,
otherResolvers := SbtPomKeys.mvnLocalRepository(dotM2 => Seq(Resolver.file("dotM2", dotM2))).value,
publishLocalConfiguration in MavenCompile :=
new PublishConfiguration(None, "dotM2", packagedArtifacts.value, Seq(), ivyLoggingLevel.value),
publishMavenStyle in MavenCompile := true,
publishLocal in MavenCompile := publishTask(publishLocalConfiguration in MavenCompile, deliverLocal).value,
publishLocalBoth := Seq(publishLocal in MavenCompile, publishLocal).dependOn.value,
javacOptions in (Compile, doc) ++= {
val versionParts = System.getProperty("java.version").split("[+.\\-]+", 3)
var major = versionParts(0).toInt
if (major == 1) major = versionParts(1).toInt
if (major >= 8) Seq("-Xdoclint:all", "-Xdoclint:-missing") else Seq.empty
},
javacJVMVersion := "1.8",
scalacJVMVersion := "1.8",
javacOptions in Compile ++= Seq(
"-encoding", "UTF-8",
"-source", javacJVMVersion.value
),
// These -target and -Xlint:unchecked options cannot be set in the Compile configuration scope since
// `javadoc` doesn't play nicely with them; see https://github.com/sbt/sbt/issues/355#issuecomment-3817629
// for additional discussion and explanation.
javacOptions in (Compile, compile) ++= Seq(
"-target", javacJVMVersion.value,
"-Xlint:unchecked"
),
scalacOptions in Compile ++= Seq(
s"-target:jvm-${scalacJVMVersion.value}",
"-sourcepath", (baseDirectory in ThisBuild).value.getAbsolutePath // Required for relative source links in scaladoc
),
// Remove certain packages from Scaladoc
scalacOptions in (Compile, doc) := Seq(
"-groups",
"-skip-packages", Seq(
"org.apache.spark.api.python",
"org.apache.spark.network",
"org.apache.spark.deploy",
"org.apache.spark.util.collection"
).mkString(":"),
"-doc-title", "Spark " + version.value.replaceAll("-SNAPSHOT", "") + " ScalaDoc"
) ++ {
// Do not attempt to scaladoc javadoc comments under 2.12 since it can't handle inner classes
if (scalaBinaryVersion.value == "2.12") Seq("-no-java-comments") else Seq.empty
},
// Implements -Xfatal-warnings, ignoring deprecation warnings.
// Code snippet taken from https://issues.scala-lang.org/browse/SI-8410.
compile in Compile := {
val analysis = (compile in Compile).value
val out = streams.value
def logProblem(l: (=> String) => Unit, f: File, p: xsbti.Problem) = {
l(f.toString + ":" + p.position.line.fold("")(_ + ":") + " " + p.message)
l(p.position.lineContent)
l("")
}
var failed = 0
analysis.infos.allInfos.foreach { case (k, i) =>
i.reportedProblems foreach { p =>
val deprecation = p.message.contains("is deprecated")
if (!deprecation) {
failed = failed + 1
}
val printer: (=> String) => Unit = s => if (deprecation) {
out.log.warn(s)
} else {
out.log.error("[warn] " + s)
}
logProblem(printer, k, p)
}
}
if (failed > 0) {
sys.error(s"$failed fatal warnings")
}
analysis
}
)
def enable(settings: Seq[Setting[_]])(projectRef: ProjectRef) = {
val existingSettings = projectsMap.getOrElse(projectRef.project, Seq[Setting[_]]())
projectsMap += (projectRef.project -> (existingSettings ++ settings))
}
// Note: the ordering of these settings matters.
/* Enable shared settings on all projects */
(allProjects ++ optionallyEnabledProjects ++ assemblyProjects ++ copyJarsProjects ++ Seq(spark, tools))
.foreach(enable(sharedSettings ++ DependencyOverrides.settings ++
ExcludedDependencies.settings ++ Checkstyle.settings))
/* Enable tests settings for all projects except examples, assembly and tools */
(allProjects ++ optionallyEnabledProjects).foreach(enable(TestSettings.settings))
val mimaProjects = allProjects.filterNot { x =>
Seq(
spark, hive, hiveThriftServer, catalyst, repl, networkCommon, networkShuffle, networkYarn,
unsafe, tags, sqlKafka010, kvstore, avro
).contains(x)
}
mimaProjects.foreach { x =>
enable(MimaBuild.mimaSettings(sparkHome, x))(x)
}
/* Generate and pick the spark build info from extra-resources */
enable(Core.settings)(core)
/* Unsafe settings */
enable(Unsafe.settings)(unsafe)
/*
* Set up tasks to copy dependencies during packaging. This step can be disabled in the command
* line, so that dev/mima can run without trying to copy these files again and potentially
* causing issues.
*/
if (!"false".equals(System.getProperty("copyDependencies"))) {
copyJarsProjects.foreach(enable(CopyDependencies.settings))
}
/* Enable Assembly for all assembly projects */
assemblyProjects.foreach(enable(Assembly.settings))
/* Package pyspark artifacts in a separate zip file for YARN. */
enable(PySparkAssembly.settings)(assembly)
/* Enable unidoc only for the root spark project */
enable(Unidoc.settings)(spark)
/* Catalyst ANTLR generation settings */
enable(Catalyst.settings)(catalyst)
/* Spark SQL Core console settings */
enable(SQL.settings)(sql)
/* Hive console settings */
enable(Hive.settings)(hive)
enable(Flume.settings)(streamingFlumeSink)
// SPARK-14738 - Remove docker tests from main Spark build
// enable(DockerIntegrationTests.settings)(dockerIntegrationTests)
/**
* Adds the ability to run the spark shell directly from SBT without building an assembly
* jar.
*
* Usage: `build/sbt sparkShell`
*/
val sparkShell = taskKey[Unit]("start a spark-shell.")
val sparkPackage = inputKey[Unit](
s"""
|Download and run a spark package.
|Usage: `build/sbt "sparkPackage <group:artifact:version> <MainClass> [args]"`
""".stripMargin)
val sparkSql = taskKey[Unit]("starts the spark sql CLI.")
enable(Seq(
connectInput in run := true,
fork := true,
outputStrategy in run := Some (StdoutOutput),
javaOptions += "-Xmx2g",
sparkShell := {
(runMain in Compile).toTask(" org.apache.spark.repl.Main -usejavacp").value
},
sparkPackage := {
import complete.DefaultParsers._
val packages :: className :: otherArgs = spaceDelimited("<group:artifact:version> <MainClass> [args]").parsed.toList
val scalaRun = (runner in run).value
val classpath = (fullClasspath in Runtime).value
val args = Seq("--packages", packages, "--class", className, (Keys.`package` in Compile in LocalProject("core"))
.value.getCanonicalPath) ++ otherArgs
println(args)
scalaRun.run("org.apache.spark.deploy.SparkSubmit", classpath.map(_.data), args, streams.value.log)
},
javaOptions in Compile += "-Dspark.master=local",
sparkSql := {
(runMain in Compile).toTask(" org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver").value
}
))(assembly)
enable(Seq(sparkShell := sparkShell in LocalProject("assembly")))(spark)
// TODO: move this to its upstream project.
override def projectDefinitions(baseDirectory: File): Seq[Project] = {
super.projectDefinitions(baseDirectory).map { x =>
if (projectsMap.exists(_._1 == x.id)) x.settings(projectsMap(x.id): _*)
else x.settings(Seq[Setting[_]](): _*)
} ++ Seq[Project](OldDeps.project)
}
}
object Core {
lazy val settings = Seq(
resourceGenerators in Compile += Def.task {
val buildScript = baseDirectory.value + "/../build/spark-build-info"
val targetDir = baseDirectory.value + "/target/extra-resources/"
val command = Seq("bash", buildScript, targetDir, version.value)
Process(command).!!
val propsFile = baseDirectory.value / "target" / "extra-resources" / "spark-version-info.properties"
Seq(propsFile)
}.taskValue
)
}
object Unsafe {
lazy val settings = Seq(
// This option is needed to suppress warnings from sun.misc.Unsafe usage
javacOptions in Compile += "-XDignore.symbol.file"
)
}
object Flume {
lazy val settings = sbtavro.SbtAvro.avroSettings
}
object DockerIntegrationTests {
// This overrides the Guava version pinned in DependencyOverrides:
lazy val settings = Seq(
dependencyOverrides += "com.google.guava" % "guava" % "18.0",
resolvers += "DB2" at "https://app.camunda.com/nexus/content/repositories/public/",
libraryDependencies += "com.oracle" % "ojdbc6" % "11.2.0.1.0" from "https://app.camunda.com/nexus/content/repositories/public/com/oracle/ojdbc6/11.2.0.1.0/ojdbc6-11.2.0.1.0.jar" // scalastyle:ignore
)
}
/**
* Overrides to work around sbt's dependency resolution being different from Maven's.
*/
object DependencyOverrides {
lazy val settings = Seq(
dependencyOverrides += "com.google.guava" % "guava" % "14.0.1",
dependencyOverrides += "jline" % "jline" % "2.14.3")
}
/**
* This excludes library dependencies in sbt, which are specified in maven but are
* not needed by the sbt build.
*/
object ExcludedDependencies {
lazy val settings = Seq(
libraryDependencies ~= { libs => libs.filterNot(_.name == "groovy-all") }
)
}
/**
* Project to pull previous artifacts of Spark for generating Mima excludes.
*/
object OldDeps {
lazy val project = Project("oldDeps", file("dev"), settings = oldDepsSettings)
lazy val allPreviousArtifactKeys = Def.settingDyn[Seq[Set[ModuleID]]] {
SparkBuild.mimaProjects
.map { project => MimaKeys.mimaPreviousArtifacts in project }
.map(k => Def.setting(k.value))
.join
}
def oldDepsSettings() = Defaults.coreDefaultSettings ++ Seq(
name := "old-deps",
libraryDependencies := allPreviousArtifactKeys.value.flatten
)
}
object Catalyst {
lazy val settings = antlr4Settings ++ Seq(
antlr4Version in Antlr4 := "4.7",
antlr4PackageName in Antlr4 := Some("org.apache.spark.sql.catalyst.parser"),
antlr4GenListener in Antlr4 := true,
antlr4GenVisitor in Antlr4 := true
)
}
object SQL {
lazy val settings = Seq(
initialCommands in console :=
"""
|import org.apache.spark.SparkContext
|import org.apache.spark.sql.SQLContext
|import org.apache.spark.sql.catalyst.analysis._
|import org.apache.spark.sql.catalyst.dsl._
|import org.apache.spark.sql.catalyst.errors._
|import org.apache.spark.sql.catalyst.expressions._
|import org.apache.spark.sql.catalyst.plans.logical._
|import org.apache.spark.sql.catalyst.rules._
|import org.apache.spark.sql.catalyst.util._
|import org.apache.spark.sql.execution
|import org.apache.spark.sql.functions._
|import org.apache.spark.sql.types._
|
|val sc = new SparkContext("local[*]", "dev-shell")
|val sqlContext = new SQLContext(sc)
|import sqlContext.implicits._
|import sqlContext._
""".stripMargin,
cleanupCommands in console := "sc.stop()"
)
}
object Hive {
lazy val settings = Seq(
// Explicitly disable assertions since some Hive tests fail them
javaOptions in Test := (javaOptions in Test).value.filterNot(_ == "-ea"),
// Supporting all SerDes requires us to depend on deprecated APIs, so we turn off the warnings
// only for this subproject.
scalacOptions := (scalacOptions map { currentOpts: Seq[String] =>
currentOpts.filterNot(_ == "-deprecation")
}).value,
initialCommands in console :=
"""
|import org.apache.spark.SparkContext
|import org.apache.spark.sql.catalyst.analysis._
|import org.apache.spark.sql.catalyst.dsl._
|import org.apache.spark.sql.catalyst.errors._
|import org.apache.spark.sql.catalyst.expressions._
|import org.apache.spark.sql.catalyst.plans.logical._
|import org.apache.spark.sql.catalyst.rules._
|import org.apache.spark.sql.catalyst.util._
|import org.apache.spark.sql.execution
|import org.apache.spark.sql.functions._
|import org.apache.spark.sql.hive._
|import org.apache.spark.sql.hive.test.TestHive._
|import org.apache.spark.sql.hive.test.TestHive.implicits._
|import org.apache.spark.sql.types._""".stripMargin,
cleanupCommands in console := "sparkContext.stop()",
// Some of our log4j jars make it impossible to submit jobs from this JVM to Hive Map/Reduce
// in order to generate golden files. This is only required for developers who are adding new
// query tests.
fullClasspath in Test := (fullClasspath in Test).value.filterNot { f => f.toString.contains("jcl-over") }
)
}
object Assembly {
import sbtassembly.AssemblyUtils._
import sbtassembly.Plugin._
import AssemblyKeys._
val hadoopVersion = taskKey[String]("The version of hadoop that spark is compiled against.")
lazy val settings = assemblySettings ++ Seq(
test in assembly := {},
hadoopVersion := {
sys.props.get("hadoop.version")
.getOrElse(SbtPomKeys.effectivePom.value.getProperties.get("hadoop.version").asInstanceOf[String])
},
jarName in assembly := {
if (moduleName.value.contains("streaming-flume-assembly")
|| moduleName.value.contains("streaming-kafka-0-8-assembly")
|| moduleName.value.contains("streaming-kafka-0-10-assembly")
|| moduleName.value.contains("streaming-kinesis-asl-assembly")) {
// This must match the same name used in maven (see external/kafka-0-8-assembly/pom.xml)
s"${moduleName.value}-${version.value}.jar"
} else {
s"${moduleName.value}-${version.value}-hadoop${hadoopVersion.value}.jar"
}
},
jarName in (Test, assembly) := s"${moduleName.value}-test-${version.value}.jar",
mergeStrategy in assembly := {
case m if m.toLowerCase.endsWith("manifest.mf") => MergeStrategy.discard
case m if m.toLowerCase.matches("meta-inf.*\\.sf$") => MergeStrategy.discard
case "log4j.properties" => MergeStrategy.discard
case m if m.toLowerCase.startsWith("meta-inf/services/") => MergeStrategy.filterDistinctLines
case "reference.conf" => MergeStrategy.concat
case _ => MergeStrategy.first
}
)
}
object PySparkAssembly {
import sbtassembly.Plugin._
import AssemblyKeys._
import java.util.zip.{ZipOutputStream, ZipEntry}
lazy val settings = Seq(
// Use a resource generator to copy all .py files from python/pyspark into a managed directory
// to be included in the assembly. We can't just add "python/" to the assembly's resource dir
// list since that will copy unneeded / unwanted files.
resourceGenerators in Compile += Def.macroValueI(resourceManaged in Compile map { outDir: File =>
val src = new File(BuildCommons.sparkHome, "python/pyspark")
val zipFile = new File(BuildCommons.sparkHome , "python/lib/pyspark.zip")
zipFile.delete()
zipRecursive(src, zipFile)
Seq.empty[File]
}).value
)
private def zipRecursive(source: File, destZipFile: File) = {
val destOutput = new ZipOutputStream(new FileOutputStream(destZipFile))
addFilesToZipStream("", source, destOutput)
destOutput.flush()
destOutput.close()
}
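// Recursively adds `source` (file or directory) to the zip stream, prefixing entry names with `parent`
// so relative paths are preserved; file contents are copied in 8 KB chunks.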
private def addFilesToZipStream(parent: String, source: File, output: ZipOutputStream): Unit = {
if (source.isDirectory()) {
output.putNextEntry(new ZipEntry(parent + source.getName()))
for (file <- source.listFiles()) {
addFilesToZipStream(parent + source.getName() + File.separator, file, output)
}
} else {
val in = new FileInputStream(source)
output.putNextEntry(new ZipEntry(parent + source.getName()))
val buf = new Array[Byte](8192)
var n = 0
while (n != -1) {
n = in.read(buf)
if (n != -1) {
output.write(buf, 0, n)
}
}
output.closeEntry()
in.close()
}
}
}
object Unidoc {
import BuildCommons._
import sbtunidoc.Plugin._
import UnidocKeys._
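// Filters out generated classes (names containing "$") and Spark-internal packages so unidoc skips them.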
private def ignoreUndocumentedPackages(packages: Seq[Seq[File]]): Seq[Seq[File]] = {
packages
.map(_.filterNot(_.getName.contains("$")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/deploy")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/examples")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/memory")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/network")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/shuffle")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/executor")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/unsafe")))
.map(_.filterNot(_.getCanonicalPath.contains("python")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/util/collection")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/catalyst")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/execution")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/internal")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/hive/test")))
}
private def ignoreClasspaths(classpaths: Seq[Classpath]): Seq[Classpath] = {
classpaths
.map(_.filterNot(_.data.getCanonicalPath.matches(""".*kafka-clients-0\.10.*""")))
.map(_.filterNot(_.data.getCanonicalPath.matches(""".*kafka_2\..*-0\.10.*""")))
}
val unidocSourceBase = settingKey[String]("Base URL of source links in Scaladoc.")
lazy val settings = scalaJavaUnidocSettings ++ Seq (
publish := {},
unidocProjectFilter in(ScalaUnidoc, unidoc) :=
inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, streamingFlumeSink, kubernetes,
yarn, tags, streamingKafka010, sqlKafka010, avro),
unidocProjectFilter in(JavaUnidoc, unidoc) :=
inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, streamingFlumeSink, kubernetes,
yarn, tags, streamingKafka010, sqlKafka010, avro),
unidocAllClasspaths in (ScalaUnidoc, unidoc) := {
ignoreClasspaths((unidocAllClasspaths in (ScalaUnidoc, unidoc)).value)
},
unidocAllClasspaths in (JavaUnidoc, unidoc) := {
ignoreClasspaths((unidocAllClasspaths in (JavaUnidoc, unidoc)).value)
},
// Skip actual catalyst, but include the subproject.
// Catalyst is not public API and contains quasiquotes which break scaladoc.
unidocAllSources in (ScalaUnidoc, unidoc) := {
ignoreUndocumentedPackages((unidocAllSources in (ScalaUnidoc, unidoc)).value)
},
// Skip class names containing $ and some internal packages in Javadocs
unidocAllSources in (JavaUnidoc, unidoc) := {
ignoreUndocumentedPackages((unidocAllSources in (JavaUnidoc, unidoc)).value)
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/hadoop")))
},
javacOptions in (JavaUnidoc, unidoc) := Seq(
"-windowtitle", "Spark " + version.value.replaceAll("-SNAPSHOT", "") + " JavaDoc",
"-public",
"-noqualifier", "java.lang",
"-tag", """example:a:Example\:""",
"-tag", """note:a:Note\:""",
"-tag", "group:X",
"-tag", "tparam:X",
"-tag", "constructor:X",
"-tag", "todo:X",
"-tag", "groupname:X"
),
// Use GitHub repository for Scaladoc source links
unidocSourceBase := s"https://github.com/apache/spark/tree/v${version.value}",
scalacOptions in (ScalaUnidoc, unidoc) ++= Seq(
"-groups", // Group similar methods together based on the @group annotation.
"-skip-packages", "org.apache.hadoop",
"-sourcepath", (baseDirectory in ThisBuild).value.getAbsolutePath
) ++ (
// Add links to sources when generating Scaladoc for a non-snapshot release
if (!isSnapshot.value) {
Opts.doc.sourceUrl(unidocSourceBase.value + "€{FILE_PATH}.scala")
} else {
Seq()
}
)
)
}
object Checkstyle {
lazy val settings = Seq(
checkstyleSeverityLevel := Some(CheckstyleSeverityLevel.Error),
javaSource in (Compile, checkstyle) := baseDirectory.value / "src/main/java",
javaSource in (Test, checkstyle) := baseDirectory.value / "src/test/java",
checkstyleConfigLocation := CheckstyleConfigLocation.File("dev/checkstyle.xml"),
checkstyleOutputFile := baseDirectory.value / "target/checkstyle-output.xml",
checkstyleOutputFile in Test := baseDirectory.value / "target/checkstyle-output.xml"
)
}
object CopyDependencies {
val copyDeps = TaskKey[Unit]("copyDeps", "Copies needed dependencies to the build directory.")
val destPath = (crossTarget in Compile) { _ / "jars"}
lazy val settings = Seq(
copyDeps := {
val dest = destPath.value
if (!dest.isDirectory() && !dest.mkdirs()) {
throw new IOException("Failed to create jars directory.")
}
(dependencyClasspath in Compile).value.map(_.data)
.filter { jar => jar.isFile() }
.foreach { jar =>
val destJar = new File(dest, jar.getName())
if (destJar.isFile()) {
destJar.delete()
}
Files.copy(jar.toPath(), destJar.toPath())
}
},
crossTarget in (Compile, packageBin) := destPath.value,
packageBin in Compile := (packageBin in Compile).dependsOn(copyDeps).value
)
}
object TestSettings {
import BuildCommons._
private val scalaBinaryVersion =
if (System.getProperty("scala-2.12") == "true") {
"2.12"
} else {
"2.11"
}
lazy val settings = Seq (
// Fork new JVMs for tests and set Java options for those
fork := true,
// Setting SPARK_DIST_CLASSPATH is a simple way to make sure any child processes
// launched by the tests have access to the correct test-time classpath.
envVars in Test ++= Map(
"SPARK_DIST_CLASSPATH" ->
(fullClasspath in Test).value.files.map(_.getAbsolutePath)
.mkString(File.pathSeparator).stripSuffix(File.pathSeparator),
"SPARK_PREPEND_CLASSES" -> "1",
"SPARK_SCALA_VERSION" -> scalaBinaryVersion,
"SPARK_TESTING" -> "1",
"JAVA_HOME" -> sys.env.get("JAVA_HOME").getOrElse(sys.props("java.home"))),
javaOptions in Test += s"-Djava.io.tmpdir=$testTempDir",
javaOptions in Test += "-Dspark.test.home=" + sparkHome,
javaOptions in Test += "-Dspark.testing=1",
javaOptions in Test += "-Dspark.port.maxRetries=100",
javaOptions in Test += "-Dspark.master.rest.enabled=false",
javaOptions in Test += "-Dspark.memory.debugFill=true",
javaOptions in Test += "-Dspark.ui.enabled=false",
javaOptions in Test += "-Dspark.ui.showConsoleProgress=false",
javaOptions in Test += "-Dspark.unsafe.exceptionOnMemoryLeak=true",
javaOptions in Test += "-Dsun.io.serialization.extendedDebugInfo=false",
javaOptions in Test += "-Dderby.system.durability=test",
javaOptions in Test ++= System.getProperties.asScala.filter(_._1.startsWith("spark"))
.map { case (k,v) => s"-D$k=$v" }.toSeq,
javaOptions in Test += "-ea",
javaOptions in Test ++= "-Xmx3g -Xss4m"
.split(" ").toSeq,
javaOptions += "-Xmx3g",
// Exclude tags defined in a system property
testOptions in Test += Tests.Argument(TestFrameworks.ScalaTest,
sys.props.get("test.exclude.tags").map { tags =>
tags.split(",").flatMap { tag => Seq("-l", tag) }.toSeq
}.getOrElse(Nil): _*),
testOptions in Test += Tests.Argument(TestFrameworks.JUnit,
sys.props.get("test.exclude.tags").map { tags =>
Seq("--exclude-categories=" + tags)
}.getOrElse(Nil): _*),
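    // Illustrative invocation (assumed, mirroring the sys.props lookup above): tags can be
    // excluded by passing a JVM property when launching sbt, e.g.
    //   build/sbt -Dtest.exclude.tags=org.apache.spark.tags.ExtendedHiveTest "core/test"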
// Show full stack trace and duration in test cases.
testOptions in Test += Tests.Argument("-oDF"),
testOptions in Test += Tests.Argument(TestFrameworks.JUnit, "-v", "-a"),
// Enable Junit testing.
libraryDependencies += "com.novocode" % "junit-interface" % "0.11" % "test",
// Only allow one test at a time, even across projects, since they run in the same JVM
parallelExecution in Test := false,
// Make sure the test temp directory exists.
resourceGenerators in Test += Def.macroValueI(resourceManaged in Test map { outDir: File =>
var dir = new File(testTempDir)
if (!dir.isDirectory()) {
// Because File.mkdirs() can fail if multiple callers are trying to create the same
// parent directory, this code tries to create parents one at a time, and avoids
// failures when the directories have been created by somebody else.
val stack = new Stack[File]()
while (!dir.isDirectory()) {
stack.push(dir)
dir = dir.getParentFile()
}
while (stack.nonEmpty) {
val d = stack.pop()
require(d.mkdir() || d.isDirectory(), s"Failed to create directory $d")
}
}
Seq.empty[File]
}).value,
concurrentRestrictions in Global += Tags.limit(Tags.Test, 1)
)
}
|
tejasapatil/spark
|
project/SparkBuild.scala
|
Scala
|
apache-2.0
| 34,244
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.connector
import org.scalatest.BeforeAndAfter
import org.apache.spark.sql._
import org.apache.spark.sql.internal.SQLConf.{PARTITION_OVERWRITE_MODE, PartitionOverwriteMode}
import org.apache.spark.sql.test.SharedSparkSession
/**
* A collection of "INSERT INTO" tests that can be run through the SQL or DataFrameWriter APIs.
* Extending test suites can implement the `doInsert` method to run the insert through either
* API.
*
* @param supportsDynamicOverwrite Whether the Table implementations used in the test suite support
* dynamic partition overwrites. If they do, we will check for the
* success of the operations. If not, then we will check that we
* failed with the right error message.
* @param includeSQLOnlyTests Certain INSERT INTO behavior can be achieved purely through SQL, e.g.
* static or dynamic partition overwrites. This flag should be set to
* true if we would like to test these cases.
*/
abstract class InsertIntoTests(
override protected val supportsDynamicOverwrite: Boolean,
override protected val includeSQLOnlyTests: Boolean) extends InsertIntoSQLOnlyTests {
import testImplicits._
/**
* Insert data into a table using the insertInto statement. Implementations can be in SQL
* ("INSERT") or using the DataFrameWriter (`df.write.insertInto`).
*/
protected def doInsert(tableName: String, insert: DataFrame, mode: SaveMode = null): Unit
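  // Illustrative sketch (not part of this file): a concrete suite could implement `doInsert`
  // via the DataFrameWriter API, for example
  //   override protected def doInsert(tableName: String, insert: DataFrame, mode: SaveMode): Unit = {
  //     val writer = if (mode != null) insert.write.mode(mode) else insert.write
  //     writer.insertInto(tableName)
  //   }
  // or via SQL, by registering `insert` as a temp view and running an INSERT INTO /
  // INSERT OVERWRITE TABLE statement that selects from that view.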
test("insertInto: append") {
val t1 = s"${catalogAndNamespace}tbl"
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format")
val df = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("id", "data")
doInsert(t1, df)
verifyTable(t1, df)
}
test("insertInto: append by position") {
val t1 = s"${catalogAndNamespace}tbl"
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format")
val df = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("id", "data")
val dfr = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("data", "id")
doInsert(t1, dfr)
verifyTable(t1, df)
}
test("insertInto: append partitioned table") {
val t1 = s"${catalogAndNamespace}tbl"
withTable(t1) {
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format PARTITIONED BY (id)")
val df = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("id", "data")
doInsert(t1, df)
verifyTable(t1, df)
}
}
test("insertInto: overwrite non-partitioned table") {
val t1 = s"${catalogAndNamespace}tbl"
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format")
val df = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("id", "data")
val df2 = Seq((4L, "d"), (5L, "e"), (6L, "f")).toDF("id", "data")
doInsert(t1, df)
doInsert(t1, df2, SaveMode.Overwrite)
verifyTable(t1, df2)
}
test("insertInto: overwrite partitioned table in static mode") {
withSQLConf(PARTITION_OVERWRITE_MODE.key -> PartitionOverwriteMode.STATIC.toString) {
val t1 = s"${catalogAndNamespace}tbl"
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format PARTITIONED BY (id)")
val init = Seq((2L, "dummy"), (4L, "keep")).toDF("id", "data")
doInsert(t1, init)
val df = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("id", "data")
doInsert(t1, df, SaveMode.Overwrite)
verifyTable(t1, df)
}
}
test("insertInto: overwrite partitioned table in static mode by position") {
withSQLConf(PARTITION_OVERWRITE_MODE.key -> PartitionOverwriteMode.STATIC.toString) {
val t1 = s"${catalogAndNamespace}tbl"
withTable(t1) {
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format PARTITIONED BY (id)")
val init = Seq((2L, "dummy"), (4L, "keep")).toDF("id", "data")
doInsert(t1, init)
val dfr = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("data", "id")
doInsert(t1, dfr, SaveMode.Overwrite)
val df = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("id", "data")
verifyTable(t1, df)
}
}
}
test("insertInto: fails when missing a column") {
val t1 = s"${catalogAndNamespace}tbl"
sql(s"CREATE TABLE $t1 (id bigint, data string, missing string) USING $v2Format")
val df = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("id", "data")
val exc = intercept[AnalysisException] {
doInsert(t1, df)
}
verifyTable(t1, Seq.empty[(Long, String, String)].toDF("id", "data", "missing"))
val tableName = if (catalogAndNamespace.isEmpty) s"default.$t1" else t1
assert(exc.getMessage.contains(s"Cannot write to '$tableName', not enough data columns"))
}
test("insertInto: fails when an extra column is present") {
val t1 = s"${catalogAndNamespace}tbl"
withTable(t1) {
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format")
val df = Seq((1L, "a", "mango")).toDF("id", "data", "fruit")
val exc = intercept[AnalysisException] {
doInsert(t1, df)
}
verifyTable(t1, Seq.empty[(Long, String)].toDF("id", "data"))
val tableName = if (catalogAndNamespace.isEmpty) s"default.$t1" else t1
assert(exc.getMessage.contains(s"Cannot write to '$tableName', too many data columns"))
}
}
dynamicOverwriteTest("insertInto: overwrite partitioned table in dynamic mode") {
val t1 = s"${catalogAndNamespace}tbl"
withTable(t1) {
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format PARTITIONED BY (id)")
val init = Seq((2L, "dummy"), (4L, "keep")).toDF("id", "data")
doInsert(t1, init)
val df = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("id", "data")
doInsert(t1, df, SaveMode.Overwrite)
verifyTable(t1, df.union(sql("SELECT 4L, 'keep'")))
}
}
dynamicOverwriteTest("insertInto: overwrite partitioned table in dynamic mode by position") {
val t1 = s"${catalogAndNamespace}tbl"
withTable(t1) {
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format PARTITIONED BY (id)")
val init = Seq((2L, "dummy"), (4L, "keep")).toDF("id", "data")
doInsert(t1, init)
val dfr = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("data", "id")
doInsert(t1, dfr, SaveMode.Overwrite)
val df = Seq((1L, "a"), (2L, "b"), (3L, "c"), (4L, "keep")).toDF("id", "data")
verifyTable(t1, df)
}
}
}
trait InsertIntoSQLOnlyTests
extends QueryTest
with SharedSparkSession
with BeforeAndAfter {
import testImplicits._
/** Check that the results in `tableName` match the `expected` DataFrame. */
protected def verifyTable(tableName: String, expected: DataFrame): Unit
protected val v2Format: String
protected val catalogAndNamespace: String
/**
* Whether dynamic partition overwrites are supported by the `Table` definitions used in the
* test suites. Tables that leverage the V1 Write interface do not support dynamic partition
* overwrites.
*/
protected val supportsDynamicOverwrite: Boolean
/** Whether to include the SQL specific tests in this trait within the extending test suite. */
protected val includeSQLOnlyTests: Boolean
private def withTableAndData(tableName: String)(testFn: String => Unit): Unit = {
withTable(tableName) {
val viewName = "tmp_view"
val df = spark.createDataFrame(Seq((1L, "a"), (2L, "b"), (3L, "c"))).toDF("id", "data")
df.createOrReplaceTempView(viewName)
withTempView(viewName) {
testFn(viewName)
}
}
}
protected def dynamicOverwriteTest(testName: String)(f: => Unit): Unit = {
test(testName) {
try {
withSQLConf(PARTITION_OVERWRITE_MODE.key -> PartitionOverwriteMode.DYNAMIC.toString) {
f
}
if (!supportsDynamicOverwrite) {
fail("Expected failure from test, because the table doesn't support dynamic overwrites")
}
} catch {
case a: AnalysisException if !supportsDynamicOverwrite =>
assert(a.getMessage.contains("does not support dynamic overwrite"))
}
}
}
if (includeSQLOnlyTests) {
test("InsertInto: when the table doesn't exist") {
val t1 = s"${catalogAndNamespace}tbl"
val t2 = s"${catalogAndNamespace}tbl2"
withTableAndData(t1) { _ =>
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format")
val e = intercept[AnalysisException] {
sql(s"INSERT INTO $t2 VALUES (2L, 'dummy')")
}
assert(e.getMessage.contains(t2))
assert(e.getMessage.contains("Table not found"))
}
}
test("InsertInto: append to partitioned table - static clause") {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format PARTITIONED BY (id)")
sql(s"INSERT INTO $t1 PARTITION (id = 23) SELECT data FROM $view")
verifyTable(t1, sql(s"SELECT 23, data FROM $view"))
}
}
test("InsertInto: static PARTITION clause fails with non-partition column") {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format PARTITIONED BY (data)")
val exc = intercept[AnalysisException] {
sql(s"INSERT INTO TABLE $t1 PARTITION (id=1) SELECT data FROM $view")
}
verifyTable(t1, spark.emptyDataFrame)
assert(exc.getMessage.contains(
"PARTITION clause cannot contain a non-partition column name"))
assert(exc.getMessage.contains("id"))
}
}
test("InsertInto: dynamic PARTITION clause fails with non-partition column") {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format PARTITIONED BY (id)")
val exc = intercept[AnalysisException] {
sql(s"INSERT INTO TABLE $t1 PARTITION (data) SELECT * FROM $view")
}
verifyTable(t1, spark.emptyDataFrame)
assert(exc.getMessage.contains(
"PARTITION clause cannot contain a non-partition column name"))
assert(exc.getMessage.contains("data"))
}
}
test("InsertInto: overwrite - dynamic clause - static mode") {
withSQLConf(PARTITION_OVERWRITE_MODE.key -> PartitionOverwriteMode.STATIC.toString) {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format PARTITIONED BY (id)")
sql(s"INSERT INTO $t1 VALUES (2L, 'dummy'), (4L, 'also-deleted')")
sql(s"INSERT OVERWRITE TABLE $t1 PARTITION (id) SELECT * FROM $view")
verifyTable(t1, Seq(
(1, "a"),
(2, "b"),
(3, "c")).toDF())
}
}
}
dynamicOverwriteTest("InsertInto: overwrite - dynamic clause - dynamic mode") {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format PARTITIONED BY (id)")
sql(s"INSERT INTO $t1 VALUES (2L, 'dummy'), (4L, 'keep')")
sql(s"INSERT OVERWRITE TABLE $t1 PARTITION (id) SELECT * FROM $view")
verifyTable(t1, Seq(
(1, "a"),
(2, "b"),
(3, "c"),
(4, "keep")).toDF("id", "data"))
}
}
test("InsertInto: overwrite - missing clause - static mode") {
withSQLConf(PARTITION_OVERWRITE_MODE.key -> PartitionOverwriteMode.STATIC.toString) {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format PARTITIONED BY (id)")
sql(s"INSERT INTO $t1 VALUES (2L, 'dummy'), (4L, 'also-deleted')")
sql(s"INSERT OVERWRITE TABLE $t1 SELECT * FROM $view")
verifyTable(t1, Seq(
(1, "a"),
(2, "b"),
(3, "c")).toDF("id", "data"))
}
}
}
dynamicOverwriteTest("InsertInto: overwrite - missing clause - dynamic mode") {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format PARTITIONED BY (id)")
sql(s"INSERT INTO $t1 VALUES (2L, 'dummy'), (4L, 'keep')")
sql(s"INSERT OVERWRITE TABLE $t1 SELECT * FROM $view")
verifyTable(t1, Seq(
(1, "a"),
(2, "b"),
(3, "c"),
(4, "keep")).toDF("id", "data"))
}
}
test("InsertInto: overwrite - static clause") {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string, p1 int) " +
s"USING $v2Format PARTITIONED BY (p1)")
sql(s"INSERT INTO $t1 VALUES (2L, 'dummy', 23), (4L, 'keep', 2)")
sql(s"INSERT OVERWRITE TABLE $t1 PARTITION (p1 = 23) SELECT * FROM $view")
verifyTable(t1, Seq(
(1, "a", 23),
(2, "b", 23),
(3, "c", 23),
(4, "keep", 2)).toDF("id", "data", "p1"))
}
}
test("InsertInto: overwrite - mixed clause - static mode") {
withSQLConf(PARTITION_OVERWRITE_MODE.key -> PartitionOverwriteMode.STATIC.toString) {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string, p int) " +
s"USING $v2Format PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t1 VALUES (2L, 'dummy', 2), (4L, 'also-deleted', 2)")
sql(s"INSERT OVERWRITE TABLE $t1 PARTITION (id, p = 2) SELECT * FROM $view")
verifyTable(t1, Seq(
(1, "a", 2),
(2, "b", 2),
(3, "c", 2)).toDF("id", "data", "p"))
}
}
}
test("InsertInto: overwrite - mixed clause reordered - static mode") {
withSQLConf(PARTITION_OVERWRITE_MODE.key -> PartitionOverwriteMode.STATIC.toString) {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string, p int) " +
s"USING $v2Format PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t1 VALUES (2L, 'dummy', 2), (4L, 'also-deleted', 2)")
sql(s"INSERT OVERWRITE TABLE $t1 PARTITION (p = 2, id) SELECT * FROM $view")
verifyTable(t1, Seq(
(1, "a", 2),
(2, "b", 2),
(3, "c", 2)).toDF("id", "data", "p"))
}
}
}
test("InsertInto: overwrite - implicit dynamic partition - static mode") {
withSQLConf(PARTITION_OVERWRITE_MODE.key -> PartitionOverwriteMode.STATIC.toString) {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string, p int) " +
s"USING $v2Format PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t1 VALUES (2L, 'dummy', 2), (4L, 'also-deleted', 2)")
sql(s"INSERT OVERWRITE TABLE $t1 PARTITION (p = 2) SELECT * FROM $view")
verifyTable(t1, Seq(
(1, "a", 2),
(2, "b", 2),
(3, "c", 2)).toDF("id", "data", "p"))
}
}
}
dynamicOverwriteTest("InsertInto: overwrite - mixed clause - dynamic mode") {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string, p int) " +
s"USING $v2Format PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t1 VALUES (2L, 'dummy', 2), (4L, 'keep', 2)")
sql(s"INSERT OVERWRITE TABLE $t1 PARTITION (p = 2, id) SELECT * FROM $view")
verifyTable(t1, Seq(
(1, "a", 2),
(2, "b", 2),
(3, "c", 2),
(4, "keep", 2)).toDF("id", "data", "p"))
}
}
dynamicOverwriteTest("InsertInto: overwrite - mixed clause reordered - dynamic mode") {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string, p int) " +
s"USING $v2Format PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t1 VALUES (2L, 'dummy', 2), (4L, 'keep', 2)")
sql(s"INSERT OVERWRITE TABLE $t1 PARTITION (id, p = 2) SELECT * FROM $view")
verifyTable(t1, Seq(
(1, "a", 2),
(2, "b", 2),
(3, "c", 2),
(4, "keep", 2)).toDF("id", "data", "p"))
}
}
dynamicOverwriteTest("InsertInto: overwrite - implicit dynamic partition - dynamic mode") {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string, p int) " +
s"USING $v2Format PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t1 VALUES (2L, 'dummy', 2), (4L, 'keep', 2)")
sql(s"INSERT OVERWRITE TABLE $t1 PARTITION (p = 2) SELECT * FROM $view")
verifyTable(t1, Seq(
(1, "a", 2),
(2, "b", 2),
(3, "c", 2),
(4, "keep", 2)).toDF("id", "data", "p"))
}
}
dynamicOverwriteTest("InsertInto: overwrite - multiple static partitions - dynamic mode") {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string, p int) " +
s"USING $v2Format PARTITIONED BY (id, p)")
sql(s"INSERT INTO $t1 VALUES (2L, 'dummy', 2), (4L, 'keep', 2)")
sql(s"INSERT OVERWRITE TABLE $t1 PARTITION (id = 2, p = 2) SELECT data FROM $view")
verifyTable(t1, Seq(
(2, "a", 2),
(2, "b", 2),
(2, "c", 2),
(4, "keep", 2)).toDF("id", "data", "p"))
}
}
test("do not double insert on INSERT INTO collect()") {
val t1 = s"${catalogAndNamespace}tbl"
withTableAndData(t1) { view =>
sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format")
val df = sql(s"INSERT INTO TABLE $t1 SELECT * FROM $view")
df.collect()
df.take(5)
df.tail(5)
df.where("true").collect()
df.where("true").take(5)
df.where("true").tail(5)
verifyTable(t1, spark.table(view))
}
}
}
}
|
witgo/spark
|
sql/core/src/test/scala/org/apache/spark/sql/connector/InsertIntoTests.scala
|
Scala
|
apache-2.0
| 19,019
|
package webdoc
import java.io.{InputStream, EOFException}
import scala.collection.immutable.{ListMap}
/**
 * Lexer that tokenizes webdoc input read from the given InputStream.
*/
case class Lexer(in:InputStream)
{
    // Contents of `in`, buffered as a String by parse()
private var text:String = ""
// Constants
    private val escapeChar = '\\'
    private val whitespaces: Array[Char] = Array(' ', '\t', '\n')
// State
private var pos: Int = 0 // Current character index.
// Types
private type Assignment = (String, Any)
private type Attempt = ()=>Any
// ---------- Token reading methods ----------------
/**
* Reads next byte as a character.
*/
private def readChar:Char =
{
// Checks index
if(pos >= text.length)
throw ParserException("Reached end of file.", currentLocation)
// Reads character
val c = text.charAt(pos)
pos += 1
// Returns.
c
}
/**
* Reads next byte as a character without advancing
* internal pointer.
*/
private def peekChar:Char =
{
// Checks index
if(pos >= text.length)
throw ParserException("Reached end of file.", currentLocation)
// Reads character
text.charAt(pos)
}
private def atEOF:Boolean = pos == text.length
/**
* Skips text past whitespace
* @return true if reached end of file.
*/
private def skipWhitespace():Boolean =
{
// Advances until past whitespace
if(atEOF) true
else
{
val c = readChar
if(!c.isWhitespace)
{
pos -= 1
false
}
else
{
skipWhitespace()
}
}
}
/**
     * @return true if b is a whitespace character.
*/
private def isWhitespace(b:Int):Boolean =
{
var i = 0
while(i < whitespaces.length)
{
if(b == whitespaces(i))
return true
i += 1
}
false
}
/**
* Determines if character is alphabetic
*/
private def isAlpha(c:Int):Boolean =
(c >= 65 && c <= 90) || (c >= 97 && c <= 122)
/**
* Determines if character is numeric
*/
private def isNum(c:Int):Boolean =
(c >= 48 && c <= 57)
/**
* determines if character is alpha-numeric
*/
private def isAlphaNum(c:Int):Boolean =
isAlpha(c) || isNum(c)
/**
* Determines if C is the start of a Block.
*/
private def isBlockChar(c:Int):Boolean =
        c == '\'' || c == '"' || c == '{' || c == '('
/**
* @param cseq CharacterSequence in question
* @param index Character in cseq to check for escaping status.
* @return true if character specified is escaped.
*/
private def isEscaped(cseq:CharSequence, index:Int):Boolean = index match
{
case 0 => false
        case _ => cseq.charAt(index-1) == '\\' && !isEscaped(cseq, index-1)
}
/**
* Reads contents of File into a String until a double quote is found.
     * Assumes the position has already advanced past the starting quote.
*/
private def readQuotedString():String =
{
// Initializes
val builder = new StringBuilder()
var c = ' '
var done = false
// Reads until end quote.
do
{
c = readChar
c match
{
case '"' => done = true
                case '\\' => builder.append(readChar)
case _ => builder.append(c)
}
}
while(!done)
// Returns result
builder.toString
}
/**
* Attempts to run all functions in the sequence.
* Stops at first one that succeeds, and returns result.
* @throws Exception if the last function fails.
*/
private def attemptAll(attempts:Seq[Attempt], index:Int=0):Any =
{
val fun:()=>Any = attempts(index)
if(index == attempts.length-1)
fun()
else
{
// Tries to invoke current attempt
val savePos:Int = pos
try
{
fun()
}
// Recovers and tries the next
catch
{
case t:Throwable =>
pos = savePos
attemptAll(attempts, index+1)
}
}
}
/**
* Reads a number as a Double.
*/
private def readNum():Double =
{
// Prepares
val builder = new StringBuilder()
var c = peekChar
// Buffers characters
while(!isWhitespace(c) && c != ')' && c != ']')
{
// Checks that character is numeric
if(!isNum(c) && c != '.')
throwUnexpected
// Continues to read
pos += 1
builder.append(c)
c = peekChar
}
// Parses and returns result
val str = builder.toString
try
{
str.toDouble
}
catch
{
case t:Throwable => throw ParserException("Could not parse decimal number", currentLocation)
}
}
/**
* Reads a value as a Boolean.
*/
private def readBool():Boolean =
{
val str:String = readAlphaNumString
try
{
str.toBoolean
}
catch
{
case t:Throwable => throw ParserException("Could not parse bool", currentLocation)
}
}
/**
     * Reads an alphanumeric String (it must start with a letter), stopping at the first
     * non-alphanumeric character.
*/
private def readAlphaNumString():String =
{
// Allocates and reads first
val builder = new StringBuilder()
var c:Char = peekChar
if(!isAlpha(c))
throwUnexpected
        // Continues reading while the character is alphanumeric
while(isAlphaNum(c))
{
// Adds to buffer
pos += 1
builder.append(c)
c = peekChar
}
// Returns contents of buffer
builder.toString
}
// ------------------- READER METHODS -------------------------
/**
* Possibly reads element
*/
def parse():Element =
{
// Buffers contents of InputStream into a String
text =
{
val builder = new StringBuilder
var b:Int = in.read
while(b != -1)
{
builder.append(b.toChar)
b = in.read
}
            builder.append('\n')
builder.toString
}
// Reads Element
readElement
}
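    // Usage sketch (illustrative; the webdoc source below is a made-up example):
    //   val source = "page(title=\"Home\" visible=true)"
    //   val in = new java.io.ByteArrayInputStream(source.getBytes("UTF-8"))
    //   val element: Element = Lexer(in).parse()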
/**
* Current location in the file (line/column)
*/
private def currentLocation:(Int, Int) =
{
var i = 0
var line = 1
var column = 1
while(i < pos)
{
var c = text.charAt(i)
            if(c == '\n')
{
line += 1
column = 0
}
column += 1
i += 1
}
// Returns result
(line, column)
}
// Stub
private def currentLine = currentLocation._1
// Stub
private def currentColumn = currentLocation._2
/**
* Reads an assignment to an element.
*/
private def readAssignment():Assignment =
{
// Skips whitespace
skipWhitespace()
// Reads name
val key:String = readAlphaNumString
// Reads value
val value:Any = readValue
// Returns
(key, value)
}
/**
* Reads a sequence of assignments
* @param assignments Current seq of assignments created.
* Defaults to empty Seq.
*/
private def readAssignments(assignments:Seq[Assignment] = Seq.empty):Seq[Assignment] =
{
// Skips whitespace and reads character
skipWhitespace()
var c = peekChar
// Handles content
if(isAlpha(c))
{
// Key
val key = readAlphaNumString
// Expected equals symbol
skipWhitespace()
val nextC:Char = peekChar
if(nextC != '=')
throwUnexpected()
pos += 1
// Value
val value:Any = readValue
val assignment:Assignment = (key, value)
// Returns recursive result
val result = readAssignments(assignments :+ assignment)
result
}
// End of assignments
else if(c == ')')
{
// Advances
pos += 1
// Attempts to read following array
val eof:Boolean = skipWhitespace()
if(!eof && peekChar == '[')
{
pos += 1
val children:Seq[Any] = readSequence()
assignments :+ ("children", children)
}
else
{
assignments
}
}
// Unexpected
else
{
throwUnexpected
Seq.empty
}
}
/**
* Reads a sequence of certain values
* @param seq Sequence accumulated. Empty by default.
* @return Sequence of elements.
*/
private def readSequence(seq:Seq[Any] = Seq.empty):Seq[Any] =
{
// Skips whitespace
skipWhitespace()
// Peeks at character
val c:Char = peekChar
c match
{
case ']' =>
pos += 1
seq.toArray.toSeq
case _ =>
val value:Any = readValue()
readSequence(seq :+ value)
}
}
/**
* Reads next content as an Element
*/
private def readElement():Element =
{
// Skips whitespace just in case
skipWhitespace()
// Tries to buffer in name header
val builder = new StringBuilder()
var c:Char = peekChar
// Only looks for name at start if first character is not the beginning of the object.
if(c != '(')
{
if(!isAlpha(c))
throwUnexpected
// Reads the rest
while(isAlphaNum(c))
{
pos += 1
builder.append(c)
c = peekChar
}
// Skips whitespace
skipWhitespace()
}
// Checks if next character is valid
c = peekChar
if(c != '(')
throwUnexpected
pos += 1
// Stores name and reads assignments
val assignments:Seq[Assignment] = readAssignments()
val meta:Map[String, Any] = ListMap(assignments:_*)
val name:Any = meta.get("name") match
{
case Some(a:Any) => a
case None => builder.toString
}
// Builds RawElement
val start:Map[String, Any] = ListMap(("name", name))
RawElement(start ++ meta)
}
/**
* Reads some element
*/
private def readValue():Any =
{
// Skips whitespace and peeks at the next character
skipWhitespace()
val c:Char = peekChar
// Uses it to evaluate following text
val value:Any =
{
if(c == '"') // Start of a String
{
pos += 1
readQuotedString()
}
else if(c == '(') // Start of a list of assignments
{
pos += 1
readAssignments()
}
else if(c == '[') // Start of an array
{
pos += 1
readSequence()
}
            else if(isAlpha(c)) // Start of a boolean or an element
{
attemptAll(Seq(
() => readBool(),
() => readElement()
))
}
else if(isNum(c)) // Start of a number
{
readNum()
}
else // Unexpected
{
throwUnexpected
"Unreachable"
}
}
// Returns result
value
}
/**
* Throws an unexpected character exception for the previous character
*/
private def throwUnexpected()
{
throw UnexpectedCharacterException(text(pos), currentLocation)
}
}
|
Anti-Alias/WebDoc
|
src/main/scala/webdoc/Lexer.scala
|
Scala
|
mit
| 12,118
|
/**
* The MIT License (MIT)
* <p/>
* Copyright (c) 2016 ScalateKids
* <p/>
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* <p/>
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* <p/>
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* <p/>
* @author Scalatekids
* @version 1.0
* @since 1.0
*/
package com.actorbase.driver.data
import com.actorbase.driver.client.Connector
import com.actorbase.driver.ActorbaseDriver.Connection
import scala.collection.immutable.TreeMap
import scala.collection.generic.FilterMonadic
case class ActorbaseCollectionMap private
(var data: TreeMap[String, ActorbaseCollection])(implicit val conn: Connection, implicit val scheme: String = "http://") extends Connector {
/**
* Find keys inside the collection map
*
* @param keys a vararg of String
* @return ActorbaseCollection containing results of the query
*/
def find(keys: String*): ActorbaseCollection = {
var (coll, contr) = (new TreeMap[String, Any](), Map.empty[String, Boolean])
data map { collection =>
collection._2.find(keys:_*).foreach(kv => coll += (kv._1 -> kv._2))
contr ++= collection._2.contributors
}
ActorbaseCollection(conn.username, "findResults", contr, coll)(conn, scheme)
}
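  // Usage sketch (illustrative; `collections` stands for an already-built ActorbaseCollectionMap,
  // and the key names are made up):
  //   val results: ActorbaseCollection = collections.find("user42", "order-2016")
  //   collections.drop("obsoleteCollection")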
/**
* Drop collections from the collection map
*
* @param collections a vararg of String representing a sequence of collections
* @return no return value
*/
def drop(collections: String*): Unit = {
collections.foreach { collection =>
data get collection map (x => x.drop)
data -= collection
}
}
/**
* Count the number of collections
*/
def count: Int = data.size
/**
* Foreach method, applies a function f to all elements of this map.
*
* @param f the function that is applied for its side-effect to every element.
* The result of function f is discarded.
* @return no return value
*/
def foreach(f: ((String, ActorbaseCollection)) => Unit): Unit = data.foreach(f)
/**
* Creates a non-strict filter of this traversable collection.
*
* @param p the predicate used to test elements.
* @return an object of class WithFilter, which supports map, flatMap, foreach,
* and withFilter operations. All these operations apply to those elements of
* this traversable collection which satisfy the predicate p.
*/
def withFilter(f: ((String, ActorbaseCollection)) => Boolean): FilterMonadic[(String, ActorbaseCollection), TreeMap[String, ActorbaseCollection]] = data.withFilter(f)
/**
* Converts this collection to a string.
*
   * @return a string representation of this collection. By default this string
   * consists of a JSON containing the collection name, the owner and items
*/
override def toString: String = {
var ret = ""
data.foreach {
      case (k, v) => ret += "\n" + v.toString + "\n"
}
ret
}
}
|
ScalateKids/Actorbase-Client
|
src/main/scala/com/actorbase/driver/data/ActorbaseCollectionMap.scala
|
Scala
|
mit
| 3,870
|
package com.ajjpj.adiagram_.model
import java.io.File
import javafx.stage.{Stage, FileChooser}
import scala.xml.XML
import com.ajjpj.adiagram_.ui.fw.{JavaFxHelper, Digest}
import javafx.stage.FileChooser.ExtensionFilter
import javafx.scene.control.Label
import com.ajjpj.adiagram_.ui.ADiagramController
/**
* @author arno
*/
object DiagramIO {
private var stages = Map[Stage, Option[ADiagramController]]()
def init(stage: Stage) {
val ctrl = Init.initEmptyStage(stage)
stages += (stage -> Some(ctrl))
stage.show()
}
def newDiagram() {
val stage = new Stage
val ctrl = Init.initEmptyStage(stage)
stages += (stage -> Some(ctrl))
stage.show()
}
def closeAll(): Boolean = {
stages.values.foreach(_ match {
case Some(ctrl) => ctrl.window.toFront(); if(! close(ctrl)(ctrl.digest)) return false
case None =>
})
true
}
def exit() {
if(closeAll()) {
System.exit(0)
}
}
/**
* @return true iff the diagram was actually closed
*/
def close(ctrl: ADiagramController)(implicit digest: Digest): Boolean = {
import JavaFxHelper._
def doClose(): Boolean = {
val stage = ctrl.window
if(stages.size == 1) {
stage.setScene(null)
val newCtrl = Init.initEmptyStage(stage)
stages += (stage -> Some(newCtrl))
}
else {
stage.close()
stages -= stage
}
true
}
if(ctrl.isDirty) {
val btnSave = ButtonSpec(text="Save", clickId="save", default=true)
val btnDiscard = ButtonSpec(text="Discard", clickId="discard")
val btnCancel = ButtonSpec(text="Cancel", clickId="cancel", cancel=true)
showSingleClickDialog(ctrl.window, "Unsaved Diagram", new Label("Unsaved diagram. How do you want to proceed?"), btnSave, btnDiscard, btnCancel) match {
case "save" => save(ctrl); doClose() //TODO handle 'cancel' during save operation
case "discard" => doClose()
case "cancel" => false
}
}
else {
doClose()
}
}
def open(ctrl: ADiagramController) {
val fileChooser = new FileChooser
fileChooser.setTitle("Open Diagram")
fileChooser.getExtensionFilters.add(extensionFilter)
//TODO set initial directory
val file = fileChooser.showOpenDialog(ctrl.window)
if(file != null) {
doOpen(file, ctrl)
}
}
def save(ctrl: ADiagramController)(implicit digest: Digest) {
ctrl.file match {
case Some(file) => doSave(file, ctrl)
case None => saveAs(ctrl)
}
}
private val extensionFilter = new ExtensionFilter("Diagram Files", "*.adiagram")
def saveAs(ctrl: ADiagramController)(implicit digest: Digest) {
val fileChooser = new FileChooser
fileChooser.setTitle("Save Diagram")
fileChooser.getExtensionFilters.add(extensionFilter)
//TODO set initial directory
//TODO set previous file name (?)
val fileRaw = fileChooser.showSaveDialog(ctrl.window)
if(fileRaw != null) {
val file = if(fileRaw.getName endsWith ".adiagram") fileRaw else new File(fileRaw.getParentFile, fileRaw.getName + ".adiagram")
if(JavaFxHelper.confirmOverwrite(file, ctrl.window)) doSave(file, ctrl)
}
}
private[model] def doOpen(file: File, ctrl: ADiagramController) {
val deser = new DiagramDeserializer(XML.loadFile(file))
if(ctrl.isPristine) {
val stage = ctrl.root.getScene.getWindow.asInstanceOf[Stage]
val newCtrl = Init.initStage(stage, deser.diagram, deser.styleRepository, deser.selectedStyles, file)
stages += (stage -> Some(newCtrl))
}
else {
val stage = new Stage()
val newCtrl = Init.initStage(stage, deser.diagram, deser.styleRepository, deser.selectedStyles, file)
stages += (stage -> Some(newCtrl))
stage.show()
}
}
private[model] def doSave(file: File, ctrl: ADiagramController)(implicit digest: Digest) {
XML.save(file.getPath,new DiagramSerializer(ctrl).toXml, xmlDecl=true, enc="UTF-8")
ctrl.file = Some(file)
digest.undoRedo.clear()
}
}
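// Usage sketch (illustrative, not part of this file): UI code would typically delegate menu
// actions to this object, e.g.
//   DiagramIO.newDiagram()                // File > New
//   DiagramIO.save(ctrl)(ctrl.digest)     // File > Save, for the focused ADiagramController
//   DiagramIO.exit()                      // File > Quit, closes all open stages first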
|
arnohaase/a-diagram
|
src/main/scala-old/com/ajjpj/adiagram_/model/DiagramIO.scala
|
Scala
|
apache-2.0
| 4,054
|
package models.street
import models.audit.AuditTaskTable
import models.region._
import models.utils.MyPostgresDriver.simple._
import play.api.Play.current
import scala.slick.lifted.ForeignKeyQuery
case class StreetEdgeRegion(streetEdgeId: Int, regionId: Int)
class StreetEdgeRegionTable(tag: Tag) extends Table[StreetEdgeRegion](tag, Some("sidewalk"), "street_edge_region") {
def streetEdgeId = column[Int]("street_edge_id")
def regionId = column[Int]("region_id")
def * = (streetEdgeId, regionId) <> ((StreetEdgeRegion.apply _).tupled, StreetEdgeRegion.unapply)
def streetEdge: ForeignKeyQuery[StreetEdgeTable, StreetEdge] =
foreignKey("street_edge_region_street_edge_id_fkey", streetEdgeId, TableQuery[StreetEdgeTable])(_.streetEdgeId)
def region: ForeignKeyQuery[RegionTable, Region] =
foreignKey("street_edge_region_region_id_fkey", regionId, TableQuery[RegionTable])(_.regionId)
}
object StreetEdgeRegionTable {
val db = play.api.db.slick.DB
val streetEdgeRegionTable = TableQuery[StreetEdgeRegionTable]
val nonDeletedStreetEdgeRegions = for {
_ser <- streetEdgeRegionTable
_se <- StreetEdgeTable.streetEdgesWithoutDeleted if _ser.streetEdgeId === _se.streetEdgeId
_r <- RegionTable.regionsWithoutDeleted if _ser.regionId === _r.regionId
} yield _ser
/**
* Get records based on the street edge id.
*
* @param streetEdgeId
* @return
*/
def selectByStreetEdgeId(streetEdgeId: Int): List[StreetEdgeRegion] = db.withSession { implicit session =>
streetEdgeRegionTable.filter(item => item.streetEdgeId === streetEdgeId).list
}
/**
* Get records based on the region id.
*
* @param regionId
* @return
*/
def selectNonDeletedByRegionId(regionId: Int): List[StreetEdgeRegion] = db.withSession { implicit session =>
nonDeletedStreetEdgeRegions.filter(item => item.regionId === regionId).list
}
/**
* Checks if every street in the region has an associated completed audit task.
*
* @param regionId
* @return
*/
def allStreetsInARegionAudited(regionId: Int): Boolean = db.withSession { implicit session =>
val edgesInRegion: Int = selectNonDeletedByRegionId(regionId).length
val edgesAuditedInRegion: Int = (for {
_edgeRegions <- nonDeletedStreetEdgeRegions if _edgeRegions.regionId === regionId
_audits <- AuditTaskTable.completedTasks if _audits.streetEdgeId === _edgeRegions.streetEdgeId
} yield _audits.streetEdgeId).groupBy(x => x).map(_._1).size.run
edgesAuditedInRegion == edgesInRegion
}
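  // Usage sketch (illustrative; region id 42 is a made-up value):
  //   val streets: List[StreetEdgeRegion] = StreetEdgeRegionTable.selectNonDeletedByRegionId(42)
  //   val fullyAudited: Boolean = StreetEdgeRegionTable.allStreetsInARegionAudited(42)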
}
|
ProjectSidewalk/SidewalkWebpage
|
app/models/street/StreetEdgeRegionTable.scala
|
Scala
|
mit
| 2,555
|
/**
* Copyright 2013-2015, AlwaysResolve Project (alwaysresolve.org), MOYD.CO LTD
* This file incorporates work covered by the following copyright and permission notice:
*
* Copyright 2012 silenteh
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package payload
import io.netty.buffer.ByteBuf
import enums.RecordType
import org.slf4j.LoggerFactory
import scala.reflect.ClassTag
// This class reassembles the network frames
case class Message(
header: Header,
query: Array[Question],
answers: Array[RRData],
authority: Array[RRData],
additional: Array[RRData]
) {
override def toString() = {
val opcodeStr = header.opcode match {
case 0 => "Standard query"
case 1 => "Inverse query"
case 2 => "Status"
case 4 => "Notify"
case 5 => "Update"
}
"%s - %s - type: %s, class: %s".format(opcodeStr, domainName, RecordType(query(0).qtype).toString, query(0).qclass)
}
def toByteArray = header.toByteArray ++
query.foldRight(Array[Byte]()){case(question, total) => question.toByteArray ++ total} ++
answers.foldRight(Array[Byte]()){case(answer, total) => answer.toByteArray ++ total} ++
authority.foldRight(Array[Byte]()){case(authority, total) => authority.toByteArray ++ total} ++
additional.foldRight(Array[Byte]()){case(additional, total) => additional.toByteArray ++ total}
def toCompressedByteArray(input: (Array[Byte], Map[String, Int])) = {
val headerBytes = header.toCompressedByteArray(input)
val queryBytes = query.foldRight(headerBytes) {case(question, total) => question.toCompressedByteArray(total)}
(answers ++ authority ++ additional).foldLeft(queryBytes) {case(total, item) => item.toCompressedByteArray(total)}
}
private def domainName = query(0).qname.map(new String(_, "UTF-8")).mkString(".")
}
object Message {
val logger = LoggerFactory.getLogger("app")
def apply(buf: ByteBuf, offset: Int = 0) = {
val header = Header(buf)
new Message(
header,
deserialize(buf, header.questionCount, offset, deserializeQuestions),
deserialize(buf, header.answerCount, offset, deserializeRRData),
deserialize(buf, header.authorityCount, offset, deserializeRRData),
deserialize(buf, header.additionalCount, offset, deserializeRRData)
)
}
def deserialize[T: ClassTag](buf: ByteBuf, n: Int, o: Int, fn: (ByteBuf, Int, Int) => Array[T]) =
try {
fn(buf, n, o)
} catch {
case e: Exception => {
logger.debug(e.getStackTraceString)
Array[T]()
}
}
def deserializeQuestions(buf: ByteBuf, n: Int, o: Int): Array[Question] =
if (n >= 1) Array.tabulate(n) { i => Question(buf, o) } else Array()
def deserializeRRData(buf: ByteBuf, n: Int, o: Int): Array[RRData] =
if (n >= 1) Array.tabulate(n) { i => RRData(buf, o) }.filter(_.rdata != null) else Array()
}
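// Usage sketch (illustrative; `buf` is assumed to be an io.netty.buffer.ByteBuf that holds a DNS frame):
//   val msg: Message = Message(buf)
//   val rawBytes: Array[Byte] = msg.toByteArray   // re-serialize without name compression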
|
Moydco/AlwaysResolveDNS
|
src/main/scala/payload/Message.scala
|
Scala
|
apache-2.0
| 3,383
|
package com.github.tminglei.bind
import org.scalatest._
import java.util.ResourceBundle
class GeneralMappingsSpec extends FunSpec with Matchers with Constraints {
val bundle: ResourceBundle = ResourceBundle.getBundle("bind-messages")
val messages: Messages = (key) => Option(bundle.getString(key))
case class TestBean(id: Long, name: String, desc: Option[String] = None)
describe("test pre-defined general usage mappings") {
describe("ignored-simple") {
val ignored = Mappings.ignored(35)
it("invalid data") {
val invalidData = Map("number" -> "t135")
ignored.validate("number", invalidData, messages, Options.apply()) match {
case Nil => ignored.convert("number", invalidData) should be (35)
case err => err should be (Nil)
}
}
it("valid data") {
val validData = Map("number" -> "135")
ignored.validate("number", validData, messages, Options.apply()) match {
case Nil => ignored.convert("number", validData) should be (35)
case err => err should be (Nil)
}
}
it("null data") {
val nullData = Map[String, String]()
ignored.validate("number", nullData, messages, Options.apply()) match {
case Nil => ignored.convert("number", nullData) should be (35)
case err => err should be (Nil)
}
}
it("empty data") {
val emptyData = Map("number" -> "")
ignored.validate("number", emptyData, messages, Options.apply()) match {
case Nil => ignored.convert("number", emptyData) should be (35)
case err => err should be (Nil)
}
}
}
describe("optional-simple") {
val base = Mappings.int()
val optional = Mappings.optional(base)
it("invalid data") {
val invalidData = Map("number" -> "t122345")
optional.validate("number", invalidData, messages, Options.apply()) match {
case Nil => ("invalid - shouldn't occur!") should be ("")
case err => {
err should be (Seq("number" -> "'t122345' must be a number"))
base.validate("number", invalidData, messages, Options.apply()) should be (Seq("number" -> "'t122345' must be a number"))
}
}
}
it("valid data") {
val validData = Map("number" -> "122345")
optional.validate("number", validData, messages, Options.apply()) match {
case Nil => {
base.validate("number", validData, messages, Options.apply()) should be (Nil)
optional.convert("number", validData) should be (Some(122345))
}
case err => err should be (Nil)
}
}
it("null data") {
val nullData = Map[String, String]()
optional.validate("number", nullData, messages, Options.apply()) match {
case Nil => {
base.validate("number", nullData, messages, Options.apply()) should be (Nil)
optional.convert("number", nullData) should be (None)
}
case err => err should be (Nil)
}
}
it("empty data") {
val emptyData = Map("number" -> "")
optional.validate("number", emptyData, messages, Options.apply()) match {
case Nil => {
base.validate("number", emptyData, messages, Options.apply()) should be (Nil)
optional.convert("number", emptyData) should be (None)
}
case err => err should be (Nil)
}
}
it("delegate pre-processors") {
val optional1 = Processors.omitLeft("$") >-: optional
val validData = Map("number" -> "$12453")
optional1.validate("number", validData, messages, Options.apply()) match {
case Nil => {
base.validate("number", validData, messages, Options.apply()) should be (Seq("number" -> "'$12453' must be a number"))
optional1.convert("number", validData) should be (Some(12453))
}
case err => err should be (Nil)
}
}
it("delegate constraints") {
val optional1 = Constraints.maxLength(8) >+: optional
val invalidData = Map("number" -> "146896540")
optional1.validate("number", invalidData, messages, Options.apply()) match {
case Nil => ("invalid - shouldn't occur!") should be ("")
case err => {
base.validate("number", invalidData, messages, Options.apply()) should be (Nil)
err should be (Seq("number" -> "'146896540' cannot be longer than 8 characters"))
}
}
}
}
describe("default-simple") {
val base = Mappings.int()
val default = Mappings.default(base, 101)
it("invalid data") {
val invalidData = Map("number" -> "t122345")
default.validate("number", invalidData, messages, Options.apply()) match {
case Nil => ("invalid - shouldn't occur!") should be ("")
case err => {
err should be (Seq("number" -> "'t122345' must be a number"))
base.validate("number", invalidData, messages, Options.apply()) should be (Seq("number" -> "'t122345' must be a number"))
}
}
}
it("valid data") {
val validData = Map("number" -> "122345")
default.validate("number", validData, messages, Options.apply()) match {
case Nil => {
base.validate("number", validData, messages, Options.apply()) should be (Nil)
default.convert("number", validData) should be (122345)
}
case err => err should be (Nil)
}
}
it("null data") {
val nullData = Map[String, String]()
default.validate("number", nullData, messages, Options.apply()) match {
case Nil => {
base.validate("number", nullData, messages, Options.apply()) should be (Nil)
default.convert("number", nullData) should be (101)
}
case err => err should be (Nil)
}
}
it("empty data") {
val emptyData = Map("number" -> "")
default.validate("number", emptyData, messages, Options.apply()) match {
case Nil => {
base.validate("number", emptyData, messages, Options.apply()) should be (Nil)
default.convert("number", emptyData) should be (101)
}
case err => err should be (Nil)
}
}
}
describe("list-simple") {
val base = Mappings.int()
val list = Constraints.required() >+: Mappings.list(base).label("xx")
it("invalid data") {
val invalidData = Map("number[0]" -> "t122345", "number[1]" -> "t11345")
list.validate("number", invalidData, messages, Options.apply()) match {
case Nil => ("invalid - shouldn't occur!") should be ("")
case err => {
base.validate("number[0]", invalidData, messages, Options.apply()) should be (Seq("number[0]" -> "'t122345' must be a number"))
base.validate("number[1]", invalidData, messages, Options.apply()) should be (Seq("number[1]" -> "'t11345' must be a number"))
err should be (Seq("number[0]" -> "'t122345' must be a number", "number[1]" -> "'t11345' must be a number"))
}
}
}
it("valid data") {
val validData = Map("number[0]" -> "122345", "number[1]" -> "754")
list.validate("number", validData, messages, Options.apply()) match {
case Nil => {
base.validate("number[0]", validData, messages, Options.apply()) should be (Nil)
base.validate("number[1]", validData, messages, Options.apply()) should be (Nil)
list.convert("number", validData) should be (List(122345, 754))
}
case err => err should be (Nil)
}
}
it("null data") {
val nullData = Map[String, String]()
list.validate("number", nullData, (key) => Some("%s is required"), Options.apply()) match {
case Nil => ("invalid - shouldn't occur!") should be ("")
case err => err.toList should be (List("number" -> "xx is required"))
}
}
it("empty data") {
val emptyData = Map("number[0]" -> "", "number[1]" -> "133")
list.validate("number", emptyData, (key) => Some("%s is required"), Options.apply()) match {
case Nil => {
base.validate("number[0]", emptyData, messages, Options.apply()) should be (Nil)
base.validate("number[1]", emptyData, messages, Options.apply()) should be (Nil)
list.convert("number", emptyData) should be (List(0, 133))
}
case err => err should be (Nil)
}
}
}
describe("map-simple") {
val base = Mappings.int()
val map = Mappings.map(base, Constraints.required()).label("xx")
it("invalid data") {
val invalidData = Map("map.aa" -> "t122345", "map.\\"b-1\\"" -> "t11345")
map.validate("map", invalidData, messages, Options.apply()) match {
case Nil => ("invalid - shouldn't occur!") should be ("")
case err => {
base.validate("map.aa", invalidData, messages, Options.apply()) should be (Seq("map.aa" -> "'t122345' must be a number"))
base.validate("map.\\"b-1\\"", invalidData, messages, Options.apply()) should be (Seq("map.\\"b-1\\"" -> "'t11345' must be a number"))
err should be (Seq("map.aa" -> "'t122345' must be a number", "map.\\"b-1\\"" -> "'t11345' must be a number"))
}
}
}
it("valid data") {
val validData = Map("map.aa" -> "122345", "map.\\"b-1\\"" -> "754")
map.validate("map", validData, messages, Options.apply()) match {
case Nil => {
base.validate("map.aa", validData, messages, Options.apply()) should be (Nil)
base.validate("map.\\"b-1\\"", validData, messages, Options.apply()) should be (Nil)
map.convert("map", validData) should be (Map("aa" -> 122345, "b-1" -> 754))
}
case err => err should be (Nil)
}
}
it("null data") {
val nullData = Map[String, String]()
map.validate("map", nullData, (key) => Some("%s is required"), Options.apply()) match {
case Nil => ("invalid - shouldn't occur!") should be ("")
case err => err.toList should be (List("map" -> "xx is required"))
}
}
it("empty data") {
val emptyData = Map("map.aa" -> "", "map.\\"b-1\\"" -> "133")
map.validate("map", emptyData, (key) => Some("%s is required"), Options.apply()) match {
case Nil => {
base.validate("map.aa", emptyData, messages, Options.apply()) should be (Nil)
base.validate("map.\\"b-1\\"", emptyData, messages, Options.apply()) should be (Nil)
map.convert("map", emptyData) should be (Map("aa" -> 0, "b-1" -> 133))
}
case err => err should be (Nil)
}
}
}
describe("ignored-compound") {
val testBean = TestBean(101, "test")
val ignored = Mappings.ignored(testBean)
it("invalid data") {
val invalidData = Map(
"test.id" -> "t135",
"test.name" -> "test",
"test.desc" -> ""
)
ignored.validate("", invalidData, messages, Options.apply()) match {
case Nil => ignored.convert("", invalidData) should be (testBean)
case err => err should be (Nil)
}
}
it("valid data") {
val validData = Map(
"test.id" -> "135",
"test.name" -> "test",
"test.desc" -> ""
)
ignored.validate("", validData, messages, Options.apply()) match {
case Nil => ignored.convert("", validData) should be (testBean)
case err => err should be (Nil)
}
}
it("null data") {
val nullData = Map[String, String]()
ignored.validate("", nullData, messages, Options.apply()) match {
case Nil => ignored.convert("", nullData) should be (testBean)
case err => err should be (Nil)
}
}
it("empty data (wrong way)") {
val emptyData = Map("test" -> "")
ignored.validate("", emptyData, messages, Options.apply()) match {
case Nil => ignored.convert("", emptyData) should be (testBean)
case err => err should be (Nil)
}
}
it("empty data") {
val emptyData = Map("test" -> null)
ignored.validate("", emptyData, messages, Options.apply()) match {
case Nil => ignored.convert("", emptyData) should be (testBean)
case err => err should be (Nil)
}
}
}
describe("optional-compound") {
val dummyMessages1: Messages = (key: String) => Some("dummy")
val base = Mappings.mapping(
"id" -> Mappings.long(),
"name" -> Mappings.text(),
"desc" -> Mappings.optional(Mappings.text())
)(TestBean.apply)
val optional = Mappings.optional(base)
it("invalid data") {
val invalidData = Map(
"test.id" -> "t135",
"test.name" -> "test",
"test.desc" -> ""
)
optional.validate("test", invalidData, dummyMessages1, Options.apply()) match {
case Nil => ("invalid - shouldn't occur!") should be ("")
case err => {
err should be (Seq("test.id" -> "dummy"))
base.validate("test", invalidData, dummyMessages1, Options.apply()) should be (Seq("test.id" -> "dummy"))
}
}
}
it("valid data") {
val validData = Map(
"test.id" -> "135",
"test.name" -> "test",
"test.desc" -> ""
)
optional.validate("test", validData, dummyMessages1, Options.apply()) match {
case Nil => {
base.validate("test", validData, dummyMessages1, Options.apply()) should be (Nil)
optional.convert("test", validData) should be (Some(TestBean(135, "test")))
}
case err => err should be (Nil)
}
}
it("null data") {
val nullData = Map[String, String]()
optional.validate("test", nullData, dummyMessages1, Options.apply()) match {
case Nil => {
base.validate("test", nullData, dummyMessages1, Options.apply()) should be (Nil)
optional.convert("test", nullData) should be (None)
}
case err => err should be (Nil)
}
}
it("empty data (wrong way)") {
val emptyData = Map("test" -> "")
optional.validate("test", emptyData, dummyMessages1, Options.apply()) match {
case Nil => {
base.validate("test", emptyData, dummyMessages1, Options.apply()) should be (Nil)
optional.convert("test", emptyData) should be (None)
}
case err => err should be (Nil)
}
}
it("empty data") {
val emptyData = Map("test" -> null)
optional.validate("test", emptyData, dummyMessages1, Options.apply()) match {
case Nil => {
base.validate("test", emptyData, dummyMessages1, Options.apply()) should be (Nil)
optional.convert("test", emptyData) should be (None)
}
case err => err should be (Nil)
}
}
}
describe("default-compound") {
val dummyMessages1: Messages = (key: String) => {
if (key == "error.object") Some("%s missing or not valid")
else Some("dummy")
}
val base = Mappings.mapping(
"id" -> Mappings.long(),
"name" -> Mappings.text(),
"desc" -> Mappings.optional(Mappings.text())
)(TestBean.apply)
val testBean = TestBean(35, "test1", Some("test bean"))
val default = Mappings.default(base, testBean)
it("invalid data") {
val invalidData = Map(
"test.id" -> "t135",
"test.name" -> "test",
"test.desc" -> ""
)
default.validate("test", invalidData, dummyMessages1, Options.apply()) match {
case Nil => ("invalid - shouldn't occur!") should be ("")
case err => {
err should be (Seq("test.id" -> "dummy"))
base.validate("test", invalidData, dummyMessages1, Options.apply()) should be (Seq("test.id" -> "dummy"))
}
}
}
it("valid data") {
val validData = Map(
"test.id" -> "135",
"test.name" -> "test",
"test.desc" -> ""
)
default.validate("test", validData, dummyMessages1, Options.apply()) match {
case Nil => {
base.validate("test", validData, dummyMessages1, Options.apply()) should be (Nil)
default.convert("test", validData) should be (TestBean(135, "test"))
}
case err => err should be (Nil)
}
}
it("null data") {
val nullData = Map[String, String]()
default.validate("test", nullData, dummyMessages1, Options.apply()) match {
case Nil => {
base.validate("test", nullData, dummyMessages1, Options.apply()) should be (Nil)
default.convert("test", nullData) should be (testBean)
}
case err => err should be (Nil)
}
}
it("empty data (wrong way)") {
val emptyData = Map("test" -> "")
default.validate("test", emptyData, dummyMessages1, Options.apply()) match {
case Nil => {
base.validate("test", emptyData, dummyMessages1, Options.apply()) should be (Nil)
default.convert("test", emptyData) should be (testBean)
}
case err => err should be (Nil)
}
}
it("empty data") {
val emptyData = Map("test" -> null)
default.validate("test", emptyData, dummyMessages1, Options.apply()) match {
case Nil => {
base.validate("test", emptyData, dummyMessages1, Options.apply()) should be (Nil)
default.convert("test", emptyData) should be (testBean)
}
case err => err should be (Nil)
}
}
}
describe("list-compound") {
val dummyMessages1: Messages = (key: String) => {
if (key == "error.object") Some("%s missing or not valid")
else Some("dummy")
}
val base = Mappings.mapping(
"id" -> Mappings.long(),
"name" -> Mappings.text(),
"desc" -> Mappings.optional(Mappings.text())
)(TestBean.apply)
val list = Mappings.list(base)
it("invalid data") {
val invalidData = Map(
"test[0].id" -> "t135",
"test[0].name" -> "test",
"test[0].desc" -> "",
"test[1].id" -> "t137",
"test[1].name" -> "test",
"test[1].desc" -> "tt"
)
list.validate("test", invalidData, dummyMessages1, Options.apply()) match {
case Nil => ("invalid - shouldn't occur!") should be ("")
case err => err should be (Seq("test[0].id" -> "dummy", "test[1].id" -> "dummy"))
}
}
it("valid data") {
val validData = Map(
"test[0].id" -> "135",
"test[0].name" -> "test",
"test[0].desc" -> "",
"test[1].id" -> "137",
"test[1].name" -> "test1",
"test[1].desc" -> "tt"
)
list.validate("test", validData, dummyMessages1, Options.apply()) match {
case Nil => {
base.validate("test[0]", validData, dummyMessages1, Options.apply()) should be (Nil)
base.validate("test[1]", validData, dummyMessages1, Options.apply()) should be (Nil)
list.convert("test", validData) should be (List(TestBean(135, "test"), TestBean(137, "test1", Some("tt"))))
}
case err => err should be (Nil)
}
}
it("null data") {
val nullData = Map[String, String]()
list.validate("test", nullData, dummyMessages1, Options.apply()) match {
case Nil => {
base.validate("test[0]", nullData, dummyMessages1, Options.apply()) should be (Nil)
list.convert("test", nullData) should be (Nil)
}
case err => err should be (Nil)
}
}
it("empty data (wrong way)") {
val emptyData = Map("test" -> "")
list.validate("test", emptyData, dummyMessages1, Options.apply()) match {
case Nil => {
base.validate("test[0]", emptyData, dummyMessages1, Options.apply()) should be (Nil)
list.convert("test", emptyData) should be (Nil)
}
case err => err should be (Nil)
}
}
it("empty data") {
val emptyData = Map("test" -> null)
list.validate("test", emptyData, dummyMessages1, Options.apply()) match {
case Nil => {
base.validate("test[0]", emptyData, dummyMessages1, Options.apply()) should be (Nil)
list.convert("test", emptyData) should be (Nil)
}
case err => err should be (Nil)
}
}
}
describe("map-compound") {
val dummyMessages1: Messages = (key: String) => {
if (key == "error.object") Some("%s missing or not valid")
else Some("dummy")
}
val key = Mappings.int()
val value = Mappings.mapping(
"id" -> Mappings.long(),
"name" -> Mappings.text(),
"desc" -> Mappings.optional(Mappings.text())
)(TestBean.apply)
val map = Mappings.map(key, value)
it("invalid data") {
val invalidData = Map(
"test.101.id" -> "t135",
"test.101.name" -> "test",
"test.101.desc" -> "",
"test.103.id" -> "t137",
"test.103.name" -> "test",
"test.103.desc" -> "tt"
)
map.validate("test", invalidData, dummyMessages1, Options.apply()) match {
case Nil => ("invalid - shouldn't occur!") should be ("")
case err => err.toList should be (List("test.103.id" -> "dummy", "test.101.id" -> "dummy"))
}
}
it("valid data") {
val validData = Map(
"test.101.id" -> "135",
"test.101.name" -> "test",
"test.101.desc" -> "",
"test.103.id" -> "137",
"test.103.name" -> "test1",
"test.103.desc" -> "tt"
)
map.validate("test", validData, dummyMessages1, Options.apply()) match {
case Nil => {
value.validate("test.101", validData, dummyMessages1, Options.apply()) should be (Nil)
value.validate("test.101", validData, dummyMessages1, Options.apply()) should be (Nil)
map.convert("test", validData) should be (Map(101 -> TestBean(135, "test"), 103 -> TestBean(137, "test1", Some("tt"))))
}
case err => err should be (Nil)
}
}
it("null data") {
val nullData = Map[String, String]()
map.validate("test", nullData, dummyMessages1, Options.apply()) match {
case Nil => {
value.validate("test.101", nullData, dummyMessages1, Options.apply()) should be (Nil)
map.convert("test", nullData) should be (Map())
}
case err => err should be (Nil)
}
}
it("empty data (wrong way)") {
val emptyData = Map("test" -> "")
map.validate("test", emptyData, dummyMessages1, Options.apply()) match {
case Nil => {
value.validate("test.101", emptyData, dummyMessages1, Options.apply()) should be (Nil)
map.convert("test", emptyData) should be (Map())
}
case err => err should be (Nil)
}
}
it("empty data") {
val emptyData = Map("test" -> null)
map.validate("test", emptyData, dummyMessages1, Options.apply()) match {
case Nil => {
value.validate("test.101", emptyData, dummyMessages1, Options.apply()) should be (Nil)
map.convert("test", emptyData) should be (Map())
}
case err => err should be (Nil)
}
}
}
describe("w/ options") {
it("pass thru options") {
val base = Mappings.mapping(
"id" -> Mappings.long(required("%s is required")).label("id"),
"name" -> Mappings.text(),
"desc" -> Mappings.optional(Mappings.text())
)(TestBean.apply)
val list = Mappings.list(base)
val data = Map(
"test[0].id" -> "",
"test[0].name" -> "test",
"test[0].desc" -> "",
"test[1].id" -> "137",
"test[1].name" -> "test1",
"test[1].desc" -> "tt"
)
list.validate("test", data, messages, Options.apply()) match {
case Nil => ("invalid - shouldn't occur!") should be ("")
case err => err.toList should be (List("test[0].id" -> "id is required"))
}
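// Note (comment added for clarity): with `skipUntouched(true)` below, untouched/empty inputs are
// skipped during validation, so the missing `test[0].id` no longer yields an error and conversion
// falls back to the numeric default (0), as asserted in the expected result.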
list.validate("test", data, messages, Options().skipUntouched(true)) match {
case Nil => list.convert("test", data) should be (
List(TestBean(0, "test"), TestBean(137, "test1", Some("tt"))))
case err => err should be (Nil)
}
}
}
}
}
|
tminglei/form-binder
|
src/test/scala/com/github/tminglei/bind/GeneralMappingsSpec.scala
|
Scala
|
bsd-2-clause
| 25,160
|
package io.buoyant.namer.consul
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.{Addr, ConnectionFailedException, Failure}
import com.twitter.util._
import io.buoyant.consul.v1
private[consul] object DcServices {
/**
* We use a shared stats object so that counters/stats are not
* created anew for each DC/service.
*/
case class Stats(stats0: StatsReceiver) {
val stats = stats0.scope("dc")
val opens = stats.counter("opens")
val closes = stats.counter("closes")
val errors = stats.counter("errors")
val updates = stats.counter("updates")
val adds = stats.counter("adds")
val removes = stats.counter("removes")
val service = SvcAddr.Stats(stats0.scope("service"))
}
/**
* Contains all cached serviceMap responses and the mapping of names
* to Addrs for a particular datacenter.
*
* If the named datacenter does not exist, the consul API will retry
* indefinitely. This is because missing datacenters cannot be
* distinguished from server errors.
*/
def apply(
consulApi: v1.ConsulApi,
name: String,
domain: Option[String],
consistency: Option[v1.ConsistencyMode],
preferServiceAddress: Option[Boolean] = None,
stats: Stats
): Activity[Map[SvcKey, Var[Addr]]] = {
def getServices(index: Option[String]): Future[v1.Indexed[Set[SvcKey]]] =
consulApi.serviceMap(
datacenter = Some(name),
blockingIndex = index,
consistency = consistency,
retry = true
).map(toServices)
val states = Var.async[Activity.State[Map[SvcKey, Var[Addr]]]](Activity.Pending) { state =>
stats.opens.incr()
@volatile var stopped: Boolean = false
def loop(index0: Option[String], cache: Map[SvcKey, Var[Addr]]): Future[Unit] = {
if (stopped) Future.Unit
else getServices(index0).transform {
case Throw(Failure(Some(err: ConnectionFailedException))) =>
// Drop the index, in case it's been reset by a consul restart
loop(None, cache)
case Throw(e) =>
// If an exception escaped getService's retries, we treat it as
// effectively fatal to DC observation. In the future, we
// may consider retrying certain failures (with backoff).
state() = Activity.Failed(e)
stats.errors.incr()
Future.exception(e)
case Return(v1.Indexed(_, None)) =>
// If consul didn't give us an index, all bets are off.
state() = Activity.Failed(NoIndexException)
stats.errors.incr()
Future.exception(NoIndexException)
case Return(v1.Indexed(keys, index1)) =>
stats.updates.incr()
cache.keys.foreach { k =>
if (!keys(k)) {
log.debug("consul deleted: %s", k)
stats.removes.incr()
}
}
// Create a Var[Addr] for each new service. These addrs
// are lazily evaluated, so no additional work is done
// until the addr is observed.
val updated = keys.map { k =>
val svc = cache.get(k) match {
case Some(svc) => svc
case None =>
log.debug("consul added: %s", k)
stats.adds.incr()
SvcAddr(consulApi, name, k, domain, consistency, preferServiceAddress, stats.service)
}
k -> svc
}.toMap
state() = Activity.Ok(updated)
loop(index1, updated)
}
}
val pending = loop(None, Map.empty)
Closable.make { _ =>
stopped = true
pending.raise(DcRelease)
stats.closes.incr()
Future.Unit
}
}
Activity(states)
}
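// A minimal usage sketch (illustrative only; `consulApi`, `statsReceiver` and the dc name
// are assumptions, not taken from this module):
//
//   val dc: Activity[Map[SvcKey, Var[Addr]]] =
//     DcServices(consulApi, "dc1", domain = None, consistency = None, stats = Stats(statsReceiver))
//   dc.run.changes.respond {
//     case Activity.Ok(services) => services.keys.foreach(k => log.debug("svc: %s", k))
//     case _ => ()
//   }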
private[this] val NoIndexException =
Failure("consul did not return an index")
private[this] val DcRelease =
Failure("dc observation released", Failure.Interrupted)
private[this] val toServices: v1.Indexed[Map[String, Seq[String]]] => v1.Indexed[Set[SvcKey]] = {
case v1.Indexed(services, idx) =>
val keys = services.flatMap {
case (svcName, tags) =>
tags.map(tag => SvcKey(svcName.toLowerCase, Some(tag.toLowerCase))) :+ SvcKey(svcName.toLowerCase, None)
}
v1.Indexed(keys.toSet, idx)
}
}
|
denverwilliams/linkerd
|
namer/consul/src/main/scala/io/buoyant/namer/consul/DcServices.scala
|
Scala
|
apache-2.0
| 4,368
|
package eventstore
package akka
import core.constants.MaxBatchSize
class ReadStreamEventsBackwardITest extends TestConnection {
implicit val direction: ReadDirection = ReadDirection.Backward
"read stream events forward" should {
"fail if count <= 0" in new TestConnectionScope {
readStreamEventsFailed(EventNumber.Last, 0) must throwAn[IllegalArgumentException]
readStreamEventsFailed(EventNumber.Last, -1) must throwAn[IllegalArgumentException]
}
"fail if count > MaxBatchSize" in new TestConnectionScope {
readStreamEventsFailed(EventNumber.Last, MaxBatchSize + 1) must throwAn[IllegalArgumentException]
}
"fail if stream not found" in new TestConnectionScope {
readStreamEventsFailed(EventNumber.Last, 1000) must throwA[StreamNotFoundException]
}
"fail if stream has been deleted" in new TestConnectionScope {
appendEventToCreateStream()
deleteStream()
readStreamEventsFailed(EventNumber.Last, 1000) must throwA[StreamDeletedException]
}
"read last number if stream truncated" in new TestConnectionScope {
appendEventToCreateStream()
truncateStream(EventNumber.Exact(1))
readStreamEventsCompleted(EventNumber.Last, 1).lastEventNumber mustEqual EventNumber.First
}
"read last number if stream truncated many" in new TestConnectionScope {
appendMany(10)
truncateStream(EventNumber.Exact(15))
actor ! ReadEvent.StreamMetadata(streamId.metadata)
expectMsgType[ReadEventCompleted]
readStreamEventsCompleted(EventNumber.Last, 1).lastEventNumber mustEqual EventNumber.Exact(9)
val events = appendMany(10).takeRight(5)
val result = readStreamEventsCompleted(EventNumber.Last, 10)
result.events.size mustEqual 5
result.events.map { _.data } mustEqual events.reverse
}
"get empty slice if called with non existing range" in new TestConnectionScope {
append(newEventData)
readStreamEvents(EventNumber(1000), 10) must beEmpty
}
"get partial slice if not enough events in stream" in new TestConnectionScope {
append(newEventData)
readStreamEvents(EventNumber(0), 1000) must haveSize(1)
}
"get events in reversed order as written" in new TestConnectionScope {
val events = appendMany()
readStreamEvents(EventNumber.Last, 10) mustEqual events.reverse
}
"be able to read single event from arbitrary position" in new TestConnectionScope {
val events = appendMany()
readStreamEvents(EventNumber(5), 1) mustEqual List(events(5))
}
"be able to read slice from arbitrary position" in new TestConnectionScope {
val events = appendMany()
readStreamEvents(EventNumber(5), 3) mustEqual List(events(5), events(4), events(3))
}
"be able to read first event" in new TestConnectionScope {
val events = appendMany()
val result = readStreamEventsCompleted(EventNumber.First, 1)
result.events.map(_.data) mustEqual List(events.head)
result.endOfStream must beTrue
result.nextEventNumber mustEqual EventNumber.Last
}
"be able to read last event" in new TestConnectionScope {
val events = appendMany()
val result = readStreamEventsCompleted(EventNumber(9), 1)
result.events.map(_.data) mustEqual List(events.last)
result.endOfStream must beFalse
result.nextEventNumber mustEqual EventNumber(8)
}
"read not modified events" in new TestConnectionScope {
appendMany()
def read() = readStreamEventsCompleted(EventNumber.First, 1)
val r1 = read()
val r2 = read()
r1.events mustEqual r2.events
}
"not read linked events if resolveLinkTos = false" in new TestConnectionScope {
val (linked, link) = linkedAndLink()
val event = readStreamEventsCompleted(EventNumber.Last, 5, resolveLinkTos = false).events.head
event mustEqual link
}
"read linked events if resolveLinkTos = true" in new TestConnectionScope {
val (linked, link) = linkedAndLink()
val event = readStreamEventsCompleted(EventNumber.Last, 5, resolveLinkTos = true).events.head
event mustEqual ResolvedEvent(linked, link)
}
}
}
|
EventStore/EventStore.JVM
|
client/src/test/scala/eventstore/akka/ReadStreamEventsBackwardITest.scala
|
Scala
|
bsd-3-clause
| 4,207
|
package com.sksamuel.elastic4s.searches.suggestions
case class SuggestDefinition(suggestions: Seq[SuggestionDefinition])
|
ulric260/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/suggestions/SuggestDefinition.scala
|
Scala
|
apache-2.0
| 121
|
package org.thp.cortex.services
import javax.inject.{ Inject, Singleton }
import scala.concurrent.Future
import scala.concurrent.duration.Duration
import play.api.Configuration
import play.api.cache.AsyncCacheApi
import play.api.libs.json.JsObject
import akka.NotUsed
import akka.stream.scaladsl.Source
import org.thp.cortex.models.{ Organization, OrganizationModel }
import org.elastic4play.controllers.Fields
import org.elastic4play.database.ModifyConfig
import org.elastic4play.services._
@Singleton
class OrganizationSrv(
cacheExpiration: Duration,
organizationModel: OrganizationModel,
getSrv: GetSrv,
updateSrv: UpdateSrv,
findSrv: FindSrv,
deleteSrv: DeleteSrv,
createSrv: CreateSrv,
cache: AsyncCacheApi) {
@Inject() def this(
config: Configuration,
organizationModel: OrganizationModel,
getSrv: GetSrv,
updateSrv: UpdateSrv,
findSrv: FindSrv,
deleteSrv: DeleteSrv,
createSrv: CreateSrv,
cache: AsyncCacheApi) = this(
config.get[Duration]("cache.organization"),
organizationModel,
getSrv,
updateSrv,
findSrv,
deleteSrv,
createSrv,
cache)
def create(fields: Fields)(implicit authContext: AuthContext): Future[Organization] = {
createSrv[OrganizationModel, Organization](organizationModel, fields)
}
def get(orgId: String): Future[Organization] = cache.getOrElseUpdate(s"org-$orgId", cacheExpiration) {
getSrv[OrganizationModel, Organization](organizationModel, orgId)
}
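// Note (comment added for clarity): lookups are memoized in the Play cache under the key
// "org-<id>" for `cacheExpiration`; the mutating operations below evict that entry before
// writing, so a later `get` refreshes it from storage.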
def update(orgId: String, fields: Fields)(implicit Context: AuthContext): Future[Organization] =
update(orgId, fields, ModifyConfig.default)
def update(orgId: String, fields: Fields, modifyConfig: ModifyConfig)(implicit Context: AuthContext): Future[Organization] = {
cache.remove(s"org-$orgId")
updateSrv[OrganizationModel, Organization](organizationModel, orgId, fields, modifyConfig)
}
def update(organization: Organization, fields: Fields)(implicit Context: AuthContext): Future[Organization] =
update(organization, fields, ModifyConfig.default)
def update(organization: Organization, fields: Fields, modifyConfig: ModifyConfig)(implicit Context: AuthContext): Future[Organization] = {
cache.remove(s"org-${organization.id}")
updateSrv(organization, fields, modifyConfig)
}
def delete(orgId: String)(implicit Context: AuthContext): Future[Organization] = {
cache.remove(s"org-$orgId")
deleteSrv[OrganizationModel, Organization](organizationModel, orgId)
}
def find(queryDef: QueryDef, range: Option[String], sortBy: Seq[String]): (Source[Organization, NotUsed], Future[Long]) = {
findSrv[OrganizationModel, Organization](organizationModel, queryDef, range, sortBy)
}
def stats(queryDef: QueryDef, aggs: Seq[Agg]): Future[JsObject] = findSrv(organizationModel, queryDef, aggs: _*)
}
|
CERT-BDF/Cortex
|
app/org/thp/cortex/services/OrganizationSrv.scala
|
Scala
|
agpl-3.0
| 2,867
|
package cromwell.logging
import java.time.OffsetDateTime
import java.time.format.DateTimeFormatter
import ch.qos.logback.classic.Level
import ch.qos.logback.classic.pattern.ThrowableProxyConverter
import ch.qos.logback.classic.spi.ILoggingEvent
import ch.qos.logback.core.LayoutBase
import common.util.TerminalUtil
object TerminalLayout {
val Converter = new ThrowableProxyConverter
Converter.start()
private val dateTimeFormatter = DateTimeFormatter.ofPattern("uuuu-MM-dd HH:mm:ss,SS")
implicit class EnhancedILoggingEvent(val event: ILoggingEvent) extends AnyVal {
def toStackTrace: String = Converter.convert(event)
}
implicit class ColorString(msg: String) {
def colorizeUuids: String = {
"UUID\\\\((.*?)\\\\)".r.findAllMatchIn(msg).foldLeft(msg) {
case (l, r) =>
val color = if (Option(System.getProperty("RAINBOW_UUID")).isDefined)
Math.abs(17 * r.group(1).substring(0,8).map(_.toInt).product) % 209 + 22
else 2
l.replace(r.group(0), TerminalUtil.highlight(color, r.group(1)))
}
}
def colorizeCommand: String = msg.replaceAll("`([^`]*?)`", TerminalUtil.highlight(5, "$1"))
}
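// Illustrative examples (not part of the original source):
//   "run `docker ps` now".colorizeCommand // highlights "docker ps" via TerminalUtil.highlight(5, ...)
//   "workflow UUID(deadbeef-0000-...)".colorizeUuids // colors the UUID (rainbow hue only if -DRAINBOW_UUID is set)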
}
class TerminalLayout extends LayoutBase[ILoggingEvent] {
import TerminalLayout._
def doLayout(event: ILoggingEvent): String = {
val level = event.getLevel match {
case Level.WARN => TerminalUtil.highlight(220, "warn")
case Level.ERROR => TerminalUtil.highlight(1, "error")
case x => x.toString.toLowerCase
}
val timestamp = OffsetDateTime.now.format(dateTimeFormatter)
val highlightedMessage = event.getFormattedMessage.colorizeUuids.colorizeCommand
s"[$timestamp] [$level] $highlightedMessage\\n${event.toStackTrace}"
}
}
|
ohsu-comp-bio/cromwell
|
engine/src/main/scala/cromwell/logging/TerminalLayout.scala
|
Scala
|
bsd-3-clause
| 1,744
|
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.driver.factory
import java.io.File
import akka.event.slf4j.SLF4JLogging
import com.stratio.sparta.sdk.utils.AggregationTime
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.{Duration, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import scala.util.{Failure, Success, Try}
object SparkContextFactory extends SLF4JLogging {
private var sc: Option[SparkContext] = None
private var sparkSession: Option[SparkSession] = None
private var ssc: Option[StreamingContext] = None
private var sqlInitialSentences: Seq[String] = Seq.empty[String]
def sparkSessionInstance: SparkSession =
synchronized {
sparkSession.getOrElse {
if (sc.isDefined) sparkSession = Option(SparkSession.builder().config(sc.get.getConf).getOrCreate())
sqlInitialSentences.foreach(sentence => if (sentence.nonEmpty) sparkSession.get.sql(sentence))
sparkSession.get
}
}
def setInitialSentences(sentences: Seq[String]): Unit = sqlInitialSentences = sentences
def sparkStreamingInstance(batchDuration: Duration, checkpointDir: String, remember: Option[String]):
Option[StreamingContext] = {
synchronized {
ssc match {
case Some(_) => ssc
case None => ssc = Some(getNewStreamingContext(batchDuration, checkpointDir, remember))
}
}
ssc
}
def setSparkContext(createdContext: SparkContext): Unit = sc = Option(createdContext)
def setSparkStreamingContext(createdContext: StreamingContext): Unit = ssc = Option(createdContext)
private def getNewStreamingContext(batchDuration: Duration, checkpointDir: String, remember: Option[String]):
StreamingContext = {
val ssc = new StreamingContext(sc.get, batchDuration)
ssc.checkpoint(checkpointDir)
remember.foreach(value => ssc.remember(Duration(AggregationTime.parseValueToMilliSeconds(value))))
ssc
}
def sparkStandAloneContextInstance(specificConfig: Map[String, String], jars: Seq[File]): SparkContext =
synchronized {
sc.getOrElse(instantiateSparkContext(specificConfig, jars))
}
def sparkClusterContextInstance(specificConfig: Map[String, String], files: Seq[String]): SparkContext =
sc.getOrElse(instantiateClusterContext(specificConfig, files))
private def instantiateSparkContext(specificConfig: Map[String, String], jars: Seq[File]): SparkContext = {
sc = Some(SparkContext.getOrCreate(configToSparkConf(specificConfig)))
jars.foreach(f => {
log.info(s"Adding jar ${f.getAbsolutePath} to Spark context")
sc.get.addJar(f.getAbsolutePath)
})
sc.get
}
private def instantiateClusterContext(specificConfig: Map[String, String], files: Seq[String]): SparkContext = {
sc = Some(SparkContext.getOrCreate(configToSparkConf(specificConfig)))
files.foreach(f => {
log.info(s"Adding jar $f to cluster Spark context")
sc.get.addJar(f)
})
sc.get
}
private def configToSparkConf(specificConfig: Map[String, String]): SparkConf = {
val conf = new SparkConf()
specificConfig.foreach { case (key, value) => conf.set(key, value) }
conf
}
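// Illustrative example (keys are assumptions): a map such as
//   Map("spark.master" -> "local[2]", "spark.app.name" -> "sparta-job")
// is copied entry-by-entry into the returned SparkConf via conf.set(key, value).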
def destroySparkStreamingContext(): Unit = {
ssc.fold(log.warn("Spark Streaming Context is empty")) { streamingContext =>
try {
synchronized {
log.info(s"Stopping Streaming Context with name: ${streamingContext.sparkContext.appName}")
Try(streamingContext.stop(stopSparkContext = false, stopGracefully = false)) match {
case Success(_) =>
log.info("Streaming Context have been stopped")
case Failure(error) =>
log.error("Streaming Context is not been stopped correctly", error)
}
}
} finally {
ssc = None
}
}
}
def destroySparkContext(destroyStreamingContext: Boolean = true): Unit = {
if (destroyStreamingContext)
destroySparkStreamingContext()
sc.fold(log.warn("Spark Context is empty")) { sparkContext =>
synchronized {
try {
log.info("Stopping SparkContext with name: " + sparkContext.appName)
sparkContext.stop()
log.info("Stopped SparkContext with name: " + sparkContext.appName)
} finally {
sparkSession = None
sqlInitialSentences = Seq.empty[String]
ssc = None
sc = None
}
}
}
}
}
|
diegohurtado/sparta
|
driver/src/main/scala/com/stratio/sparta/driver/factory/SparkContextFactory.scala
|
Scala
|
apache-2.0
| 5,026
|
import sbt.Keys._
import play.Project
object Build extends sbt.Build
{
val main = Project( "play-tmpltr", settings = play.Project.playScalaSettings ).settings(
name := "Play Tmpltr",
organization := "com.taig",
scalacOptions ++= Seq( "-feature", "-language:implicitConversions", "-language:existentials" ),
version := "1.0.0-BETA"
)
}
|
Taig/Play-Tmpltr
|
project/Build.scala
|
Scala
|
mit
| 345
|
package com.github.vooolll.serialization.compatibility
import java.net.URL
import com.github.vooolll.domain.oauth.FacebookAppId
import com.github.vooolll.domain.profile.FacebookApplication
import com.github.vooolll.serialization.FacebookDecoders._
class ApplicationCompatSpec extends CompatibilitySpec {
val applicationPath = "testdata/basic_application.json"
val application = FacebookApplication(
FacebookAppId("1969406143275709"),
new URL("https://www.facebook.com/games/?app_id=1969406143275709"),
"testing_app"
)
"FacebookApplication" should {
s"be compatible with $applicationPath" in {
decodeJson[FacebookApplication](applicationPath) shouldBe application
}
}
}
|
vooolll/facebook4s
|
src/test/scala/com/github/vooolll/serialization/compatibility/ApplicationCompatSpec.scala
|
Scala
|
apache-2.0
| 711
|
package uk.co.seansaville.ninetyninescalaprobs.lists
import uk.co.seansaville.ninetyninescalaprobs.UnitSpec
class Problem28Spec extends UnitSpec {
"lsort" should "sort a list of lists according to their lengths" in {
val testList = List(List(1, 2, 3), List(1), List(1, 2), List(1, 2, 3, 4))
assert(Problem28.lsort(testList) == List(List(1), List(1, 2), List(1, 2, 3), List(1, 2, 3, 4)))
}
"lsortFreq" should "sort a list of lists according to the frequency of their lengths" in {
val testList = List(List(2, 2), List(1), List(3, 3, 3), List(2, 2), List(3, 3, 3), List(3, 3, 3))
assert(Problem28.lsortFreq(testList) ==
List(List(1), List(2, 2), List(2, 2), List(3, 3, 3), List(3, 3, 3), List(3, 3, 3)))
}
}
|
seansaville/99scalaprobs
|
src/test/scala/uk/co/seansaville/ninetyninescalaprobs/lists/Problem28Spec.scala
|
Scala
|
mit
| 741
|
package chrome.omnibox.bindings
import scala.scalajs.js
class SuggestResult extends js.Object {
val content: String = js.native
val description: String = js.native
}
object SuggestResult {
def apply(content: String, description: String): SuggestResult = {
js.Dynamic.literal(
content = content,
description = description
).asInstanceOf[SuggestResult]
}
}
|
amsayk/scala-js-chrome
|
bindings/src/main/scala/chrome/omnibox/bindings/SuggestResult.scala
|
Scala
|
mit
| 391
|
package scalapb.compiler
import scalapb.compiler.EnclosingType.Collection
sealed trait Expression extends Product with Serializable {
def andThen(other: Expression) = (this, other) match {
case (Identity, e2: LiteralExpression) => e2
case (e1, Identity) => e1
case (ExpressionList(l1), ExpressionList(l2)) => ExpressionList(l2 ++ l1)
case (ExpressionList(l1), e: LiteralExpression) => ExpressionList(e :: l1)
case (e: LiteralExpression, ExpressionList(l2)) => ExpressionList(l2 :+ e)
case (e1: LiteralExpression, e2: LiteralExpression) => ExpressionList(e2 :: e1 :: Nil)
}
def apply(
e: String,
sourceType: EnclosingType,
targetType: EnclosingType,
mustCopy: Boolean
): String =
ExpressionBuilder.run(this, e, sourceType, targetType, mustCopy)
def apply(e: String, sourceType: EnclosingType, targetType: EnclosingType): String =
ExpressionBuilder.run(this, e, sourceType, targetType, false)
def apply(e: String, sourceType: EnclosingType, mustCopy: Boolean): String =
ExpressionBuilder.run(this, e, sourceType, sourceType, mustCopy)
def apply(e: String, sourceType: EnclosingType): String =
ExpressionBuilder.run(this, e, sourceType, sourceType, false)
}
case class ExpressionList(l: List[LiteralExpression]) extends Expression
sealed trait LiteralExpression extends Expression {
def isIdentity: Boolean
def isFunctionApplication: Boolean
}
case object Identity extends LiteralExpression {
def isIdentity: Boolean = true
def isFunctionApplication: Boolean = false
}
case class FunctionApplication(name: String) extends LiteralExpression {
def isIdentity: Boolean = false
def isFunctionApplication: Boolean = true
}
case class MethodApplication(name: String) extends LiteralExpression {
def isIdentity: Boolean = false
def isFunctionApplication: Boolean = false
}
case class OperatorApplication(op: String) extends LiteralExpression {
def isIdentity: Boolean = false
def isFunctionApplication: Boolean = false
}
object ExpressionBuilder {
def runSingleton(es: List[LiteralExpression])(e: String): String = es match {
case Nil => e
case Identity :: tail => runSingleton(tail)(e)
case FunctionApplication(name) :: tail => s"$name(${runSingleton(tail)(e)})"
case MethodApplication(name) :: tail => s"${runSingleton(tail)(e)}.$name"
case OperatorApplication(name) :: tail => s"${runSingleton(tail)(e)} $name"
}
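// Illustrative trace (not from the original source): the head expression wraps the result of
// the tail, so
//   runSingleton(List(FunctionApplication("f"), MethodApplication("trim")))("x") == "f(x.trim)"
// and OperatorApplication("++ suffix") applied to "x" renders as "x ++ suffix".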
def convertCollection(expr: String, targetType: EnclosingType): String = {
val convert = List(targetType match {
case Collection(_, Some(tc)) => FunctionApplication(s"${tc}.fromIteratorUnsafe")
case Collection(DescriptorImplicits.ScalaVector, _) => MethodApplication("toVector")
case Collection(DescriptorImplicits.ScalaSeq, _) => MethodApplication("toSeq")
case Collection(DescriptorImplicits.ScalaMap, _) => MethodApplication("toMap")
case Collection(DescriptorImplicits.ScalaIterable, _) =>
FunctionApplication("_root_.scalapb.internal.compat.toIterable")
case Collection(_, _) => FunctionApplication("_root_.scalapb.internal.compat.convertTo")
case _ => Identity
})
runSingleton(convert)(expr)
}
def runCollection(
es: List[LiteralExpression]
)(e0: String, sourceType: EnclosingType, targetType: EnclosingType, mustCopy: Boolean): String = {
require(sourceType != EnclosingType.None)
val nontrivial: List[LiteralExpression] = es.filterNot(_.isIdentity)
val needVariable =
nontrivial
.filterNot(_.isIdentity)
.dropRight(1)
.exists(_.isFunctionApplication)
val e = sourceType match {
case Collection(_, Some(tc)) => s"$tc.toIterator($e0)"
case Collection(DescriptorImplicits.ScalaIterator, None) => e0
case Collection(_, None) => s"$e0.iterator"
case _ => e0
}
val forceTypeConversion = sourceType match {
case Collection(_, Some(_)) if sourceType != targetType => true
case _ => false
}
if (needVariable)
convertCollection(s"""$e.map(__e => ${runSingleton(nontrivial)("__e")})""", targetType)
else if (nontrivial.nonEmpty) {
val f = nontrivial match {
case List(FunctionApplication(name)) =>
s"${name}(_)"
case _ =>
runSingleton(nontrivial)("_")
}
convertCollection(s"""$e.map($f)""", targetType)
} else if (mustCopy) {
convertCollection(s"""$e.map(_root_.scala.Predef.identity)""", targetType)
} else if (forceTypeConversion) {
convertCollection(e, targetType)
} else e0
}
private[scalapb] def run(
es: List[LiteralExpression],
e: String,
sourceType: EnclosingType,
targetType: EnclosingType,
mustCopy: Boolean
): String =
sourceType match {
case EnclosingType.None =>
runSingleton(es)(e)
case _ =>
runCollection(es)(e, sourceType, targetType, mustCopy)
}
private[scalapb] def run(
es: Expression,
e: String,
sourceType: EnclosingType,
targetType: EnclosingType,
mustCopy: Boolean
): String =
es match {
case ExpressionList(l) => run(l, e, sourceType, targetType, mustCopy)
case expr: LiteralExpression => run(expr :: Nil, e, sourceType, targetType, mustCopy)
}
@deprecated("0.10.10", "Use Expression()")
def run(
es: Expression
)(e: String, sourceType: EnclosingType, mustCopy: Boolean): String =
run(es, e, sourceType, sourceType, mustCopy)
}
sealed trait EnclosingType {
def asType(enclosed: String): String = this match {
case EnclosingType.None => enclosed
case EnclosingType.ScalaOption => s"${DescriptorImplicits.ScalaOption}[$enclosed]"
case EnclosingType.Collection(cc, _) => s"${cc}[$enclosed]"
}
}
object EnclosingType {
case object None extends EnclosingType
case object ScalaOption extends EnclosingType
/** Indicates that the result should be a collection with type constructor cc, such as List, Map.
*/
case class Collection(cc: String, typeClass: Option[String]) extends EnclosingType {
def this(cc: String) = { this(cc, scala.None) }
}
}
|
scalapb/ScalaPB
|
compiler-plugin/src/main/scala/scalapb/compiler/ExpressionBuilder.scala
|
Scala
|
apache-2.0
| 6,512
|
package ch14
object ex03 {
def swap(a: Array[Int]) = {
a match {
case Array(x, y, z @ _*) => Array(y, x) ++ z
case _ => a
}
}
def main(args: Array[String]): Unit = {
val arr1 = Array(1)
val arr2 = Array(1, 2, 3, 4)
println(swap(arr1).toList)
println(swap(arr2).toList)
}
}
|
tuxdna/scala-for-the-impatient-exercises
|
src/main/scala/ch14/ex03.scala
|
Scala
|
apache-2.0
| 318
|
package edu.nus.systemtesting.hipsleek
import java.nio.file.Path
import java.nio.file.Paths
import edu.nus.systemtesting.ExecutionOutput
import edu.nus.systemtesting.PreparedSystem
import edu.nus.systemtesting.Result
import edu.nus.systemtesting.Testable
import edu.nus.systemtesting.TestCaseConfiguration
import edu.nus.systemtesting.TestCase
import edu.nus.systemtesting.Parser.filterLinesMatchingRegex
import edu.nus.systemtesting.ProgramFlags.{ flagsOfProgram, isFlag }
import edu.nus.systemtesting.ExpectsOutput
object ValidateableSleekTestCase {
implicit def constructTestCase(ps: PreparedSystem, tc: Testable with ExpectsOutput, conf: TestCaseConfiguration): ValidateableSleekTestCase =
new ValidateableSleekTestCase(ps.binDir,
tc.commandName,
ps.corpusDir,
tc.fileName,
tc.arguments,
tc.expectedOutput,
conf.timeout)
}
class ValidateableSleekTestCase(binDir: Path = Paths.get(""),
cmd: Path = Paths.get(""),
corpDir: Path = Paths.get(""),
fn: Path = Paths.get(""),
args: String = "",
val expectedOutput: String = "",
timeout: Int = 300,
regex: String = "Validate \\\\d+: (OK|.*)")
extends TestCase(binDir, cmd, corpDir, fn, args, timeout) with ExpectsOutput {
// cf. sleekengine.ml process_validate function
// regex to capture things like:
// Validate #: OK
// Validate #: Expecting flow $IDENT
// Validate #: Expecting$EXP but got no residue
// Validate #: Expecting$EXP but got : Unsat (or Unknown)
// Validate #: Expecting$EXP but got : Sat (or Unknown)
// Validate #: Expecting$EXP but got : $RES
// Validate #: Expecting(3)$EXP but got : $RES
// Compared to typical SleekTestCase, the 'validation' is done by sleekengine.
// (Rather than comparing against expectedOutput from some run-fast-tests list)
// The above "Validate ..." needs to map to TestCaseResult which `checkResults`
// was taking care of.
// OK -> Result(idx, OK, OK)
// no residue -> Result(idx, OK, no residue)
// Unsat/Sat -> Result(idx, OK, Unsat/Sat)
// $RES -> Result(idx, OK, $EXP-but-got-$RES)
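// Illustrative mapping (hypothetical output lines, not captured from a real run):
//   "Validate 1: OK"                      -> Result("0", "OK", "OK")
//   "Validate 2: Expecting flow __Error"  -> Result("1", "OK", "Expecting flow __Error")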
def checkResults(output: ExecutionOutput): Either[List[String], Iterable[Result]] = {
// `parse` is responsible for populating `results` with
// lines which match `regex`.
val results = filterLinesMatchingRegex(output.output, regex)
if (results.isEmpty) {
val testFlags = arguments.split(" ").filter(isFlag)
val SleekFlags = flagsOfProgram(absCmdPath)
val invalidFlags = testFlags.filterNot(SleekFlags.contains)
if (!invalidFlags.isEmpty) {
val flagsStr = invalidFlags.map(f => s"Invalid flag $f").mkString("\n")
return Left(List("Binary failed to execute. Please investigate", flagsStr))
} else {
// Could try searching the output for errors?
return Left(List("Binary failed to execute. Please investigate",
"Output was:") ++
output.stdoutLines ++
output.stderrLines)
}
}
// Seems to be no impact of more `expect` commands than entailments.
def resultFromOutputLine(resultLine: String): String = {
// Always a `:` in the result Line.
val validateResidue = resultLine.substring(resultLine.indexOf(':') + 2)
val OkR = "OK".r
val FlowR = "Expecting flow (.*)".r
val NoResidueR = "Expecting(.*) BUT got no residue".r
val ExpGotResidueR = "Expecting(.*) BUT got : (.*)".r
// this crops up sometimes:
val Exp3GotResidueR = "Expecting\\\\(3\\\\)(.*) but got : (.*)".r
validateResidue match {
case OkR() => "OK"
case FlowR(ident) => validateResidue
case NoResidueR(exp) => s"No residue (expecting $exp)"
case ExpGotResidueR(exp,act) => s"Expecting $exp but got $act"
case Exp3GotResidueR(exp,act) => s"Expecting $exp but got $act"
case _ => s"UNKNOWN-VALIDATION: $validateResidue"
}
}
val resultUnits = results.zipWithIndex.flatMap({
case (resultLine, idx) => {
val expected = "OK"
val actual = resultFromOutputLine(resultLine)
Some(Result(idx.toString, expected, actual))
}
})
return Right(resultUnits)
}
}
|
rgoulter/system-testing
|
src/main/scala/edu/nus/systemtesting/hipsleek/ValidateableSleekTestCase.scala
|
Scala
|
mit
| 4,626
|
///////////////////////////////////////////////////////////////////////////////
// TimeDocument.scala
//
// Copyright (C) 2011, 2012 Ben Wing, The University of Texas at Austin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////////////
package opennlp.fieldspring.poligrounder
import collection.mutable
import opennlp.fieldspring.util.distances._
import opennlp.fieldspring.util.textdbutil.Schema
import opennlp.fieldspring.util.printutil._
import opennlp.fieldspring.gridlocate.{DistDocument,DistDocumentTable,CellGrid}
import opennlp.fieldspring.gridlocate.DistDocumentConverters._
import opennlp.fieldspring.worddist.WordDistFactory
class TimeDocument(
schema: Schema,
table: TimeDocumentTable
) extends DistDocument[TimeCoord](schema, table) {
var coord: TimeCoord = _
var user: String = _
def has_coord = coord != null
def title = if (coord != null) coord.toString else "unknown time"
def struct =
<TimeDocument>
{
if (has_coord)
<timestamp>{ coord }</timestamp>
}
</TimeDocument>
override def set_field(name: String, value: String) {
name match {
case "min-timestamp" => coord = get_x_or_null[TimeCoord](value)
case "user" => user = value
case _ => super.set_field(name, value)
}
}
def coord_as_double(coor: TimeCoord) = coor match {
case null => Double.NaN
case TimeCoord(x) => x.toDouble / 1000
}
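// Illustrative example: a TimeCoord holding epoch milliseconds 1500 maps to 1.5 (seconds),
// so documents stamped at 1500 ms and 4500 ms are 3.0 seconds apart.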
def distance_to_coord(coord2: TimeCoord) = {
(coord_as_double(coord2) - coord_as_double(coord)).abs
}
def output_distance(dist: Double) = "%s seconds" format dist
}
/**
* A DistDocumentTable specifically for documents with coordinates described
* by a TimeCoord.
* We delegate the actual document creation to a subtable specific to the
* type of corpus (e.g. Wikipedia or Twitter).
*/
class TimeDocumentTable(
override val driver: PoligrounderDriver,
word_dist_factory: WordDistFactory
) extends DistDocumentTable[TimeCoord, TimeDocument, TimeCellGrid](
driver, word_dist_factory
) {
def create_document(schema: Schema) = new TimeDocument(schema, this)
}
|
utcompling/fieldspring
|
src/main/scala/opennlp/fieldspring/poligrounder/TimeDocument.scala
|
Scala
|
apache-2.0
| 2,679
|