code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package org.jetbrains.plugins.scala
package lang
package parser
import _root_.com.intellij.psi.util.PsiTreeUtil
import _root_.org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportStmt
import com.intellij.lang.{ASTNode, ParserDefinition}
import com.intellij.openapi.project.Project
import com.intellij.psi.tree.{IFileElementType, TokenSet}
import com.intellij.psi.{FileViewProvider, PsiElement, PsiFile}
import org.jetbrains.plugins.dotty.lang.parser.{DottyParser, DottyPsiCreator}
import org.jetbrains.plugins.scala.lang.lexer.{ScalaLexer, ScalaTokenTypes}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaFileImpl
import org.jetbrains.plugins.scala.project.ProjectExt
import org.jetbrains.plugins.scala.settings._
/**
 * Parser definition for Scala files. Delegates to either the regular Scala
 * parser / PSI creator or the Dotty ones, depending on whether the project
 * was detected as a Dotty project in `createParser`.
 *
 * @author ilyas
 */
class ScalaParserDefinition extends ScalaParserDefinitionWrapper {

  // Remembered from the most recent createParser call so that createElement
  // chooses the matching PSI creator.
  // NOTE(review): mutable state shared between parser and PSI creation —
  // assumes createParser runs before createElement for the same project.
  private var hasDotty = false

  def createLexer(project: Project) = {
    val treatDocCommentAsBlockComment =
      ScalaProjectSettings.getInstance(project).isTreatDocCommentAsBlockComment
    new ScalaLexer(treatDocCommentAsBlockComment)
  }

  def createParser(project: Project) = {
    hasDotty = project.hasDotty
    if (hasDotty) new DottyParser else new ScalaParser
  }

  def getFileNodeType: IFileElementType = ScalaElementTypes.FILE

  def getCommentTokens: TokenSet = ScalaTokenTypes.COMMENTS_TOKEN_SET

  def getStringLiteralElements: TokenSet = ScalaTokenTypes.STRING_LITERAL_TOKEN_SET

  def getWhitespaceTokens: TokenSet = ScalaTokenTypes.WHITES_SPACES_TOKEN_SET

  def createElement(astNode: ASTNode): PsiElement =
    (if (hasDotty) DottyPsiCreator else ScalaPsiCreator).createElement(astNode)

  def createFile(fileViewProvider: FileViewProvider): PsiFile = {
    ScalaFileFactory.EP_NAME.getExtensions
      .view
      .flatMap(_.createFile(fileViewProvider))
      .headOption
      .getOrElse(new ScalaFileImpl(fileViewProvider))
  }

  override def spaceExistanceTypeBetweenTokens(leftNode: ASTNode, rightNode: ASTNode): ParserDefinition.SpaceRequirements = {
    import com.intellij.lang.ParserDefinition._
    // Require a line break directly after an import statement unless the
    // following whitespace token already contains one.
    // BUG FIX: the check previously used the two-character literal "\\n"
    // (backslash + 'n'), which never occurs in a whitespace token, making the
    // condition always true; a real newline character is intended.
    if (rightNode.getElementType != ScalaTokenTypes.tWHITE_SPACE_IN_LINE || !rightNode.getText.contains("\n")) {
      val imp: ScImportStmt = PsiTreeUtil.getParentOfType(leftNode.getPsi, classOf[ScImportStmt])
      if (imp != null && rightNode.getTextRange.getStartOffset == imp.getTextRange.getEndOffset)
        return SpaceRequirements.MUST_LINE_BREAK
    }
    (leftNode.getElementType, rightNode.getElementType) match {
      case (_, ScalaTokenTypes.kIMPORT) => SpaceRequirements.MUST_LINE_BREAK
      case _ => super.spaceExistanceTypeBetweenTokens(leftNode, rightNode)
    }
  }
}
| katejim/intellij-scala | src/org/jetbrains/plugins/scala/lang/parser/ScalaParserDefinition.scala | Scala | apache-2.0 | 2,666 |
package org.openurp.edu.eams.teach.program
import org.beangle.commons.inject.bind.AbstractBindModule
import org.springframework.transaction.interceptor.TransactionProxyFactoryBean
import com.ekingstar.eams.teach.major.helper.MajorPlanSearchHelper
import org.openurp.edu.eams.teach.program.bind.service.impl.StudentProgramBindServiceImpl
import org.openurp.edu.eams.teach.program.bind.web.action.ProgramBindManageAction
import org.openurp.edu.eams.teach.program.common.copydao.coursegroup.MajorCourseGroupCopyDaoHibernate
import org.openurp.edu.eams.teach.program.common.copydao.coursegroup.OriginalMajorCourseGroupCopyDaoHibernate
import org.openurp.edu.eams.teach.program.common.copydao.coursegroup.PersonalPlanCourseGroupCopyDaoHibernate
import org.openurp.edu.eams.teach.program.common.copydao.plan.MajorPlanCopyDaoHibernate
import org.openurp.edu.eams.teach.program.common.copydao.plan.OriginalMajorPlanCopyDaoHibernate
import org.openurp.edu.eams.teach.program.common.copydao.plan.PersonalPlanCopyDaoHibernate
import org.openurp.edu.eams.teach.program.common.copydao.plancourse.MajorPlanCourseCopyDaoHibernate
import org.openurp.edu.eams.teach.program.common.copydao.plancourse.OriginalMajorPlanCourseCopyDaoHibernate
import org.openurp.edu.eams.teach.program.common.copydao.plancourse.PersonalPlanCourseCopyDaoHibernate
import org.openurp.edu.eams.teach.program.common.dao.impl.PlanCommonDaoHibernate
import org.openurp.edu.eams.teach.program.common.dao.impl.PlanCourseCommonDaoHibernate
import org.openurp.edu.eams.teach.program.common.dao.impl.PlanCourseGroupCommonDaoHibernate
import org.openurp.edu.eams.teach.program.common.service.impl.PlanCompareServiceImpl
import org.openurp.edu.eams.teach.program.major.dao.hibernate.MajorPlanAuditDaoHibernate
import org.openurp.edu.eams.teach.program.major.dao.hibernate.MajorPlanCourseDaoHibernate
import org.openurp.edu.eams.teach.program.major.dao.hibernate.MajorCourseGroupDaoHibernate
import org.openurp.edu.eams.teach.program.major.dao.hibernate.MajorPlanDaoHibernate
import org.openurp.edu.eams.teach.program.major.flexible.impl.DefaultMajorProgramTextTitleProvider
import org.openurp.edu.eams.teach.program.major.guard.impl.MajorProgramBasicGuard
import org.openurp.edu.eams.teach.program.major.guard.impl.MajorProgramCUDGuard
import org.openurp.edu.eams.teach.program.major.service.impl.MajorPlanAuditServiceImpl
import org.openurp.edu.eams.teach.program.major.service.impl.MajorCourseGroupServiceImpl
import org.openurp.edu.eams.teach.program.major.service.impl.MajorPlanCourseServiceImpl
import org.openurp.edu.eams.teach.program.major.service.impl.MajorPlanServiceImpl
import org.openurp.edu.eams.teach.program.major.web.action.CollegeCourseAction
import org.openurp.edu.eams.teach.program.major.web.action.MajorPlanAction
import org.openurp.edu.eams.teach.program.major.web.action.MajorPlanAuditAction
import org.openurp.edu.eams.teach.program.major.web.action.MajorPlanCourseAction
import org.openurp.edu.eams.teach.program.major.web.action.MajorCourseGroupAction
import org.openurp.edu.eams.teach.program.major.web.action.MajorPlanSearchAction
import org.openurp.edu.eams.teach.program.major.web.action.ProgramDocAction
import org.openurp.edu.eams.teach.program.majorapply.dao.hibernate.MajorCourseGroupModifyApplyDaoHibernate
import org.openurp.edu.eams.teach.program.majorapply.dao.hibernate.MajorCourseGroupModifyAuditDaoHibernate
import org.openurp.edu.eams.teach.program.majorapply.dao.hibernate.MajorPlanCourseModifyApplyDaoHibernate
import org.openurp.edu.eams.teach.program.majorapply.dao.hibernate.MajorPlanCourseModifyAuditDaoHibernate
import org.openurp.edu.eams.teach.program.majorapply.service.impl.MajorCourseGroupModifyApplyServiceImpl
import org.openurp.edu.eams.teach.program.majorapply.service.impl.MajorCourseGroupModifyAuditServiceImpl
import org.openurp.edu.eams.teach.program.majorapply.service.impl.MajorPlanCourseModifyApplyServiceImpl
import org.openurp.edu.eams.teach.program.majorapply.service.impl.MajorPlanCourseModifyAuditServiceImpl
import org.openurp.edu.eams.teach.program.majorapply.web.action.MajorCourseGroupModifyApplyAction
import org.openurp.edu.eams.teach.program.majorapply.web.action.MajorCourseGroupModifyAuditAction
import org.openurp.edu.eams.teach.program.majorapply.web.action.MajorPlanCourseModifyApplyAction
import org.openurp.edu.eams.teach.program.majorapply.web.action.MajorPlanCourseModifyAuditAction
import org.openurp.edu.eams.teach.program.majorapply.web.action.MajorPlanModifyApplyAction
import org.openurp.edu.eams.teach.program.majorapply.web.action.MajorPlanModifyAuditAction
import org.openurp.edu.eams.teach.program.original.web.action.OriginalMajorPlanSearchAction
import org.openurp.edu.eams.teach.program.personal.service.impl.PersonalPlanCompareServiceImpl
import org.openurp.edu.eams.teach.program.personal.service.impl.PersonalPlanCourseServiceImpl
import org.openurp.edu.eams.teach.program.personal.service.impl.PersonalPlanServiceImpl
import org.openurp.edu.eams.teach.program.personal.web.action.PersonalPlanAction
import org.openurp.edu.eams.teach.program.personal.web.action.PersonalPlanCourseAction
import org.openurp.edu.eams.teach.program.personal.web.action.PersonalPlanCourseGroupAction
import org.openurp.edu.eams.teach.program.personal.web.action.PersonalPlanSearchAction
import org.openurp.edu.eams.teach.program.share.web.action.SharePlanAction
import org.openurp.edu.eams.teach.program.share.web.action.SharePlanCourseAction
import org.openurp.edu.eams.teach.program.share.web.action.SharePlanCourseGroupAction
import org.openurp.edu.eams.teach.program.student.web.action.MyPlanAction
import org.openurp.edu.eams.teach.program.subst.web.action.MajorCourseSubstitutionAction
import org.openurp.edu.eams.teach.program.subst.web.action.StdCourseSubstitutionAction
import org.openurp.edu.eams.teach.program.template.web.action.DocTemplateAction
//remove if not needed
/**
 * Beangle binding module wiring the teaching-plan web actions, services and
 * Hibernate DAOs. DAOs are wrapped in Spring transaction proxies derived from
 * the abstract "baseTransactionProxyExt" template defined below.
 */
class PlanWebActionModule extends AbstractBindModule {

  protected override def doBinding() {
    // ---- web actions ------------------------------------------------------
    bind(classOf[MyPlanAction], classOf[DocTemplateAction], classOf[ProgramDocAction], classOf[CollegeCourseAction])
    bind(classOf[ProgramBindManageAction], classOf[StdCourseSubstitutionAction], classOf[MajorCourseSubstitutionAction])
    bind(classOf[MajorPlanSearchAction], classOf[MajorCourseGroupAction], classOf[MajorPlanCourseAction],
      classOf[PersonalPlanSearchAction], classOf[PersonalPlanAction], classOf[PersonalPlanCourseAction],
      classOf[PersonalPlanCourseGroupAction], classOf[MajorPlanAuditAction], classOf[OriginalMajorPlanSearchAction],
      classOf[MajorPlanModifyApplyAction], classOf[MajorPlanModifyAuditAction], classOf[MajorPlanCourseModifyApplyAction],
      classOf[MajorPlanCourseModifyAuditAction], classOf[MajorCourseGroupModifyApplyAction], classOf[MajorCourseGroupModifyAuditAction])
    bind(classOf[SharePlanAction], classOf[SharePlanCourseAction], classOf[SharePlanCourseGroupAction])
    bind(classOf[MajorPlanAction]).property("guards", list(ref("majorProgramBasicGuard"), ref("majorProgramCUDGuard")))

    // ---- guards and misc services -----------------------------------------
    bind("textTitleProvider", classOf[DefaultMajorProgramTextTitleProvider])
    bind("majorProgramBasicGuard", classOf[MajorProgramBasicGuard])
    bind("majorProgramCUDGuard", classOf[MajorProgramCUDGuard])
    bind("studentProgramBindService", classOf[StudentProgramBindServiceImpl])

    // ---- abstract transaction-proxy template -------------------------------
    // BUG FIX: "copy**" used a double wildcard that Spring's simple method-name
    // pattern matching ("xxx*" / "*xxx") does not understand, so copy* methods
    // fell through to the read-only "*" rule; "copy*" matches them as intended.
    bind("baseTransactionProxyExt", classOf[TransactionProxyFactoryBean])
      .parent("baseTransactionProxy")
      .setAbstract()
      .property("transactionAttributes", props("save*=PROPAGATION_REQUIRED", "update*=PROPAGATION_REQUIRED",
        "remove*=PROPAGATION_REQUIRED", "delete*=PROPAGATION_REQUIRED", "create*=PROPAGATION_REQUIRED",
        "gen*=PROPAGATION_REQUIRED", "copy*=PROPAGATION_REQUIRED", "init*=PROPAGATION_REQUIRED", "add*=PROPAGATION_REQUIRED",
        "approved*=PROPAGATION_REQUIRED", "rejected*=PROPAGATION_REQUIRED", "*=PROPAGATION_REQUIRED,readOnly"))

    // ---- major-plan DAOs behind transaction proxies ------------------------
    // NOTE(review): bean names mix camelCase and UpperCamelCase (e.g.
    // "MajorCourseGroupDao"); kept byte-identical because other modules may
    // reference them by these exact strings.
    bind("majorPlanDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", classOf[MajorPlanDaoHibernate])
      .parent("baseTransactionProxyExt")
    bind("MajorCourseGroupDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", classOf[MajorCourseGroupDaoHibernate])
      .parent("baseTransactionProxyExt")
    bind("majorPlanCourseDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", classOf[MajorPlanCourseDaoHibernate])
      .parent("baseTransactionProxyExt")
    bind("majorPlanCourseModifyApplyDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", classOf[MajorPlanCourseModifyApplyDaoHibernate])
      .parent("baseTransactionProxyExt")
    bind("majorPlanCourseModifyAuditDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", classOf[MajorPlanCourseModifyAuditDaoHibernate])
      .parent("baseTransactionProxyExt")
    bind("MajorCourseGroupModifyApplyDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", classOf[MajorCourseGroupModifyApplyDaoHibernate])
      .parent("baseTransactionProxyExt")
    bind("MajorCourseGroupModifyAuditDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", classOf[MajorCourseGroupModifyAuditDaoHibernate])
      .parent("baseTransactionProxyExt")

    // ---- plan services ----------------------------------------------------
    bind("majorPlanService", classOf[MajorPlanServiceImpl])
    bind("MajorCourseGroupService", classOf[MajorCourseGroupServiceImpl])
    bind("majorPlanCourseService", classOf[MajorPlanCourseServiceImpl])
    bind("planCompareService", classOf[PlanCompareServiceImpl])
    bind("personalPlanCompareService", classOf[PersonalPlanCompareServiceImpl])
    bind("personalPlanService", classOf[PersonalPlanServiceImpl])
    bind("personalPlanCourseService", classOf[PersonalPlanCourseServiceImpl])
    bind("majorPlanAuditService", classOf[MajorPlanAuditServiceImpl])
    // Note: deliberately parented on "baseTransactionProxy", not the Ext variant.
    bind("majorPlanAuditDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", classOf[MajorPlanAuditDaoHibernate])
      .parent("baseTransactionProxy")
    bind("majorPlanCourseModifyApplyService", classOf[MajorPlanCourseModifyApplyServiceImpl])
    bind("majorPlanCourseModifyAuditService", classOf[MajorPlanCourseModifyAuditServiceImpl])
    bind("MajorCourseGroupModifyApplyService", classOf[MajorCourseGroupModifyApplyServiceImpl])
    bind("MajorCourseGroupModifyAuditService", classOf[MajorCourseGroupModifyAuditServiceImpl])
    bind("majorPlanSearchHelper", classOf[MajorPlanSearchHelper])

    // ---- common plan DAOs --------------------------------------------------
    bind("planCommonDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", classOf[PlanCommonDaoHibernate])
      .parent("baseTransactionProxyExt")
    bind("planCourseCommonDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", classOf[PlanCourseCommonDaoHibernate])
      .parent("baseTransactionProxyExt")
    bind("planCourseGroupCommonDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", classOf[PlanCourseGroupCommonDaoHibernate])
      .parent("baseTransactionProxyExt")
    // BUG FIX: a second, identical bind("planCompareService", ...) stood here;
    // the duplicate was removed (the service is already bound above).

    // ---- copy DAOs (major / personal / original), chained via refs ---------
    bind("majorPlanCopyDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", bean(classOf[MajorPlanCopyDaoHibernate]).property("courseGroupCopyDao", ref("MajorCourseGroupCopyDao")))
      .parent("baseTransactionProxyExt")
    bind("MajorCourseGroupCopyDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", bean(classOf[MajorCourseGroupCopyDaoHibernate])
        .property("planCourseCopyDao", ref("majorPlanCourseCopyDao")))
      .parent("baseTransactionProxyExt")
    bind("majorPlanCourseCopyDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", classOf[MajorPlanCourseCopyDaoHibernate])
      .parent("baseTransactionProxyExt")
    bind("personalPlanCopyDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", bean(classOf[PersonalPlanCopyDaoHibernate]).property("courseGroupCopyDao", ref("personalPlanCourseGroupCopyDao")))
      .parent("baseTransactionProxyExt")
    bind("personalPlanCourseGroupCopyDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", bean(classOf[PersonalPlanCourseGroupCopyDaoHibernate])
        .property("planCourseCopyDao", ref("personalPlanCourseCopyDao")))
      .parent("baseTransactionProxyExt")
    bind("personalPlanCourseCopyDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", classOf[PersonalPlanCourseCopyDaoHibernate])
      .parent("baseTransactionProxyExt")
    bind("originalMajorPlanCopyDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", bean(classOf[OriginalMajorPlanCopyDaoHibernate]).property("courseGroupCopyDao",
        ref("originalMajorCourseGroupCopyDao")))
      .parent("baseTransactionProxyExt")
    bind("originalMajorCourseGroupCopyDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", bean(classOf[OriginalMajorCourseGroupCopyDaoHibernate])
        .property("planCourseCopyDao", ref("originalMajorPlanCourseCopyDao")))
      .parent("baseTransactionProxyExt")
    bind("originalMajorPlanCourseCopyDao", classOf[TransactionProxyFactoryBean])
      .proxy("target", classOf[OriginalMajorPlanCourseCopyDaoHibernate])
      .parent("baseTransactionProxyExt")
  }
}
| openurp/edu-eams-webapp | plan/src/main/scala/org/openurp/edu/eams/teach/program/PlanWebActionModule.scala | Scala | gpl-3.0 | 13,093 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.datacompaction
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.spark.sql.test.util.QueryTest
/**
 * FT for data compaction scenario: loads two CSVs into a carbon table and an
 * equivalent hive table, runs a major compaction, and checks that aggregation
 * results match across both tables.
 */
class DataCompactionBlockletBoundryTest extends QueryTest with BeforeAndAfterAll {

  override def beforeAll {
    sql("drop table if exists blocklettest")
    sql("drop table if exists Carbon_automation_hive")
    // NOTE(review): in SimpleDateFormat "mm" means minutes ("MM" is months);
    // kept as-is because the CSV fixtures were authored against this format —
    // confirm intent before changing.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.BLOCKLET_SIZE,
        "125")
    sql(
      "CREATE TABLE IF NOT EXISTS blocklettest (country String, ID String, date Timestamp, name " +
        "String, " +
        "phonetype String, serialname String, salary Int) STORED BY 'org.apache.carbondata" +
        ".format'"
    )
    val csvFilePath1 = s"$resourcesPath/compaction/compaction1.csv"
    // loading the rows greater than 256. so that the column cardinality crosses byte boundary.
    val csvFilePath2 = s"$resourcesPath/compaction/compactioncard2.csv"
    // BUG FIX: QUOTECHAR was written as '\\"' — the stray extra backslash
    // unbalanced the string literal; the intended quote character is '"'.
    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE blocklettest OPTIONS" +
      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
    )
    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE blocklettest OPTIONS" +
      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
    )
    // compaction will happen here.
    sql("alter table blocklettest compact 'major'"
    )
    sql(
      "create table Carbon_automation_hive (ID String, date " +
        "Timestamp,country String, name String, phonetype String, serialname String, salary Int ) row format " +
        "delimited fields terminated by ',' TBLPROPERTIES ('skip.header.line.count'='1') "
    )
    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/compaction/compaction1_forhive.csv" + "' INTO " +
      "table Carbon_automation_hive ")
    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/compaction/compactioncard2_forhive.csv" + "' INTO " +
      "table Carbon_automation_hive ")
  }

  // Aggregation over the compacted carbon table must equal the hive baseline.
  test("select country,count(*) as a from blocklettest")({
    checkAnswer(
      sql("select country,count(*) as a from blocklettest group by country"),
      sql("select country,count(*) as a from Carbon_automation_hive group by country")
    )
  }
  )

  override def afterAll {
    sql("drop table if exists blocklettest")
    sql("drop table if exists Carbon_automation_hive")
    // Restore the properties changed in beforeAll.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.BLOCKLET_SIZE,
        "" + CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL)
  }
}
| HuaweiBigData/carbondata | integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionBlockletBoundryTest.scala | Scala | apache-2.0 | 3,651 |
package io.apibuilder.rewriter
import apibuilder.{ApiBuilderHelper, ApiBuilderHelperImpl}
import io.apibuilder.validation.{ApiBuilderType, MultiService}
import scala.annotation.tailrec
/**
* Rewrites the multi service such that the specified types resolve correctly. eg.
* recurses through the provides types to collect ALL referenced types (through models, unions, etc)
* and return a multi service containing only the types necessary such that the provided
* types fully resolve.
*/
case class MinimalTypesRewriter(types: Iterable[ApiBuilderType]) extends DefaultRewriter {

  /**
   * Reduces the multi service to the requested types plus every type they
   * transitively reference, so that all requested types still resolve.
   */
  override def rewrite(multiService: MultiService): MultiService = {
    val helper = ApiBuilderHelperImpl(multiService)
    val byNamespace = expand(helper, types.toSeq, Set.empty).groupBy(_.namespace)
    val rewrittenServices = multiService.services().map { svc =>
      val kept = byNamespace.getOrElse(svc.namespace, Nil).toSeq
      svc.copy(
        service = svc.service.copy(
          enums = kept.collect { case e: ApiBuilderType.Enum => e.`enum` },
          models = kept.collect { case m: ApiBuilderType.Model => m.model },
          unions = kept.collect { case u: ApiBuilderType.Union => u.union }
        )
      )
    }
    MultiService(rewrittenServices)
  }

  /**
   * Transitively expands `pending` with the types referenced from model and
   * interface fields and from union members, accumulating results in `acc`.
   * NOTE(review): interfaces are traversed here but `rewrite` only emits
   * enums, models and unions — confirm that dropping Interface definitions
   * from the output service is intended.
   */
  @tailrec
  private[this] def expand(helper: ApiBuilderHelper, pending: Seq[ApiBuilderType], acc: Set[ApiBuilderType]): Set[ApiBuilderType] = {
    if (pending.isEmpty) {
      acc
    } else {
      val current = pending.head
      val referenced = current match {
        case _: ApiBuilderType.Enum => Nil
        case i: ApiBuilderType.Interface => i.fields.flatMap(helper.resolveType)
        case m: ApiBuilderType.Model => m.fields.flatMap(helper.resolveType)
        case u: ApiBuilderType.Union => u.types.flatMap(helper.resolveType)
      }
      val accWithCurrent = acc + current
      val discovered = referenced
        .collect { case t: ApiBuilderType => t }
        .filterNot(accWithCurrent.contains)
      expand(helper, pending.tail ++ discovered, accWithCurrent)
    }
  }
}
| flowcommerce/lib-apidoc-json-validation | src/main/scala/io/apibuilder/rewriter/MinimalTypesRewriter.scala | Scala | mit | 2,199 |
/*
* Copyright (C) 2016 University of Basel, Graphics and Vision Research Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package scalismo.ui.view.action.popup
import scalismo.ui.model.{GroupNode, SceneNode}
import scalismo.ui.view.ScalismoFrame
object GroupDelegatingAction extends PopupAction.Factory {

  /**
   * When the selection resolves (via `singleMatch`) to a single [[GroupNode]],
   * builds the popup actions of each of the group's children and keeps only
   * those marked as [[GroupDelegatingAction]]; otherwise returns an empty list.
   */
  override def apply(context: List[SceneNode])(implicit frame: ScalismoFrame): List[PopupAction] = {
    val selectedGroups = singleMatch[GroupNode](context).toList
    selectedGroups.flatMap { groupNode =>
      val childActions = groupNode.children.flatMap(child => PopupAction(child :: Nil))
      childActions.collect { case delegating: GroupDelegatingAction => delegating }
    }
  }
}
trait GroupDelegatingAction extends PopupAction {}
| unibas-gravis/scalismo-ui | src/main/scala/scalismo/ui/view/action/popup/GroupDelegatingAction.scala | Scala | gpl-3.0 | 1,298 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.data.storage.jdbc
import grizzled.slf4j.Logging
import org.apache.predictionio.data.storage.BaseStorageClient
import org.apache.predictionio.data.storage.StorageClientConfig
import org.apache.predictionio.data.storage.StorageClientException
import scalikejdbc._
/**
 * JDBC implementation of [[BaseStorageClient]]. Validates the mandatory
 * connection properties and registers a singleton ScalikeJDBC connection pool.
 */
class StorageClient(val config: StorageClientConfig)
  extends BaseStorageClient with Logging {
  override val prefix = "JDBC"

  /**
   * Returns the value of a mandatory configuration property, throwing a
   * [[StorageClientException]] with the original per-property message when
   * the property is absent. Replaces three copy-pasted presence checks.
   */
  private def requiredProperty(key: String): String =
    config.properties.getOrElse(
      key,
      throw new StorageClientException(s"The $key variable is not set!", null))

  // Validated in the same order as before: URL, USERNAME, PASSWORD.
  private val url = requiredProperty("URL")
  private val username = requiredProperty("USERNAME")
  private val password = requiredProperty("PASSWORD")

  // set max size of connection pool (defaults to 8 connections)
  val maxSize: Int = config.properties.getOrElse("CONNECTIONS", "8").toInt
  val settings = ConnectionPoolSettings(maxSize = maxSize)

  // Register the singleton pool; connections are managed by ScalikeJDBC.
  ConnectionPool.singleton(url, username, password, settings)

  /** JDBC connection URL. Connections are managed by ScalikeJDBC. */
  val client = url
}
| PredictionIO/PredictionIO | storage/jdbc/src/main/scala/org/apache/predictionio/data/storage/jdbc/StorageClient.scala | Scala | apache-2.0 | 2,095 |
/*
* Copyright (c) 2012 SnowPlow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
import sbt._
import Keys._
// sbt 0.13-style Build definition for the scalding-example-project.
object ScaldingExampleProjectBuild extends Build {

  import Dependencies._
  import BuildSettings._

  // Configure prompt to show current project
  override lazy val settings = super.settings :+ {
    shellPrompt := { s => Project.extract(s).currentProject.id + " > " }
  }

  // Define our project, with basic project information and library dependencies
  lazy val project = Project("scalding-example-project", file("."))
    .settings(buildSettings: _*)
    .settings(
      libraryDependencies ++= Seq(
        Libraries.specs2,
        Libraries.shapeless,
        Libraries.log4j,
        Libraries.scaldingCore,
        Libraries.scaldingArgs,
        Libraries.scaldingAvro,
        Libraries.scaldingCommons,
        // Dep.scaldingDate,
        Libraries.hadoopCommon,
        Libraries.hadoopCore
        // Add your additional libraries here (comma-separated)...
      )
    // sbt-dependency-graph plugin settings (dependencyTree etc.).
    ).settings(net.virtualvoid.sbt.graph.Plugin.graphSettings: _*)
}
| txominpelu/scalding-shapeless-test | project/ScaldingExampleProjectBuild.scala | Scala | apache-2.0 | 1,683 |
/*
* UnificationException.scala
*
*/
package at.logic.gapt.language.fol.algorithms
class UnificationException( msg: String ) extends Exception( msg )
| gisellemnr/gapt | src/main/scala/at/logic/gapt/language/fol/algorithms/UnificationException.scala | Scala | gpl-3.0 | 155 |
/*
* Copyright 2014 The SIRIS Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* The SIRIS Project is a cooperation between Beuth University, Berlin and the
* HCI Group at the University of Würzburg. The project is funded by the German
* Federal Ministry of Education and Research (grant no. 17N4409).
*/
package simx.core.svaractor.semantictrait.example
import simplex3d.math.floatx.Vec3f
import simx.core.entity.Entity
import simx.core.entity.description.SValSet
import simx.core.ontology.types.Integer
import simx.core.ontology.types._
import simx.core.svaractor.SVarActor
import simx.core.svaractor.handlersupport.Types.CPSRet
import simx.core.svaractor.semantictrait.base._
import simx.core.svaractor.semantictrait.example.actions.Move
import simx.core.svaractor.semantictrait.example.relations.{affectedBy, has}
import simx.core.svaractor.semantictrait.example.traits._
import simx.core.svaractor.semantictrait.example.types.{Shape, Location, Anything, Number, SteeringBehavior, SemanticEntity}
import simx.core.svaractor.unifiedaccess.EntityUpdateHandling
import scala.language.reflectiveCalls
import scala.util.continuations
/**
* Created by dwiebusch on 27.11.14
*/
// NOTE(review): this demo relies on the Scala CPS ("delimited continuations")
// compiler plugin — see continuations.reset below and the @CPSRet annotations;
// it will not compile without that plugin enabled.
object Test{
  // Demo callee: accepts any semantic entity that carries the Scale property.
  def testMe(x : Semantic.Entity[ Scale.Type ]){
    println("in testMe")
  }
  // Demo callee: accepts a plain gravity value.
  def testMe2(x : simx.core.ontology.types.Gravity.ValueType): Unit ={
    println("!in testMe2")
  }
  def main(args: Array[java.lang.String]) {
    // Referencing the object forces its initialization before the actor starts.
    SetLocationAction
    // SetRadiusAction
    SVarActor.createActor(new SVarActor with EntityUpdateHandling {
      override protected def removeFromLocalRep(e: Entity){}
      /**
       * called when the actor is started
       */
      override protected def startUp() = continuations.reset {
        println("start")
        val e = SemanticEntity(new Entity())
        simx.core.ontology.types.init()
        val wheels = for (i <- 1 to 4) yield SemanticEntity(new Entity())
        object Iterate{
          def over[T](i : Iterable[T]) = Iterate(i)
        }
        // CPS-aware foreach: the standard Iterable#foreach cannot take a
        // handler annotated with @CPSRet, so recursion is used instead.
        case class Iterate[T](i : Iterable[T]) {
          def foreach[U](handler: T => U@CPSRet): Unit@CPSRet = if (i.nonEmpty) {
            handler(i.head)
            Iterate(i.tail).foreach(handler)
          }
        }
        // Give each wheel a round shape plus Position/Scale, then attach it
        // to the car entity via the `has` relation.
        Iterate over wheels foreach { wheel =>
          wheel modify has(Shape("round")) set Position set Scale apply()
          e set has(wheel)
        }
        val carEntity = e modify
          has(SteeringBehavior) set
          Gravity set
          Scale set
          Anything set
          Position2D set
          affectedBy(Gravity(Vec3f.Zero)) set
          Container(SValSet()) set
        println(Vehicle(carEntity) attain has(Radius(1)))
        println("x " + Integer.valueDescription.groundedSymbol)
        val gr = simx.core.ontology.types.Gravity(-9.81f * simplex3d.math.float.Vec3.UnitY)
        val grav = e attain Gravity
        implicit def tesym2sym(in: BasicGroundedSymbol): GroundedSymbolBase[in.SymbolType] =
          in.Symbol
        testMe2(gr)
        // Structural-type view adding an `isA` test for semantic traits.
        // NOTE(review): structural types use runtime reflection — slow.
        implicit def toSemanticEntity(in : Semantic.Entity[_ <: Thing]): Object {def isA(a: SpecificSemanticTrait[_ <: Thing]): scala.Boolean@CPSRet } = new {
          def isA(a : SpecificSemanticTrait[_ <: Thing]) : scala.Boolean@CPSRet = a.tryApply(in).isDefined
        }
        // e as Movable moveTo Destination("London")
        println(carEntity isA Vehicle)
        testMe(traits.Vehicle(carEntity))
        If (Vehicle tryApply carEntity isDefined) Then {
          Move(Movable.tryApply(carEntity).get, Location("Honolulu"))
          val myCar = Movable.tryApply(carEntity)
          Movable(myCar.get).moveTo(Location("Honolulu"))
        }
        val m = Movable.tryApply(e)
        If (m.isDefined) Then {
          println(Vehicle.tryApply(m.get.entity))
        }
        has(Number)
        def test(v : Radius): Unit ={
          println(v)
        }
        val x = Radius(1)
        val y = Angle(1)
        // test(y)
        val list = Container(SValSet())
        // list set has(Number(1))
        list set Gravity(Vec3f.UnitY)
        // val g = list get Gravity
        // val x = Gravity get in(list)
        // val i = list get has
        val gb = list get Gravity(Vec3f.UnitY)
        // println(x)
        //e2 set
        val green = Color2("green")
        val e2 = Entity2(1)
        val rel = has2(e -> Color2)
        val rel2 = hasValue2(rel -> green)
        println(rel)
        // println(rel2)
      }
    })
    // class S1 extends BasicGroundedSymbol
    //
    // object s2 extends S1
    //
    // var n = Number(1)
    // n = Integer(2)
    //
    // println(Integer.valueDescription.groundedSymbol.toString , symbols.sinteger.Symbol)
    //
    // val s1 = new S1 {
    // override def toString: String = getClass.getSimpleName
    // }
    //
    // def test1(a: S1#BaseType): Unit = {
    //
    // }
    //
    // def continue[T <: Any](v: T): T@cpsParam[Any, Any] =
    // shift { (x: T => Any) => println(x(v))} //testActor ! (v, x) }
    //
    // def fixIfElse() =
    // shift { (x: Unit => Unit) => x(())}
    //
    // def If(condition : => Boolean)( x : => Any@cpsParam[Any, Any] ) = {
    // if (condition) x else fixIfElse()
    // }
    //
    // case class Test()
    //
    //
    // val testActor: SVarActor.Ref = SVarActor.createActor(new SVarActor {
    //// addHandler[(Any, Any => Unit)]{ f => f._2.apply(f._1)}
    // addHandler[String] { x : String => if (x == "off") "off" }
    // addHandler[Test]( {
    // i : Test =>
    // val svar = createSVar(simx.core.ontology.types.Name("klaus"), Now, Unbuffered)
    // createActor(new SVarActor {
    // addHandler[Test]{
    // t =>
    // println( svar.read)
    // context.system.shutdown()
    // }
    // })(_ ! Test())()
    //
    // // If (false) {
    //// val x: SVar[Int] = null
    //// //x.read(this)
    ////// continue(2)
    ////
    //// }
    //
    //// continue(1)
    // "continueing"
    // } )
    // })
    //
    // testActor ! Test()
    //
    // def doOtherStuff() = {
    // println("other stuff")
    // continue(1)
    // }
    //
    // def doIt() = {
    // println("it")
    // val x = doOtherStuff()
    // println("got " + x)
    // x.toLong
    // }
    //
    // def myReset(ctx: => Any@cpsParam[Any, Any]) =
    // reset { ctx }
    //
    // myReset {
    //
    //
    // val value = doIt()
    //
    //
    // println("test")
    //// Thread.sleep(5000)
    //// testActor ! "off"
    //// math.cos(value)
    //
    //
    // // object TEx extends Throwable{ override def fillInStackTrace(): Throwable = this }
    // // def prepareFunc(in : => Unit) : () => Unit = () => in
    // // def break(){ throw TEx }
    // //
    // // var id = 0L
    // // def nextId = synchronized{
    // // id = id+1
    // // id
    // // }
    // //
    // // def breakable(o : () => Unit ) {
    // // try {
    // // o.apply()
    // // } catch {
    // // case TEx =>
    // // val id = nextId
    // // }
    // // }
    // //
    // //
    // //
    // // import System.nanoTime
    // //
    // //
    // //
    // // val pf = prepareFunc{
    // // println("test")
    // // 1
    // // }
    // // val t1 = nanoTime()
    // // breakable(pf)
    // // val t2 = nanoTime()
    // //
    // // println((t2-t1) / 1000000.0)
    //
    // // test1(s1)
    //
    // // val s2 =
    //
    // val Anything1 = BaseValueDescription(s1.Symbol)
    // val Anything2 = BaseValueDescription(s2.Symbol)
    //
    // println(has2Sym.Symbol, Anything2.groundedSymbol)
    //
    //
    //
    // }
  }
}
// Grounded symbols that name the example relations / semantic types defined below.
object has2Sym extends BasicGroundedSymbol
object hasValue2Sym extends BasicGroundedSymbol
object Entity2Sym extends BasicGroundedSymbol
object Gravity2Sym extends BasicGroundedSymbol
object Color2Sym extends BasicGroundedSymbol
// Relation description pairing a semantic entity with an arbitrary value, identified by has2Sym.
object has2 extends RelationDescription(classOf[simx.core.svaractor.semantictrait.example.types.SemanticEntity.DataType], classOf[Any], has2Sym)
// Relation description over an existing `has` relation value, identified by hasValue2Sym.
// NOTE(review): refers to `has.DataType` (not `has2`) — presumably intentional; confirm.
object hasValue2 extends RelationDescription(classOf[has.DataType], classOf[Any], hasValue2Sym)
// Example semantic types wrapping plain Scala/Java value types under the symbols above.
object Entity2 extends SemanticType(classOf[Int], BaseValueDescription(Entity2Sym))
object Gravity2 extends SemanticType(classOf[Float], BaseValueDescription(Gravity2Sym))
object Color2 extends SemanticType(classOf[java.lang.String], BaseValueDescription(Color2Sym))
| simulator-x/core | src/simx/core/svaractor/semantictrait/example/Test.scala | Scala | apache-2.0 | 9,568 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.oap
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.sql.{QueryTest, Row}
import org.apache.spark.sql.internal.oap.OapConf
import org.apache.spark.sql.test.oap.{SharedOapContext, TestIndex}
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
 * End-to-end query tests for OAP indexes (btree and bitmap) over parquet and
 * orc sources: result integrity with/without an index, row-group boundary
 * lookups, sequence-reading order, and `LIKE 'prefix%'` pushdown via btree
 * indexes (including previously reported regressions #604 and OAP-978).
 */
class OapIndexQuerySuite extends QueryTest with SharedOapContext with BeforeAndAfterEach {
  import testImplicits._

  // Fresh temp-dir backed views per test so data/index files never leak between tests.
  override def beforeEach(): Unit = {
    val path1 = Utils.createTempDir().getAbsolutePath
    val path2 = Utils.createTempDir().getAbsolutePath
    sql(s"""CREATE TEMPORARY VIEW parquet_test_1 (a INT, b STRING)
           | USING parquet
           | OPTIONS (path '$path1')""".stripMargin)
    sql(s"""CREATE TEMPORARY VIEW orc_test_1 (a INT, b STRING)
           | USING orc
           | OPTIONS (path '$path2')""".stripMargin)
  }

  // Drop both views registered in beforeEach.
  override def afterEach(): Unit = {
    sqlContext.dropTempTable("parquet_test_1")
    sqlContext.dropTempTable("orc_test_1")
  }

  // A bitmap-indexed scan must return the same row count as the unindexed scan
  // and as the source view itself.
  test("index integrity") {
    val data: Seq[(Int, String)] =
      scala.util.Random.shuffle(1 to 300).map{ i => (i, s"this is test $i") }
    data.toDF("key", "value").createOrReplaceTempView("t")
    sql("insert overwrite table parquet_test_1 select * from t")
    withIndex(TestIndex("parquet_test_1", "index1")) {
      val dfWithoutIdx = sql("SELECT * FROM parquet_test_1 WHERE a > 8 and a <= 200")
      val dfOriginal = sql("SELECT * FROM t WHERE key > 8 and key <= 200")
      sql("create oindex index1 on parquet_test_1 (a) using bitmap")
      val dfwithIdx = sql("SELECT * FROM parquet_test_1 WHERE a > 8 and a <= 200")
      assert(dfWithoutIdx.count == dfwithIdx.count)
      assert(dfWithoutIdx.count == dfOriginal.count)
    }
  }

  // Looks up the last row id of a (small) row group to catch off-by-one errors
  // at group edges.
  test("index row boundary") {
    val groupSize = 1024 // use a small row group to check boundary.
    val testRowId = groupSize - 1
    val data: Seq[(Int, String)] = (0 until groupSize * 3)
      .map { i => (i, s"this is test $i") }
    data.toDF("key", "value").createOrReplaceTempView("t")
    sql("insert overwrite table parquet_test_1 select * from t")
    withIndex(TestIndex("parquet_test_1", "index1")) {
      sql("create oindex index1 on parquet_test_1 (a)")
      checkAnswer(sql(s"SELECT * FROM parquet_test_1 WHERE a = $testRowId"),
        Row(testRowId, s"this is test $testRowId") :: Nil)
    }
  }

  // Row id ordering differs per format: parquet/orc sort row ids for sequential
  // IO, oap keeps the index order. Value 10 is duplicated under key 1 on purpose.
  test("check sequence reading for oap, parquet and orc formats") {
    val data: Seq[(Int, String)] = (1 to 300).map { i =>
      if (i == 10) (1, s"this is test $i") else (i, s"this is test $i")
    }
    data.toDF("key", "value").createOrReplaceTempView("t")
    withIndex(TestIndex("parquet_test_1", "index1")) {
      sql("insert overwrite table parquet_test_1 select * from t")
      sql("create oindex index1 on parquet_test_1 (a)")
      // While a in (0, 3), rowIds = (1, 10, 2)
      // sort to ensure the sequence reading on parquet. so results are (1, 2, 10)
      val parquetRslt = sql("select * from parquet_test_1 where a > 0 and a < 3")
      checkAnswer(parquetRslt, Row(1, "this is test 1") ::
        Row(2, "this is test 2") ::
        Row(1, "this is test 10") :: Nil)
    }
    withIndex(TestIndex("parquet_test_1", "index1")) {
      sql("insert overwrite table parquet_test_1 select * from t")
      sql("create oindex index1 on parquet_test_1 (a)")
      // Sort is unnecessary for oap format, so rowIds should be (1, 10, 2)
      val oapResult = sql("select * from parquet_test_1 where a > 0 and a < 3")
      checkAnswer(oapResult,
        Row(1, "this is test 1") ::
        Row(1, "this is test 10") ::
        Row(2, "this is test 2") :: Nil)
    }
    withIndex(TestIndex("orc_test_1", "index1")) {
      sql("insert overwrite table orc_test_1 select * from t")
      sql("create oindex index1 on orc_test_1 (a)")
      // For orc format, the row Ids are sorted as well to reduce IO cost.
      val parquetRslt = sql("select * from orc_test_1 where a > 0 and a < 3")
      checkAnswer(parquetRslt, Row(1, "this is test 1") ::
        Row(2, "this is test 2") ::
        Row(1, "this is test 10") :: Nil)
    }
  }

  // Regression test: bitmap lookups over duplicated keys (i % 20) spread across
  // 3 partitions must not crash and must return all matches.
  test("#604 bitmap index core dump error") {
    val rowRDD = spark.sparkContext.parallelize(1 to 31, 3).map(i =>
      Seq(i % 20, s"this is row $i")).map(Row.fromSeq)
    val schema =
      StructType(
        StructField("a", IntegerType) ::
          StructField("b", StringType) :: Nil)
    val df = spark.createDataFrame(rowRDD, schema)
    df.createOrReplaceTempView("t")
    sql("insert overwrite table parquet_test_1 select * from t")
    withIndex(TestIndex("parquet_test_1", "bmidx1")) {
      sql("create oindex bmidx1 on parquet_test_1 (a) using bitmap")
      val df1 = sql("SELECT * FROM parquet_test_1 WHERE a = 1")
      checkAnswer(df1, Row(1, "this is row 1") :: Row(1, "this is row 21") :: Nil)
    }
  }

  // `LIKE 'this3%'` must hit both "this3..." and "this30..." through the btree index.
  test("startswith using index") {
    val data: Seq[(Int, String)] =
      scala.util.Random.shuffle(1 to 30).map(i => (i, s"this$i is test"))
    data.toDF("key", "value").createOrReplaceTempView("t")
    sql("insert overwrite table parquet_test_1 select * from t")
    withIndex(TestIndex("parquet_test_1", "index1")) {
      sql("create oindex index1 on parquet_test_1 (b) using btree")
      checkAnswer(sql("SELECT * FROM parquet_test_1 WHERE b like 'this3%'"),
        Row(3, "this3 is test") :: Row(30, "this30 is test") :: Nil)
    }
  }

  // Regression test: MinMax/PartByValue statistics must not prune row groups
  // that actually contain prefix matches (keys 30..39 here).
  test("OAP-978 Misjudged by " +
    "MinMaxStatisticsReader & PartByValueStatisticsReader startswith using index.") {
    val data: Seq[(Int, String)] =
      scala.util.Random.shuffle(30 to 90).map(i => (i, s"this$i is test"))
    data.toDF("key", "value").createOrReplaceTempView("t")
    sql("insert overwrite table parquet_test_1 select * from t")
    withIndex(TestIndex("parquet_test_1", "index1")) {
      sql("create oindex index1 on parquet_test_1 (b) using btree")
      val ret = sql("SELECT a FROM parquet_test_1 WHERE b like 'this3%'")
      checkAnswer(ret, Row(30) :: Row(31) :: Row(32) :: Row(33) :: Row(34) :: Row(35) :: Row(36)
        :: Row(37) :: Row(38) :: Row(39) :: Nil)
    }
  }

  // Prefix match on the leading column of a composite (b, a) btree index.
  test("startswith using multi-dimension index") {
    val data: Seq[(Int, String)] =
      scala.util.Random.shuffle(1 to 30).map(i => (i, s"this$i is test"))
    data.toDF("key", "value").createOrReplaceTempView("t")
    sql("insert overwrite table parquet_test_1 select * from t")
    withIndex(TestIndex("parquet_test_1", "index1")) {
      sql("create oindex index1 on parquet_test_1 (b, a) using btree")
      checkAnswer(sql("SELECT * FROM parquet_test_1 WHERE b like 'this3%'"),
        Row(3, "this3 is test") :: Row(30, "this30 is test") :: Nil)
    }
  }

  // Equality on the leading column combined with a prefix match on the second
  // column of a composite (a, b) index.
  test("startswith using multi-dimension index - multi-filters") {
    val data: Seq[(Int, String)] =
      scala.util.Random.shuffle(1 to 30).map(i => (i % 7, s"this$i is test"))
    data.toDF("key", "value").createOrReplaceTempView("t")
    sql("insert overwrite table parquet_test_1 select * from t")
    withIndex(TestIndex("parquet_test_1", "index1")) {
      sql("create oindex index1 on parquet_test_1 (a, b) using btree")
      checkAnswer(sql("SELECT * FROM parquet_test_1 WHERE a = 3 and b like 'this3%'"),
        Row(3, "this3 is test") :: Nil)
    }
  }

  // Same prefix-match scenario but with a fixed (non-shuffled) insertion order.
  test("startswith using multi-dimension index 2") {
    val data: Seq[(Int, String)] = Seq(
      15, 29, 26, 4, 28, 17, 16, 11, 12, 27, 22, 6, 10, 18, 19, 20, 30, 21, 14, 25, 1, 2,
      13, 23, 7, 24, 3, 8, 5, 9).map(i => (i, s"this$i is test"))
    data.toDF("key", "value").createOrReplaceTempView("t")
    sql("insert overwrite table parquet_test_1 select * from t")
    withIndex(TestIndex("parquet_test_1", "index1")) {
      sql("create oindex index1 on parquet_test_1 (b) using btree")
      checkAnswer(sql("SELECT * FROM parquet_test_1 WHERE b like 'this3%'"),
        Row(3, "this3 is test") :: Row(30, "this30 is test") :: Nil)
    }
  }
}
| Intel-bigdata/OAP | oap-cache/oap/src/test/scala/org/apache/spark/sql/execution/datasources/oap/OapIndexQuerySuite.scala | Scala | apache-2.0 | 8,720 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package base
package types
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElementImpl
import com.intellij.psi.tree.TokenSet
import com.intellij.lang.ASTNode
import com.intellij.psi.tree.IElementType;
import com.intellij.psi._
import org.jetbrains.annotations._
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.icons.Icons
import org.jetbrains.plugins.scala.lang.psi.api.base.types._
/**
 * PSI node implementation backing [[ScRefinements]] — presumably the
 * refinement member list of a compound type (verify against the interface).
 *
 * @author Alexander Podkhalyuzin
 *         Date: 22.02.2008
 */
class ScRefinementsImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScRefinements {

  // Name shown in PSI tree dumps and debug output.
  override def toString: String = "Refinements"
}
package unluac.util
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/**
 * Minimal LIFO stack backed by a growable buffer.
 *
 * The top of the stack is the last element of `data`; the private primary
 * constructor lets `reverse()` wrap an already-built buffer directly.
 */
class Stack[T] private(private val data: mutable.Buffer[T]) {

  /** Creates an empty stack. */
  def this() = this(mutable.ArrayBuffer.empty[T])

  /** True when the stack holds no elements. */
  def isEmpty: Boolean = data.isEmpty

  /** Returns the top element without removing it; throws on an empty stack. */
  def peek: T = data.last

  /** Removes and returns the top element; throws on an empty stack. */
  def pop: T = data.remove(data.size - 1)

  /** Pushes `item` onto the top of the stack. */
  def push(item: T): Unit = data += item

  /** Number of elements currently stored. */
  def size: Int = data.size

  /** Returns a new stack with the element order reversed; this stack is left untouched. */
  def reverse(): Stack[T] = new Stack(data.reverse)
}
| danielwegener/unluac-scala | shared/src/main/scala/unluac/util/Stack.scala | Scala | mit | 443 |
/*
* Copyright (C) 2015 Jason Mar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package auth
import com.typesafe.config.ConfigFactory
/**
 * Eagerly-loaded LDAP settings read from the Typesafe config (`ldap.*` keys).
 * Every value is resolved once at object initialisation; a missing or
 * mistyped key makes initialisation fail fast with a ConfigException.
 * All members are part of the public surface consumed by the auth code.
 */
object Conf {
  val conf = ConfigFactory.load()
  // Connection endpoints: two hosts are configured (failover pair sharing one port).
  val ldapProtocol = conf.getString("ldap.ldapProtocol")
  val ldapUseKeystore = conf.getBoolean("ldap.ldapUseKeystore")
  val ldapHost0 = conf.getString("ldap.ldapHost0")
  val ldapHost1 = conf.getString("ldap.ldapHost1")
  val ldapPort = conf.getInt("ldap.ldapPort")
  // Service-account bind credentials and connection-pool size.
  val bindDN = conf.getString("ldap.bindDN")
  val bindPass = conf.getString("ldap.bindPass")
  val poolSize = conf.getInt("ldap.poolSize")
  // Directory layout: search bases and attribute names for users and roles.
  val roleBaseDN = conf.getString("ldap.roleBaseDN")
  val userBaseDN = conf.getString("ldap.userBaseDN")
  val uidAttribute = conf.getString("ldap.uidAttribute")
  val memberAttribute = conf.getString("ldap.memberAttribute")
  val roleMemberAttribute = conf.getString("ldap.roleMemberAttribute")
  val roleAttribute = conf.getString("ldap.roleAttribute")
  // TLS trust store; the password is kept as a char array (JSSE API convention).
  val trustStore = conf.getString("ldap.trustStore")
  val trustStorePass = conf.getString("ldap.trustStorePass").toCharArray
  val trustStoreType = conf.getString("ldap.trustStoreType")
  // Lookup-cache lifetime and an access-control group name used by callers.
  val ldapCacheDuration = conf.getInt("ldap.ldapCacheDuration")
  val acg1 = conf.getString("ldap.acg1")
  // Derived arrays pairing each host with the shared port, for pool construction.
  val serverAddresses = Array(ldapHost0,ldapHost1)
  val serverPorts = Array(ldapPort,ldapPort)
}
| jasonmar/play2-ldap-activedirectory | app/auth/Conf.scala | Scala | apache-2.0 | 1,863 |
package scala.meta.contrib
import scala.meta._
import scala.meta.tokens.Token
import scala.meta.tokens.Token.Comment
import scala.collection.immutable.List
import org.scalameta.logger
/**
 * Maps tokens to the comments attached to them: `leading` comments appear
 * before a tree's first token, `trailing` comments follow its last token.
 * Instances are created via [[AssociatedComments.apply]]; the constructor is
 * sealed so the maps always come from that single-pass builder.
 */
sealed abstract class AssociatedComments(
    leadingMap: Map[Token, List[Comment]],
    trailingMap: Map[Token, List[Comment]]
) {

  // Renders one "token => comments" association per entry, with whitespace in
  // the comment text made visible for unambiguous debug output.
  private def pretty(map: Map[Token, List[Comment]]): String =
    map
      .map { case (tok, comments) =>
        val commentStructure = comments.map(comment => logger.revealWhitespace(comment.syntax))
        s" ${tok.structure} => $commentStructure"
      }
      .mkString("\\n")

  /** Debug rendering of both association maps. */
  def syntax: String =
    s"""|AssociatedComments(
        | Leading =
        |${pretty(leadingMap)}
        |
        | Trailing =
        |${pretty(trailingMap)}
        |)""".stripMargin

  override def toString: String = syntax

  /** Comments leading the first token of `tree`; empty set when there are none. */
  def leading(tree: Tree): Set[Comment] =
    (for {
      token <- tree.tokens.headOption
      comments <- leadingMap.get(token)
    } yield comments).getOrElse(Nil).toSet

  /** Comments trailing the last token of `tree`; empty set when there are none. */
  def trailing(tree: Tree): Set[Comment] =
    (for {
      token <- tree.tokens.lastOption
      comments <- trailingMap.get(token)
    } yield comments).getOrElse(Nil).toSet

  /** True when `tree` has any attached comment, leading or trailing. */
  def hasComment(tree: Tree): Boolean =
    trailing(tree).nonEmpty || leading(tree).nonEmpty
}
object AssociatedComments {

  /** Builds the comment associations for every token of `tree`. */
  def apply(tree: Tree): AssociatedComments = apply(tree.tokens)

  /**
   * Single pass over the token stream that decides, for each comment, which
   * significant token it belongs to: comments seen before any non-trivia token
   * on a line become "leading" comments of the next significant token; comments
   * seen after one become "trailing" comments of the previous significant token.
   */
  def apply(tokens: Tokens): AssociatedComments = {
    val leadingBuilder = Map.newBuilder[Token, List[Comment]]
    val trailingBuilder = Map.newBuilder[Token, List[Comment]]
    // Comments collected since the last significant token, split by position.
    val leading = List.newBuilder[Comment]
    val trailing = List.newBuilder[Comment]
    // True while no significant token has been seen yet on the current line.
    var isLeading = true
    var lastToken: Token = tokens.head
    tokens.foreach {
      case c: Comment =>
        if (isLeading) leading += c
        else trailing += c
      // A newline means any following comment leads the *next* token.
      case Token.LF() => isLeading = true
      case Token.EOF() =>
        // Flush whatever is still pending onto the last significant token.
        val l = leading.result()
        val t = trailing.result()
        if (l.nonEmpty || t.nonEmpty) {
          trailingBuilder += lastToken -> (l ::: t)
        }
      case Trivia() =>
      case currentToken =>
        // Significant token: pending trailing comments attach to the previous
        // token, pending leading comments attach to this one.
        val t = trailing.result()
        if (t.nonEmpty) {
          trailingBuilder += lastToken -> t
          trailing.clear()
        }
        val l = leading.result()
        if (l.nonEmpty) {
          leadingBuilder += currentToken -> l
          leading.clear()
        }
        // Commas are skipped so a trailing comment after "x," still attaches to x.
        if (!currentToken.is[Token.Comma]) {
          lastToken = currentToken
        }
        isLeading = false
    }
    new AssociatedComments(leadingBuilder.result(), trailingBuilder.result()) {}
  }
}
| scalameta/scalameta | scalameta/contrib/shared/src/main/scala/scala/meta/contrib/AssociatedComments.scala | Scala | bsd-3-clause | 2,599 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.{ByteArrayOutputStream, File}
import java.lang.{Long => JLong}
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import java.util.{Locale, UUID}
import java.util.concurrent.atomic.AtomicLong
import scala.reflect.runtime.universe.TypeTag
import scala.util.Random
import org.scalatest.matchers.should.Matchers._
import org.apache.spark.SparkException
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.encoders.{ExpressionEncoder, RowEncoder}
import org.apache.spark.sql.catalyst.expressions.Uuid
import org.apache.spark.sql.catalyst.optimizer.ConvertToLocalRelation
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, OneRowRelation}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.connector.FakeV2Provider
import org.apache.spark.sql.execution.{FilterExec, QueryExecution, WholeStageCodegenExec}
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
import org.apache.spark.sql.execution.aggregate.HashAggregateExec
import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ReusedExchangeExec, ShuffleExchangeExec}
import org.apache.spark.sql.expressions.{Aggregator, Window}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.{ExamplePoint, ExamplePointUDT, SharedSparkSession}
import org.apache.spark.sql.test.SQLTestData.{DecimalData, TestData2}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
import org.apache.spark.util.Utils
import org.apache.spark.util.random.XORShiftRandom
class DataFrameSuite extends QueryTest
with SharedSparkSession
with AdaptiveSparkPlanHelper {
import testImplicits._
test("analysis error should be eagerly reported") {
intercept[Exception] { testData.select("nonExistentName") }
intercept[Exception] {
testData.groupBy("key").agg(Map("nonExistentName" -> "sum"))
}
intercept[Exception] {
testData.groupBy("nonExistentName").agg(Map("key" -> "sum"))
}
intercept[Exception] {
testData.groupBy($"abcd").agg(Map("key" -> "sum"))
}
}
test("dataframe toString") {
assert(testData.toString === "[key: int, value: string]")
assert(testData("key").toString === "key")
assert($"test".toString === "test")
}
test("rename nested groupby") {
val df = Seq((1, (1, 1))).toDF()
checkAnswer(
df.groupBy("_1").agg(sum("_2._1")).toDF("key", "total"),
Row(1, 1) :: Nil)
}
test("access complex data") {
assert(complexData.filter(complexData("a").getItem(0) === 2).count() == 1)
assert(complexData.filter(complexData("m").getItem("1") === 1).count() == 1)
assert(complexData.filter(complexData("s").getField("key") === 1).count() == 1)
}
test("table scan") {
checkAnswer(
testData,
testData.collect().toSeq)
}
test("empty data frame") {
assert(spark.emptyDataFrame.columns.toSeq === Seq.empty[String])
assert(spark.emptyDataFrame.count() === 0)
}
test("head, take and tail") {
assert(testData.take(2) === testData.collect().take(2))
assert(testData.head(2) === testData.collect().take(2))
assert(testData.tail(2) === testData.collect().takeRight(2))
assert(testData.head(2).head.schema === testData.schema)
}
test("dataframe alias") {
val df = Seq(Tuple1(1)).toDF("c").as("t")
val dfAlias = df.alias("t2")
df.col("t.c")
dfAlias.col("t2.c")
}
test("simple explode") {
val df = Seq(Tuple1("a b c"), Tuple1("d e")).toDF("words")
checkAnswer(
df.explode("words", "word") { word: String => word.split(" ").toSeq }.select('word),
Row("a") :: Row("b") :: Row("c") :: Row("d") ::Row("e") :: Nil
)
}
test("explode") {
val df = Seq((1, "a b c"), (2, "a b"), (3, "a")).toDF("number", "letters")
val df2 =
df.explode('letters) {
case Row(letters: String) => letters.split(" ").map(Tuple1(_)).toSeq
}
checkAnswer(
df2
.select('_1 as 'letter, 'number)
.groupBy('letter)
.agg(count_distinct('number)),
Row("a", 3) :: Row("b", 2) :: Row("c", 1) :: Nil
)
}
test("Star Expansion - CreateStruct and CreateArray") {
val structDf = testData2.select("a", "b").as("record")
// CreateStruct and CreateArray in aggregateExpressions
assert(structDf.groupBy($"a").agg(min(struct($"record.*"))).
sort("a").first() == Row(1, Row(1, 1)))
assert(structDf.groupBy($"a").agg(min(array($"record.*"))).
sort("a").first() == Row(1, Seq(1, 1)))
// CreateStruct and CreateArray in project list (unresolved alias)
assert(structDf.select(struct($"record.*")).first() == Row(Row(1, 1)))
assert(structDf.select(array($"record.*")).first().getAs[Seq[Int]](0) === Seq(1, 1))
// CreateStruct and CreateArray in project list (alias)
assert(structDf.select(struct($"record.*").as("a")).first() == Row(Row(1, 1)))
assert(structDf.select(array($"record.*").as("a")).first().getAs[Seq[Int]](0) === Seq(1, 1))
}
test("Star Expansion - hash") {
val structDf = testData2.select("a", "b").as("record")
checkAnswer(
structDf.groupBy($"a", $"b").agg(min(hash($"a", $"*"))),
structDf.groupBy($"a", $"b").agg(min(hash($"a", $"a", $"b"))))
checkAnswer(
structDf.groupBy($"a", $"b").agg(hash($"a", $"*")),
structDf.groupBy($"a", $"b").agg(hash($"a", $"a", $"b")))
checkAnswer(
structDf.select(hash($"*")),
structDf.select(hash($"record.*")))
checkAnswer(
structDf.select(hash($"a", $"*")),
structDf.select(hash($"a", $"record.*")))
}
test("Star Expansion - xxhash64") {
val structDf = testData2.select("a", "b").as("record")
checkAnswer(
structDf.groupBy($"a", $"b").agg(min(xxhash64($"a", $"*"))),
structDf.groupBy($"a", $"b").agg(min(xxhash64($"a", $"a", $"b"))))
checkAnswer(
structDf.groupBy($"a", $"b").agg(xxhash64($"a", $"*")),
structDf.groupBy($"a", $"b").agg(xxhash64($"a", $"a", $"b")))
checkAnswer(
structDf.select(xxhash64($"*")),
structDf.select(xxhash64($"record.*")))
checkAnswer(
structDf.select(xxhash64($"a", $"*")),
structDf.select(xxhash64($"a", $"record.*")))
}
/**
 * Verifies a decimal aggregation that overflows: under ANSI mode collection
 * must fail with an ArithmeticException (wrapped in SparkException) carrying a
 * decimal-overflow message; otherwise the query must yield the caller-supplied
 * expected answer.
 */
private def assertDecimalSumOverflow(
    df: DataFrame, ansiEnabled: Boolean, expectedAnswer: Row): Unit = {
  if (ansiEnabled) {
    // ANSI mode: overflow surfaces as an exception at collect() time.
    val e = intercept[SparkException] {
      df.collect()
    }
    assert(e.getCause.isInstanceOf[ArithmeticException])
    assert(e.getCause.getMessage.contains("cannot be represented as Decimal") ||
      e.getCause.getMessage.contains("Overflow in sum of decimals"))
  } else {
    // Non-ANSI mode: the query completes and returns the expected rows.
    checkAnswer(df, expectedAnswer)
  }
}
test("SPARK-28224: Aggregate sum big decimal overflow") {
val largeDecimals = spark.sparkContext.parallelize(
DecimalData(BigDecimal("1"* 20 + ".123"), BigDecimal("1"* 20 + ".123")) ::
DecimalData(BigDecimal("9"* 20 + ".123"), BigDecimal("9"* 20 + ".123")) :: Nil).toDF()
Seq(true, false).foreach { ansiEnabled =>
withSQLConf((SQLConf.ANSI_ENABLED.key, ansiEnabled.toString)) {
val structDf = largeDecimals.select("a").agg(sum("a"))
assertDecimalSumOverflow(structDf, ansiEnabled, Row(null))
}
}
}
test("SPARK-28067: sum of null decimal values") {
Seq("true", "false").foreach { wholeStageEnabled =>
withSQLConf((SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, wholeStageEnabled)) {
Seq("true", "false").foreach { ansiEnabled =>
withSQLConf((SQLConf.ANSI_ENABLED.key, ansiEnabled)) {
val df = spark.range(1, 4, 1).select(expr(s"cast(null as decimal(38,18)) as d"))
checkAnswer(df.agg(sum($"d")), Row(null))
}
}
}
}
}
test("SPARK-28067: Aggregate sum should not return wrong results for decimal overflow") {
Seq("true", "false").foreach { wholeStageEnabled =>
withSQLConf((SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, wholeStageEnabled)) {
Seq(true, false).foreach { ansiEnabled =>
withSQLConf((SQLConf.ANSI_ENABLED.key, ansiEnabled.toString)) {
val df0 = Seq(
(BigDecimal("10000000000000000000"), 1),
(BigDecimal("10000000000000000000"), 1),
(BigDecimal("10000000000000000000"), 2)).toDF("decNum", "intNum")
val df1 = Seq(
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2),
(BigDecimal("10000000000000000000"), 2)).toDF("decNum", "intNum")
val df = df0.union(df1)
val df2 = df.withColumnRenamed("decNum", "decNum2").
join(df, "intNum").agg(sum("decNum"))
val expectedAnswer = Row(null)
assertDecimalSumOverflow(df2, ansiEnabled, expectedAnswer)
val decStr = "1" + "0" * 19
val d1 = spark.range(0, 12, 1, 1)
val d2 = d1.select(expr(s"cast('$decStr' as decimal (38, 18)) as d")).agg(sum($"d"))
assertDecimalSumOverflow(d2, ansiEnabled, expectedAnswer)
val d3 = spark.range(0, 1, 1, 1).union(spark.range(0, 11, 1, 1))
val d4 = d3.select(expr(s"cast('$decStr' as decimal (38, 18)) as d")).agg(sum($"d"))
assertDecimalSumOverflow(d4, ansiEnabled, expectedAnswer)
val d5 = d3.select(expr(s"cast('$decStr' as decimal (38, 18)) as d"),
lit(1).as("key")).groupBy("key").agg(sum($"d").alias("sumd")).select($"sumd")
assertDecimalSumOverflow(d5, ansiEnabled, expectedAnswer)
val nullsDf = spark.range(1, 4, 1).select(expr(s"cast(null as decimal(38,18)) as d"))
val largeDecimals = Seq(BigDecimal("1"* 20 + ".123"), BigDecimal("9"* 20 + ".123")).
toDF("d")
assertDecimalSumOverflow(
nullsDf.union(largeDecimals).agg(sum($"d")), ansiEnabled, expectedAnswer)
val df3 = Seq(
(BigDecimal("10000000000000000000"), 1),
(BigDecimal("50000000000000000000"), 1),
(BigDecimal("10000000000000000000"), 2)).toDF("decNum", "intNum")
val df4 = Seq(
(BigDecimal("10000000000000000000"), 1),
(BigDecimal("10000000000000000000"), 1),
(BigDecimal("10000000000000000000"), 2)).toDF("decNum", "intNum")
val df5 = Seq(
(BigDecimal("10000000000000000000"), 1),
(BigDecimal("10000000000000000000"), 1),
(BigDecimal("20000000000000000000"), 2)).toDF("decNum", "intNum")
val df6 = df3.union(df4).union(df5)
val df7 = df6.groupBy("intNum").agg(sum("decNum"), countDistinct("decNum")).
filter("intNum == 1")
assertDecimalSumOverflow(df7, ansiEnabled, Row(1, null, 2))
}
}
}
}
}
test("Star Expansion - ds.explode should fail with a meaningful message if it takes a star") {
val df = Seq(("1", "1,2"), ("2", "4"), ("3", "7,8,9")).toDF("prefix", "csv")
val e = intercept[AnalysisException] {
df.explode($"*") { case Row(prefix: String, csv: String) =>
csv.split(",").map(v => Tuple1(prefix + ":" + v)).toSeq
}.queryExecution.assertAnalyzed()
}
assert(e.getMessage.contains("Invalid usage of '*' in explode/json_tuple/UDTF"))
checkAnswer(
df.explode('prefix, 'csv) { case Row(prefix: String, csv: String) =>
csv.split(",").map(v => Tuple1(prefix + ":" + v)).toSeq
},
Row("1", "1,2", "1:1") ::
Row("1", "1,2", "1:2") ::
Row("2", "4", "2:4") ::
Row("3", "7,8,9", "3:7") ::
Row("3", "7,8,9", "3:8") ::
Row("3", "7,8,9", "3:9") :: Nil)
}
test("Star Expansion - explode should fail with a meaningful message if it takes a star") {
val df = Seq(("1,2"), ("4"), ("7,8,9")).toDF("csv")
val e = intercept[AnalysisException] {
df.select(explode($"*"))
}
assert(e.getMessage.contains("Invalid usage of '*' in expression 'explode'"))
}
test("explode on output of array-valued function") {
val df = Seq(("1,2"), ("4"), ("7,8,9")).toDF("csv")
checkAnswer(
df.select(explode(split($"csv", pattern = ","))),
Row("1") :: Row("2") :: Row("4") :: Row("7") :: Row("8") :: Row("9") :: Nil)
}
test("Star Expansion - explode alias and star") {
val df = Seq((Array("a"), 1)).toDF("a", "b")
checkAnswer(
df.select(explode($"a").as("a"), $"*"),
Row("a", Seq("a"), 1) :: Nil)
}
test("sort after generate with join=true") {
val df = Seq((Array("a"), 1)).toDF("a", "b")
checkAnswer(
df.select($"*", explode($"a").as("c")).sortWithinPartitions("b", "c"),
Row(Seq("a"), 1, "a") :: Nil)
}
test("selectExpr") {
checkAnswer(
testData.selectExpr("abs(key)", "value"),
testData.collect().map(row => Row(math.abs(row.getInt(0)), row.getString(1))).toSeq)
}
test("selectExpr with alias") {
checkAnswer(
testData.selectExpr("key as k").select("k"),
testData.select("key").collect().toSeq)
}
test("selectExpr with udtf") {
val df = Seq((Map("1" -> 1), 1)).toDF("a", "b")
checkAnswer(
df.selectExpr("explode(a)"),
Row("1", 1) :: Nil)
}
test("filterExpr") {
val res = testData.collect().filter(_.getInt(0) > 90).toSeq
checkAnswer(testData.filter("key > 90"), res)
checkAnswer(testData.filter("key > 9.0e1"), res)
checkAnswer(testData.filter("key > .9e+2"), res)
checkAnswer(testData.filter("key > 0.9e+2"), res)
checkAnswer(testData.filter("key > 900e-1"), res)
checkAnswer(testData.filter("key > 900.0E-1"), res)
checkAnswer(testData.filter("key > 9.e+1"), res)
}
test("filterExpr using where") {
checkAnswer(
testData.where("key > 50"),
testData.collect().filter(_.getInt(0) > 50).toSeq)
}
test("repartition") {
intercept[IllegalArgumentException] {
testData.select("key").repartition(0)
}
checkAnswer(
testData.select("key").repartition(10).select("key"),
testData.select("key").collect().toSeq)
}
test("repartition with SortOrder") {
// passing SortOrder expressions to .repartition() should result in an informative error
def checkSortOrderErrorMsg[T](data: => Dataset[T]): Unit = {
val ex = intercept[IllegalArgumentException](data)
assert(ex.getMessage.contains("repartitionByRange"))
}
checkSortOrderErrorMsg {
Seq(0).toDF("a").repartition(2, $"a".asc)
}
checkSortOrderErrorMsg {
Seq((0, 0)).toDF("a", "b").repartition(2, $"a".asc, $"b")
}
}
test("repartitionByRange") {
// Shuffled 0..9 so range partitioning can't rely on input order; with one
// partition per distinct value, partition id should equal the sorted rank.
val data1d = Random.shuffle(0.to(9))
val data2d = data1d.map(i => (i, data1d.size - i))
// Ascending ranges: value i lands in partition i.
checkAnswer(
data1d.toDF("val").repartitionByRange(data1d.size, $"val".asc)
.select(spark_partition_id().as("id"), $"val"),
data1d.map(i => Row(i, i)))
// Descending ranges: value i lands in the mirrored partition.
checkAnswer(
data1d.toDF("val").repartitionByRange(data1d.size, $"val".desc)
.select(spark_partition_id().as("id"), $"val"),
data1d.map(i => Row(i, data1d.size - 1 - i)))
// A constant partition expression puts every row in partition 0.
checkAnswer(
data1d.toDF("val").repartitionByRange(data1d.size, lit(42))
.select(spark_partition_id().as("id"), $"val"),
data1d.map(i => Row(0, i)))
// Constant/null/random expressions alongside a real column: ordering is still
// driven by $"val".asc, so ids match values.
checkAnswer(
data1d.toDF("val").repartitionByRange(data1d.size, lit(null), $"val".asc, rand())
.select(spark_partition_id().as("id"), $"val"),
data1d.map(i => Row(i, i)))
// .repartitionByRange() assumes .asc by default if no explicit sort order is specified
checkAnswer(
data2d.toDF("a", "b").repartitionByRange(data2d.size, $"a".desc, $"b")
.select(spark_partition_id().as("id"), $"a", $"b"),
data2d.toDF("a", "b").repartitionByRange(data2d.size, $"a".desc, $"b".asc)
.select(spark_partition_id().as("id"), $"a", $"b"))
// at least one partition-by expression must be specified
intercept[IllegalArgumentException] {
data1d.toDF("val").repartitionByRange(data1d.size)
}
intercept[IllegalArgumentException] {
data1d.toDF("val").repartitionByRange(data1d.size, Seq.empty: _*)
}
}
test("coalesce") {
  // Zero target partitions is invalid.
  intercept[IllegalArgumentException] {
    testData.select("key").coalesce(0)
  }
  // Coalescing to one partition preserves the data.
  val keys = testData.select("key")
  assert(keys.coalesce(1).rdd.partitions.size === 1)
  checkAnswer(keys.coalesce(1).select("key"), keys.collect().toSeq)
  // An empty frame stays at zero partitions even after coalesce(1).
  assert(spark.emptyDataFrame.coalesce(1).rdd.partitions.size === 0)
}
// $"name" interpolation must resolve to an unresolved attribute on the frame.
test("convert $\\"attribute name\\" into unresolved attribute") {
checkAnswer(
testData.where($"key" === lit(1)).select($"value"),
Row("1"))
}
// Same resolution path, selecting by plain string name.
test("convert Scala Symbol 'attrname into unresolved attribute") {
checkAnswer(
testData.where($"key" === lit(1)).select("value"),
Row("1"))
}
// select($"*") expands to all columns.
test("select *") {
checkAnswer(
testData.select($"*"),
testData.collect().toSeq)
}
test("simple select") {
checkAnswer(
testData.where($"key" === lit(1)).select("value"),
Row("1"))
}
test("select with functions") {
// Aggregates over testData (keys/values 1..100): sum, avg, count.
checkAnswer(
testData.select(sum("value"), avg("value"), count(lit(1))),
Row(5050.0, 50.5, 100))
// Arithmetic and comparison expressions in a projection.
checkAnswer(
testData2.select($"a" + $"b", $"a" < $"b"),
Seq(
Row(2, false),
Row(3, true),
Row(3, false),
Row(4, false),
Row(4, false),
Row(5, false)))
// Distinct aggregation: sum of the distinct "a" values 1+2+3.
checkAnswer(
testData2.select(sum_distinct($"a")),
Row(6))
}
test("sorting with null ordering") {
// Verify all eight null-ordering variants (Column methods and their function
// equivalents). Defaults: asc => nulls first, desc => nulls last.
val data = Seq[java.lang.Integer](2, 1, null).toDF("key")
checkAnswer(data.orderBy($"key".asc), Row(null) :: Row(1) :: Row(2) :: Nil)
checkAnswer(data.orderBy(asc("key")), Row(null) :: Row(1) :: Row(2) :: Nil)
checkAnswer(data.orderBy($"key".asc_nulls_first), Row(null) :: Row(1) :: Row(2) :: Nil)
checkAnswer(data.orderBy(asc_nulls_first("key")), Row(null) :: Row(1) :: Row(2) :: Nil)
checkAnswer(data.orderBy($"key".asc_nulls_last), Row(1) :: Row(2) :: Row(null) :: Nil)
checkAnswer(data.orderBy(asc_nulls_last("key")), Row(1) :: Row(2) :: Row(null) :: Nil)
checkAnswer(data.orderBy($"key".desc), Row(2) :: Row(1) :: Row(null) :: Nil)
checkAnswer(data.orderBy(desc("key")), Row(2) :: Row(1) :: Row(null) :: Nil)
checkAnswer(data.orderBy($"key".desc_nulls_first), Row(null) :: Row(2) :: Row(1) :: Nil)
checkAnswer(data.orderBy(desc_nulls_first("key")), Row(null) :: Row(2) :: Row(1) :: Nil)
checkAnswer(data.orderBy($"key".desc_nulls_last), Row(2) :: Row(1) :: Row(null) :: Nil)
checkAnswer(data.orderBy(desc_nulls_last("key")), Row(2) :: Row(1) :: Row(null) :: Nil)
}
test("global sorting") {
// Multi-column total ordering over testData2 in every asc/desc combination.
checkAnswer(
testData2.orderBy($"a".asc, $"b".asc),
Seq(Row(1, 1), Row(1, 2), Row(2, 1), Row(2, 2), Row(3, 1), Row(3, 2)))
checkAnswer(
testData2.orderBy(asc("a"), desc("b")),
Seq(Row(1, 2), Row(1, 1), Row(2, 2), Row(2, 1), Row(3, 2), Row(3, 1)))
checkAnswer(
testData2.orderBy($"a".asc, $"b".desc),
Seq(Row(1, 2), Row(1, 1), Row(2, 2), Row(2, 1), Row(3, 2), Row(3, 1)))
checkAnswer(
testData2.orderBy($"a".desc, $"b".desc),
Seq(Row(3, 2), Row(3, 1), Row(2, 2), Row(2, 1), Row(1, 2), Row(1, 1)))
checkAnswer(
testData2.orderBy($"a".desc, $"b".asc),
Seq(Row(3, 1), Row(3, 2), Row(2, 1), Row(2, 2), Row(1, 1), Row(1, 2)))
// Ordering by an element extracted from an array column; expected order is
// reproduced driver-side with sortBy on the collected rows.
checkAnswer(
arrayData.toDF().orderBy($"data".getItem(0).asc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(0)).toSeq)
checkAnswer(
arrayData.toDF().orderBy($"data".getItem(0).desc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(0)).reverse.toSeq)
checkAnswer(
arrayData.toDF().orderBy($"data".getItem(1).asc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(1)).toSeq)
checkAnswer(
arrayData.toDF().orderBy($"data".getItem(1).desc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(1)).reverse.toSeq)
}
test("limit") {
// limit(n) must agree with take(n) on simple, array and map data.
checkAnswer(
testData.limit(10),
testData.take(10).toSeq)
checkAnswer(
arrayData.toDF().limit(1),
arrayData.take(1).map(r => Row.fromSeq(r.productIterator.toSeq)))
checkAnswer(
mapData.toDF().limit(1),
mapData.take(1).map(r => Row.fromSeq(r.productIterator.toSeq)))
// SPARK-12340: overstep the bounds of Int in SparkPlan.executeTake
checkAnswer(
spark.range(2).toDF().limit(2147483638),
Row(0) :: Row(1) :: Nil
)
}
// Scala UDF used inside a projection alongside $"*".
test("udf") {
val foo = udf((a: Int, b: String) => a.toString + b)
checkAnswer(
// SELECT *, foo(key, value) FROM testData
testData.select($"*", foo($"key", $"value")).limit(3),
Row(1, "1", "11") :: Row(2, "2", "22") :: Row(3, "3", "33") :: Nil
)
}
// Register-by-name UDF invoked through the (deprecated) callUDF entry point.
test("callUDF without Hive Support") {
val df = Seq(("id1", 1), ("id2", 4), ("id3", 5)).toDF("id", "value")
df.sparkSession.udf.register("simpleUDF", (v: Int) => v * v)
checkAnswer(
df.select($"id", callUDF("simpleUDF", $"value")), // test deprecated one
Row("id1", 1) :: Row("id2", 16) :: Row("id3", 25) :: Nil)
}
test("withColumn") {
  // A derived column is appended last; existing columns are untouched.
  val withNew = testData.toDF().withColumn("newCol", col("key") + 1)
  val expected = testData.collect().map {
    case Row(key: Int, value: String) => Row(key, value, key + 1)
  }.toSeq
  checkAnswer(withNew, expected)
  assert(withNew.schema.map(_.name) === Seq("key", "value", "newCol"))
}
// Batch variant: add several derived columns in one call.
test("withColumns") {
val df = testData.toDF().withColumns(Seq("newCol1", "newCol2"),
Seq(col("key") + 1, col("key") + 2))
checkAnswer(
df,
testData.collect().map { case Row(key: Int, value: String) =>
Row(key, value, key + 1, key + 2)
}.toSeq)
assert(df.schema.map(_.name) === Seq("key", "value", "newCol1", "newCol2"))
// Name/column count mismatch is rejected with an explicit size message.
val err = intercept[IllegalArgumentException] {
testData.toDF().withColumns(Seq("newCol1"),
Seq(col("key") + 1, col("key") + 2))
}
assert(
err.getMessage.contains("The size of column names: 1 isn't equal to the size of columns: 2"))
// With default (case-insensitive) resolution, names differing only in case collide.
val err2 = intercept[AnalysisException] {
testData.toDF().withColumns(Seq("newCol1", "newCOL1"),
Seq(col("key") + 1, col("key") + 2))
}
assert(err2.getMessage.contains("Found duplicate column(s)"))
}
test("withColumns: case sensitive") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
// Case-sensitive mode: "newCol1" and "newCOL1" are distinct columns.
val df = testData.toDF().withColumns(Seq("newCol1", "newCOL1"),
Seq(col("key") + 1, col("key") + 2))
checkAnswer(
df,
testData.collect().map { case Row(key: Int, value: String) =>
Row(key, value, key + 1, key + 2)
}.toSeq)
assert(df.schema.map(_.name) === Seq("key", "value", "newCol1", "newCOL1"))
// Exact duplicates are still rejected.
val err = intercept[AnalysisException] {
testData.toDF().withColumns(Seq("newCol1", "newCol1"),
Seq(col("key") + 1, col("key") + 2))
}
assert(err.getMessage.contains("Found duplicate column(s)"))
}
}
test("withColumns: given metadata") {
// Build one Metadata entry per column, keyed "key" -> index, so we can verify
// metadata is attached positionally.
def buildMetadata(num: Int): Seq[Metadata] = {
(0 until num).map { n =>
val builder = new MetadataBuilder
builder.putLong("key", n.toLong)
builder.build()
}
}
val df = testData.toDF().withColumns(
Seq("newCol1", "newCol2"),
Seq(col("key") + 1, col("key") + 2),
buildMetadata(2))
df.select("newCol1", "newCol2").schema.zipWithIndex.foreach { case (col, idx) =>
assert(col.metadata.getLong("key").toInt === idx)
}
// Metadata count must match the column count.
val err = intercept[IllegalArgumentException] {
testData.toDF().withColumns(
Seq("newCol1", "newCol2"),
Seq(col("key") + 1, col("key") + 2),
buildMetadata(1))
}
assert(err.getMessage.contains(
"The size of column names: 2 isn't equal to the size of metadata elements: 1"))
}
// withColumn with an existing name replaces that column in place.
test("replace column using withColumn") {
val df2 = sparkContext.parallelize(Array(1, 2, 3)).toDF("x")
val df3 = df2.withColumn("x", df2("x") + 1)
checkAnswer(
df3.select("x"),
Row(2) :: Row(3) :: Row(4) :: Nil)
}
// Batch variant: one existing name ("x") is replaced, the others are appended.
test("replace column using withColumns") {
val df2 = sparkContext.parallelize(Seq((1, 2), (2, 3), (3, 4))).toDF("x", "y")
val df3 = df2.withColumns(Seq("x", "newCol1", "newCol2"),
Seq(df2("x") + 1, df2("y"), df2("y") + 1))
checkAnswer(
df3.select("x", "newCol1", "newCol2"),
Row(2, 2, 3) :: Row(3, 3, 4) :: Row(4, 4, 5) :: Nil)
}
test("drop column using drop") {
  // Dropping "key" leaves only the "value" column; data is otherwise unchanged.
  val dropped = testData.drop("key")
  val expected = testData.collect().map(row => Row(row.getString(1))).toSeq
  checkAnswer(dropped, expected)
  assert(dropped.schema.map(_.name) === Seq("value"))
}
// Varargs drop removes several columns in one call.
test("drop columns using drop") {
val src = Seq((0, 2, 3)).toDF("a", "b", "c")
val df = src.drop("a", "b")
checkAnswer(df, Row(3))
assert(df.schema.map(_.name) === Seq("c"))
}
// Dropping a name that doesn't resolve is a silent no-op, not an error.
test("drop unknown column (no-op)") {
val df = testData.drop("random")
checkAnswer(
df,
testData.collect().toSeq)
assert(df.schema.map(_.name) === Seq("key", "value"))
}
// drop(Column) resolves the reference against the frame instead of by name.
test("drop column using drop with column reference") {
val col = testData("key")
val df = testData.drop(col)
checkAnswer(
df,
testData.collect().map(x => Row(x.getString(1))).toSeq)
assert(df.schema.map(_.name) === Seq("value"))
}
test("SPARK-28189 drop column using drop with column reference with case-insensitive names") {
// With SQL config caseSensitive OFF, case insensitive column name should work
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val col1 = testData("KEY")
val df1 = testData.drop(col1)
checkAnswer(df1, testData.selectExpr("value"))
assert(df1.schema.map(_.name) === Seq("value"))
val col2 = testData("Key")
val df2 = testData.drop(col2)
checkAnswer(df2, testData.selectExpr("value"))
assert(df2.schema.map(_.name) === Seq("value"))
}
}
// An unresolvable Column reference is a no-op, mirroring the by-name behavior.
test("drop unknown column (no-op) with column reference") {
val col = Column("random")
val df = testData.drop(col)
checkAnswer(
df,
testData.collect().toSeq)
assert(df.schema.map(_.name) === Seq("key", "value"))
}
// A free-standing Column("key") still matches the frame's "key" column by name.
test("drop unknown column with same name with column reference") {
val col = Column("key")
val df = testData.drop(col)
checkAnswer(
df,
testData.collect().map(x => Row(x.getString(1))).toSeq)
assert(df.schema.map(_.name) === Seq("value"))
}
// drop(Column) disambiguates between same-named columns coming from different
// sides of a join — only the referenced one is removed.
test("drop column after join with duplicate columns using column reference") {
val newSalary = salary.withColumnRenamed("personId", "id")
val col = newSalary("id")
// this join will result in duplicate "id" columns
val joinedDf = person.join(newSalary,
person("id") === newSalary("id"), "inner")
// remove only the "id" column that was associated with newSalary
val df = joinedDf.drop(col)
checkAnswer(
df,
joinedDf.collect().map {
case Row(id: Int, name: String, age: Int, idToDrop: Int, salary: Double) =>
Row(id, name, age, salary)
}.toSeq)
assert(df.schema.map(_.name) === Seq("id", "name", "age", "salary"))
assert(df("id") == person("id"))
}
test("drop top level columns that contains dot") {
val df1 = Seq((1, 2)).toDF("a.b", "a.c")
checkAnswer(df1.drop("a.b"), Row(2))
// Creates data set: {"a.b": 1, "a": {"b": 3}}
val df2 = Seq((1)).toDF("a.b").withColumn("a", struct(lit(3) as "b"))
// Not like select(), drop() parses the column name "a.b" literally without interpreting "."
checkAnswer(df2.drop("a.b").select("a.b"), Row(3))
// "`" is treated as a normal char here with no interpreting, "`a`b" is a valid column name.
assert(df2.drop("`a.b`").columns.size == 2)
}
test("drop(name: String) search and drop all top level columns that matches the name") {
  val left = Seq((1, 2)).toDF("a", "b")
  val right = Seq((3, 4)).toDF("a", "b")
  val crossed = left.crossJoin(right)
  checkAnswer(crossed, Row(1, 2, 3, 4))
  // Name-based drop matches case-insensitively and removes EVERY matching column,
  // so both "a" columns from the cross join disappear.
  checkAnswer(crossed.drop("A"), Row(2, 4))
}
test("withColumnRenamed") {
  // Renaming only relabels a column; the data and column order are preserved.
  val renamed = testData.toDF()
    .withColumn("newCol", col("key") + 1)
    .withColumnRenamed("value", "valueRenamed")
  val expected = testData.collect().map {
    case Row(key: Int, value: String) => Row(key, value, key + 1)
  }.toSeq
  checkAnswer(renamed, expected)
  assert(renamed.schema.map(_.name) === Seq("key", "valueRenamed", "newCol"))
}
// Fixture for describe()/summary() tests: 4 distinct people.
private lazy val person2: DataFrame = Seq(
("Bob", 16, 176),
("Alice", 32, 164),
("David", 60, 192),
("Amy", 24, 180)).toDF("name", "age", "height")
// Fixture with duplicates for the count_distinct summary test:
// 4 distinct names, 5 distinct ages, 3 distinct heights.
private lazy val person3: DataFrame = Seq(
("Luis", 1, 99),
("Luis", 16, 99),
("Luis", 16, 176),
("Fernando", 32, 99),
("Fernando", 32, 164),
("David", 60, 99),
("Amy", 24, 99)).toDF("name", "age", "height")
test("describe") {
// Expected output of describe() over person2: one column per input column,
// all values rendered as strings (nulls where a stat doesn't apply).
val describeResult = Seq(
Row("count", "4", "4", "4"),
Row("mean", null, "33.0", "178.0"),
Row("stddev", null, "19.148542155126762", "11.547005383792516"),
Row("min", "Alice", "16", "164"),
Row("max", "David", "60", "192"))
val emptyDescribeResult = Seq(
Row("count", "0", "0", "0"),
Row("mean", null, null, null),
Row("stddev", null, null, null),
Row("min", null, null, null),
Row("max", null, null, null))
def getSchemaAsSeq(df: DataFrame): Seq[String] = df.schema.map(_.name)
val describeAllCols = person2.describe()
assert(getSchemaAsSeq(describeAllCols) === Seq("summary", "name", "age", "height"))
checkAnswer(describeAllCols, describeResult)
// All aggregate value should have been cast to string
describeAllCols.collect().foreach { row =>
row.toSeq.foreach { value =>
if (value != null) {
assert(value.isInstanceOf[String], "expected string but found " + value.getClass)
}
}
}
// Restricting describe() to one column keeps only that column (+ "summary").
val describeOneCol = person2.describe("age")
assert(getSchemaAsSeq(describeOneCol) === Seq("summary", "age"))
checkAnswer(describeOneCol, describeResult.map { case Row(s, _, d, _) => Row(s, d)} )
// describe() on a zero-column projection yields just the "summary" column.
val describeNoCol = person2.select().describe()
assert(getSchemaAsSeq(describeNoCol) === Seq("summary"))
checkAnswer(describeNoCol, describeResult.map { case Row(s, _, _, _) => Row(s)} )
// describe() on an empty (limit 0) frame keeps the schema but yields null stats.
val emptyDescription = person2.limit(0).describe()
assert(getSchemaAsSeq(emptyDescription) === Seq("summary", "name", "age", "height"))
checkAnswer(emptyDescription, emptyDescribeResult)
}
test("summary") {
// summary() adds the 25%/50%/75% percentile rows on top of describe()'s stats.
val summaryResult = Seq(
Row("count", "4", "4", "4"),
Row("mean", null, "33.0", "178.0"),
Row("stddev", null, "19.148542155126762", "11.547005383792516"),
Row("min", "Alice", "16", "164"),
Row("25%", null, "16", "164"),
Row("50%", null, "24", "176"),
Row("75%", null, "32", "180"),
Row("max", "David", "60", "192"))
val emptySummaryResult = Seq(
Row("count", "0", "0", "0"),
Row("mean", null, null, null),
Row("stddev", null, null, null),
Row("min", null, null, null),
Row("25%", null, null, null),
Row("50%", null, null, null),
Row("75%", null, null, null),
Row("max", null, null, null))
def getSchemaAsSeq(df: DataFrame): Seq[String] = df.schema.map(_.name)
val summaryAllCols = person2.summary()
assert(getSchemaAsSeq(summaryAllCols) === Seq("summary", "name", "age", "height"))
checkAnswer(summaryAllCols, summaryResult)
// All aggregate value should have been cast to string
summaryAllCols.collect().foreach { row =>
row.toSeq.foreach { value =>
if (value != null) {
assert(value.isInstanceOf[String], "expected string but found " + value.getClass)
}
}
}
// Single-column, zero-column, and empty-frame variants mirror the describe() test.
val summaryOneCol = person2.select("age").summary()
assert(getSchemaAsSeq(summaryOneCol) === Seq("summary", "age"))
checkAnswer(summaryOneCol, summaryResult.map { case Row(s, _, d, _) => Row(s, d)} )
val summaryNoCol = person2.select().summary()
assert(getSchemaAsSeq(summaryNoCol) === Seq("summary"))
checkAnswer(summaryNoCol, summaryResult.map { case Row(s, _, _, _) => Row(s)} )
val emptyDescription = person2.limit(0).summary()
assert(getSchemaAsSeq(emptyDescription) === Seq("summary", "name", "age", "height"))
checkAnswer(emptyDescription, emptySummaryResult)
}
test("SPARK-34165: Add count_distinct to summary") {
  // summary("count", "count_distinct") must report exact distinct counts per
  // column: person3 has 4 distinct names, 5 distinct ages, 3 distinct heights.
  val summaryDF = person3.summary("count", "count_distinct")
  val summaryResult = Seq(
    Row("count", "7", "7", "7"),
    Row("count_distinct", "4", "5", "3"))
  def getSchemaAsSeq(df: DataFrame): Seq[String] = df.schema.map(_.name)
  assert(getSchemaAsSeq(summaryDF) === Seq("summary", "name", "age", "height"))
  checkAnswer(summaryDF, summaryResult)
  // approx_count_distinct is estimate-based; on this tiny dataset it is exact.
  val approxSummaryDF = person3.summary("count", "approx_count_distinct")
  val approxSummaryResult = Seq(
    Row("count", "7", "7", "7"),
    Row("approx_count_distinct", "4", "5", "3"))
  // Fix: the original re-asserted summaryDF's schema here (copy-paste), leaving
  // approxSummaryDF's schema unverified; assert on the approx frame instead.
  assert(getSchemaAsSeq(approxSummaryDF) === Seq("summary", "name", "age", "height"))
  checkAnswer(approxSummaryDF, approxSummaryResult)
}
test("summary advanced") {
// Requested statistics come back in the caller's order, including arbitrary
// percentile strings like "50.01%".
val stats = Array("count", "50.01%", "max", "mean", "min", "25%")
val orderMatters = person2.summary(stats: _*)
assert(orderMatters.collect().map(_.getString(0)) === stats)
val onlyPercentiles = person2.summary("0.1%", "99.9%")
assert(onlyPercentiles.count() === 2)
// Unknown statistic names and unparsable percentiles each fail with a
// dedicated message (messages below match the implementation verbatim).
val fooE = intercept[IllegalArgumentException] {
person2.summary("foo")
}
assert(fooE.getMessage === "foo is not a recognised statistic")
val parseE = intercept[IllegalArgumentException] {
person2.summary("foo%")
}
assert(parseE.getMessage === "Unable to parse foo% as a percentile")
}
test("apply on query results (SPARK-5462)") {
  // Column lookup by name must resolve against a frame produced from SQL text.
  val queried = testData.sparkSession.sql("select key from testData")
  val expected = testData.select("key").collect().toSeq
  checkAnswer(queried.select(queried("key")), expected)
}
test("inputFiles") {
// Run once with the CSV V1 source list and once without, to cover both
// DataSource V1 and V2 code paths.
Seq("csv", "").foreach { useV1List =>
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1List) {
withTempDir { dir =>
val df = Seq((1, 22)).toDF("a", "b")
val parquetDir = new File(dir, "parquet").getCanonicalPath
df.write.parquet(parquetDir)
val parquetDF = spark.read.parquet(parquetDir)
assert(parquetDF.inputFiles.nonEmpty)
// NOTE(review): the directory is named "csv" but JSON is written/read here —
// presumably intentional in the upstream test, but the naming is misleading.
val csvDir = new File(dir, "csv").getCanonicalPath
df.write.json(csvDir)
val csvDF = spark.read.json(csvDir)
assert(csvDF.inputFiles.nonEmpty)
// inputFiles of a union is the distinct union of both sides' files.
val unioned = csvDF.union(parquetDF).inputFiles.sorted
val allFiles = (csvDF.inputFiles ++ parquetDF.inputFiles).distinct.sorted
assert(unioned === allFiles)
}
}
}
}
ignore("show") {
// This test case is intended ignored, but to make sure it compiles correctly
testData.select($"*").show()
testData.select($"*").show(1000)
}
// getRows(numRows, truncate) returns the rendered cells (header row first);
// truncate = 0 disables truncation, otherwise long cells end in "...".
test("getRows: truncate = [0, 20]") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = Seq(
Seq("value"),
Seq("1"),
Seq("111111111111111111111"))
assert(df.getRows(10, 0) === expectedAnswerForFalse)
val expectedAnswerForTrue = Seq(
Seq("value"),
Seq("1"),
Seq("11111111111111111..."))
assert(df.getRows(10, 20) === expectedAnswerForTrue)
}
// Truncation below the minimum (<= 3) cuts without appending "...".
test("getRows: truncate = [3, 17]") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = Seq(
Seq("value"),
Seq("1"),
Seq("111"))
assert(df.getRows(10, 3) === expectedAnswerForFalse)
val expectedAnswerForTrue = Seq(
Seq("value"),
Seq("1"),
Seq("11111111111111..."))
assert(df.getRows(10, 17) === expectedAnswerForTrue)
}
// numRows = 0 still returns the header plus one extra row (the "has more" probe).
test("getRows: numRows = 0") {
val expectedAnswer = Seq(Seq("key", "value"), Seq("1", "1"))
assert(testData.select($"*").getRows(0, 20) === expectedAnswer)
}
// Array cells render in bracketed, comma-separated form.
test("getRows: array") {
val df = Seq(
(Array(1, 2, 3), Array(1, 2, 3)),
(Array(2, 3, 4), Array(2, 3, 4))
).toDF()
val expectedAnswer = Seq(
Seq("_1", "_2"),
Seq("[1, 2, 3]", "[1, 2, 3]"),
Seq("[2, 3, 4]", "[2, 3, 4]"))
assert(df.getRows(10, 20) === expectedAnswer)
}
// Binary cells render as bracketed hex bytes.
test("getRows: binary") {
val df = Seq(
("12".getBytes(StandardCharsets.UTF_8), "ABC.".getBytes(StandardCharsets.UTF_8)),
("34".getBytes(StandardCharsets.UTF_8), "12346".getBytes(StandardCharsets.UTF_8))
).toDF()
val expectedAnswer = Seq(
Seq("_1", "_2"),
Seq("[31 32]", "[41 42 43 2E]"),
Seq("[33 34]", "[31 32 33 34 36]"))
assert(df.getRows(10, 20) === expectedAnswer)
}
// showString renders the full ASCII table; these tests pin the exact output
// for truncate = 0 (no truncation) vs a positive width, in both table and
// vertical (RECORD-per-row) layouts.
test("showString: truncate = [0, 20]") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = """+---------------------+
||value |
|+---------------------+
||1 |
||111111111111111111111|
|+---------------------+
|""".stripMargin
assert(df.showString(10, truncate = 0) === expectedAnswerForFalse)
val expectedAnswerForTrue = """+--------------------+
|| value|
|+--------------------+
|| 1|
||11111111111111111...|
|+--------------------+
|""".stripMargin
assert(df.showString(10, truncate = 20) === expectedAnswerForTrue)
}
test("showString: truncate = [0, 20], vertical = true") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = "-RECORD 0----------------------\\n" +
" value | 1 \\n" +
"-RECORD 1----------------------\\n" +
" value | 111111111111111111111 \\n"
assert(df.showString(10, truncate = 0, vertical = true) === expectedAnswerForFalse)
val expectedAnswerForTrue = "-RECORD 0---------------------\\n" +
" value | 1 \\n" +
"-RECORD 1---------------------\\n" +
" value | 11111111111111111... \\n"
assert(df.showString(10, truncate = 20, vertical = true) === expectedAnswerForTrue)
}
// truncate <= 3 cuts without appending "..."; truncate = 17 keeps 14 chars + "...".
test("showString: truncate = [3, 17]") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = """+-----+
||value|
|+-----+
|| 1|
|| 111|
|+-----+
|""".stripMargin
assert(df.showString(10, truncate = 3) === expectedAnswerForFalse)
val expectedAnswerForTrue = """+-----------------+
|| value|
|+-----------------+
|| 1|
||11111111111111...|
|+-----------------+
|""".stripMargin
assert(df.showString(10, truncate = 17) === expectedAnswerForTrue)
}
test("showString: truncate = [3, 17], vertical = true") {
val longString = Array.fill(21)("1").mkString
val df = sparkContext.parallelize(Seq("1", longString)).toDF()
val expectedAnswerForFalse = "-RECORD 0----\\n" +
" value | 1 \\n" +
"-RECORD 1----\\n" +
" value | 111 \\n"
assert(df.showString(10, truncate = 3, vertical = true) === expectedAnswerForFalse)
val expectedAnswerForTrue = "-RECORD 0------------------\\n" +
" value | 1 \\n" +
"-RECORD 1------------------\\n" +
" value | 11111111111111... \\n"
assert(df.showString(10, truncate = 17, vertical = true) === expectedAnswerForTrue)
}
// Negative and zero row counts both render an empty table with the
// "only showing top 0 rows" footer (or "(0 rows)" in vertical mode).
test("showString(negative)") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|+---+-----+
|only showing top 0 rows
|""".stripMargin
assert(testData.select($"*").showString(-1) === expectedAnswer)
}
test("showString(negative), vertical = true") {
val expectedAnswer = "(0 rows)\\n"
assert(testData.select($"*").showString(-1, vertical = true) === expectedAnswer)
}
test("showString(0)") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|+---+-----+
|only showing top 0 rows
|""".stripMargin
assert(testData.select($"*").showString(0) === expectedAnswer)
}
// Int.MaxValue must not overflow internal row-count arithmetic; no footer is
// printed when everything fits.
test("showString(Int.MaxValue)") {
val df = Seq((1, 2), (3, 4)).toDF("a", "b")
val expectedAnswer = """+---+---+
|| a| b|
|+---+---+
|| 1| 2|
|| 3| 4|
|+---+---+
|""".stripMargin
assert(df.showString(Int.MaxValue) === expectedAnswer)
}
test("showString(0), vertical = true") {
val expectedAnswer = "(0 rows)\\n"
assert(testData.select($"*").showString(0, vertical = true) === expectedAnswer)
}
// Array columns render as "[1, 2, 3]" in both table and vertical layouts.
test("showString: array") {
val df = Seq(
(Array(1, 2, 3), Array(1, 2, 3)),
(Array(2, 3, 4), Array(2, 3, 4))
).toDF()
val expectedAnswer = """+---------+---------+
|| _1| _2|
|+---------+---------+
||[1, 2, 3]|[1, 2, 3]|
||[2, 3, 4]|[2, 3, 4]|
|+---------+---------+
|""".stripMargin
assert(df.showString(10) === expectedAnswer)
}
test("showString: array, vertical = true") {
val df = Seq(
(Array(1, 2, 3), Array(1, 2, 3)),
(Array(2, 3, 4), Array(2, 3, 4))
).toDF()
val expectedAnswer = "-RECORD 0--------\\n" +
" _1 | [1, 2, 3] \\n" +
" _2 | [1, 2, 3] \\n" +
"-RECORD 1--------\\n" +
" _1 | [2, 3, 4] \\n" +
" _2 | [2, 3, 4] \\n"
assert(df.showString(10, vertical = true) === expectedAnswer)
}
// Binary columns render as bracketed hex bytes, e.g. "12" -> [31 32].
test("showString: binary") {
val df = Seq(
("12".getBytes(StandardCharsets.UTF_8), "ABC.".getBytes(StandardCharsets.UTF_8)),
("34".getBytes(StandardCharsets.UTF_8), "12346".getBytes(StandardCharsets.UTF_8))
).toDF()
val expectedAnswer = """+-------+----------------+
|| _1| _2|
|+-------+----------------+
||[31 32]| [41 42 43 2E]|
||[33 34]|[31 32 33 34 36]|
|+-------+----------------+
|""".stripMargin
assert(df.showString(10) === expectedAnswer)
}
test("showString: binary, vertical = true") {
val df = Seq(
("12".getBytes(StandardCharsets.UTF_8), "ABC.".getBytes(StandardCharsets.UTF_8)),
("34".getBytes(StandardCharsets.UTF_8), "12346".getBytes(StandardCharsets.UTF_8))
).toDF()
val expectedAnswer = "-RECORD 0---------------\\n" +
" _1 | [31 32] \\n" +
" _2 | [41 42 43 2E] \\n" +
"-RECORD 1---------------\\n" +
" _1 | [33 34] \\n" +
" _2 | [31 32 33 34 36] \\n"
assert(df.showString(10, vertical = true) === expectedAnswer)
}
// Columns are padded to a minimum width of 3 even when all cells are narrower.
test("showString: minimum column width") {
val df = Seq(
(1, 1),
(2, 2)
).toDF()
val expectedAnswer = """+---+---+
|| _1| _2|
|+---+---+
|| 1| 1|
|| 2| 2|
|+---+---+
|""".stripMargin
assert(df.showString(10) === expectedAnswer)
}
test("showString: minimum column width, vertical = true") {
val df = Seq(
(1, 1),
(2, 2)
).toDF()
val expectedAnswer = "-RECORD 0--\\n" +
" _1 | 1 \\n" +
" _2 | 1 \\n" +
"-RECORD 1--\\n" +
" _1 | 2 \\n" +
" _2 | 2 \\n"
assert(df.showString(10, vertical = true) === expectedAnswer)
}
// Control characters embedded in values (newline, tab, CR, FF, BS, VT, BEL)
// must be rendered as escaped sequences so they can't corrupt the table layout.
test("SPARK-33690: showString: escape meta-characters") {
val df1 = spark.sql("SELECT 'aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\u000Bggg\\u0007hhh'")
assert(df1.showString(1, truncate = 0) ===
"""+--------------------------------------+
||aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\vggg\\ahhh|
|+--------------------------------------+
||aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\vggg\\ahhh|
|+--------------------------------------+
|""".stripMargin)
// Same escaping inside array elements...
val df2 = spark.sql("SELECT array('aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\u000Bggg\\u0007hhh')")
assert(df2.showString(1, truncate = 0) ===
"""+---------------------------------------------+
||array(aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\vggg\\ahhh)|
|+---------------------------------------------+
||[aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\vggg\\ahhh] |
|+---------------------------------------------+
|""".stripMargin)
// ...map keys/values...
val df3 =
spark.sql("SELECT map('aaa\\nbbb\\tccc', 'aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\u000Bggg\\u0007hhh')")
assert(df3.showString(1, truncate = 0) ===
"""+----------------------------------------------------------+
||map(aaa\\nbbb\\tccc, aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\vggg\\ahhh)|
|+----------------------------------------------------------+
||{aaa\\nbbb\\tccc -> aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\vggg\\ahhh} |
|+----------------------------------------------------------+
|""".stripMargin)
// ...and struct fields.
val df4 =
spark.sql("SELECT named_struct('v', 'aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\u000Bggg\\u0007hhh')")
assert(df4.showString(1, truncate = 0) ===
"""+-------------------------------------------------------+
||named_struct(v, aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\vggg\\ahhh)|
|+-------------------------------------------------------+
||{aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\vggg\\ahhh} |
|+-------------------------------------------------------+
|""".stripMargin)
}
// printSchema must apply the same escaping to column names; output is captured
// by redirecting Console.out into a byte buffer.
test("SPARK-34308: printSchema: escape meta-characters") {
val captured = new ByteArrayOutputStream()
val df1 = spark.sql("SELECT 'aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\u000Bggg\\u0007hhh'")
Console.withOut(captured) {
df1.printSchema()
}
assert(captured.toString ===
"""root
| |-- aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\vggg\\ahhh: string (nullable = false)
|
|""".stripMargin)
captured.reset()
val df2 = spark.sql("SELECT array('aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\u000Bggg\\u0007hhh')")
Console.withOut(captured) {
df2.printSchema()
}
assert(captured.toString ===
"""root
| |-- array(aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\vggg\\ahhh): array (nullable = false)
| | |-- element: string (containsNull = false)
|
|""".stripMargin)
captured.reset()
val df3 =
spark.sql("SELECT map('aaa\\nbbb\\tccc', 'aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\u000Bggg\\u0007hhh')")
Console.withOut(captured) {
df3.printSchema()
}
assert(captured.toString ===
"""root
| |-- map(aaa\\nbbb\\tccc, aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\vggg\\ahhh): map (nullable = false)
| | |-- key: string
| | |-- value: string (valueContainsNull = false)
|
|""".stripMargin)
captured.reset()
val df4 =
spark.sql("SELECT named_struct('v', 'aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\u000Bggg\\u0007hhh')")
Console.withOut(captured) {
df4.printSchema()
}
assert(captured.toString ===
"""root
| |-- named_struct(v, aaa\\nbbb\\tccc\\rddd\\feee\\bfff\\vggg\\ahhh): struct (nullable = false)
| | |-- v: string (nullable = false)
|
|""".stripMargin)
}
// Footer uses the singular "row" when exactly one row is shown.
test("SPARK-7319 showString") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|""".stripMargin
assert(testData.select($"*").showString(1) === expectedAnswer)
}
test("SPARK-7319 showString, vertical = true") {
val expectedAnswer = "-RECORD 0----\\n" +
" key | 1 \\n" +
" value | 1 \\n" +
"only showing top 1 row\\n"
assert(testData.select($"*").showString(1, vertical = true) === expectedAnswer)
}
// Complex values are stringified for display: arrays as [..], maps as
// {k -> v, ...}, structs as {..}.
test("SPARK-23023 Cast rows to strings in showString") {
val df1 = Seq(Seq(1, 2, 3, 4)).toDF("a")
assert(df1.showString(10) ===
s"""+------------+
|| a|
|+------------+
||[1, 2, 3, 4]|
|+------------+
|""".stripMargin)
val df2 = Seq(Map(1 -> "a", 2 -> "b")).toDF("a")
assert(df2.showString(10) ===
s"""+----------------+
|| a|
|+----------------+
||{1 -> a, 2 -> b}|
|+----------------+
|""".stripMargin)
val df3 = Seq(((1, "a"), 0), ((2, "b"), 0)).toDF("a", "b")
assert(df3.showString(10) ===
s"""+------+---+
|| a| b|
|+------+---+
||{1, a}| 0|
||{2, b}| 0|
|+------+---+
|""".stripMargin)
}
// An empty result prints just the header/frame, with no footer line.
test("SPARK-7327 show with empty dataFrame") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|+---+-----+
|""".stripMargin
assert(testData.select($"*").filter($"key" < 0).showString(1) === expectedAnswer)
}
test("SPARK-7327 show with empty dataFrame, vertical = true") {
assert(testData.select($"*").filter($"key" < 0).showString(1, vertical = true) === "(0 rows)\\n")
}
// Timestamps are rendered in the session-local timezone: switching the session
// to UTC shifts the displayed time (fixture was created in a UTC-8 zone,
// hence 00:00:00 -> 08:00:00); dates are unaffected.
test("SPARK-18350 show with session local timezone") {
val d = Date.valueOf("2016-12-01")
val ts = Timestamp.valueOf("2016-12-01 00:00:00")
val df = Seq((d, ts)).toDF("d", "ts")
val expectedAnswer = """+----------+-------------------+
||d |ts |
|+----------+-------------------+
||2016-12-01|2016-12-01 00:00:00|
|+----------+-------------------+
|""".stripMargin
assert(df.showString(1, truncate = 0) === expectedAnswer)
withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> "UTC") {
val expectedAnswer = """+----------+-------------------+
||d |ts |
|+----------+-------------------+
||2016-12-01|2016-12-01 08:00:00|
|+----------+-------------------+
|""".stripMargin
assert(df.showString(1, truncate = 0) === expectedAnswer)
}
}
test("SPARK-18350 show with session local timezone, vertical = true") {
val d = Date.valueOf("2016-12-01")
val ts = Timestamp.valueOf("2016-12-01 00:00:00")
val df = Seq((d, ts)).toDF("d", "ts")
val expectedAnswer = "-RECORD 0------------------\\n" +
" d | 2016-12-01 \\n" +
" ts | 2016-12-01 00:00:00 \\n"
assert(df.showString(1, truncate = 0, vertical = true) === expectedAnswer)
withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> "UTC") {
val expectedAnswer = "-RECORD 0------------------\\n" +
" d | 2016-12-01 \\n" +
" ts | 2016-12-01 08:00:00 \\n"
assert(df.showString(1, truncate = 0, vertical = true) === expectedAnswer)
}
}
// User-defined types must survive the Row -> InternalRow conversion done by
// createDataFrame; collect() would throw if the UDT were mishandled.
test("createDataFrame(RDD[Row], StructType) should convert UDTs (SPARK-6672)") {
val rowRDD = sparkContext.parallelize(Seq(Row(new ExamplePoint(1.0, 2.0))))
val schema = StructType(Array(StructField("point", new ExamplePointUDT(), false)))
val df = spark.createDataFrame(rowRDD, schema)
df.rdd.collect()
}
// avg over decimals must produce a decimal, not silently widen to double.
test("SPARK-6899: type should match when using codegen") {
checkAnswer(decimalData.agg(avg("a")), Row(new java.math.BigDecimal(2)))
}
// apply-style accessors: array(index), map(key), struct(field), including
// nested combinations where the index/key is itself an extracted value.
test("SPARK-7133: Implement struct, array, and map field accessor") {
assert(complexData.filter(complexData("a")(0) === 2).count() == 1)
assert(complexData.filter(complexData("m")("1") === 1).count() == 1)
assert(complexData.filter(complexData("s")("key") === 1).count() == 1)
assert(complexData.filter(complexData("m")(complexData("s")("value")) === 1).count() == 1)
assert(complexData.filter(complexData("a")(complexData("s")("key")) === 1).count() == 1)
}
test("SPARK-7551: support backticks for DataFrame attribute resolution") {
// Disable quoted-regex column names so backticks are parsed as identifier
// quoting rather than regexes.
withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
// Backticks quote a name segment containing dots; unquoted dots descend
// into nested structs.
val df = spark.read.json(Seq("""{"a.b": {"c": {"d..e": {"f": 1}}}}""").toDS())
checkAnswer(
df.select(df("`a.b`.c.`d..e`.`f`")),
Row(1)
)
// Spaces in name segments don't require quoting beyond the first segment.
val df2 = spark.read.json(Seq("""{"a b": {"c": {"d e": {"f": 1}}}}""").toDS())
checkAnswer(
df2.select(df2("`a b`.c.d e.f")),
Row(1)
)
// Malformed backtick usage must fail with a parse error in the attribute name.
def checkError(testFun: => Unit): Unit = {
val e = intercept[org.apache.spark.sql.AnalysisException] {
testFun
}
assert(e.getMessage.contains("syntax error in attribute name:"))
}
checkError(df("`abc.`c`"))
checkError(df("`abc`..d"))
checkError(df("`a`.b."))
checkError(df("`a.b`.c.`d"))
}
}
test("SPARK-7324 dropDuplicates") {
val testData = sparkContext.parallelize(
(2, 1, 2) :: (1, 1, 1) ::
(1, 2, 1) :: (2, 1, 2) ::
(2, 2, 2) :: (2, 2, 1) ::
(2, 1, 1) :: (1, 1, 2) ::
(1, 2, 2) :: (1, 2, 1) :: Nil).toDF("key", "value1", "value2")
checkAnswer(
testData.dropDuplicates(),
Seq(Row(2, 1, 2), Row(1, 1, 1), Row(1, 2, 1),
Row(2, 2, 2), Row(2, 1, 1), Row(2, 2, 1),
Row(1, 1, 2), Row(1, 2, 2)))
checkAnswer(
testData.dropDuplicates(Seq("key", "value1")),
Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))
checkAnswer(
testData.dropDuplicates(Seq("value1", "value2")),
Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))
checkAnswer(
testData.dropDuplicates(Seq("key")),
Seq(Row(2, 1, 2), Row(1, 1, 1)))
checkAnswer(
testData.dropDuplicates(Seq("value1")),
Seq(Row(2, 1, 2), Row(1, 2, 1)))
checkAnswer(
testData.dropDuplicates(Seq("value2")),
Seq(Row(2, 1, 2), Row(1, 1, 1)))
checkAnswer(
testData.dropDuplicates("key", "value1"),
Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))
}
test("SPARK-8621: support empty string column name") {
  // A column whose name is the empty string must still be resolvable.
  val emptyNamed = Seq(Tuple1(1)).toDF("").as("t")
  // Direct lookup by the empty name ...
  emptyNamed.col("")
  // ... and qualified lookup through the dataset alias, using empty backticks.
  emptyNamed.col("t.``")
}
test("SPARK-8797: sort by float column containing NaN should not crash") {
  // A handful of NaNs mixed into a thousand ordinary floats, in random order.
  val nans = Seq.fill(10)(Tuple1(Float.NaN))
  val ordinary = (1 to 1000).map(n => Tuple1(n.toFloat))
  val shuffled = Random.shuffle(nans ++ ordinary).toDF("a")
  // Sorting must tolerate NaN values without throwing.
  shuffled.orderBy("a").collect()
}
test("SPARK-8797: sort by double column containing NaN should not crash") {
  // Same scenario as the float variant above, but for doubles.
  val nans = Seq.fill(10)(Tuple1(Double.NaN))
  val ordinary = (1 to 1000).map(n => Tuple1(n.toDouble))
  val shuffled = Random.shuffle(nans ++ ordinary).toDF("a")
  // Sorting must tolerate NaN values without throwing.
  shuffled.orderBy("a").collect()
}
// NaN sorts above +Infinity and MaxValue, so max() over a column containing
// NaN must return NaN for both float and double.
test("NaN is greater than all other non-NaN numeric values") {
val maxDouble = Seq(Double.NaN, Double.PositiveInfinity, Double.MaxValue)
.map(Tuple1.apply).toDF("a").selectExpr("max(a)").first()
assert(java.lang.Double.isNaN(maxDouble.getDouble(0)))
val maxFloat = Seq(Float.NaN, Float.PositiveInfinity, Float.MaxValue)
.map(Tuple1.apply).toDF("a").selectExpr("max(a)").first()
assert(java.lang.Float.isNaN(maxFloat.getFloat(0)))
}
// The duplicate-column error must name every duplicated column and none of
// the unique ones. The writes fail at analysis, before touching "temp".
test("SPARK-8072: Better Exception for Duplicate Columns") {
// only one duplicate column present
val e = intercept[org.apache.spark.sql.AnalysisException] {
Seq((1, 2, 3), (2, 3, 4), (3, 4, 5)).toDF("column1", "column2", "column1")
.write.format("parquet").save("temp")
}
assert(e.getMessage.contains("Found duplicate column(s) when inserting into"))
assert(e.getMessage.contains("column1"))
assert(!e.getMessage.contains("column2"))
// multiple duplicate columns present
val f = intercept[org.apache.spark.sql.AnalysisException] {
Seq((1, 2, 3, 4, 5), (2, 3, 4, 5, 6), (3, 4, 5, 6, 7))
.toDF("column1", "column2", "column3", "column1", "column3")
.write.format("json").save("temp")
}
assert(f.getMessage.contains("Found duplicate column(s) when inserting into"))
assert(f.getMessage.contains("column1"))
assert(f.getMessage.contains("column3"))
assert(!f.getMessage.contains("column2"))
}
// insertInto succeeds for file-backed temp views but must raise a clear
// error for RDD-based views, non-leaf plans, and OneRowRelation.
test("SPARK-6941: Better error message for inserting into RDD-based Table") {
withTempDir { dir =>
withTempView("parquet_base", "json_base", "rdd_base", "indirect_ds", "one_row") {
val tempParquetFile = new File(dir, "tmp_parquet")
val tempJsonFile = new File(dir, "tmp_json")
val df = Seq(Tuple1(1)).toDF()
val insertion = Seq(Tuple1(2)).toDF("col")
// pass case: parquet table (HadoopFsRelation)
df.write.mode(SaveMode.Overwrite).parquet(tempParquetFile.getCanonicalPath)
val pdf = spark.read.parquet(tempParquetFile.getCanonicalPath)
pdf.createOrReplaceTempView("parquet_base")
insertion.write.insertInto("parquet_base")
// pass case: json table (InsertableRelation)
df.write.mode(SaveMode.Overwrite).json(tempJsonFile.getCanonicalPath)
val jdf = spark.read.json(tempJsonFile.getCanonicalPath)
jdf.createOrReplaceTempView("json_base")
insertion.write.mode(SaveMode.Overwrite).insertInto("json_base")
// error cases: insert into an RDD
df.createOrReplaceTempView("rdd_base")
val e1 = intercept[AnalysisException] {
insertion.write.insertInto("rdd_base")
}
assert(e1.getMessage.contains("Inserting into an RDD-based table is not allowed."))
// error case: insert into a logical plan that is not a LeafNode
val indirectDS = pdf.select("_1").filter($"_1" > 5)
indirectDS.createOrReplaceTempView("indirect_ds")
val e2 = intercept[AnalysisException] {
insertion.write.insertInto("indirect_ds")
}
assert(e2.getMessage.contains("Inserting into an RDD-based table is not allowed."))
// error case: insert into an OneRowRelation
Dataset.ofRows(spark, OneRowRelation()).createOrReplaceTempView("one_row")
val e3 = intercept[AnalysisException] {
insertion.write.insertInto("one_row")
}
assert(e3.getMessage.contains("Inserting into an RDD-based table is not allowed."))
}
}
}
test("SPARK-8608: call `show` on local DataFrame with random columns should return same value") {
  // Re-rendering the same seeded rand() column must be stable.
  val seededRand = testData.select(rand(33))
  assert(seededRand.showString(5) == seededRand.showString(5))
  // We will reuse the same Expression object for LocalRelation.
  val localSeededRand = (1 to 10).map(Tuple1.apply).toDF().select(rand(33))
  assert(localSeededRand.showString(5) == localSeededRand.showString(5))
}
test("SPARK-8609: local DataFrame with random columns should return same value after sort") {
  // Sorting twice by the same seeded rand() must yield identical orderings.
  checkAnswer(testData.sort(rand(33)), testData.sort(rand(33)))
  // We will reuse the same Expression object for LocalRelation.
  val localDF = (1 to 10).map(Tuple1.apply).toDF()
  checkAnswer(localDF.sort(rand(33)), localDF.sort(rand(33)))
}
// With a single partition, rand(seed) must draw the same sequence as
// XORShiftRandom(seed), so the sort order is fully predictable.
test("SPARK-9083: sort with non-deterministic expressions") {
val seed = 33
// repartition(1) pins the draw order to a single deterministic stream.
val df = (1 to 100).map(Tuple1.apply).toDF("i").repartition(1)
val random = new XORShiftRandom(seed)
// Pair each value with its expected random draw, then sort by the draw.
val expected = (1 to 100).map(_ -> random.nextDouble()).sortBy(_._2).map(_._1)
val actual = df.sort(rand(seed)).collect().map(_.getInt(0))
assert(expected === actual)
}
test("Sorting columns are not in Filter and Project") {
  // Column L is dropped by the projection yet must remain usable for ordering.
  val query = upperCaseData.filter($"N" > 1).select("N").filter($"N" < 6).orderBy($"L".asc)
  val expected = Row(2) :: Row(3) :: Row(4) :: Row(5) :: Nil
  checkAnswer(query, expected)
}
test("SPARK-9323: DataFrame.orderBy should support nested column name") {
  val nested = spark.read.json(Seq("""{"a": {"b": 1}}""").toDS())
  // "a.b" must resolve to the field inside the struct column a.
  checkAnswer(nested.orderBy("a.b"), Row(Row(1)))
}
test("SPARK-9950: correctly analyze grouping/aggregating on struct fields") {
  val structDF = Seq(("x", (1, 1)), ("y", (2, 2))).toDF("a", "b")
  // Grouping and aggregating on fields of the struct column b must resolve.
  val grouped = structDF.groupBy("b._1").agg(sum("b._2"))
  checkAnswer(grouped, Row(1, 1) :: Row(2, 2) :: Nil)
}
// Building and collecting this plan must not run any transformation on
// executors during planning.
test("SPARK-10093: Avoid transformations on executors") {
val df = Seq((1, 1)).toDF("a", "b")
df.where($"a" === 1)
.select($"a", $"b", struct($"b"))
.orderBy("a")
.select(struct($"b"))
.collect()
}
// load() with multiple paths must read them all, even when a directory name
// itself contains a comma.
test("SPARK-10185: Read multiple Hadoop Filesystem paths and paths with a comma in it") {
withTempDir { dir =>
val df1 = Seq((1, 22)).toDF("a", "b")
val dir1 = new File(dir, "dir,1").getCanonicalPath
df1.write.format("json").save(dir1)
val df2 = Seq((2, 23)).toDF("a", "b")
val dir2 = new File(dir, "dir2").getCanonicalPath
df2.write.format("json").save(dir2)
checkAnswer(spark.read.format("json").load(dir1, dir2),
Row(1, 22) :: Row(2, 23) :: Nil)
// Single-path load must still work on the comma-containing directory.
checkAnswer(spark.read.format("json").load(dir1),
Row(1, 22) :: Nil)
}
}
// User aliases that collide with the analyzer's internal names must not
// confuse resolution of the internally generated attributes.
test("Alias uses internally generated names 'aggOrder' and 'havingCondition'") {
val df = Seq(1 -> 2).toDF("i", "j")
val query1 = df.groupBy("i")
.agg(max("j").as("aggOrder"))
.orderBy(sum("j"))
checkAnswer(query1, Row(1, 2))
// In the plan, there are two attributes having the same name 'havingCondition'
// One is a user-provided alias name; another is an internally generated one.
val query2 = df.groupBy("i")
.agg(max("j").as("havingCondition"))
.where(sum("j") > 0)
.orderBy($"havingCondition".asc)
checkAnswer(query2, Row(1, 2))
}
// rand(0) must be evaluated once per row, not re-evaluated per branch of the
// self-join; both sides of a matched row must carry the same draw.
test("SPARK-10316: respect non-deterministic expressions in PhysicalOperation") {
withTempDir { dir =>
(1 to 10).toDF("id").write.mode(SaveMode.Overwrite).json(dir.getCanonicalPath)
val input = spark.read.json(dir.getCanonicalPath)
val df = input.select($"id", rand(0).as("r"))
df.as("a").join(df.filter($"r" < 0.5).as("b"), $"a.id" === $"b.id").collect().foreach { row =>
assert(row.getDouble(1) - row.getDouble(3) === 0.0 +- 0.001)
}
}
}
// Casting a named column must keep the original name, even through a chain
// of casts.
test("SPARK-10743: keep the name of expression if possible when do cast") {
val df = (1 to 10).map(Tuple1.apply).toDF("i").as("src")
assert(df.select($"src.i".cast(StringType)).columns.head === "i")
assert(df.select($"src.i".cast(StringType).cast(IntegerType)).columns.head === "i")
}
// With case sensitivity off, a filter on "yEAr" must match the partition
// column written as "year".
test("SPARK-11301: fix case sensitivity for filter on partitioned columns") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
withTempPath { path =>
Seq(2012 -> "a").toDF("year", "val").write.partitionBy("year").parquet(path.getAbsolutePath)
val df = spark.read.parquet(path.getAbsolutePath)
checkAnswer(df.filter($"yEAr" > 2000).select($"val"), Row("a"))
}
}
}
/**
 * Verifies that there is no Exchange between the Aggregations for `df`
 */
private def verifyNonExchangingAgg(df: DataFrame) = {
  // Flips on at the first aggregate and off at the second; any other operator
  // observed while the flag is on sits between the two aggregations.
  var insideAggPair: Boolean = false
  df.queryExecution.executedPlan.foreach {
    case _: HashAggregateExec =>
      insideAggPair = !insideAggPair
    case _ =>
      if (insideAggPair) {
        fail("Should not have operators between the two aggregations")
      }
  }
}
/**
 * Verifies that there is an Exchange between the Aggregations for `df`
 */
private def verifyExchangingAgg(df: DataFrame) = {
  // Set when an aggregate is seen, cleared by a shuffle exchange; hitting a
  // second aggregate while still set means the two were back to back.
  var pendingAgg: Boolean = false
  df.queryExecution.executedPlan.foreach {
    case _: HashAggregateExec =>
      if (pendingAgg) {
        fail("Should not have back to back Aggregates")
      }
      pendingAgg = true
    case _: ShuffleExchangeExec => pendingAgg = false
    case _ =>
  }
}
// Exercises repartition-by-expression plus sortWithinPartitions: partition
// counts, exchange elimination for matching groupings, and per-partition
// ordering guarantees.
test("distributeBy and localSort") {
val original = testData.repartition(1)
assert(original.rdd.partitions.length == 1)
val df = original.repartition(5, $"key")
assert(df.rdd.partitions.length == 5)
// Repartitioning must not change the data itself.
checkAnswer(original.select(), df.select())
val df2 = original.repartition(10, $"key")
assert(df2.rdd.partitions.length == 10)
checkAnswer(original.select(), df2.select())
// Group by the column we are distributed by. This should generate a plan with no exchange
// between the aggregates
val df3 = testData.repartition($"key").groupBy("key").count()
verifyNonExchangingAgg(df3)
verifyNonExchangingAgg(testData.repartition($"key", $"value")
.groupBy("key", "value").count())
// Grouping by just the first distributeBy expr, need to exchange.
verifyExchangingAgg(testData.repartition($"key", $"value")
.groupBy("key").count())
val data = spark.sparkContext.parallelize(
(1 to 100).map(i => TestData2(i % 10, i))).toDF()
// Distribute and order by.
val df4 = data.repartition($"a").sortWithinPartitions($"b".desc)
// Walk each partition and verify that it is sorted descending and does not contain all
// the values.
df4.rdd.foreachPartition { p =>
// Skip empty partition
if (p.hasNext) {
var previousValue: Int = -1
var allSequential: Boolean = true
p.foreach { r =>
val v: Int = r.getInt(1)
if (previousValue != -1) {
// Descending order: each value must be <= its predecessor.
if (previousValue < v) throw new SparkException("Partition is not ordered.")
// Track whether every step is exactly -1; if so, the partition would
// hold a contiguous run, i.e. the data was globally ordered.
if (v + 1 != previousValue) allSequential = false
}
previousValue = v
}
if (allSequential) throw new SparkException("Partition should not be globally ordered")
}
}
// Distribute and order by with multiple order bys
val df5 = data.repartition(2, $"a").sortWithinPartitions($"b".asc, $"a".asc)
// Walk each partition and verify that it is sorted ascending
df5.rdd.foreachPartition { p =>
var previousValue: Int = -1
var allSequential: Boolean = true
p.foreach { r =>
val v: Int = r.getInt(1)
if (previousValue != -1) {
if (previousValue > v) throw new SparkException("Partition is not ordered.")
if (v - 1 != previousValue) allSequential = false
}
previousValue = v
}
if (allSequential) throw new SparkException("Partition should not be all sequential")
}
// Distribute into one partition and order by. This partition should contain all the values.
val df6 = data.repartition(1, $"a").sortWithinPartitions("b")
// Walk each partition and verify that it is sorted ascending and not globally sorted.
df6.rdd.foreachPartition { p =>
var previousValue: Int = -1
var allSequential: Boolean = true
p.foreach { r =>
val v: Int = r.getInt(1)
if (previousValue != -1) {
if (previousValue > v) throw new SparkException("Partition is not ordered.")
if (v - 1 != previousValue) allSequential = false
}
previousValue = v
}
// Single partition: values 1..100 must all be present and contiguous.
if (!allSequential) throw new SparkException("Partition should contain all sequential values")
}
}
test("fix case sensitivity of partition by") {
  withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
    withTempPath { dir =>
      val location = dir.getAbsolutePath
      // Write with one casing of the partition column, select with another.
      Seq(2012 -> "a").toDF("year", "val").write.partitionBy("yEAr").parquet(location)
      checkAnswer(spark.read.parquet(location).select("YeaR"), Row(2012))
    }
  }
}
// This test case is to verify a bug when making a new instance of LogicalRDD.
test("SPARK-11633: LogicalRDD throws TreeNode Exception: Failed to Copy Node") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val rdd = sparkContext.makeRDD(Seq(Row(1, 3), Row(2, 1)))
// "F1" deliberately mismatches the schema's "f1" casing (case sensitivity off).
val df = spark.createDataFrame(
rdd,
new StructType().add("f1", IntegerType).add("f2", IntegerType))
.select($"F1", $"f2".as("f2"))
val df1 = df.as("a")
val df2 = df.as("b")
// Self-join on the aliased copies must copy the LogicalRDD node cleanly.
checkAnswer(df1.join(df2, $"a.f2" === $"b.f2"), Row(1, 3, 1, 3) :: Row(2, 1, 2, 1) :: Nil)
}
}
// Column names containing $, ., ^ and ' must survive select("*") and rename.
test("SPARK-10656: completely support special chars") {
val df = Seq(1 -> "a").toDF("i_$.a", "d^'a.")
checkAnswer(df.select(df("*")), Row(1, "a"))
checkAnswer(df.withColumnRenamed("d^'a.", "a"), Row(1, "a"))
}
// Boxed-type UDFs may receive and return null; primitive-typed UDFs yield
// null output for null input instead of being invoked.
test("SPARK-11725: correctly handle null inputs for ScalaUDF") {
val df = sparkContext.parallelize(Seq(
java.lang.Integer.valueOf(22) -> "John",
null.asInstanceOf[java.lang.Integer] -> "Lucy")).toDF("age", "name")
// passing null into the UDF that could handle it
val boxedUDF = udf[java.lang.Integer, java.lang.Integer] {
(i: java.lang.Integer) => if (i == null) -10 else null
}
checkAnswer(df.select(boxedUDF($"age")), Row(null) :: Row(-10) :: Nil)
spark.udf.register("boxedUDF",
(i: java.lang.Integer) => (if (i == null) -10 else null): java.lang.Integer)
checkAnswer(sql("select boxedUDF(null), boxedUDF(-1)"), Row(-10, null) :: Nil)
// Primitive parameter: the null row short-circuits to null output.
val primitiveUDF = udf((i: Int) => i * 2)
checkAnswer(df.select(primitiveUDF($"age")), Row(44) :: Row(null) :: Nil)
}
// toString shows at most two column types and elides the rest (recursively
// inside structs) as "... N more field(s)".
test("SPARK-12398 truncated toString") {
val df1 = Seq((1L, "row1")).toDF("id", "name")
assert(df1.toString() === "[id: bigint, name: string]")
val df2 = Seq((1L, "c2", false)).toDF("c1", "c2", "c3")
assert(df2.toString === "[c1: bigint, c2: string ... 1 more field]")
val df3 = Seq((1L, "c2", false, 10)).toDF("c1", "c2", "c3", "c4")
assert(df3.toString === "[c1: bigint, c2: string ... 2 more fields]")
val df4 = Seq((1L, Tuple2(1L, "val"))).toDF("c1", "c2")
assert(df4.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string>]")
val df5 = Seq((1L, Tuple2(1L, "val"), 20.0)).toDF("c1", "c2", "c3")
assert(df5.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string> ... 1 more field]")
val df6 = Seq((1L, Tuple2(1L, "val"), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(df6.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string> ... 2 more fields]")
val df7 = Seq((1L, Tuple3(1L, "val", 2), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(
df7.toString ===
"[c1: bigint, c2: struct<_1: bigint, _2: string ... 1 more field> ... 2 more fields]")
val df8 = Seq((1L, Tuple7(1L, "val", 2, 3, 4, 5, 6), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(
df8.toString ===
"[c1: bigint, c2: struct<_1: bigint, _2: string ... 5 more fields> ... 2 more fields]")
// Nested struct inside a struct: truncation applies at every level.
val df9 =
Seq((1L, Tuple4(1L, Tuple4(1L, 2L, 3L, 4L), 2L, 3L), 20.0, 1)).toDF("c1", "c2", "c3", "c4")
assert(
df9.toString ===
"[c1: bigint, c2: struct<_1: bigint," +
" _2: struct<_1: bigint," +
" _2: bigint ... 2 more fields> ... 2 more fields> ... 2 more fields]")
}
// Identical exchange subtrees must be planned once and reused, for both
// shuffle and broadcast exchanges.
test("reuse exchange") {
  withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "2") {
    val df = spark.range(100).toDF()
    val join = df.join(df, "id")
    checkAnswer(join, df)
    // Self-join: one real shuffle, the other side reuses it.
    // (Consistently use ScalaTest's `===` for informative failure messages.)
    assert(
      collect(join.queryExecution.executedPlan) {
        case e: ShuffleExchangeExec => true }.size === 1)
    assert(
      collect(join.queryExecution.executedPlan) { case e: ReusedExchangeExec => true }.size === 1)
    val broadcasted = broadcast(join)
    val join2 = join.join(broadcasted, "id").join(broadcasted, "id")
    checkAnswer(join2, df)
    // Still one shuffle and one broadcast; all other exchanges are reused.
    assert(
      collect(join2.queryExecution.executedPlan) {
        case e: ShuffleExchangeExec => true }.size === 1)
    assert(
      collect(join2.queryExecution.executedPlan) {
        case e: BroadcastExchangeExec => true }.size === 1)
    assert(
      collect(join2.queryExecution.executedPlan) { case e: ReusedExchangeExec => true }.size === 4)
  }
}
test("sameResult() on aggregate") {
  val base = spark.range(100)
  val count1 = base.groupBy().count()
  val count2 = base.groupBy().count()
  // two aggregates with different ExprId within them should have same result
  assert(count1.queryExecution.executedPlan.sameResult(count2.queryExecution.executedPlan))
  // A different aggregate function is not the same result ...
  val summed = base.groupBy().sum()
  assert(!count1.queryExecution.executedPlan.sameResult(summed.queryExecution.executedPlan))
  // ... nor is the same aggregate over a different source range.
  val countOther = spark.range(101).groupBy().count()
  assert(!count1.queryExecution.executedPlan.sameResult(countOther.queryExecution.executedPlan))
}
test("SPARK-12512: support `.` in column name for withColumn()") {
  // Dots in these names are literal characters, not struct nesting.
  val dotted = Seq("a" -> "b").toDF("col.a", "col.b")
  checkAnswer(dotted.select(dotted("*")), Row("a", "b"))
  // Replacing an existing dotted column ...
  checkAnswer(dotted.withColumn("col.a", lit("c")), Row("c", "b"))
  // ... and appending a brand-new dotted column.
  checkAnswer(dotted.withColumn("col.c", lit("c")), Row("a", "b", "c"))
}
test("SPARK-12841: cast in filter") {
  val df = Seq(1 -> "a").toDF("i", "j")
  // A cast inside the filter predicate must resolve and evaluate correctly.
  checkAnswer(df.filter($"i".cast(StringType) === "1"), Row(1, "a"))
}
// Temp-view names must be valid identifiers unless backquoted.
test("SPARK-12982: Add table name validation in temp table registration") {
val df = Seq("foo", "bar").map(Tuple1.apply).toDF("col")
// invalid table names
Seq("11111", "t~", "#$@sum", "table!#").foreach { name =>
withTempView(name) {
val m = intercept[AnalysisException](df.createOrReplaceTempView(name)).getMessage
assert(m.contains(s"Invalid view name: $name"))
}
}
// valid table names
// The same otherwise-invalid names become legal when wrapped in backticks.
Seq("table1", "`11111`", "`t~`", "`#$@sum`", "`table!#`").foreach { name =>
withTempView(name) {
df.createOrReplaceTempView(name)
}
}
}
test("assertAnalyzed shouldn't replace original stack trace") {
  val analysisError = intercept[AnalysisException] {
    // Aggregating b without grouping by it is an analysis error.
    spark.range(1).select($"id" as "a", $"id" as "b").groupBy("a").agg($"b")
  }
  // The top stack frame must not point at QueryExecution internals.
  assert(analysisError.getStackTrace.head.getClassName != classOf[QueryExecution].getName)
}
// Loading a mix of existing and non-existent (non-globbed) paths must fail
// with a "Path does not exist" message.
test("SPARK-13774: Check error message for non existent path without globbed paths") {
  val uuid = UUID.randomUUID().toString
  val baseDir = Utils.createTempDir()
  try {
    val e = intercept[AnalysisException] {
      spark.read.format("csv").load(
        new File(baseDir, "file").getAbsolutePath,
        new File(baseDir, "file2").getAbsolutePath,
        new File(uuid, "file3").getAbsolutePath,
        uuid).rdd
    }
    assert(e.getMessage.startsWith("Path does not exist"))
  } finally {
    // Was previously an empty finally block, leaking the temp dir; clean up
    // like the globbed-path variant of this test does.
    Utils.deleteRecursively(baseDir)
  }
}
// Globbed loads must fail with "Path does not exist" both when the base path
// is missing and when the glob matches nothing.
test("SPARK-13774: Check error message for not existent globbed paths") {
// Non-existent initial path component:
val nonExistentBasePath = "/" + UUID.randomUUID().toString
assert(!new File(nonExistentBasePath).exists())
val e = intercept[AnalysisException] {
spark.read.format("text").load(s"$nonExistentBasePath/*")
}
assert(e.getMessage.startsWith("Path does not exist"))
// Existent initial path component, but no matching files:
val baseDir = Utils.createTempDir()
val childDir = Utils.createTempDir(baseDir.getAbsolutePath)
assert(childDir.exists())
try {
val e1 = intercept[AnalysisException] {
spark.read.json(s"${baseDir.getAbsolutePath}/*/*-xyz.json").rdd
}
assert(e1.getMessage.startsWith("Path does not exist"))
} finally {
Utils.deleteRecursively(baseDir)
}
}
test("SPARK-15230: distinct() does not handle column name with dot properly") {
  // The dotted name is one flat column; distinct() must not treat it as nested.
  val dotted = Seq(1, 1, 2).toDF("column.with.dot")
  checkAnswer(dotted.distinct(), Row(1) :: Row(2) :: Nil)
}
test("SPARK-16181: outer join with isNull filter") {
  val left = Seq("x").toDF("col")
  val right = Seq("y").toDF("col").withColumn("new", lit(true))
  // No keys match, so the right side comes back null-padded ...
  val joined = left.join(right, left("col") === right("col"), "left_outer")
  checkAnswer(joined, Row("x", null, null))
  // ... and filtering on isNull of the padded column must keep the row.
  checkAnswer(joined.filter($"new".isNull), Row("x", null, null))
}
// 201 columns — just over 200, presumably a codegen-related width threshold
// (verify) — must still persist and be readable.
test("SPARK-16664: persist with more than 200 columns") {
val size = 201L
val rdd = sparkContext.makeRDD(Seq(Row.fromSeq(Seq.range(0, size))))
val schemas = List.range(0, size).map(a => StructField("name" + a, LongType, true))
val df = spark.createDataFrame(rdd, StructType(schemas))
// Spot-check a middle column of the persisted row.
assert(df.persist.take(1).apply(0).toSeq(100).asInstanceOf[Long] == 100)
}
test("SPARK-17409: Do Not Optimize Query in CTAS (Data source tables) More Than Once") {
withTable("bar") {
withTempView("foo") {
withSQLConf(SQLConf.DEFAULT_DATA_SOURCE_NAME.key -> "json") {
sql("select 0 as id").createOrReplaceTempView("foo")
val df = sql("select * from foo group by id")
// If we optimize the query in CTAS more than once, the following saveAsTable will fail
// with the error: `GROUP BY position 0 is not in select list (valid range is [1, 1])`
df.write.mode("overwrite").saveAsTable("bar")
checkAnswer(spark.table("bar"), Row(0) :: Nil)
val tableMetadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier("bar"))
assert(tableMetadata.provider == Some("json"),
"the expected table is a data source table using json")
}
}
}
}
// Sampling with replacement can emit the same input row multiple times; each
// emitted row must be an independent copy, so the generated ids stay unique.
test("copy results for sampling with replacement") {
val df = Seq((1, 0), (2, 0), (3, 0)).toDF("a", "b")
val sampleDf = df.sample(true, 2.00)
val d = sampleDf.withColumn("c", monotonically_increasing_id).select($"c").collect
assert(d.size == d.distinct.size)
}
/**
 * Runs `expr` inside an `isnotnull(...)` filter over `df` and checks that
 * exactly the attributes in `expectedNonNullableColumns` were marked
 * non-nullable in the FilterExec's output.
 */
private def verifyNullabilityInFilterExec(
df: DataFrame,
expr: String,
expectedNonNullableColumns: Seq[String]): Unit = {
val dfWithFilter = df.where(s"isnotnull($expr)").selectExpr(expr)
dfWithFilter.queryExecution.executedPlan.collect {
// When the child expression in isnotnull is null-intolerant (i.e. any null input will
// result in null output), the involved columns are converted to not nullable;
// otherwise, no change should be made.
case e: FilterExec =>
assert(e.output.forall { o =>
if (expectedNonNullableColumns.contains(o.name)) !o.nullable else o.nullable
})
}
}
// Null-tolerant expressions (Rand, coalesce, casts over coalesce) must not
// cause any column to be marked non-nullable.
test("SPARK-17957: no change on nullability in FilterExec output") {
val df = sparkContext.parallelize(Seq(
null.asInstanceOf[java.lang.Integer] -> java.lang.Integer.valueOf(3),
java.lang.Integer.valueOf(1) -> null.asInstanceOf[java.lang.Integer],
java.lang.Integer.valueOf(2) -> java.lang.Integer.valueOf(4))).toDF()
verifyNullabilityInFilterExec(df,
expr = "Rand()", expectedNonNullableColumns = Seq.empty[String])
verifyNullabilityInFilterExec(df,
expr = "coalesce(_1, _2)", expectedNonNullableColumns = Seq.empty[String])
verifyNullabilityInFilterExec(df,
expr = "coalesce(_1, 0) + Rand()", expectedNonNullableColumns = Seq.empty[String])
verifyNullabilityInFilterExec(df,
expr = "cast(coalesce(cast(coalesce(_1, _2) as double), 0.0) as int)",
expectedNonNullableColumns = Seq.empty[String])
}
// Null-intolerant expressions (arithmetic, bare column references) must mark
// exactly the columns they touch as non-nullable.
test("SPARK-17957: set nullability to false in FilterExec output") {
val df = sparkContext.parallelize(Seq(
null.asInstanceOf[java.lang.Integer] -> java.lang.Integer.valueOf(3),
java.lang.Integer.valueOf(1) -> null.asInstanceOf[java.lang.Integer],
java.lang.Integer.valueOf(2) -> java.lang.Integer.valueOf(4))).toDF()
verifyNullabilityInFilterExec(df,
expr = "_1 + _2 * 3", expectedNonNullableColumns = Seq("_1", "_2"))
verifyNullabilityInFilterExec(df,
expr = "_1 + _2", expectedNonNullableColumns = Seq("_1", "_2"))
verifyNullabilityInFilterExec(df,
expr = "_1", expectedNonNullableColumns = Seq("_1"))
// `constructIsNotNullConstraints` infers the IsNotNull(_2) from IsNotNull(_2 + Rand())
// Thus, we are able to set nullability of _2 to false.
// If IsNotNull(_2) is not given from `constructIsNotNullConstraints`, the impl of
// isNullIntolerant in `FilterExec` needs an update for more advanced inference.
verifyNullabilityInFilterExec(df,
expr = "_2 + Rand()", expectedNonNullableColumns = Seq("_2"))
verifyNullabilityInFilterExec(df,
expr = "_2 * 3 + coalesce(_1, 0)", expectedNonNullableColumns = Seq("_2"))
verifyNullabilityInFilterExec(df,
expr = "cast((_1 + _2) as boolean)", expectedNonNullableColumns = Seq("_1", "_2"))
}
test("SPARK-17897: Fixed IsNotNull Constraint Inference Rule") {
  val keyed = Seq[java.lang.Integer](1, null).toDF("key")
  // Negated isNotNull must keep only the null row, both on the bare column ...
  checkAnswer(keyed.filter(!$"key".isNotNull), Row(null))
  // ... and when the column is wrapped in a null-propagating negation.
  checkAnswer(keyed.filter(!(- $"key").isNotNull), Row(null))
}
test("SPARK-17957: outer join + na.fill") {
  withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
    val left = Seq((1, 2), (2, 3)).toDF("a", "b")
    val right = Seq((2, 5), (3, 4)).toDF("a", "c")
    // The outer join produces nulls, which na.fill replaces with 0.
    val filled = left.join(right, Seq("a"), "outer").na.fill(0)
    val extra = Seq((3, 1)).toDF("a", "d")
    // Joining the filled result again must see the filled values.
    checkAnswer(filled.join(extra, "a"), Row(3, 0, 4, 1))
  }
}
// Comparing two array columns whose element types differ only in
// containsNull must type-check and evaluate.
test("SPARK-18070 binary operator should not consider nullability when comparing input types") {
val rows = Seq(Row(Seq(1), Seq(1)))
val schema = new StructType()
.add("array1", ArrayType(IntegerType))
.add("array2", ArrayType(IntegerType, containsNull = false))
val df = spark.createDataFrame(spark.sparkContext.makeRDD(rows), schema)
assert(df.filter($"array1" === $"array2").count() == 1)
}
test("SPARK-17913: compare long and string type column may return confusing result") {
  // The second pair differs only in the last digits of a 17-digit number;
  // the comparison must detect that difference (expected: true then false).
  val pairs = Seq(123L -> "123", 19157170390056973L -> "19157170390056971").toDF("i", "j")
  checkAnswer(pairs.select($"i" === $"j"), Row(true) :: Row(false) :: Nil)
}
test("SPARK-19691 Calculating percentile of decimal column fails with ClassCastException") {
  // percentile() over a DECIMAL column must evaluate without a ClassCastException.
  val percentileDF = spark.range(1)
    .selectExpr("CAST(id as DECIMAL) as x")
    .selectExpr("percentile(x, 0.5)")
  checkAnswer(percentileDF, Row(BigDecimal(0)) :: Nil)
}
// The outer-join optimizer must not NPE when the join output feeds a filter
// mixing a join-side column with a UDF-derived column.
test("SPARK-20359: catalyst outer join optimization should not throw npe") {
val df1 = Seq("a", "b", "c").toDF("x")
.withColumn("y", udf{ (x: String) => x.substring(0, 1) + "!" }.apply($"x"))
val df2 = Seq("a", "b").toDF("x1")
df1
.join(df2, df1("x") === df2("x1"), "left_outer")
.filter($"x1".isNotNull || !$"y".isin("a!"))
.count
}
// The fix of SPARK-21720 avoid an exception regarding JVM code size limit
// TODO: When we make a threshold of splitting statements (1024) configurable,
// we will re-enable this with max threshold to cause an exception
// See https://github.com/apache/spark/pull/18972/files#r150223463
ignore("SPARK-19372: Filter can be executed w/o generated code due to JVM code size limit") {
val N = 400
val rows = Seq(Row.fromSeq(Seq.fill(N)("string")))
val schema = StructType(Seq.tabulate(N)(i => StructField(s"_c$i", StringType)))
val df = spark.createDataFrame(spark.sparkContext.makeRDD(rows), schema)
// Build a 400-way OR so the generated filter method exceeds the JVM's
// 64 KiB bytecode limit.
val filter = (0 until N)
.foldLeft(lit(false))((e, index) => e.or(df.col(df.columns(index)) =!= "string"))
// With fallback on, the filter runs interpreted instead of failing.
withSQLConf(SQLConf.CODEGEN_FALLBACK.key -> "true") {
df.filter(filter).count()
}
// With fallback off, the oversized generated method must raise.
withSQLConf(SQLConf.CODEGEN_FALLBACK.key -> "false") {
val e = intercept[SparkException] {
df.filter(filter).count()
}.getMessage
assert(e.contains("grows beyond 64 KiB"))
}
}
test("SPARK-20897: cached self-join should not fail") {
  // force to plan sort merge join
  withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "0") {
    val base = Seq(1 -> "a").toDF("i", "j")
    val leftSide = base.as("t1")
    val rightSide = base.as("t2")
    // Caching the self-join and counting must succeed.
    assert(leftSide.join(rightSide, $"t1.i" === $"t2.i").cache().count() == 1)
  }
}
test("order-by ordinal.") {
  // Literal ordinals 1, 2, 3 refer to positions in the select list.
  val ordered = testData2.select(lit(7), $"a", $"b").orderBy(lit(1), lit(2), lit(3))
  val expected =
    Seq(Row(7, 1, 1), Row(7, 1, 2), Row(7, 2, 1), Row(7, 2, 2), Row(7, 3, 1), Row(7, 3, 2))
  checkAnswer(ordered, expected)
}
// describe()'s mean over a high-scale decimal column must not overflow to null.
test("SPARK-22271: mean overflows and returns null for some decimal variables") {
val d = 0.034567890
val df = Seq(d, d, d, d, d, d, d, d, d, d).toDF("DecimalCol")
val result = df.select($"DecimalCol" cast DecimalType(38, 33))
.select(col("DecimalCol")).describe()
val mean = result.select("DecimalCol").where($"summary" === "mean")
assert(mean.collect().toSet === Set(Row("0.0345678900000000000000000000000000000")))
}
// A CaseWhen with ~30 branches must still go through whole-stage codegen.
test("SPARK-22520: support code generation for large CaseWhen") {
val N = 30
var expr1 = when($"id" === lit(0), 0)
var expr2 = when($"id" === lit(0), 10)
(1 to N).foreach { i =>
expr1 = expr1.when($"id" === lit(i), -i)
expr2 = expr2.when($"id" === lit(i + 10), i)
}
val df = spark.range(1).select(expr1, expr2.otherwise(0))
checkAnswer(df, Row(0, 10) :: Nil)
// The executed plan must be a single whole-stage-codegen node.
assert(df.queryExecution.executedPlan.isInstanceOf[WholeStageCodegenExec])
}
// CaseWhen/If branches whose struct/array/map operands disagree on nested
// nullability must unify to the nullable type instead of failing or dropping
// nulls, in both interpreted and codegen evaluation.
test("SPARK-24165: CaseWhen/If - nullability of nested types") {
val rows = new java.util.ArrayList[Row]()
rows.add(Row(true, ("x", 1), Seq("x", "y"), Map(0 -> "x")))
rows.add(Row(false, (null, 2), Seq(null, "z"), Map(0 -> null)))
val schema = StructType(Seq(
StructField("cond", BooleanType, true),
StructField("s", StructType(Seq(
StructField("val1", StringType, true),
StructField("val2", IntegerType, false)
)), false),
StructField("a", ArrayType(StringType, true)),
StructField("m", MapType(IntegerType, StringType, true))
))
val sourceDF = spark.createDataFrame(rows, schema)
// Each helper pairs a literal branch (non-nullable nested type) with a
// column branch (nullable nested type) and extracts the nullable element.
def structWhenDF: DataFrame = sourceDF
.select(when($"cond",
struct(lit("a").as("val1"), lit(10).as("val2"))).otherwise($"s") as "res")
.select($"res".getField("val1"))
def arrayWhenDF: DataFrame = sourceDF
.select(when($"cond", array(lit("a"), lit("b"))).otherwise($"a") as "res")
.select($"res".getItem(0))
def mapWhenDF: DataFrame = sourceDF
.select(when($"cond", map(lit(0), lit("a"))).otherwise($"m") as "res")
.select($"res".getItem(0))
// Same three cases expressed through SQL if(...) instead of when/otherwise.
def structIfDF: DataFrame = sourceDF
.select(expr("if(cond, struct('a' as val1, 10 as val2), s)") as "res")
.select($"res".getField("val1"))
def arrayIfDF: DataFrame = sourceDF
.select(expr("if(cond, array('a', 'b'), a)") as "res")
.select($"res".getItem(0))
def mapIfDF: DataFrame = sourceDF
.select(expr("if(cond, map(0, 'a'), m)") as "res")
.select($"res".getItem(0))
def checkResult(): Unit = {
checkAnswer(structWhenDF, Seq(Row("a"), Row(null)))
checkAnswer(arrayWhenDF, Seq(Row("a"), Row(null)))
checkAnswer(mapWhenDF, Seq(Row("a"), Row(null)))
checkAnswer(structIfDF, Seq(Row("a"), Row(null)))
checkAnswer(arrayIfDF, Seq(Row("a"), Row(null)))
checkAnswer(mapIfDF, Seq(Row("a"), Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
checkResult()
// Test with cached relation, the Project will be evaluated with codegen
sourceDF.cache()
checkResult()
}
test("Uuid expressions should produce same results at retries in the same DataFrame") {
  // Re-evaluating the same DataFrame must yield identical UUID values.
  val withUuid = spark.range(1).select($"id", new Column(Uuid()))
  checkAnswer(withUuid, withUuid.collect())
}
test("SPARK-24313: access map with binary keys") {
  // Look up a binary-keyed map entry using an equal (by content) byte array.
  val binaryKey = Array[Byte](1.toByte)
  val mapWithBinaryKey = map(lit(binaryKey), lit(1))
  checkAnswer(spark.range(1).select(mapWithBinaryKey.getItem(binaryKey)), Row(1))
}
// df("col") references must behave like col("col") even when the referenced
// column is dropped by the intervening select.
test("SPARK-24781: Using a reference from Dataset in Filter/Sort") {
val df = Seq(("test1", 0), ("test2", 1)).toDF("name", "id")
val filter1 = df.select(df("name")).filter(df("id") === 0)
val filter2 = df.select(col("name")).filter(col("id") === 0)
checkAnswer(filter1, filter2.collect())
val sort1 = df.select(df("name")).orderBy(df("id"))
val sort2 = df.select(col("name")).orderBy(col("id"))
checkAnswer(sort1, sort2.collect())
}
// Same equivalence after aggregation, with grouping columns not retained.
test("SPARK-24781: Using a reference not in aggregation in Filter/Sort") {
withSQLConf(SQLConf.DATAFRAME_RETAIN_GROUP_COLUMNS.key -> "false") {
val df = Seq(("test1", 0), ("test2", 1)).toDF("name", "id")
val aggPlusSort1 = df.groupBy(df("name")).agg(count(df("name"))).orderBy(df("name"))
val aggPlusSort2 = df.groupBy(col("name")).agg(count(col("name"))).orderBy(col("name"))
checkAnswer(aggPlusSort1, aggPlusSort2.collect())
val aggPlusFilter1 = df.groupBy(df("name")).agg(count(df("name"))).filter(df("name") === 0)
val aggPlusFilter2 = df.groupBy(col("name")).agg(count(col("name"))).filter(col("name") === 0)
checkAnswer(aggPlusFilter1, aggPlusFilter2.collect())
}
}
test("SPARK-25159: json schema inference should only trigger one job") {
withTempPath { path =>
// This test is to prove that the `JsonInferSchema` does not use `RDD#toLocalIterator` which
// triggers one Spark job per RDD partition.
Seq(1 -> "a", 2 -> "b").toDF("i", "p")
// The data set has 2 partitions, so Spark will write at least 2 json files.
// Use a non-splittable compression (gzip), to make sure the json scan RDD has at least 2
// partitions.
.write.partitionBy("p").option("compression", "gzip").json(path.getCanonicalPath)
// Count every job that finishes while schema inference runs.
val numJobs = new AtomicLong(0)
sparkContext.addSparkListener(new SparkListener {
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
numJobs.incrementAndGet()
}
})
val df = spark.read.json(path.getCanonicalPath)
assert(df.columns === Array("i", "p"))
// Drain listener events before asserting the job count.
spark.sparkContext.listenerBus.waitUntilEmpty()
assert(numJobs.get() == 1L)
}
}
test("SPARK-25402 Null handling in BooleanSimplification") {
val schema = StructType.fromDDL("a boolean, b int")
val rows = Seq(Row(null, 1))
val rdd = sparkContext.parallelize(rows)
val df = spark.createDataFrame(rdd, schema)
checkAnswer(df.where("(NOT a) OR a"), Seq.empty)
}
test("SPARK-25714 Null handling in BooleanSimplification") {
withSQLConf(SQLConf.OPTIMIZER_EXCLUDED_RULES.key -> ConvertToLocalRelation.ruleName) {
val df = Seq(("abc", 1), (null, 3)).toDF("col1", "col2")
checkAnswer(
df.filter("col1 = 'abc' OR (col1 != 'abc' AND col2 == 3)"),
Row ("abc", 1))
}
}
test("SPARK-25816 ResolveReferences works with nested extractors") {
val df = Seq((1, Map(1 -> "a")), (2, Map(2 -> "b"))).toDF("key", "map")
val swappedDf = df.select($"key".as("map"), $"map".as("key"))
checkAnswer(swappedDf.filter($"key"($"map") > "a"), Row(2, Map(2 -> "b")))
}
test("SPARK-26057: attribute deduplication on already analyzed plans") {
withTempView("a", "b", "v") {
val df1 = Seq(("1-1", 6)).toDF("id", "n")
df1.createOrReplaceTempView("a")
val df3 = Seq("1-1").toDF("id")
df3.createOrReplaceTempView("b")
spark.sql(
"""
|SELECT a.id, n as m
|FROM a
|WHERE EXISTS(
| SELECT 1
| FROM b
| WHERE b.id = a.id)
""".stripMargin).createOrReplaceTempView("v")
val res = spark.sql(
"""
|SELECT a.id, n, m
| FROM a
| LEFT OUTER JOIN v ON v.id = a.id
""".stripMargin)
checkAnswer(res, Row("1-1", 6, 6))
}
}
// Casting a NULL nested inside a struct (both via VALUES and via an explicit CAST to a
// struct type) must analyze and preserve the null field/row.
test("SPARK-27671: Fix analysis exception when casting null in nested field in struct") {
val df = sql("SELECT * FROM VALUES (('a', (10, null))), (('b', (10, 50))), " +
"(('c', null)) AS tab(x, y)")
checkAnswer(df, Row("a", Row(10, null)) :: Row("b", Row(10, 50)) :: Row("c", null) :: Nil)
val cast = sql("SELECT cast(struct(1, null) AS struct<a:int,b:int>)")
checkAnswer(cast, Row(Row(1, null)) :: Nil)
}
// `df` was built while `tmp` pointed at `test` (range 10); replacing `tmp` afterwards
// must affect neither df.explain output nor its collected result.
test("SPARK-27439: Explain result should match collected result after view change") {
withTempView("test", "test2", "tmp") {
spark.range(10).createOrReplaceTempView("test")
spark.range(5).createOrReplaceTempView("test2")
spark.sql("select * from test").createOrReplaceTempView("tmp")
val df = spark.sql("select * from tmp")
spark.sql("select * from test2").createOrReplaceTempView("tmp")
// Capture stdout to inspect the explain text.
val captured = new ByteArrayOutputStream()
Console.withOut(captured) {
df.explain(extended = true)
}
checkAnswer(df, spark.range(10).toDF)
val output = captured.toString
assert(output.contains(
"""== Parsed Logical Plan ==
|'Project [*]
|+- 'UnresolvedRelation [tmp]""".stripMargin))
assert(output.contains(
"""== Physical Plan ==
|*(1) Range (0, 10, step=1, splits=2)""".stripMargin))
}
}
// mode("default") must reset a previously-set mode; checked via reflection on the
// private `mode` field of DataFrameWriter.
test("SPARK-29442 Set `default` mode should override the existing mode") {
val df = Seq(Tuple1(1)).toDF()
val writer = df.write.mode("overwrite").mode("default")
val modeField = classOf[DataFrameWriter[Tuple1[Int]]].getDeclaredField("mode")
modeField.setAccessible(true)
assert(SaveMode.ErrorIfExists === modeField.get(writer).asInstanceOf[SaveMode])
}
// sample() after a join must not emit duplicate rows; uniqueness of the id pairs is
// the witness.
test("sample should not duplicated the input data") {
val df1 = spark.range(10).select($"id" as "id1", $"id" % 5 as "key1")
val df2 = spark.range(10).select($"id" as "id2", $"id" % 5 as "key2")
val sampled = df1.join(df2, $"key1" === $"key2")
.sample(0.5, 42)
.select("id1", "id2")
val idTuples = sampled.collect().map(row => row.getLong(0) -> row.getLong(1))
assert(idTuples.length == idTuples.toSet.size)
}
// groupBy(...).as[K, V] on inputs already repartitioned/sorted by the grouping keys:
// cogroup must reuse the existing distribution (only the 2 input exchanges remain).
test("groupBy.as") {
val df1 = Seq((1, 2, 3), (2, 3, 4)).toDF("a", "b", "c")
.repartition($"a", $"b").sortWithinPartitions("a", "b")
val df2 = Seq((1, 2, 4), (2, 3, 5)).toDF("a", "b", "c")
.repartition($"a", $"b").sortWithinPartitions("a", "b")
implicit val valueEncoder = RowEncoder(df1.schema)
val df3 = df1.groupBy("a", "b").as[GroupByKey, Row]
.cogroup(df2.groupBy("a", "b").as[GroupByKey, Row]) { case (_, data1, data2) =>
data1.zip(data2).map { p =>
p._1.getInt(2) + p._2.getInt(2)
}
}.toDF
checkAnswer(df3.sort("value"), Row(7) :: Row(9) :: Nil)
// Assert that no extra shuffle introduced by cogroup.
val exchanges = collect(df3.queryExecution.executedPlan) {
case h: ShuffleExchangeExec => h
}
assert(exchanges.size == 2)
}
// Grouping expressions may be arbitrary (aliased) expressions, not just column refs.
test("groupBy.as: custom grouping expressions") {
val df1 = Seq((1, 2, 3), (2, 3, 4)).toDF("a1", "b", "c")
.repartition($"a1", $"b").sortWithinPartitions("a1", "b")
val df2 = Seq((1, 2, 4), (2, 3, 5)).toDF("a1", "b", "c")
.repartition($"a1", $"b").sortWithinPartitions("a1", "b")
implicit val valueEncoder = RowEncoder(df1.schema)
val groupedDataset1 = df1.groupBy(($"a1" + 1).as("a"), $"b").as[GroupByKey, Row]
val groupedDataset2 = df2.groupBy(($"a1" + 1).as("a"), $"b").as[GroupByKey, Row]
val df3 = groupedDataset1
.cogroup(groupedDataset2) { case (_, data1, data2) =>
data1.zip(data2).map { p =>
p._1.getInt(2) + p._2.getInt(2)
}
}.toDF
checkAnswer(df3.sort("value"), Row(7) :: Row(9) :: Nil)
}
// Unresolvable grouping columns must fail eagerly with an AnalysisException.
test("groupBy.as: throw AnalysisException for unresolved grouping expr") {
val df = Seq((1, 2, 3), (2, 3, 4)).toDF("a", "b", "c")
implicit val valueEncoder = RowEncoder(df.schema)
val err = intercept[AnalysisException] {
df.groupBy($"d", $"b").as[GroupByKey, Row]
}
assert(err.getMessage.contains("cannot resolve 'd'"))
}
// An empty DataFrame joined with anything should optimize down to an empty
// LocalRelation (i.e. be foldable by the optimizer).
test("emptyDataFrame should be foldable") {
val emptyDf = spark.emptyDataFrame.withColumn("id", lit(1L))
val joined = spark.range(10).join(emptyDf, "id")
joined.queryExecution.optimizedPlan match {
case LocalRelation(Seq(id), Nil, _) =>
assert(id.name == "id")
case _ =>
fail("emptyDataFrame should be foldable")
}
}
// A CTE named like a missing qualified table must report "not found" instead of
// recursing into itself forever during resolution.
test("SPARK-30811: CTE should not cause stack overflow when " +
"it refers to non-existent table with same name") {
val e = intercept[AnalysisException] {
sql("WITH t AS (SELECT 1 FROM nonexist.t) SELECT * FROM t")
}
assert(e.getMessage.contains("Table or view not found:"))
}
// CTAS with an unresolvable source query must fail analysis before any table is made.
test("SPARK-32680: Don't analyze CTAS with unresolved query") {
val v2Source = classOf[FakeV2Provider].getName
val e = intercept[AnalysisException] {
sql(s"CREATE TABLE t USING $v2Source AS SELECT * from nonexist")
}
assert(e.getMessage.contains("Table or view not found:"))
}
// CalendarInterval values round-trip through the reflection-based encoder.
test("CalendarInterval reflection support") {
val df = Seq((1, new CalendarInterval(1, 2, 3))).toDF("a", "b")
checkAnswer(df.selectExpr("b"), Row(new CalendarInterval(1, 2, 3)))
}
// End-to-end coverage of Array encoders: primitives, tuples, case classes, and (via a
// locally-defined generic encoder) decimals, datetimes, intervals, binary and nesting.
test("SPARK-31552: array encoder with different types") {
// primitives
val booleans = Array(true, false)
checkAnswer(Seq(booleans).toDF(), Row(booleans))
val bytes = Array(1.toByte, 2.toByte)
checkAnswer(Seq(bytes).toDF(), Row(bytes))
val shorts = Array(1.toShort, 2.toShort)
checkAnswer(Seq(shorts).toDF(), Row(shorts))
val ints = Array(1, 2)
checkAnswer(Seq(ints).toDF(), Row(ints))
val longs = Array(1L, 2L)
checkAnswer(Seq(longs).toDF(), Row(longs))
val floats = Array(1.0F, 2.0F)
checkAnswer(Seq(floats).toDF(), Row(floats))
val doubles = Array(1.0D, 2.0D)
checkAnswer(Seq(doubles).toDF(), Row(doubles))
val strings = Array("2020-04-24", "2020-04-25")
checkAnswer(Seq(strings).toDF(), Row(strings))
// tuples
val decOne = Decimal(1, 38, 18)
val decTwo = Decimal(2, 38, 18)
val tuple1 = (1, 2.2, "3.33", decOne, Date.valueOf("2012-11-22"))
val tuple2 = (2, 3.3, "4.44", decTwo, Date.valueOf("2022-11-22"))
checkAnswer(Seq(Array(tuple1, tuple2)).toDF(), Seq(Seq(tuple1, tuple2)).toDF())
// case classes
val gbks = Array(GroupByKey(1, 2), GroupByKey(4, 5))
checkAnswer(Seq(gbks).toDF(), Row(Array(Row(1, 2), Row(4, 5))))
// We can move this implicit def to [[SQLImplicits]] when we eventually make fully
// support for array encoder like Seq and Set
// For now cases below, decimal/datetime/interval/binary/nested types, etc,
// are not supported by array
implicit def newArrayEncoder[T <: Array[_] : TypeTag]: Encoder[T] = ExpressionEncoder()
// decimals
val decSpark = Array(decOne, decTwo)
val decScala = decSpark.map(_.toBigDecimal)
val decJava = decSpark.map(_.toJavaBigDecimal)
checkAnswer(Seq(decSpark).toDF(), Row(decJava))
checkAnswer(Seq(decScala).toDF(), Row(decJava))
checkAnswer(Seq(decJava).toDF(), Row(decJava))
// datetimes and intervals
val dates = strings.map(Date.valueOf)
checkAnswer(Seq(dates).toDF(), Row(dates))
val localDates = dates.map(d => DateTimeUtils.daysToLocalDate(DateTimeUtils.fromJavaDate(d)))
checkAnswer(Seq(localDates).toDF(), Row(dates))
val timestamps =
Array(Timestamp.valueOf("2020-04-24 12:34:56"), Timestamp.valueOf("2020-04-24 11:22:33"))
checkAnswer(Seq(timestamps).toDF(), Row(timestamps))
val instants =
timestamps.map(t => DateTimeUtils.microsToInstant(DateTimeUtils.fromJavaTimestamp(t)))
checkAnswer(Seq(instants).toDF(), Row(timestamps))
val intervals = Array(new CalendarInterval(1, 2, 3), new CalendarInterval(4, 5, 6))
checkAnswer(Seq(intervals).toDF(), Row(intervals))
// binary
val bins = Array(Array(1.toByte), Array(2.toByte), Array(3.toByte), Array(4.toByte))
checkAnswer(Seq(bins).toDF(), Row(bins))
// nested
val nestedIntArray = Array(Array(1), Array(2))
checkAnswer(Seq(nestedIntArray).toDF(), Row(nestedIntArray.map(wrapIntArray)))
val nestedDecArray = Array(decSpark)
checkAnswer(Seq(nestedDecArray).toDF(), Row(Array(wrapRefArray(decJava))))
}
// Reading a decimal parquet column as Dataset[BigDecimal] should not require an UpCast
// node; the declared schema is preserved.
test("SPARK-31750: eliminate UpCast if child's dataType is DecimalType") {
withTempPath { f =>
sql("select cast(1 as decimal(38, 0)) as d")
.write.mode("overwrite")
.parquet(f.getAbsolutePath)
val df = spark.read.parquet(f.getAbsolutePath).as[BigDecimal]
assert(df.schema === new StructType().add(StructField("d", DecimalType(38, 0))))
}
}
// ln must propagate NaN rather than returning null or throwing.
test("SPARK-32640: ln(NaN) should return NaN") {
val df = Seq(Double.NaN).toDF("d")
checkAnswer(df.selectExpr("ln(d)"), Row(Double.NaN))
}
// Multiple DISTINCT aggregates over constant-only columns must plan correctly.
test("SPARK-32761: aggregating multiple distinct CONSTANT columns") {
checkAnswer(sql("select count(distinct 2), count(distinct 2,3)"), Row(1, 1))
}
// IEEE-754 negative zero: comparisons must treat 0.0 and -0.0 as equal.
test("SPARK-32764: -0.0 and 0.0 should be equal") {
val df = Seq(0.0 -> -0.0).toDF("pos", "neg")
checkAnswer(df.select($"pos" > $"neg"), Row(false))
}
// FoldablePropagation must only substitute foldables that actually come from the
// node's children; the union/cache/join shape below used to produce a wrong result.
test("SPARK-32635: Replace references with foldables coming only from the node's children") {
val a = Seq("1").toDF("col1").withColumn("col2", lit("1"))
val b = Seq("2").toDF("col1").withColumn("col2", lit("2"))
val aub = a.union(b)
val c = aub.filter($"col1" === "2").cache()
val d = Seq("2").toDF("col4")
val r = d.join(aub, $"col2" === $"col4").select("col4")
val l = c.select("col2")
val df = l.join(r, $"col2" === $"col4", "LeftOuter")
checkAnswer(df, Row("2", "2"))
}
// Auto-generated column names must come from UnresolvedAlias so nested field and
// array accesses render in SQL form.
test("SPARK-33939: Make Column.named use UnresolvedAlias to assign name") {
val df = spark.range(1).selectExpr("id as id1", "id as id2")
val df1 = df.selectExpr("cast(struct(id1, id2).id1 as int)")
assert(df1.schema.head.name == "CAST(struct(id1, id2).id1 AS INT)")
val df2 = df.selectExpr("cast(array(struct(id1, id2))[0].id1 as int)")
assert(df2.schema.head.name == "CAST(array(struct(id1, id2))[0].id1 AS INT)")
val df3 = df.select(hex(expr("struct(id1, id2).id1")))
assert(df3.schema.head.name == "hex(struct(id1, id2).id1)")
// this test is to make sure we don't have a regression.
val df4 = df.selectExpr("id1 == null")
assert(df4.schema.head.name == "(id1 = NULL)")
}
// Casts inserted automatically by analysis must not leak into generated column names;
// explicit user-written casts must remain visible.
test("SPARK-33989: Strip auto-generated cast when using Cast.sql") {
Seq("SELECT id == null FROM VALUES(1) AS t(id)",
"SELECT floor(1)",
"SELECT split(struct(c1, c2).c1, ',') FROM VALUES(1, 2) AS t(c1, c2)").foreach { sqlStr =>
assert(!sql(sqlStr).schema.fieldNames.head.toLowerCase(Locale.getDefault).contains("cast"))
}
Seq("SELECT id == CAST(null AS int) FROM VALUES(1) AS t(id)",
"SELECT floor(CAST(1 AS double))",
"SELECT split(CAST(struct(c1, c2).c1 AS string), ',') FROM VALUES(1, 2) AS t(c1, c2)"
).foreach { sqlStr =>
assert(sql(sqlStr).schema.fieldNames.head.toLowerCase(Locale.getDefault).contains("cast"))
}
}
// colRegex must tolerate newline escapes in both the column name and the qualifier.
test("SPARK-34318: colRegex should work with column names & qualifiers which contain newlines") {
val df = Seq(1, 2, 3).toDF("test\\n_column").as("test\\n_table")
val col1 = df.colRegex("`tes.*\\n.*mn`")
checkAnswer(df.select(col1), Row(1) :: Row(2) :: Row(3) :: Nil)
val col2 = df.colRegex("test\\n_table.`tes.*\\n.*mn`")
checkAnswer(df.select(col2), Row(1) :: Row(2) :: Row(3) :: Nil)
}
// Column names/qualifiers containing backticks, dots and other special characters must
// resolve identically through selectExpr, df(...), col(...) and $"..." syntax.
test("SPARK-34763: col(), $\\"<name>\\", df(\\"name\\") should handle quoted column name properly") {
val df1 = spark.sql("SELECT 'col1' AS `a``b.c`")
checkAnswer(df1.selectExpr("`a``b.c`"), Row("col1"))
checkAnswer(df1.select(df1("`a``b.c`")), Row("col1"))
checkAnswer(df1.select(col("`a``b.c`")), Row("col1"))
checkAnswer(df1.select($"`a``b.c`"), Row("col1"))
val df2 = df1.as("d.e`f")
checkAnswer(df2.selectExpr("`a``b.c`"), Row("col1"))
checkAnswer(df2.select(df2("`a``b.c`")), Row("col1"))
checkAnswer(df2.select(col("`a``b.c`")), Row("col1"))
checkAnswer(df2.select($"`a``b.c`"), Row("col1"))
checkAnswer(df2.selectExpr("`d.e``f`.`a``b.c`"), Row("col1"))
checkAnswer(df2.select(df2("`d.e``f`.`a``b.c`")), Row("col1"))
checkAnswer(df2.select(col("`d.e``f`.`a``b.c`")), Row("col1"))
checkAnswer(df2.select($"`d.e``f`.`a``b.c`"), Row("col1"))
val df3 = df1.as("*-#&% ?")
checkAnswer(df3.selectExpr("`*-#&% ?`.`a``b.c`"), Row("col1"))
checkAnswer(df3.select(df3("*-#&% ?.`a``b.c`")), Row("col1"))
checkAnswer(df3.select(col("*-#&% ?.`a``b.c`")), Row("col1"))
checkAnswer(df3.select($"*-#&% ?.`a``b.c`"), Row("col1"))
}
// Nested-column pruning must keep attributes produced by a Window operator: the struct
// field `best_name.name` is derived through two window functions.
test("SPARK-34776: Nested column pruning should not prune Window produced attributes") {
val df = Seq(
("t1", "123", "bob"),
("t1", "456", "bob"),
("t2", "123", "sam")
).toDF("type", "value", "name")
val test = df.select(
$"*",
struct(count($"*").over(Window.partitionBy($"type", $"value", $"name"))
.as("count"), $"name").as("name_count")
).select(
$"*",
max($"name_count").over(Window.partitionBy($"type", $"value")).as("best_name")
)
checkAnswer(test.select($"best_name.name"), Row("bob") :: Row("bob") :: Row("sam") :: Nil)
}
// Typed ScalaUDFs (both String- and case-class-typed) must work when applied multiple
// times inside transform / transform_keys / transform_values / map_zip_with / zip_with.
test("SPARK-34829: Multiple applications of typed ScalaUDFs in higher order functions work") {
val reverse = udf((s: String) => s.reverse)
val reverse2 = udf((b: Bar2) => Bar2(b.s.reverse))
val df = Seq(Array("abc", "def")).toDF("array")
val test = df.select(transform(col("array"), s => reverse(s)))
checkAnswer(test, Row(Array("cba", "fed")) :: Nil)
val df2 = Seq(Array(Bar2("abc"), Bar2("def"))).toDF("array")
val test2 = df2.select(transform(col("array"), b => reverse2(b)))
checkAnswer(test2, Row(Array(Row("cba"), Row("fed"))) :: Nil)
val df3 = Seq(Map("abc" -> 1, "def" -> 2)).toDF("map")
val test3 = df3.select(transform_keys(col("map"), (s, _) => reverse(s)))
checkAnswer(test3, Row(Map("cba" -> 1, "fed" -> 2)) :: Nil)
val df4 = Seq(Map(Bar2("abc") -> 1, Bar2("def") -> 2)).toDF("map")
val test4 = df4.select(transform_keys(col("map"), (b, _) => reverse2(b)))
checkAnswer(test4, Row(Map(Row("cba") -> 1, Row("fed") -> 2)) :: Nil)
val df5 = Seq(Map(1 -> "abc", 2 -> "def")).toDF("map")
val test5 = df5.select(transform_values(col("map"), (_, s) => reverse(s)))
checkAnswer(test5, Row(Map(1 -> "cba", 2 -> "fed")) :: Nil)
val df6 = Seq(Map(1 -> Bar2("abc"), 2 -> Bar2("def"))).toDF("map")
val test6 = df6.select(transform_values(col("map"), (_, b) => reverse2(b)))
checkAnswer(test6, Row(Map(1 -> Row("cba"), 2 -> Row("fed"))) :: Nil)
// Binary (two-argument) UDFs exercised through the zip-style higher order functions.
val reverseThenConcat = udf((s1: String, s2: String) => s1.reverse ++ s2.reverse)
val reverseThenConcat2 = udf((b1: Bar2, b2: Bar2) => Bar2(b1.s.reverse ++ b2.s.reverse))
val df7 = Seq((Map(1 -> "abc", 2 -> "def"), Map(1 -> "ghi", 2 -> "jkl"))).toDF("map1", "map2")
val test7 =
df7.select(map_zip_with(col("map1"), col("map2"), (_, s1, s2) => reverseThenConcat(s1, s2)))
checkAnswer(test7, Row(Map(1 -> "cbaihg", 2 -> "fedlkj")) :: Nil)
val df8 = Seq((Map(1 -> Bar2("abc"), 2 -> Bar2("def")),
Map(1 -> Bar2("ghi"), 2 -> Bar2("jkl")))).toDF("map1", "map2")
val test8 =
df8.select(map_zip_with(col("map1"), col("map2"), (_, b1, b2) => reverseThenConcat2(b1, b2)))
checkAnswer(test8, Row(Map(1 -> Row("cbaihg"), 2 -> Row("fedlkj"))) :: Nil)
val df9 = Seq((Array("abc", "def"), Array("ghi", "jkl"))).toDF("array1", "array2")
val test9 =
df9.select(zip_with(col("array1"), col("array2"), (s1, s2) => reverseThenConcat(s1, s2)))
checkAnswer(test9, Row(Array("cbaihg", "fedlkj")) :: Nil)
val df10 = Seq((Array(Bar2("abc"), Bar2("def")), Array(Bar2("ghi"), Bar2("jkl"))))
.toDF("array1", "array2")
val test10 =
df10.select(zip_with(col("array1"), col("array2"), (b1, b2) => reverseThenConcat2(b1, b2)))
checkAnswer(test10, Row(Array(Row("cbaihg"), Row("fedlkj"))) :: Nil)
}
// A null-sensitive UDAF (counts NULL inputs) combined with DISTINCT and FILTER clauses:
// testData.key has no nulls, so every countNulls variant yields 0.
test("SPARK-34882: Aggregate with multiple distinct null sensitive aggregators") {
withUserDefinedFunction(("countNulls", true)) {
spark.udf.register("countNulls", udaf(new Aggregator[JLong, JLong, JLong] {
def zero: JLong = 0L
def reduce(b: JLong, a: JLong): JLong = if (a == null) {
b + 1
} else {
b
}
def merge(b1: JLong, b2: JLong): JLong = b1 + b2
def finish(r: JLong): JLong = r
def bufferEncoder: Encoder[JLong] = Encoders.LONG
def outputEncoder: Encoder[JLong] = Encoders.LONG
}))
val result = testData.selectExpr(
"countNulls(key)",
"countNulls(DISTINCT key)",
"countNulls(key) FILTER (WHERE key > 50)",
"countNulls(DISTINCT key) FILTER (WHERE key > 50)",
"count(DISTINCT key)")
checkAnswer(result, Row(0, 0, 0, 0, 100))
}
}
// Subexpression elimination for conditional expressions: the deterministic UDF is
// evaluated once per row (5 calls); the non-deterministic variant must NOT be shared,
// so it runs once per branch occurrence (up to 2 per row -> 10 more calls).
test("SPARK-35410: SubExpr elimination should not include redundant child exprs " +
"for conditional expressions") {
val accum = sparkContext.longAccumulator("call")
val simpleUDF = udf((s: String) => {
accum.add(1)
s
})
val df1 = spark.range(5).select(when(functions.length(simpleUDF($"id")) > 0,
functions.length(simpleUDF($"id"))).otherwise(
functions.length(simpleUDF($"id")) + 1))
df1.collect()
assert(accum.value == 5)
val nondeterministicUDF = simpleUDF.asNondeterministic()
val df2 = spark.range(5).select(when(functions.length(nondeterministicUDF($"id")) > 0,
functions.length(nondeterministicUDF($"id"))).otherwise(
functions.length(nondeterministicUDF($"id")) + 1))
df2.collect()
assert(accum.value == 15)
}
// Nested subexpressions must be de-duplicated transitively, regardless of codegen
// method-splitting (tested with both extreme split thresholds).
test("SPARK-35560: Remove redundant subexpression evaluation in nested subexpressions") {
Seq(1, Int.MaxValue).foreach { splitThreshold =>
withSQLConf(SQLConf.CODEGEN_METHOD_SPLIT_THRESHOLD.key -> splitThreshold.toString) {
val accum = sparkContext.longAccumulator("call")
val simpleUDF = udf((s: String) => {
accum.add(1)
s
})
// Common exprs:
// 1. simpleUDF($"id")
// 2. functions.length(simpleUDF($"id"))
// We should only evaluate `simpleUDF($"id")` once, i.e.
// subExpr1 = simpleUDF($"id");
// subExpr2 = functions.length(subExpr1);
val df = spark.range(5).select(
when(functions.length(simpleUDF($"id")) === 1, lower(simpleUDF($"id")))
.when(functions.length(simpleUDF($"id")) === 0, upper(simpleUDF($"id")))
.otherwise(simpleUDF($"id")).as("output"))
df.collect()
assert(accum.value == 5)
}
}
}
// isLocal must be true for both command results (SHOW TABLES) and local relations.
test("isLocal should consider CommandResult and LocalRelation") {
val df1 = sql("SHOW TABLES")
assert(df1.isLocal)
val df2 = (1 to 10).toDF()
assert(df2.isLocal)
}
}
// Two-field grouping key used by the groupBy(...).as[GroupByKey, Row] tests.
case class GroupByKey(a: Int, b: Int)
// Single-field case class used to exercise case-class-typed UDFs in higher-order functions.
case class Bar2(s: String)
| maropu/spark | sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala | Scala | apache-2.0 | 113,449 |
package spark.jobserver
import akka.actor.Props
import spark.jobserver.CommonMessages.{JobErroredOut, JobResult}
/**
 * Behavioral tests for [[JobManagerActor]]: verifies that jobs launched through the
 * actor can cache RDDs and that later jobs can retrieve them (anonymously via
 * getPersistentRDDs, and by name).
 */
class JobManagerActorSpec extends JobManagerSpec {
  import scala.concurrent.duration._

  // Fresh DAO and manager actor per test so cached state never leaks between tests.
  before {
    dao = new InMemoryDAO
    manager =
      system.actorOf(JobManagerActor.props(dao, "test", JobManagerSpec.config, false))
  }

  describe("starting jobs") {
    it("jobs should be able to cache RDDs and retrieve them through getPersistentRDDs") {
      manager ! JobManagerActor.Initialize
      expectMsgClass(classOf[JobManagerActor.Initialized])
      uploadTestJar()
      // First job caches an RDD and returns a sum computed from it.
      manager ! JobManagerActor.StartJob("demo", classPrefix + "CacheSomethingJob", emptyConfig,
        errorEvents ++ syncEvents)
      val JobResult(_, sum: Int) = expectMsgClass(classOf[JobResult])
      // Second job reads the cached RDD; its sum must match the first job's.
      manager ! JobManagerActor.StartJob("demo", classPrefix + "AccessCacheJob", emptyConfig,
        errorEvents ++ syncEvents)
      val JobResult(_, sum2: Int) = expectMsgClass(classOf[JobResult])
      sum2 should equal (sum)
    }

    it ("jobs should be able to cache and retrieve RDDs by name") {
      manager ! JobManagerActor.Initialize
      expectMsgClass(classOf[JobManagerActor.Initialized])
      uploadTestJar()
      manager ! JobManagerActor.StartJob("demo", classPrefix + "CacheRddByNameJob", emptyConfig,
        errorEvents ++ syncEvents)
      // Use `1.second` rather than the deprecated postfix form `1 second`
      // (postfix operators require the scala.language.postfixOps feature flag).
      expectMsgPF(1.second, "Expected a JobResult or JobErroredOut message!") {
        // Expected value: sum of squares of 1..5.
        case JobResult(_, sum: Int) => sum should equal (1 + 4 + 9 + 16 + 25)
        case JobErroredOut(_, _, error: Throwable) => throw error
      }
    }
  }
}
| nachiketa-shukla/spark-jobserver | job-server/test/spark.jobserver/JobManagerActorSpec.scala | Scala | apache-2.0 | 1,603 |
package propagator
import IFDS.{Propagator, KillGenInfo}
import taintanalysis.{Fact, MStmt}
import org.opalj.br.Method
/**
 * Placeholder [[Propagator]] implementation: every IFDS flow hook is left
 * unimplemented, so invoking any method throws scala.NotImplementedError.
 */
class SpecificMethodKiller extends Propagator {

  def canHandle(f: Fact): Boolean = ???

  def propagateNormalFlow(f: Fact, curr: MStmt, succ: MStmt): KillGenInfo = ???

  def propagateCallFlow(f: Fact, callStmt: MStmt, destinationMethod: Method): KillGenInfo = ???

  def propagateCallToReturnFlow(f: Fact, callSite: MStmt): KillGenInfo = ???

  def propagateReturnFlow(f: Fact, callSite: Option[MStmt], calleeMethod: Method,
      exitStmt: MStmt, returnSite: Option[MStmt]): KillGenInfo = ???
}
| packlnd/IFDS-RA | src/main/scala/propagator/SpecificMethodKiller.scala | Scala | mit | 634 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.tree
import org.apache.spark.SparkFunSuite
import org.apache.spark.mllib.tree.impurity.{EntropyAggregator, GiniAggregator}
import org.apache.spark.mllib.util.MLlibTestSparkContext
/**
 * Tests for the Gini and entropy impurity aggregators.
 * Test suites for [[GiniAggregator]] and [[EntropyAggregator]].
 */
class ImpuritySuite extends SparkFunSuite with MLlibTestSparkContext {
  test("Gini impurity does not support negative labels") { // a negative label must be rejected
    val gini = new GiniAggregator(2)
    intercept[IllegalArgumentException] {
      // update(allStats, offset, label = -1, instanceWeight) must throw on label < 0
      gini.update(Array(0.0, 1.0, 2.0), 0, -1, 0.0)
    }
  }
  test("Entropy does not support negative labels") { // a negative label must be rejected
    val entropy = new EntropyAggregator(2)
    intercept[IllegalArgumentException] {
      entropy.update(Array(0.0, 1.0, 2.0), 0, -1, 0.0)
    }
  }
}
| tophua/spark1.52 | mllib/src/test/scala/org/apache/spark/mllib/tree/ImpuritySuite.scala | Scala | apache-2.0 | 1,648 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.oap.io
import org.apache.hadoop.conf.Configuration
import org.apache.spark.sql.execution.datasources.oap.filecache.{FiberCache, FiberId}
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.types.StructType
// Minimal DataFile stub for tests: both iterators yield nothing, totalRows is 0, and
// the metadata/cache operations are unsupported.
private[oap] case class TestDataFile(path: String, schema: StructType, configuration: Configuration)
extends DataFile {
// Always-empty iterator regardless of the requested columns/filters.
override def iterator(
requiredIds: Array[Int],
filters: Seq[Filter]): OapCompletionIterator[Any] =
new OapCompletionIterator(Iterator.empty, {})
// Row-id-restricted variant; also always empty.
override def iteratorWithRowIds(
requiredIds: Array[Int],
rowIds: Array[Int],
filters: Seq[Filter]):
OapCompletionIterator[Any] = new OapCompletionIterator(Iterator.empty, {})
override def totalRows(): Long = 0
// Not meaningful for this stub.
override def getDataFileMeta(): DataFileMeta =
throw new UnsupportedOperationException
override def cache(groupId: Int, fiberId: Int, fiber: FiberId = null): FiberCache =
throw new UnsupportedOperationException
}
| Intel-bigdata/OAP | oap-cache/oap/src/test/scala/org/apache/spark/sql/execution/datasources/oap/io/TestDataFile.scala | Scala | apache-2.0 | 1,831 |
package api.hue.endpoint
import api.hue.Bridge
import api.hue.dao.Group
import api.hue.dao.attribute._
import com.google.inject.Inject
import play.api.libs.json.JsObject
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/**
* /groups endpoint
* @author ddexter
*/
class Groups @Inject() (bridgeInst: Bridge) extends Endpoint[Group] {
  import Groups._

  override def bridge: Bridge = bridgeInst

  override def name: String = NAME

  // Fetch all groups from the bridge and decode the top-level JSON object into a
  // (group id -> Group) map.
  override def get: Future[Map[String, Group]] =
    bridge.get(path).map { response =>
      response.as[JsObject].fields.toMap.map { case (id, json) => id -> json.as[Group] }
    }

  /**
   * @param groupName The group name to search for
   * @return The group id corresponding to a group name or none if DNE
   */
  def getGroupId(groupName: String): Future[Option[String]] =
    get.map(_.collectFirst { case (id, group) if group.name == groupName => id })

  override protected def path: String = PATH

  override protected def supportedPutAttributes: Set[String] = SUPPORTED_PUT_ATTRIBUTES
}
object Groups {
// Attributes that may be written via PUT on a group; all others are read-only here.
private val SUPPORTED_PUT_ATTRIBUTES: Set[String] = Set(
Brightness.NAME,
Hue.NAME,
On.NAME,
Saturation.NAME,
TransitionTime.NAME
)
// Endpoint name and REST path for the Hue /groups API.
val NAME: String = "groups"
val PATH: String = "/groups"
}
| ddexter/HomeBackend | src/main/scala/api/hue/endpoint/Groups.scala | Scala | apache-2.0 | 1,310 |
package doobie.hi
import doobie.enum.holdability.Holdability
import doobie.enum.transactionisolation.TransactionIsolation
import doobie.enum.fetchdirection.FetchDirection
import doobie.enum.resultsetconcurrency.ResultSetConcurrency
import doobie.enum.resultsettype.ResultSetType
import doobie.syntax.catchable._
import doobie.free.{ connection => C }
import doobie.free.{ preparedstatement => PS }
import doobie.free.{ callablestatement => CS }
import doobie.free.{ resultset => RS }
import doobie.free.{ statement => S }
import doobie.free.{ databasemetadata => DMD }
import doobie.util.composite._
import java.net.URL
import java.util.{ Date, Calendar }
import java.sql.{ ParameterMetaData, ResultSetMetaData, SQLWarning, Time, Timestamp, Ref, RowId }
import scala.collection.immutable.Map
import scala.collection.JavaConverters._
import scala.Predef.intArrayOps
import scalaz.syntax.id._
/**
* Module of high-level constructors for `StatementIO` actions.
* @group Modules
*/
/**
 * Module of high-level constructors for `StatementIO` actions. Each constructor is a
 * thin, pure delegation to the corresponding low-level `doobie.free.statement` action,
 * converting raw JDBC ints to typed enums and guaranteeing ResultSet cleanup where
 * a ResultSet is produced.
 * @group Modules
 */
object statement {
/** @group Typeclass Instances */
implicit val MonadStatementIO = S.MonadStatementIO
/** @group Typeclass Instances */
implicit val CatchableStatementIO = S.CatchableStatementIO
/** Add the given SQL to the current batch. @group Batching */
def addBatch(sql: String): StatementIO[Unit] =
S.addBatch(sql)
/** Discard all statements in the current batch. @group Batching */
val clearBatch: StatementIO[Unit] =
S.clearBatch
/** Execute the batch, yielding per-statement update counts. @group Execution */
val executeBatch: StatementIO[List[Int]] =
S.executeBatch.map(_.toList)
/** Run `sql` and interpret the ResultSet with `k`; the ResultSet is always closed. @group Execution */
def executeQuery[A](sql: String)(k: ResultSetIO[A]): StatementIO[A] =
S.executeQuery(sql).flatMap(s => S.liftResultSet(s, k ensuring RS.close))
/** @group Execution */
def executeUpdate(sql: String): StatementIO[Int] =
S.executeUpdate(sql)
/** @group Properties */
val getFetchDirection: StatementIO[FetchDirection] =
S.getFetchDirection.map(FetchDirection.unsafeFromInt)
/** @group Properties */
val getFetchSize: StatementIO[Int] =
S.getFetchSize
/** Interpret the generated-keys ResultSet with `k`; the ResultSet is always closed. @group Results */
def getGeneratedKeys[A](k: ResultSetIO[A]): StatementIO[A] =
S.getGeneratedKeys.flatMap(s => S.liftResultSet(s, k ensuring RS.close))
/** @group Properties */
val getMaxFieldSize: StatementIO[Int] =
S.getMaxFieldSize
/** @group Properties */
val getMaxRows: StatementIO[Int] =
S.getMaxRows
// /** @group Batching */
// def getMoreResults(a: Int): StatementIO[Boolean] =
// Predef.???
/** @group Batching */
val getMoreResults: StatementIO[Boolean] =
S.getMoreResults
/** @group Properties */
val getQueryTimeout: StatementIO[Int] =
S.getQueryTimeout
/** Interpret the current ResultSet with `k`. NOTE: unlike executeQuery, the ResultSet is not auto-closed here. @group Batching */
def getResultSet[A](k: ResultSetIO[A]): StatementIO[A] =
S.getResultSet.flatMap(s => S.liftResultSet(s, k))
/** @group Properties */
val getResultSetConcurrency: StatementIO[ResultSetConcurrency] =
S.getResultSetConcurrency.map(ResultSetConcurrency.unsafeFromInt)
/** @group Properties */
val getResultSetHoldability: StatementIO[Holdability] =
S.getResultSetHoldability.map(Holdability.unsafeFromInt)
/** @group Properties */
val getResultSetType: StatementIO[ResultSetType] =
S.getResultSetType.map(ResultSetType.unsafeFromInt)
/** @group Results */
val getUpdateCount: StatementIO[Int] =
S.getUpdateCount
/** @group Results */
val getWarnings: StatementIO[SQLWarning] =
S.getWarnings
/** @group Properties */
def setCursorName(name: String): StatementIO[Unit] =
S.setCursorName(name)
/** @group Properties */
def setEscapeProcessing(a: Boolean): StatementIO[Unit] =
S.setEscapeProcessing(a)
/** @group Properties */
def setFetchDirection(fd: FetchDirection): StatementIO[Unit] =
S.setFetchDirection(fd.toInt)
/** @group Properties */
def setFetchSize(n: Int): StatementIO[Unit] =
S.setFetchSize(n)
/** @group Properties */
def setMaxFieldSize(n: Int): StatementIO[Unit] =
S.setMaxFieldSize(n)
/** @group Properties */
def setMaxRows(n: Int): StatementIO[Unit] =
S.setMaxRows(n)
/** @group Properties */
def setQueryTimeout(a: Int): StatementIO[Unit] =
S.setQueryTimeout(a)
} | jamescway/doobie | core/src/main/scala/doobie/hi/statement.scala | Scala | mit | 4,135 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.recommendation
import org.scalatest.FunSuite
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.rdd.RDD
import org.apache.spark.util.Utils
// Tests MatrixFactorizationModel construction invariants (rank consistency across
// user/product factor vectors) and save/load round-tripping.
class MatrixFactorizationModelSuite extends FunSuite with MLlibTestSparkContext {
val rank = 2
var userFeatures: RDD[(Int, Array[Double])] = _
var prodFeatures: RDD[(Int, Array[Double])] = _
override def beforeAll(): Unit = {
super.beforeAll()
// Two users with rank-2 factors; one product with a rank-2 factor.
userFeatures = sc.parallelize(Seq((0, Array(1.0, 2.0)), (1, Array(3.0, 4.0))))
prodFeatures = sc.parallelize(Seq((2, Array(5.0, 6.0))))
}
test("constructor") {
val model = new MatrixFactorizationModel(rank, userFeatures, prodFeatures)
// predict(0, 2) = dot([1,2], [5,6]) = 17
assert(model.predict(0, 2) ~== 17.0 relTol 1e-14)
// Declared rank must match the feature vector lengths on both sides.
intercept[IllegalArgumentException] {
new MatrixFactorizationModel(1, userFeatures, prodFeatures)
}
val userFeatures1 = sc.parallelize(Seq((0, Array(1.0)), (1, Array(3.0))))
intercept[IllegalArgumentException] {
new MatrixFactorizationModel(rank, userFeatures1, prodFeatures)
}
val prodFeatures1 = sc.parallelize(Seq((2, Array(5.0))))
intercept[IllegalArgumentException] {
new MatrixFactorizationModel(rank, userFeatures, prodFeatures1)
}
}
test("save/load") {
val model = new MatrixFactorizationModel(rank, userFeatures, prodFeatures)
val tempDir = Utils.createTempDir()
val path = tempDir.toURI.toString
// Compare feature RDDs by value (arrays have identity equals, so convert to Seq).
def collect(features: RDD[(Int, Array[Double])]): Set[(Int, Seq[Double])] = {
features.mapValues(_.toSeq).collect().toSet
}
try {
model.save(sc, path)
val newModel = MatrixFactorizationModel.load(sc, path)
assert(newModel.rank === rank)
assert(collect(newModel.userFeatures) === collect(userFeatures))
assert(collect(newModel.productFeatures) === collect(prodFeatures))
} finally {
Utils.deleteRecursively(tempDir)
}
}
}
| trueyao/spark-lever | mllib/src/test/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModelSuite.scala | Scala | apache-2.0 | 2,780 |
/**
* Copyright (C) 2007 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.control
import controls._
import org.orbeon.oxf.xforms.XFormsConstants._
import org.orbeon.oxf.xforms._
import analysis.ElementAnalysis
import xbl.XBLContainer
import org.orbeon.oxf.xforms.BindingContext
import org.orbeon.saxon.om.Item
import collection.JavaConverters._
import org.orbeon.oxf.xforms.state.InstancesControls
import org.orbeon.oxf.util.DynamicVariable
object Controls {
    // Create the entire tree of control from the root.
    // Returns Some(root control), or None if the control factory produced nothing for it.
    def createTree(containingDocument: XFormsContainingDocument, controlIndex: ControlIndex) = {
        // Start from a freshly reset binding context at the top of the document
        val bindingContext = containingDocument.getContextStack.resetBindingContext()
        // The single top-level static control of the document
        val rootControl = containingDocument.getStaticState.topLevelPart.getTopLevelControls.head
        buildTree(controlIndex, containingDocument, bindingContext, None, rootControl, Seq()) map
            logTreeIfNeeded("after building full tree")
    }
    // Create a new repeat iteration for insertion into the current tree of controls.
    // The result is always an XFormsRepeatIterationControl (see assertion and cast below).
    def createRepeatIterationTree(
            containingDocument: XFormsContainingDocument,
            controlIndex: ControlIndex,
            repeatControl: XFormsRepeatControl,
            iterationIndex: Int) = {
        // Effective id suffix of the new iteration: the repeat's own suffix parts plus the new iteration index
        val idSuffix = XFormsUtils.getEffectiveIdSuffixParts(repeatControl.getEffectiveId).toSeq :+ iterationIndex
        // This is the context of the iteration
        // buildTree() does a pushBinding(), but that won't change the context (no @ref, etc. on the iteration itself)
        val container = repeatControl.container
        val bindingContext = {
            val contextStack = container.getContextStack
            contextStack.setBinding(repeatControl.bindingContext)
            contextStack.pushIteration(iterationIndex)
        }
        // This has to be the case at this point, otherwise it's a bug in our code
        assert(repeatControl.staticControl.iteration.isDefined)
        val controlOpt =
            buildTree(
                controlIndex,
                container,
                bindingContext,
                Some(repeatControl),
                repeatControl.staticControl.iteration.get,
                idSuffix
            ) map
                logTreeIfNeeded("after building repeat iteration tree")
        controlOpt.get.asInstanceOf[XFormsRepeatIterationControl] // we "know" this, right?
    }
    // Create a new subtree of controls (used by xxf:dynamic).
    // The subtree is rooted under `containerControl` and built from `rootAnalysis`.
    def createSubTree(
            container: XBLContainer,
            controlIndex: ControlIndex,
            containerControl: XFormsContainerControl,
            rootAnalysis: ElementAnalysis) = {
        // Reuse the container control's suffix so descendants land in the same repeat iteration(s)
        val idSuffix = XFormsUtils.getEffectiveIdSuffixParts(containerControl.getEffectiveId).toSeq
        val bindingContext = containerControl.bindingContextForChild
        buildTree(
            controlIndex,
            container,
            bindingContext,
            Some(containerControl),
            rootAnalysis,
            idSuffix
        ) map
            logTreeIfNeeded("after building subtree")
    }
    // Build a component subtree: instantiate the control for `staticElement`, index it,
    // evaluate its binding, then recursively build its children.
    private def buildTree(
            controlIndex: ControlIndex,
            container: XBLContainer,
            bindingContext: BindingContext,
            parentOption: Option[XFormsControl],
            staticElement: ElementAnalysis,
            idSuffix: Seq[Int]): Option[XFormsControl] = {
        // Determine effective id: prefixed id, plus the repeat suffix when within repeat iterations
        val effectiveId =
            if (idSuffix.isEmpty)
                staticElement.prefixedId
            else
                staticElement.prefixedId + REPEAT_SEPARATOR + (idSuffix mkString REPEAT_INDEX_SEPARATOR_STRING)
        // Instantiate the control
        // TODO LATER: controls must take ElementAnalysis, not Element
        // NOTE: If we are unable to create a control (case of Model at least), this has no effect
        XFormsControlFactory.createXFormsControl(container, parentOption.orNull, staticElement, effectiveId) map {
            control ⇒
                // Index the new control
                // NOTE: We used to do this after evaluating the binding. In general it shouldn't hurt to do it here.
                // The reason to move indexing before is that (original comment left unfinished — presumably so the
                // control is findable while its binding and children are evaluated; TODO confirm)
                controlIndex.indexControl(control)
                // Determine binding (initial evaluation, not an update)
                control.evaluateBindingAndValues(bindingContext, update = false)
                // Build the control's children if any, recursing with this control as parent
                control.buildChildren(buildTree(controlIndex, _, _, Some(control), _, _), idSuffix)
                control
        }
    }
// Build children controls if any, delegating the actual construction to the given `buildTree` function
def buildChildren(
control: XFormsControl,
children: ⇒ Iterable[ElementAnalysis],
buildTree: (XBLContainer, BindingContext, ElementAnalysis, Seq[Int]) ⇒ Option[XFormsControl],
idSuffix: Seq[Int]) {
// Start with the context within the current control
var newBindingContext = control.bindingContextForChild
// Build each child
children foreach { childElement ⇒
buildTree(control.container, newBindingContext, childElement, idSuffix) foreach { newChildControl ⇒
// Update the context based on the just created control
newBindingContext = newChildControl.bindingContextForFollowing
}
}
}
    /**
     * Find an effective control id based on a source and a control static id, following XBL scoping and the repeat
     * structure.
     *
     * @param sourceEffectiveId reference to source control, e.g. "list$age.3"
     * @param targetPrefixedId  reference to target control, e.g. "list$xf-10"
     * @return effective control id, or null if not found (callers must handle null)
     */
    def findEffectiveControlId(ops: StaticStateGlobalOps, controls: XFormsControls, sourceEffectiveId: String, targetPrefixedId: String): String = {
        val tree = controls.getCurrentControlTree
        // Don't do anything if there are no controls
        if (tree.getChildren.isEmpty)
            return null
        // NOTE: The implementation tries to do a maximum using the static state. One reason is that the source
        // control's effective id might not yet have an associated control during construction. E.g.:
        //
        // <xf:group id="my-group" ref="employee[index('employee-repeat')]">
        //
        // In that case, the XFormsGroupControl does not yet exist when its binding is evaluated. However, its
        // effective id is known and passed as source, and can be used for resolving the id passed to the index()
        // function.
        //
        // We trust the caller to pass a valid source effective id. That value is always internal, i.e. not created by a
        // form author. On the other hand, the target id cannot be trusted as it is typically specified by the form
        // author.
        // 1: Check preconditions
        require(sourceEffectiveId ne null, "Source effective id is required.")
        // 3: Implement XForms 1.1 "4.7.1 References to Elements within a repeat Element" algorithm
        // Find closest common ancestor repeat
        val sourcePrefixedId = XFormsUtils.getPrefixedId(sourceEffectiveId)
        val sourceParts = XFormsUtils.getEffectiveIdSuffixParts(sourceEffectiveId)
        // Accumulates the target's repeat suffix, e.g. ".3-2-5"
        val targetIndexBuilder = new StringBuilder
        // Append one iteration index, inserting the repeat separator first and index separators between parts
        def appendIterationToSuffix(iteration: Int) {
            if (targetIndexBuilder.isEmpty)
                targetIndexBuilder.append(REPEAT_SEPARATOR)
            else if (targetIndexBuilder.length != 1)
                targetIndexBuilder.append(REPEAT_INDEX_SEPARATOR)
            targetIndexBuilder.append(iteration.toString)
        }
        val ancestorRepeatPrefixedId = ops.findClosestCommonAncestorRepeat(sourcePrefixedId, targetPrefixedId)
        ancestorRepeatPrefixedId foreach { ancestorRepeatPrefixedId ⇒
            // There is a common ancestor repeat, use the current common iteration as starting point
            for (i ← 0 to ops.getAncestorRepeatIds(ancestorRepeatPrefixedId).size)
                appendIterationToSuffix(sourceParts(i))
        }
        // Find list of ancestor repeats for destination WITHOUT including the closest ancestor repeat if any
        // NOTE: make a copy because the source might be an immutable wrapped Scala collection which we can't reverse
        val targetAncestorRepeats = ops.getAncestorRepeatIds(targetPrefixedId, ancestorRepeatPrefixedId).reverse
        // Follow repeat indexes towards target, one repeat level at a time
        for (repeatPrefixedId ← targetAncestorRepeats) {
            val repeatControl = tree.getControl(repeatPrefixedId + targetIndexBuilder.toString).asInstanceOf[XFormsRepeatControl]
            // Control might not exist
            if (repeatControl eq null)
                return null
            // Update iteration suffix with the repeat's current index
            appendIterationToSuffix(repeatControl.getIndex)
        }
        // Return target
        targetPrefixedId + targetIndexBuilder.toString
    }
    // Update the container's and all its descendants' bindings.
    // Returns the BindingUpdater so callers can inspect visited/updated/optimized counts.
    def updateBindings(control: XFormsContainerControl) = {
        val xpathDependencies = control.containingDocument.getXPathDependencies
        xpathDependencies.bindingUpdateStart()
        // Start from the context following the preceding sibling, or the parent's child context
        val startBindingContext =
            control.preceding map (_.bindingContextForFollowing) getOrElse control.parent.bindingContextForChild
        val updater = new BindingUpdater(control.containingDocument, startBindingContext)
        visitControls(control, updater, includeCurrent = true)
        xpathDependencies.bindingUpdateDone()
        Option(control) foreach logTreeIfNeeded("after subtree update")
        updater
    }
    // Update the bindings for the entire tree of controls.
    // Returns the BindingUpdater so callers can inspect visited/updated/optimized counts.
    def updateBindings(containingDocument: XFormsContainingDocument) = {
        val updater = new BindingUpdater(containingDocument, containingDocument.getContextStack.resetBindingContext())
        visitAllControls(containingDocument, updater)
        Option(containingDocument.getControls.getCurrentControlTree.getRoot) foreach logTreeIfNeeded("after full tree update")
        updater
    }
    // Visitor which updates (or, when dependencies allow, merely refreshes) the binding of every
    // control it visits, threading a BindingContext through the tree. Exposes counters so callers
    // can report how many controls were visited, fully updated, or optimized away.
    class BindingUpdater(val containingDocument: XFormsContainingDocument, val startBindingContext: BindingContext) extends XFormsControlVisitorListener {
        // Iterations created during this update; we must not recurse into them (see below)
        private var newIterationsIds = Set.empty[String]
        // Start with initial context
        private var bindingContext = startBindingContext
        private val xpathDependencies = containingDocument.getXPathDependencies
        // Current depth in the tree, and the depth at which a relevance change was detected (-1 if none)
        private var level = 0
        private var relevanceChangeLevel = -1
        private var _visitedCount = 0
        def visitedCount = _visitedCount
        private var _updatedCount = 0
        def updatedCount = _updatedCount
        private var _optimizedCount = 0
        def optimizedCount = _optimizedCount
        // NOTE(review): this var is public while the other counters are private — confirm whether intentional
        var _partialFocusRepeatOption: Option[XFormsRepeatControl] = None
        def partialFocusRepeat = _partialFocusRepeatOption
        def startVisitControl(control: XFormsControl): Boolean = {
            // If this is a new iteration, don't recurse into it
            if (newIterationsIds.nonEmpty && control.isInstanceOf[XFormsRepeatIterationControl] && newIterationsIds(control.effectiveId))
                return false
            level += 1
            _visitedCount += 1
            // Value of relevance of content before messing with the binding
            val wasContentRelevant = control.wasContentRelevant
            // Update is required if:
            //
            // - we are within a container whose content relevance has changed
            // - or dependencies tell us an update is required
            // - or the control has a @model attribute (TODO TEMP HACK: because that causes model variable evaluation!)
            def mustReEvaluateBinding =
                (relevanceChangeLevel != -1 && level > relevanceChangeLevel) ||
                    xpathDependencies.requireBindingUpdate(control.prefixedId) ||
                    (control.staticControl.element.attribute(XFormsConstants.MODEL_QNAME) ne null)
            // Only update the binding if needed
            if (mustReEvaluateBinding) {
                control match {
                    case repeatControl: XFormsRepeatControl ⇒
                        // Update iterations
                        val oldRepeatSeq = control.bindingContext.getNodeset.asScala
                        control.evaluateBindingAndValues(bindingContext, update = true)
                        val (newIterations, partialFocusRepeatOption) = repeatControl.updateIterations(oldRepeatSeq, null, isInsertDelete = false)
                        // Remember partial focus out of repeat if needed (only the first one encountered)
                        if (this._partialFocusRepeatOption.isEmpty && partialFocusRepeatOption.isDefined)
                            this._partialFocusRepeatOption = partialFocusRepeatOption
                        // Remember newly created iterations so we don't recurse into them in startRepeatIteration()
                        // o It is not needed to recurse into them because their bindings are up to date since they have just been created
                        // o However they have not yet been evaluated. They will be evaluated at the same time the other controls are evaluated
                        // NOTE: don't call ControlTree.initializeRepeatIterationTree() here because refresh evaluates controls and dispatches events
                        this.newIterationsIds = newIterations map (_.getEffectiveId) toSet
                    case control ⇒
                        // Simply set new binding
                        control.evaluateBindingAndValues(bindingContext, update = true)
                }
                _updatedCount += 1
            } else {
                // Dependencies say the binding is unchanged: cheap refresh instead of full re-evaluation
                control.refreshBindingAndValues(bindingContext)
                _optimizedCount += 1
            }
            // Update context for children controls
            bindingContext = control.bindingContextForChild
            // Remember whether we are in a container whose content relevance has changed
            // NOTE: The correct logic at this time is to force binding re-evaluation if container relevance has
            // changed. Doing this only when content becomes relevant is not enough as shown with the following bug:
            // https://github.com/orbeon/orbeon-forms/issues/939
            if (relevanceChangeLevel == -1 && control.isInstanceOf[XFormsContainerControl] && wasContentRelevant != control.contentRelevant)
                relevanceChangeLevel = level // entering level of containing
            true
        }
        def endVisitControl(control: XFormsControl) = {
            // Check if we are exiting the level of a container whose content relevance has changed
            if (relevanceChangeLevel == level)
                relevanceChangeLevel = -1
            // Update context for following controls
            bindingContext = control.bindingContextForFollowing
            // When we exit a repeat control, discard the list of new iterations so we don't unnecessarily test on them
            if (control.isInstanceOf[XFormsRepeatControl])
                newIterationsIds = Set.empty[String]
            level -= 1
        }
    }
// Whether two nodesets contain identical items
def compareNodesets(nodeset1: Seq[Item], nodeset2: Seq[Item]): Boolean = {
// Can't be the same if the size has changed
if (nodeset1.size != nodeset2.size)
return false
val j = nodeset2.iterator
for (currentItem1 ← nodeset1) {
val currentItem2 = j.next()
if (! XFormsUtils.compareItems(currentItem1, currentItem2))
return false
}
true
}
// Iterator over a control's ancestors
class AncestorOrSelfIterator(start: XFormsControl) extends Iterator[XFormsControl] {
private var _next = start
def hasNext = _next ne null
def next() = {
val result = _next
_next = _next.parent
result
}
}
    // Listener notified when entering and exiting each control during a tree visit
    trait XFormsControlVisitorListener {
        // Return false to prune the visit of this control's subtree
        def startVisitControl(control: XFormsControl): Boolean
        def endVisitControl(control: XFormsControl)
    }
    // Adapter with no-op implementations, for listeners that only need one callback
    class XFormsControlVisitorAdapter extends XFormsControlVisitorListener {
        def startVisitControl(control: XFormsControl) = true
        def endVisitControl(control: XFormsControl) = ()
    }
    // Visit all the controls of the document's current control tree
    def visitAllControls(containingDocument: XFormsContainingDocument, listener: XFormsControlVisitorListener): Unit =
        visitAllControls(containingDocument.getControls.getCurrentControlTree, listener)
    // Visit all the controls of the given tree
    def visitAllControls(tree: ControlTree, listener: XFormsControlVisitorListener): Unit =
        visitSiblings(listener, tree.getChildren.asScala)
    // Iterator over the given control and its descendants, depth-first and in document
    // order (each child is yielded before its own descendants).
    case class ControlsIterator(private val start: XFormsControl, private val includeSelf: Boolean) extends Iterator[XFormsControl] {
        // Direct children of the start control (empty for non-containers)
        private val children = start match {
            case containerControl: XFormsContainerControl ⇒ containerControl.children.iterator
            case control ⇒ Iterator.empty
        }
        // Lazily-created iterator over the descendants of the child currently being traversed
        private var descendants: Iterator[XFormsControl] = Iterator.empty
        // Advance to the next control, or null when exhausted
        private def findNext(): XFormsControl =
            if (descendants.hasNext)
                // Descendants of current child
                descendants.next()
            else if (children.hasNext) {
                // Move to next child; if it is a container, queue its descendants after it
                val next = children.next()
                if (next.isInstanceOf[XFormsContainerControl])
                    descendants = ControlsIterator(next, includeSelf = false)
                next
            } else
                null
        private var current =
            if (includeSelf)
                start
            else
                findNext()
        def next() = {
            val result = current
            current = findNext()
            result
        }
        def hasNext = current ne null
    }
    // Evaluate the body with InstancesControls in scope (via the DynamicVariable below)
    def withDynamicStateToRestore[T](instancesControls: InstancesControls, topLevel: Boolean = false)(body: ⇒ T) =
        instancesControlsToRestore.withValue((instancesControls, topLevel))(body)
    // Evaluate the body with InstancesControls in scope (Java callers); always marks top level
    def withDynamicStateToRestoreJava(instancesControls: InstancesControls, runnable: Runnable) =
        withDynamicStateToRestore(instancesControls, topLevel = true)(runnable.run())
    // Get state to restore, if we are currently within withDynamicStateToRestore
    private def restoringDynamicState = instancesControlsToRestore.value
    // Control state being restored for the given effective id, if any
    def restoringControl(effectiveId: String) = restoringDynamicState flatMap (_._1.controls.get(effectiveId))
    // Instances being restored, as a Java collection, or null when not restoring (Java callers)
    def restoringInstancesJava = restoringDynamicState map (_._1.instancesJava) orNull
    // Whether we are restoring state at the top level
    def isRestoringDynamicState = restoringDynamicState exists (_._2)
    // ThreadLocal-backed holder for dynamic state restoration: (state, isTopLevel)
    private val instancesControlsToRestore = new DynamicVariable[(InstancesControls, Boolean)]
    // Visit all the descendant controls of the given container control.
    // If the listener's startVisitControl returns false for the container, the whole subtree is skipped.
    // FIXME: Use ControlsIterator instead and then remove this when done
    def visitControls(control: XFormsControl, listener: XFormsControlVisitorListener, includeCurrent: Boolean): Unit =
        control match {
            case containerControl: XFormsContainerControl ⇒
                // Container itself
                if (includeCurrent)
                    if (! listener.startVisitControl(containerControl))
                        return
                // Children
                visitSiblings(listener, containerControl.children)
                // Container itself
                if (includeCurrent)
                    listener.endVisitControl(containerControl)
            case control ⇒
                // Leaf control: start/end only, no recursion
                if (includeCurrent) {
                    listener.startVisitControl(control)
                    listener.endVisitControl(control)
                }
        }
    // Visit each sibling in order; recurse into containers, and into the children actions of
    // non-container controls. A false return from startVisitControl skips that control's
    // subtree and its endVisitControl callback.
    private def visitSiblings(listener: XFormsControlVisitorListener, children: Seq[XFormsControl]): Unit =
        for (currentControl ← children) {
            if (listener.startVisitControl(currentControl)) {
                currentControl match {
                    case container: XFormsContainerControl ⇒
                        visitSiblings(listener, container.children)
                    case nonContainer ⇒
                        // NOTE: Unfortunately we handle children actions of non container controls a bit differently
                        val childrenActions = nonContainer.getChildrenActions.asScala
                        if (childrenActions.nonEmpty)
                            visitSiblings(listener, childrenActions)
                }
                listener.endVisitControl(currentControl)
            }
        }
    // Log a subtree of controls as XML when "control-tree" debug logging is enabled.
    // Returns a function usable with `map` over Option[XFormsControl]: it logs (if enabled)
    // and passes the control through unchanged.
    private def logTreeIfNeeded(message: String) = {
        control: XFormsControl ⇒
            if (XFormsProperties.getDebugLogging.contains("control-tree"))
                control.containingDocument.getControls.getIndentedLogger.logDebug(message, control.toXMLString)
            control
    }
} | evlist/orbeon-forms | src/main/scala/org/orbeon/oxf/xforms/control/Controls.scala | Scala | lgpl-2.1 | 22,290 |
import java.util.Scanner
import java.io.File
import scala.collection.immutable.SortedMap
// Read whitespace-separated tokens from example.txt and count occurrences per word.
// SortedMap keeps the final report in alphabetical key order, and the default of 0
// avoids a contains-check the first time a word is seen.
val in = new Scanner(new File("example.txt"))
var counts = SortedMap[String, Int]() withDefault (_ => 0)
while (in.hasNext) {
  val key = in.next()
  // `+` on an immutable map already replaces any existing binding, so the previous
  // `counts - key + (key -> ...)` removal step was redundant
  counts += (key -> (counts(key) + 1))
}
for ((word, count) <- counts) println(word + ": " + count)
| demiazz/scala-impatient | chapter-04/exercise-04/main.scala | Scala | unlicense | 400 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.util
/**
 * Build a map with String type of key, and it also supports either key case
 * sensitive or insensitive.
 */
object StringKeyHashMap {
  /**
   * Create a [[StringKeyHashMap]]. When `caseSensitive` is false every key is
   * normalized with `toLowerCase` on each access, so "Key" and "key" address the
   * same entry; when true, keys are used verbatim.
   */
  def apply[T](caseSensitive: Boolean): StringKeyHashMap[T] = if (caseSensitive) {
    new StringKeyHashMap[T](identity)
  } else {
    new StringKeyHashMap[T](_.toLowerCase)
  }
}

/**
 * A mutable hash map with String keys where `normalizer` is applied to every key,
 * on writes and reads alike. Note that `iterator` therefore yields the normalized
 * keys, since that is what is stored.
 */
class StringKeyHashMap[T](normalizer: (String) => String) {
  private val base = new collection.mutable.HashMap[String, T]()

  /** Look up a key; throws NoSuchElementException if absent (use `get` for a safe lookup). */
  def apply(key: String): T = base(normalizer(key))

  def get(key: String): Option[T] = base.get(normalizer(key))

  /** Insert or replace a binding; returns the previous value for the key, if any. */
  def put(key: String, value: T): Option[T] = base.put(normalizer(key), value)

  def remove(key: String): Option[T] = base.remove(normalizer(key))

  // `iterator` replaces the deprecated `toIterator` (deprecated since Scala 2.13)
  def iterator: Iterator[(String, T)] = base.iterator

  def clear(): Unit = base.clear()
}
| kimoonkim/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala | Scala | apache-2.0 | 1,654 |
/* Copyright 2009-2021 EPFL, Lausanne */
package stainless
package extraction
package object innerfuns {

  // Trees for the "inner functions" extraction phase: standard Inox simple symbols
  // extended with the inner-functions-specific abstract symbols.
  object trees extends Trees with inox.ast.SimpleSymbols {
    case class Symbols(
      functions: Map[Identifier, FunDef],
      sorts: Map[Identifier, ADTSort]
    ) extends SimpleSymbols with InnerFunsAbstractSymbols {
      override val symbols: this.type = this
    }

    override def mkSymbols(functions: Map[Identifier, FunDef], sorts: Map[Identifier, ADTSort]): Symbols = {
      Symbols(functions, sorts)
    }

    object printer extends Printer { val trees: innerfuns.trees.type = innerfuns.trees }
  }

  // This phase's extractor: closes over inner functions, lowering to the `inlining` trees
  def extractor(using inox.Context) = {
    utils.DebugPipeline("FunctionClosure", FunctionClosure(trees, inlining.trees))
  }

  // This phase's extractor chained with all subsequent phases
  def fullExtractor(using inox.Context) = extractor andThen nextExtractor

  // The next phase in the pipeline is `inlining`
  def nextExtractor(using inox.Context) = inlining.fullExtractor

  // Semantics provider for programs expressed in this phase's trees
  def phaseSemantics(using inox.Context): inox.SemanticsProvider { val trees: innerfuns.trees.type } = {
    extraction.phaseSemantics(innerfuns.trees)(fullExtractor)
  }

  // Semantics provider for the next phase's trees
  def nextPhaseSemantics(using inox.Context): inox.SemanticsProvider { val trees: inlining.trees.type } = {
    inlining.phaseSemantics
  }
}
| epfl-lara/stainless | core/src/main/scala/stainless/extraction/innerfuns/package.scala | Scala | apache-2.0 | 1,207 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author Usman Nisar, John Miller
* @version 1.3
* @date Mon May 6 10:50:37 EDT 2013
* @see LICENSE (MIT style license file).
*
* @see www2012.wwwconference.org/proceedings/proceedings/p949.pdf
*
* Graph Dual Simulation Using Immutable Sets
*/
package scalation.graphalytics
import scala.collection.immutable.{Set => SET}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DualSim` class provides an implementation for Dual Graph Simulation.
 * Candidate mappings from `feasibleMates` are pruned iteratively (to a fixed
 * point) by removing data vertices whose children or parents fail to match.
 * @param g the data graph G(V, E, l)
 * @param q the query graph Q(U, D, k)
 */
class DualSim (g: Graph, q: Graph)
      extends GraphMatcher (g, q)
{
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Apply the Dual Graph Simulation pattern matching algorithm to find the mappings
     * from the query graph 'q' to the data graph 'g'. These are represented by a
     * multi-valued function 'phi' that maps each query graph vertex 'u' to a
     * set of data graph vertices '{v}'.
     */
    def mappings (): Array [SET [Int]] = nisarDualSim (feasibleMates ())
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Given the mappings 'phi' produced by the 'feasibleMates' method,
     * eliminate mappings 'u -> v' when (1) v's children fail to match u's
     * or (2) v's parents fail to match u's. Repeats until no removal occurs.
     * NOTE: phi(u) may be reassigned while its (immutable) snapshot is being
     * iterated; the in-flight iteration is unaffected.
     * @param phi array of mappings from a query vertex u to { graph vertices v }
     */
    def nisarDualSim (phi: Array [SET [Int]]): Array [SET [Int]] =
    {
        var alter = true
        while (alter) { // check for matching children and parents
            alter = false
            // loop over query vertices u, data vertices v in phi(u), and u's children u_c
            for (u <- qRange; v <- phi(u); u_c <- q.ch(u)) {
                if ((g.ch(v) & phi(u_c)).isEmpty) { // v must have a child in phi(u_c)
                    phi(u) -= v // remove v due to lack of child match
                    alter = true
                } // if
            } //for
            // loop over query vertices u, data vertices v in phi(u), and u's parents u_p
            for (u <- qRange; v <- phi(u); u_p <- q.pa(u)) {
                if ((g.pa(v) & phi(u_p)).isEmpty) { // v must have a parent in phi(u_p)
                    phi(u) -= v // remove v due to lack of parent match
                    alter = true
                } // if
            } //for
        } // while
        phi
    } // nisarDualSim
} // DualSim class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DualSimTest` object is used to test the `DualSim` class on the
 * prebuilt example graphs 'Graph.g1p' / 'Graph.q1p'.
 * > run-main scalation.graphalytics.DualSimTest
 */
object DualSimTest extends App
{
    val g = Graph.g1p
    val q = Graph.q1p
    println (s"g.checkEdges = ${g.checkEdges}")
    g.printG ()
    println (s"q.checkEdges = ${q.checkEdges}")
    q.printG ()
    (new DualSim (g, q)).test ("DualSim") // Dual Graph Simulation Pattern Matcher
} // DualSimTest object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DualSimTest2` object is used to test the `DualSim` class on the
 * prebuilt example graphs 'Graph.g2p' / 'Graph.q2p'.
 * > run-main scalation.graphalytics.DualSimTest2
 */
object DualSimTest2 extends App
{
    val g = Graph.g2p
    val q = Graph.q2p
    println (s"g.checkEdges = ${g.checkEdges}")
    g.printG ()
    println (s"q.checkEdges = ${q.checkEdges}")
    q.printG ()
    (new DualSim (g, q)).test ("DualSim") // Dual Graph Simulation Pattern Matcher
} // DualSimTest2 object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DualSimTest3` object is used to test the `DualSim` class. Read the
 * query graph 'q' and data graph 'g' from files.
 * NOTE(review): unlike the other two tests, this one neither checks nor prints
 * 'g' — confirm whether that asymmetry is intentional.
 * > run-main scalation.graphalytics.DualSimTest3
 */
object DualSimTest3 extends App
{
    val g = GraphIO ("gfile")
    val q = GraphIO ("qfile")
    println (s"q.checkEdges = ${q.checkEdges}")
    q.printG ()
    (new DualSim (g, q)).test ("DualSim") // Dual Graph Simulation Pattern Matcher
} // DualSimTest3 object
| scalation/fda | scalation_1.3/scalation_modeling/src/main/scala/scalation/graphalytics/DualSim.scala | Scala | mit | 4,303 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.data.orm
import org.beangle.data.model.LongId
import org.beangle.data.model.pojo.Hierarchical
import org.beangle.data.model.pojo.Named
import org.beangle.data.model.Entity
import org.beangle.data.model.LongIdEntity
// A named menu entry with an optional parent, forming a menu hierarchy
trait Menu extends Named with LongIdEntity{
  // Parent menu; None for a root menu
  var parent: Option[Menu] = None
}
// Base implementation carrying a Long id and a display title
abstract class AbstractMenu extends LongId with Menu {
  // Not persisted (@transient)
  @transient var someVar: String = _
  var title: String = _
}
// A menu entry pointing at a URL
class UrlMenu extends AbstractMenu {
  var url: String = _
}
| beangle/data | orm/src/test/scala/org/beangle/data/orm/Menu.scala | Scala | lgpl-3.0 | 1,219 |
package uk.co.bbc.redux
import scala.xml._
import scala.collection.immutable.Seq._
import scala.util.matching.Regex
object Schedule {
  // Extracts the programme disk reference (the digits) from an image src of the form
  // ".../programme/<diskRef>/download/image-150.jpg".
  // NOTE(review): the backslashes here appear doubled (e.g. `\\/`) and the `.` before
  // "jpg" is unescaped — this looks like an export/escaping artifact; confirm against
  // the original source that the pattern matches plain "/programme/123/..." paths.
  val diskRefUrl = new Regex("""\\/programme\\/(\\d+)\\/download\\/image-150.jpg""")
  // Collect the disk references of all <img> elements in the response whose src
  // attribute matches diskRefUrl; non-matching images are dropped.
  // NOTE: `case diskRefUrl(diskRef)` requires the regex to match the entire src value.
  def createFromXMLResponse(xml:NodeSeq) : scala.collection.immutable.Seq[String] = {
    xml \\\\ "img" flatMap {
      node => node \\ "@src" text match {
        case diskRefUrl(diskRef) => Some(diskRef)
        case _ => None
      }
    }
  }
}
| bbcsnippets/redux-client-scala | src/main/scala/uk/co/bbc/redux/Schedule.scala | Scala | apache-2.0 | 466 |
/*
Copyright 2016-17, Hasso-Plattner-Institut fuer Softwaresystemtechnik GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package de.hpi.ingestion.sentenceembedding.models
import breeze.linalg.DenseVector
import org.apache.spark.mllib.linalg.{Vectors, Vector => MLVector}
/**
 * Represents a sentence embedding.
 * @param id index of the sentence used to identify it
 * @param sentence the text used to compute the sentence embedding
 * @param embedding the computed sentence embedding
 * @param cluster the cluster the sentence embedding was assigned to
 * @param tokens tokens of the sentence (empty if not set)
 */
case class SentenceEmbedding(
    id: Long,
    sentence: Option[String] = None,
    embedding: List[Double] = Nil,
    cluster: Option[Int] = None,
    tokens: List[String] = Nil
) {
    /**
     * Transforms this sentence embedding to a Breeze dense vector.
     * @return Breeze dense vector containing the sentence embedding
     */
    def toBreezeVector: DenseVector[Double] = {
        DenseVector(embedding.toArray)
    }

    /**
     * Transforms this sentence embedding to a Spark dense vector.
     * @return Spark vector containing the sentence embedding
     */
    def toSparkVector: MLVector = {
        Vectors.dense(embedding.toArray)
    }
}
| bpn1/ingestion | src/main/scala/de/hpi/ingestion/sentenceembedding/models/SentenceEmbedding.scala | Scala | apache-2.0 | 1,715 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert.shp
import java.io.InputStream
import java.util.Collections
import org.geotools.data.shapefile.{ShapefileDataStore, ShapefileDataStoreFactory}
import org.geotools.data.{DataStoreFinder, Query}
import org.locationtech.geomesa.convert.{EnrichmentCache, EvaluationContext}
import org.locationtech.geomesa.convert2.AbstractConverter
import org.locationtech.geomesa.convert2.AbstractConverter.{BasicConfig, BasicField, BasicOptions}
import org.locationtech.geomesa.utils.collection.CloseableIterator
import org.locationtech.geomesa.utils.geotools.CRS_EPSG_4326
import org.locationtech.geomesa.utils.io.{CloseWithLogging, PathUtils}
import org.locationtech.geomesa.utils.text.TextTools
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
class ShapefileConverter(sft: SimpleFeatureType, config: BasicConfig, fields: Seq[BasicField], options: BasicOptions)
extends AbstractConverter[SimpleFeature, BasicConfig, BasicField, BasicOptions](sft, config, fields, options) {
import org.locationtech.geomesa.convert.shp.ShapefileFunctionFactory.{InputSchemaKey, InputValuesKey}
  // Adds empty placeholders for the shapefile attribute schema and current row values to the
  // evaluation context; `parse` fills them in once the shapefile's schema is known, so that
  // ShapefileFunctionFactory can access shapefile properties by name.
  override def createEvaluationContext(globalParams: Map[String, Any]): EvaluationContext = {
    // inject placeholders for shapefile attributes into the evaluation context
    // used for accessing shapefile properties by name in ShapefileFunctionFactory
    val shpParams = Map(InputSchemaKey -> Array.empty[String], InputValuesKey -> Array.empty[Any])
    super.createEvaluationContext(globalParams ++ shpParams)
  }
// noinspection ScalaDeprecation
override def createEvaluationContext(
globalParams: Map[String, Any],
caches: Map[String, EnrichmentCache],
counter: org.locationtech.geomesa.convert.Counter): EvaluationContext = {
// inject placeholders for shapefile attributes into the evaluation context
// used for accessing shapefile properties by name in ShapefileFunctionFactory
val shpParams = Map(InputSchemaKey -> Array.empty[String], InputValuesKey -> Array.empty[Any])
super.createEvaluationContext(globalParams ++ shpParams, caches, counter)
}
override protected def parse(is: InputStream, ec: EvaluationContext): CloseableIterator[SimpleFeature] = {
CloseWithLogging(is) // we don't use the input stream, just close it
val path = ec.getInputFilePath.getOrElse {
throw new IllegalArgumentException(s"Shapefile converter requires '${EvaluationContext.InputFilePathKey}' " +
"to be set in the evaluation context")
}
val ds = ShapefileConverter.getDataStore(path)
val schema = ds.getSchema()
val names = Array.tabulate(schema.getAttributeCount)(i => schema.getDescriptor(i).getLocalName)
val array = Array.ofDim[Any](schema.getAttributeCount + 1)
val i = ec.indexOf(InputSchemaKey)
val j = ec.indexOf(InputValuesKey)
if (i == -1 || j == -1) {
logger.warn("Input schema not found in evaluation context, shapefile functions " +
s"${TextTools.wordList(new ShapefileFunctionFactory().functions.map(_.names.head))} will not be available")
} else {
ec.set(i, names)
ec.set(j, array)
}
val q = new Query
// Only ask to reproject if the Shapefile has a CRS
if (ds.getSchema.getCoordinateReferenceSystem != null) {
q.setCoordinateSystemReproject(CRS_EPSG_4326)
} else {
logger.warn(s"Shapefile does not have CRS info")
}
val reader = CloseableIterator(ds.getFeatureSource.getReader(q)).map { f => ec.line += 1; f }
CloseableIterator(reader, { CloseWithLogging(reader); ds.dispose() })
}
override protected def values(parsed: CloseableIterator[SimpleFeature],
ec: EvaluationContext): CloseableIterator[Array[Any]] = {
val i = ec.indexOf(InputValuesKey)
val j = ec.indexOf(InputSchemaKey)
if (i == -1 || j == -1) {
logger.warn("Input schema not found in evaluation context, shapefile functions " +
s"${TextTools.wordList(new ShapefileFunctionFactory().functions.map(_.names.head))} will not be available")
}
if (i == -1) {
var array: Array[Any] = null
parsed.map { feature =>
if (array == null) {
array = Array.ofDim(feature.getAttributeCount + 1)
}
var i = 1
while (i < array.length) {
array(i) = feature.getAttribute(i - 1)
i += 1
}
array(0) = feature.getID
array
}
} else {
val array = ec.get(i).asInstanceOf[Array[Any]]
parsed.map { feature =>
var i = 1
while (i < array.length) {
array(i) = feature.getAttribute(i - 1)
i += 1
}
array(0) = feature.getID
array
}
}
}
}
object ShapefileConverter {

  /**
    * Opens a shapefile data store for the given path.
    *
    * @param path input path pointing at a shapefile
    * @return the shapefile data store
    * @throws IllegalArgumentException if no data store can be created for the path
    */
  def getDataStore(path: String): ShapefileDataStore = {
    val url = PathUtils.getUrl(path)
    val params = Collections.singletonMap(ShapefileDataStoreFactory.URLP.key, url)
    Option(DataStoreFinder.getDataStore(params).asInstanceOf[ShapefileDataStore]).getOrElse {
      throw new IllegalArgumentException(s"Could not read shapefile using path '$path'")
    }
  }
}
| elahrvivaz/geomesa | geomesa-convert/geomesa-convert-shp/src/main/scala/org/locationtech/geomesa/convert/shp/ShapefileConverter.scala | Scala | apache-2.0 | 5,745 |
package controllers.registration.attachments
import itutil.ControllerISpec
import models.{ApplicantDetails, TransactorDetails}
import models.api._
import play.api.libs.json.Json
import play.api.test.Helpers._
/**
  * Integration spec for the "multiple documents required" page. Stubs a registration whose
  * attachments require both identity evidence and a VAT2 form, submitted by post.
  */
class MultipleDocumentsRequiredControllerISpec extends ControllerISpec {

  val showUrl: String = routes.MultipleDocumentsRequiredController.show.url

  s"GET $showUrl" must {
    "return OK" in {
      // registration with postal attachments: identity evidence + VAT2
      given()
        .user.isAuthorised()
        .audit.writesAudit()
        .audit.writesAuditMerged()
        .vatScheme.has("attachments", Json.toJson(Attachments(Some(Post), List[AttachmentType](IdentityEvidence, VAT2))))
        .registrationApi.getSection[EligibilitySubmissionData](Some(testEligibilitySubmissionData))

      val res = buildClient(showUrl).get()

      whenReady(res) { result =>
        result.status mustBe OK
      }
    }

    "return OK for a transactor" in {
      // same attachments, but the registration is made by a transactor, so transactor and
      // applicant details are stubbed as well (isTransactor = true)
      given()
        .user.isAuthorised()
        .audit.writesAudit()
        .audit.writesAuditMerged()
        .vatScheme.has("attachments", Json.toJson(Attachments(Some(Post), List[AttachmentType](IdentityEvidence, VAT2))))
        .registrationApi.getSection[TransactorDetails](Some(validTransactorDetails))
        .registrationApi.getSection[EligibilitySubmissionData](Some(testEligibilitySubmissionData.copy(isTransactor = true)))
        .vatScheme.has("applicant-details", Json.toJson(validFullApplicantDetails)(ApplicantDetails.writes))

      val res = buildClient(showUrl).get()

      whenReady(res) { result =>
        result.status mustBe OK
      }
    }
  }
}
| hmrc/vat-registration-frontend | it/controllers/registration/attachments/MultipleDocumentsRequiredControllerISpec.scala | Scala | apache-2.0 | 1,581 |
package com.sksamuel.elastic4s.requests.searches
import com.sksamuel.elastic4s.json.{XContentBuilder, XContentFactory}
import com.sksamuel.elastic4s.{ElasticError, ElasticRequest, Handler, HttpEntity, HttpResponse, ResponseHandler}
// Response for a clear-scroll call: whether it succeeded and how many contexts were freed.
case class ClearScrollResponse(succeeded: Boolean, num_freed: Int)

object SearchScrollHandlers extends SearchScrollHandlers

/** Request handlers for the scroll API: releasing scroll contexts and fetching further pages. */
trait SearchScrollHandlers {

  implicit object ClearScrollHandler extends Handler[ClearScrollRequest, ClearScrollResponse] {

    override def responseHandler: ResponseHandler[ClearScrollResponse] = new ResponseHandler[ClearScrollResponse] {
      override def handle(response: HttpResponse): Either[ElasticError, ClearScrollResponse] =
        if (response.statusCode == 200) Right(ResponseHandler.fromResponse[ClearScrollResponse](response))
        else Left(ElasticError.parse(response))
    }

    override def build(request: ClearScrollRequest): ElasticRequest = {
      val body = ClearScrollContentFn(request).string()
      logger.debug("Executing clear scroll: " + body)
      val entity = HttpEntity(body, "application/json")
      ElasticRequest("DELETE", "/_search/scroll/", entity)
    }
  }

  implicit object SearchScrollHandler extends Handler[SearchScrollRequest, SearchResponse] {

    override def responseHandler: ResponseHandler[SearchResponse] = new ResponseHandler[SearchResponse] {
      override def handle(response: HttpResponse): Either[ElasticError, SearchResponse] =
        if (response.statusCode == 200) Right(ResponseHandler.fromResponse[SearchResponse](response))
        else Left(ElasticError.parse(response))
    }

    override def build(req: SearchScrollRequest): ElasticRequest = {
      val body = SearchScrollBuilderFn(req).string()
      logger.debug("Executing search scroll: " + body)
      val entity = HttpEntity(body, "application/json")
      ElasticRequest("POST", "/_search/scroll", entity)
    }
  }
}
object SearchScrollBuilderFn {
  /** Renders a scroll continuation request as JSON: optional "scroll" keep-alive plus the "scroll_id". */
  def apply(req: SearchScrollRequest): XContentBuilder = {
    val json = XContentFactory.jsonBuilder()
    req.keepAlive.foreach(keepAlive => json.field("scroll", keepAlive))
    json.field("scroll_id", req.id)
    json.endObject()
  }
}
object ClearScrollContentFn {
  /** Renders a clear-scroll request as JSON: a "scroll_id" array of the context ids to release. */
  def apply(req: ClearScrollRequest): XContentBuilder = {
    val json = XContentFactory.jsonBuilder()
    json.array("scroll_id", req.ids.toArray)
    json.endObject()
  }
}
| stringbean/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/SearchScrollHandlers.scala | Scala | apache-2.0 | 2,531 |
package hello
import org.springframework.boot.SpringApplication
/**
* Sample application for running springboot with gradle and scala stack
* Base repo : https://github.com/sithu/HelloScala
* Original author sithu
* Via Gradle: gradle bootRun
*
* @author harsh00008
* @since 1.0
*/
object HelloWebApplication {

  /**
   * Application entry point: boots the Spring context defined by [[HelloConfig]].
   *
   * Uses explicit `: Unit =` instead of the deprecated procedure syntax, and drops the
   * stray Java-style semicolon.
   *
   * @param args command line arguments passed by the JVM (unused here)
   */
  def main(args: Array[String]): Unit = {
    SpringApplication.run(classOf[HelloConfig])
  }
}
| harsh00008/hello-world-scala | src/main/scala/hello/HelloWebApplication.scala | Scala | mit | 407 |
package sandbox.hashing
import java.lang.Long.rotateLeft
/**
  * MurmurHash3, x64 128-bit variant. Port of the public-domain reference algorithm;
  * produces two 64-bit halves of the 128-bit hash.
  */
object MurmurHash3 {

  // x64/128 block-mixing constants from the reference implementation.
  private val c1: Long = 0x87c37b91114253d5L
  private val c2: Long = 0x4cf5ad432745937fL

  /** Reads eight bytes starting at `offset` as a little-endian unsigned long. */
  def getLongLittleEndian(buf: Array[Byte], offset: Int): Long = {
    var value = 0L
    var i = 7
    while (i >= 0) {
      value = (value << 8) | (buf(offset + i) & 0xffL)
      i -= 1
    }
    value
  }

  /** Final avalanche step: spreads entropy of `l` across all 64 bits. */
  def fmix64(l: Long): Long = {
    var x = l
    x = (x ^ (x >>> 33)) * 0xff51afd7ed558ccdL
    x = (x ^ (x >>> 33)) * 0xc4ceb9fe1a85ec53L
    x ^ (x >>> 33)
  }

  /**
    * Hashes `len` bytes of `key` starting at `offset` with the given seed.
    * @return the two 64-bit halves of the 128-bit hash
    */
  def murmurhash3_x64_128(key: Array[Byte], offset: Int, len: Int, seed: Int): (Long, Long) = {
    // seed both halves with the seed treated as an unsigned 32-bit value
    var h1: Long = seed & 0x00000000FFFFFFFFL
    var h2: Long = seed & 0x00000000FFFFFFFFL

    // body: consume as many complete 16-byte blocks as fit
    val roundedEnd = offset + (len & 0xFFFFFFF0) // round down to 16 byte block
    var pos = offset
    while (pos < roundedEnd) {
      var k1 = getLongLittleEndian(key, pos)
      var k2 = getLongLittleEndian(key, pos + 8)

      k1 *= c1
      k1 = rotateLeft(k1, 31)
      k1 *= c2
      h1 ^= k1
      h1 = rotateLeft(h1, 27)
      h1 += h2
      h1 = h1 * 5 + 0x52dce729

      k2 *= c2
      k2 = rotateLeft(k2, 33)
      k2 *= c1
      h2 ^= k2
      h2 = rotateLeft(h2, 31)
      h2 += h1
      h2 = h2 * 5 + 0x38495ab5

      pos += 16
    }

    // tail: up to 15 leftover bytes, accumulated little-endian into k1 (bytes 0-7)
    // and k2 (bytes 8-14), mirroring the reference if-ladder as two loops
    val tailLen = len & 15
    var k1 = 0L
    var k2 = 0L

    var b = 8
    while (b < tailLen) {
      k2 |= (key(roundedEnd + b) & 0xffL) << ((b - 8) * 8)
      b += 1
    }
    if (tailLen > 8) {
      k2 *= c2
      k2 = rotateLeft(k2, 33)
      k2 *= c1
      h2 ^= k2
    }

    val k1End = if (tailLen < 8) tailLen else 8
    b = 0
    while (b < k1End) {
      k1 |= (key(roundedEnd + b) & 0xffL) << (b * 8)
      b += 1
    }
    if (tailLen > 0) {
      k1 *= c1
      k1 = rotateLeft(k1, 31)
      k1 *= c2
      h1 ^= k1
    }

    // finalization
    h1 ^= len
    h2 ^= len
    h1 += h2
    h2 += h1
    h1 = fmix64(h1)
    h2 = fmix64(h2)
    h1 += h2
    h2 += h1
    (h1, h2)
  }
}
| alexandrnikitin/bloom-filter-scala | sandbox/src/main/scala/sandbox/hashing/MurmurHash3.scala | Scala | mit | 2,746 |
package org.scribe.oauth
import org.scribe.model._
import org.scribe.builder.api.DefaultApi20
import org.scribe.extractors.AccessTokenExtractor
import org.specs2.mutable.Specification
import org.specs2.mock._
/**
 * Spec for [[ProxyAuth20WithHeadersServiceImpl]]: verifies that grant_type, scope and custom
 * headers are applied to access-token requests, in both POST-body and query-string modes.
 */
class ProxyAuth20WithHeadersServiceSpec extends Specification with Mockito {

  val mockApi = mock[DefaultApi20]
  mockApi.getAccessTokenExtractor returns mock[AccessTokenExtractor]

  /** Builds a mocked OAuth request whose `send` returns a stubbed response (deduplicates the 8 identical setups). */
  private def stubbedRequest: ProxyOAuthRequest = {
    val request = mock[ProxyOAuthRequest]
    request.send returns mock[Response]
    request
  }

  /**
   * Creates a service under test that always issues `fakeRequest` and adds a fixed "foo" -> "bar" header.
   *
   * @param getParameter when true, parameters go on the query string instead of the POST body
   * @param addGrantType when true, the grant_type parameter is added
   * @param scope        optional OAuth scope to configure on the mocked config
   */
  def createTestInstance(fakeRequest: ProxyOAuthRequest)(getParameter: Boolean = false, addGrantType: Boolean = false, scope: Option[String] = None) = {
    val mockCfg = mock[OAuthConfig]
    mockCfg.hasScope returns scope.isDefined
    if (mockCfg.hasScope) {
      mockCfg.getScope returns scope.get
    }

    new ProxyAuth20WithHeadersServiceImpl(
      mockApi, mockCfg, 1, 2, null, 0, getParameter, addGrantType) {
      override private[oauth] def createRequest: ProxyOAuthRequest = fakeRequest
      override def addHeaders(requestToken: Token, api: DefaultApi20, config: OAuthConfig): List[(String, String)] = {
        List("foo" -> "bar")
      }
    }
  }

  "The OAuth20-With-Headers service" should {
    "by default, add no headers" in {
      val r = stubbedRequest
      // base implementation without the header-adding override
      val i = new ProxyAuth20WithHeadersServiceImpl(
        mockApi, mock[OAuthConfig], 1, 2, null, 0, false, false) {
        override private[oauth] def createRequest: ProxyOAuthRequest = r
      }
      i.getAccessToken(mock[Token], mock[Verifier])
      there was no(r).addHeader(anyString, anyString)
    }
  }

  "The OAuth20-With-Headers service - POST body mode" should {
    "support adding the grant_type to a request" in {
      val r = stubbedRequest
      val i = createTestInstance(r)(false, true)
      i.getAccessToken(mock[Token], mock[Verifier])
      there was one(r).addBodyParameter("grant_type", "authorization_code")
    }

    "support adding the scope to a request" in {
      val r = stubbedRequest
      val i = createTestInstance(r)(false, false, Some("test-scope-variable"))
      i.getAccessToken(mock[Token], mock[Verifier])
      there was one(r).addBodyParameter("scope", "test-scope-variable")
    }

    "support adding headers to a request" in {
      val r = stubbedRequest
      val i = createTestInstance(r)()
      i.getAccessToken(mock[Token], mock[Verifier])
      there was one(r).addHeader("foo", "bar")
    }
  }

  "The OAuth20-With-Headers service - query-string mode" should {
    "support adding the grant_type to a request" in {
      val r = stubbedRequest
      val i = createTestInstance(r)(true, true)
      i.getAccessToken(mock[Token], mock[Verifier])
      there was one(r).addQuerystringParameter("grant_type", "authorization_code")
    }

    "support adding the scope to a request" in {
      val r = stubbedRequest
      val i = createTestInstance(r)(true, false, Some("test-scope-variable"))
      i.getAccessToken(mock[Token], mock[Verifier])
      there was one(r).addQuerystringParameter("scope", "test-scope-variable")
    }

    "support adding headers to a request" in {
      val r = stubbedRequest
      val i = createTestInstance(r)(true)
      i.getAccessToken(mock[Token], mock[Verifier])
      there was one(r).addHeader("foo", "bar")
    }
  }
}
| themillhousegroup/pac4j-underarmour | src/test/scala/org/scribe/oauth/ProxyAuth20WithHeadersServiceSpec.scala | Scala | mit | 3,512 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package kafka.manager.utils
import kafka.manager.{Kafka_0_8_1_1, ClusterConfig}
import org.scalatest.{Matchers, FunSuite}
/**
* @author hiral
*/
class TestClusterConfig extends FunSuite with Matchers {

  test("invalid name") {
    // "qa!" contains a character the config validation must reject
    intercept[IllegalArgumentException] {
      ClusterConfig("qa!", "0.8.1.1", "localhost", jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None)
    }
  }

  test("invalid kafka version") {
    // "0.8.1" is not a recognised Kafka version string
    intercept[IllegalArgumentException] {
      ClusterConfig("qa", "0.8.1", "localhost:2181", jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None)
    }
  }

  test("serialize and deserialize") {
    val original = ClusterConfig("qa", "0.8.2.0", "localhost:2181", jmxEnabled = true, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None)
    val roundTripped = ClusterConfig.deserialize(ClusterConfig.serialize(original))
    assert(roundTripped.isSuccess === true)
    assert(original == roundTripped.get)
  }

  test("serialize and deserialize +jmx credentials") {
    val original = ClusterConfig("qa", "0.8.2.0", "localhost:2181", jmxEnabled = true, jmxUser = Some("mario"), jmxPass = Some("rossi"), pollConsumers = true, filterConsumers = true)
    val roundTripped = ClusterConfig.deserialize(ClusterConfig.serialize(original))
    assert(roundTripped.isSuccess === true)
    assert(original == roundTripped.get)
  }

  test("deserialize without version and jmxEnabled") {
    val original = ClusterConfig("qa", "0.8.2.0", "localhost:2181", jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None)
    // strip the kafkaVersion field to simulate a config persisted by an older release
    val legacyJson = ClusterConfig.serialize(original).replace(""","kafkaVersion":"0.8.2.0"""", "")
    assert(!legacyJson.contains("kafkaVersion"))
    val restored = ClusterConfig.deserialize(legacyJson)
    assert(restored.isSuccess === true)
    // a missing version must default to 0.8.1.1
    assert(original.copy(version = Kafka_0_8_1_1) == restored.get)
  }

  test("deserialize from 0.8.2-beta as 0.8.2.0") {
    val original = ClusterConfig("qa", "0.8.2-beta", "localhost:2181", jmxEnabled = false, pollConsumers = true, filterConsumers = true, activeOffsetCacheEnabled = true, jmxUser = None, jmxPass = None)
    // rewrite the serialized version back to the beta label and check it still deserializes
    val betaJson = ClusterConfig.serialize(original).replace(""","kafkaVersion":"0.8.2.0"""", ""","kafkaVersion":"0.8.2-beta"""")
    val restored = ClusterConfig.deserialize(betaJson)
    assert(restored.isSuccess === true)
    assert(original == restored.get)
  }
}
| zeph/kafka-manager | test/kafka/manager/utils/TestClusterConfig.scala | Scala | apache-2.0 | 2,714 |
package main.scala.projectEulerScala
import scala.collection.immutable.HashSet
// 748317
/**
 * Project Euler problem 37: find the sum of the only eleven primes that remain prime when
 * digits are repeatedly removed from either the left or the right (answer: 748317).
 */
object P37_TruncatablePrimes {

  /** Lazily memoized stream of all primes, built by trial division against earlier primes. */
  val primeStream: Stream[Int] = 2 #:: Stream.from(3, 2).filter { i =>
    primeStream.takeWhile(j => j * j <= i).forall(j => i % j > 0)
  }

  // Lookup set of the first 70001 primes (indices 0 to 70000).
  // Taking a prefix of the memoized stream is O(n); the previous
  // `(0 to 70000).map(primeStream.drop(_).head)` re-walked the stream from the
  // start for every index, which was accidentally O(n^2).
  val primes = HashSet() ++ primeStream.take(70001)

  /** True if the string denotes a number contained in the precomputed prime set. */
  def stringIsPrime(s: String): Boolean = primes.contains(s.toInt)

  /**
   * A prime is truncatable when every non-empty suffix (left truncation) and every
   * non-empty prefix (right truncation) of its decimal form is itself prime,
   * e.g. 3797 -> 797, 97, 7 and 379, 37, 3.
   */
  def isTruncatablePrime(n: Int): Boolean = {
    val ns = n.toString
    ns.tails.filter(_.length > 0).forall(stringIsPrime) &&
      ns.inits.filter(_.length > 0).forall(stringIsPrime)
  }

  /** Prints the truncatable primes above 7 (single digits are excluded by definition) and their sum. */
  def main(args: Array[String]): Unit = {
    val tp = primes.filter(_ > 7).filter(isTruncatablePrime)
    println(tp)
    println(tp.sum)
  }
}
| rck109d/projectEuler | src/main/scala/projectEulerScala/P37_TruncatablePrimes.scala | Scala | lgpl-3.0 | 732 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.server.protocol.swank
import java.io.File
import org.ensime.sexp._
import org.ensime.api._
import org.ensime.util.{ EnsimeSpec, EscapingStringInterpolation }
import org.scalactic.source.Position
/**
 * Round-trip tests for the SWANK wire protocol: `marshal` checks server-message serialization
 * to s-expressions, `unmarshal` checks parsing of client RPC requests.
 */
class SwankFormatsSpec extends EnsimeSpec with EnsimeTestData {
  import SwankFormats._
  import SwankTestData._
  import EscapingStringInterpolation._

  /**
   * Serializes `value` inside an RPC envelope (call id 666 for responses, none for events),
   * strips the `(:return (:ok ...) 666)` wrapper when present, and compares the compact-printed
   * payload against `via`. With `via = None` the payload is printed instead of asserted
   * (debugging aid for writing new expectations).
   */
  def marshal(value: EnsimeServerMessage, via: Option[String])(implicit p: Position): Unit = {
    val envelope = value match {
      case r: RpcResponse => RpcResponseEnvelope(Some(666), value)
      case e: EnsimeEvent => RpcResponseEnvelope(None, value)
    }
    val sexp = envelope.toSexp match {
      case SexpList(
        SexpSymbol(":return") ::
          SexpList(SexpSymbol(":ok") :: payload :: Nil) ::
          SexpNumber(callId) :: Nil
        ) if callId == 666 => payload
      case payload => payload
    }
    via match {
      case None => println(s"$value = ${sexp.compactPrint}")
      // using String form because SexpSymbol("nil") for BasicTypeHint is not commutative
      case Some(expected) => sexp.compactPrint shouldBe expected
    }
  }

  /** Convenience overload: always assert against the expected string. */
  def marshal(value: EnsimeServerMessage, via: String)(implicit p: Position): Unit = marshal(value, Some(via))

  /** Wraps `from` in a `(:swank-rpc ... 666)` envelope, parses it, and asserts the decoded request. */
  def unmarshal(from: String, to: RpcRequest)(implicit p: Position): Unit = {
    val sexp = s"(:swank-rpc ${from} 666)"
    //println(sexp + " => " + sexp.parseSexp)
    sexp.parseSexp.convertTo[RpcRequestEnvelope].req shouldBe to
  }

  // test data exposes RawFile; the formats under test expect java.io.File
  implicit def toFile(raw: RawFile): File = raw.file.toFile
"SWANK Formats" should "unmarshal startup messages" in {
unmarshal(
"(swank:connection-info)",
ConnectionInfoReq: RpcRequest
)
}
it should "unmarshal RpcSearchRequests" in {
unmarshal(
"""(swank:public-symbol-search ("foo" "bar") 10)""",
PublicSymbolSearchReq(List("foo", "bar"), 10): RpcRequest
)
unmarshal(
s"""(swank:import-suggestions "$file1" 1 ("foo" "bar") 10)""",
ImportSuggestionsReq(Left(file1), 1, List("foo", "bar"), 10): RpcRequest
)
}
it should "unmarshal RpcAnalyserRequests" in {
unmarshal(
s"""(swank:remove-file "$file1")""",
RemoveFileReq(file1): RpcRequest
)
unmarshal(
s"""(swank:typecheck-file (:file "$file1" :contents "{/* code here */}" :contents-in "$file2"))""",
TypecheckFileReq(sourceFileInfo): RpcRequest
)
unmarshal(
s"""(swank:typecheck-files ("$file1" "$file2"))""",
TypecheckFilesReq(List(Left(file1), Left(file2))): RpcRequest
)
unmarshal(
s"""(swank:typecheck-files ((:file "$file1") (:file "$file2" :contents "xxx")))""",
TypecheckFilesReq(List(Right(SourceFileInfo(file1)), Right(SourceFileInfo(file2, Some("xxx"), None)))): RpcRequest
)
unmarshal(
"""(swank:unload-all)""",
UnloadAllReq: RpcRequest
)
unmarshal(
"""(swank:typecheck-all)""",
TypecheckAllReq: RpcRequest
)
unmarshal(
s"""(swank:doc-uri-at-point "$file1" (1 10))""",
DocUriAtPointReq(Left(file1), OffsetRange(1, 10)): RpcRequest
)
unmarshal(
s"""(swank:doc-uri-at-point (:file "$file1" :contents-in "$file2") (1 10))""",
DocUriAtPointReq(Right(SourceFileInfo(file1, None, Some(file2))), OffsetRange(1, 10)): RpcRequest
)
unmarshal(
s"""(swank:doc-uri-for-symbol "foo.bar" "Baz" nil)""",
DocUriForSymbolReq("foo.bar", Some("Baz"), None): RpcRequest
)
unmarshal(
s"""(swank:completions (:file "$file1" :contents "{/* code here */}" :contents-in "$file2") 10 100 t nil)""",
CompletionsReq(sourceFileInfo, 10, 100, true, false): RpcRequest
)
unmarshal(
"""(swank:package-member-completion "foo" "bar")""",
PackageMemberCompletionReq("foo", "bar"): RpcRequest
)
unmarshal(
s"""(swank:uses-of-symbol-at-point "$file1" 100)""",
UsesOfSymbolAtPointReq(Left(file1), 100): RpcRequest
)
unmarshal(
s"""(swank:type-by-name "foo.bar")""",
TypeByNameReq("foo.bar"): RpcRequest
)
unmarshal(
s"""(swank:type-by-name-at-point "foo.bar" "$file1" (1 10))""",
TypeByNameAtPointReq("foo.bar", Left(file1), OffsetRange(1, 10)): RpcRequest
)
unmarshal(
s"""(swank:type-at-point "$file1" (1 100))""",
TypeAtPointReq(Left(file1), OffsetRange(1, 100)): RpcRequest
)
unmarshal(
s"""(swank:inspect-type-at-point "$file1" (1 100))""",
InspectTypeAtPointReq(Left(file1), OffsetRange(1, 100)): RpcRequest
)
unmarshal(
s"""(swank:inspect-type-by-name "foo.Bar")""",
InspectTypeByNameReq("foo.Bar"): RpcRequest
)
unmarshal(
s"""(swank:symbol-at-point "$file1" 101)""",
SymbolAtPointReq(Left(file1), 101): RpcRequest
)
unmarshal(
s"""(swank:symbol-by-name "foo.Bar" "baz" nil)""",
SymbolByNameReq("foo.Bar", Some("baz"), None): RpcRequest
)
unmarshal(
s"""(swank:inspect-package-by-path "foo.bar")""",
InspectPackageByPathReq("foo.bar"): RpcRequest
)
unmarshal(
s"""(swank:diff-refactor 1 (end 100 file "$file1" newName "bar" start 1) nil)""",
RefactorReq(1, RenameRefactorDesc("bar", file1, 1, 100), false): RpcRequest
)
unmarshal(
s"""(swank:symbol-designations "$file1" 1 100 (object val))""",
SymbolDesignationsReq(
Left(file1), 1, 100,
List(ObjectSymbol, ValSymbol)
): RpcRequest
)
unmarshal(
s"""(swank:symbol-designations (:file "$file1") 1 100 (object val))""",
SymbolDesignationsReq(
Right(SourceFileInfo(file1, None, None)), 1, 100,
List(ObjectSymbol, ValSymbol)
): RpcRequest
)
unmarshal(
s"""(swank:expand-selection "$file1" 100 200)""",
ExpandSelectionReq(file1, 100, 200): RpcRequest
)
unmarshal(
s"""(swank:implicit-info "$file1" (0 123))""",
ImplicitInfoReq(Left(file1), OffsetRange(0, 123))
)
unmarshal(
s"""(swank:structure-view (:file "$file1" :contents "{/* code here */}" :contents-in "$file2"))""",
StructureViewReq(sourceFileInfo): RpcRequest
)
unmarshal(
s"""(swank:ast-at-point (:file "$file1" :contents "{/* code here */}" :contents-in "$file2") (1 100))""",
AstAtPointReq(sourceFileInfo, OffsetRange(1, 100)): RpcRequest
)
unmarshal(
s"""(swank:unload-file (:file "$file1"))""",
UnloadFileReq(sourceFileInfo2): RpcRequest
)
}
it should "unmarshal RpcDebugRequests" in {
unmarshal(
"""(swank:debug-active-vm)""",
DebugActiveVmReq: RpcRequest
)
unmarshal(
"""(swank:debug-attach "mylovelyhorse" "13")""",
DebugAttachReq("mylovelyhorse", "13"): RpcRequest
)
unmarshal(
"""(swank:debug-stop)""",
DebugStopReq: RpcRequest
)
unmarshal(
s"""(swank:debug-set-break "$file1" 13)""",
DebugSetBreakReq(file1, 13): RpcRequest
)
unmarshal(
s"""(swank:debug-clear-break "$file1" 13)""",
DebugClearBreakReq(file1, 13): RpcRequest
)
unmarshal(
s"""(swank:debug-clear-all-breaks)""",
DebugClearAllBreaksReq: RpcRequest
)
unmarshal(
s"""(swank:debug-list-breakpoints)""",
DebugListBreakpointsReq: RpcRequest
)
unmarshal(
s"""(swank:debug-run)""",
DebugRunReq: RpcRequest
)
unmarshal(
s"""(swank:debug-continue "13")""",
DebugContinueReq(dtid): RpcRequest
)
unmarshal(
s"""(swank:debug-step "13")""",
DebugStepReq(dtid): RpcRequest
)
unmarshal(
s"""(swank:debug-next "13")""",
DebugNextReq(dtid): RpcRequest
)
unmarshal(
s"""(swank:debug-step-out "13")""",
DebugStepOutReq(dtid): RpcRequest
)
unmarshal(
s"""(swank:debug-locate-name "13" "foo")""",
DebugLocateNameReq(dtid, "foo"): RpcRequest
)
unmarshal(
s"""(swank:debug-value (:type element :object-id "13" :index 14))""",
DebugValueReq(debugLocationArray): RpcRequest
)
unmarshal(
s"""(swank:debug-to-string "13" (:type element :object-id "13" :index 14))""",
DebugToStringReq(dtid, debugLocationArray): RpcRequest
)
unmarshal(
s"""(swank:debug-set-value (:type element :object-id "13" :index 14) "bar")""",
DebugSetValueReq(debugLocationArray, "bar"): RpcRequest
)
unmarshal(
s"""(swank:debug-backtrace "13" 100 200)""",
DebugBacktraceReq(dtid, 100, 200): RpcRequest
)
}
it should "marshal EnsimeGeneralEvent as EnsimeEvent" in {
marshal(
SendBackgroundMessageEvent("ABCDEF", 1): EnsimeEvent,
"""(:background-message 1 "ABCDEF")"""
)
marshal(
AnalyzerReadyEvent: EnsimeEvent,
"(:compiler-ready)"
)
marshal(
FullTypeCheckCompleteEvent: EnsimeEvent,
"(:full-typecheck-finished)"
)
marshal(
IndexerReadyEvent: EnsimeEvent,
"(:indexer-ready)"
)
marshal(
NewScalaNotesEvent(
isFull = false,
List(Note("foo.scala", "testMsg", NoteWarn, 50, 55, 77, 5))
): EnsimeEvent,
"""(:scala-notes (:notes ((:file "foo.scala" :msg "testMsg" :severity warn :beg 50 :end 55 :line 77 :col 5))))"""
)
marshal(
ClearAllScalaNotesEvent: EnsimeEvent,
"(:clear-all-scala-notes)"
)
}
it should "marshal DebugEvent as EnsimeEvent" in {
marshal(
DebugOutputEvent("XXX"): EnsimeEvent,
"""(:debug-event (:type output :body "XXX"))"""
)
marshal(
DebugStepEvent(DebugThreadId(207), "threadNameStr", sourcePos1.file, sourcePos1.line): EnsimeEvent,
s"""(:debug-event (:type step :thread-id "207" :thread-name "threadNameStr" :file "$file1" :line 57))"""
)
marshal(
DebugBreakEvent(DebugThreadId(209), "threadNameStr", sourcePos1.file, sourcePos1.line): EnsimeEvent,
s"""(:debug-event (:type breakpoint :thread-id "209" :thread-name "threadNameStr" :file "$file1" :line 57))"""
)
marshal(
DebugVmStartEvent: EnsimeEvent,
"""(:debug-event (:type start))"""
)
marshal(
DebugVmDisconnectEvent: EnsimeEvent,
"""(:debug-event (:type disconnect))"""
)
marshal(
DebugExceptionEvent(33L, dtid, "threadNameStr", Some(sourcePos1.file), Some(sourcePos1.line)): EnsimeEvent,
s"""(:debug-event (:type exception :exception 33 :thread-id "13" :thread-name "threadNameStr" :file "$file1" :line 57))"""
)
marshal(
DebugExceptionEvent(33L, dtid, "threadNameStr", None, None): EnsimeEvent,
"""(:debug-event (:type exception :exception 33 :thread-id "13" :thread-name "threadNameStr"))"""
)
marshal(
DebugThreadStartEvent(dtid): EnsimeEvent,
"""(:debug-event (:type threadStart :thread-id "13"))"""
)
marshal(
DebugThreadDeathEvent(dtid): EnsimeEvent,
"""(:debug-event (:type threadDeath :thread-id "13"))"""
)
}
it should "marshal DebugLocation" in {
marshal(
DebugObjectReference(57L): DebugLocation,
"""(:type reference :object-id "57")"""
)
marshal(
DebugArrayElement(DebugObjectId(58L), 2): DebugLocation,
"""(:type element :object-id "58" :index 2)"""
)
marshal(
DebugObjectField(DebugObjectId(58L), "fieldName"): DebugLocation,
"""(:type field :object-id "58" :field "fieldName")"""
)
marshal(
DebugStackSlot(DebugThreadId(27), 12, 23): DebugLocation,
"""(:type slot :thread-id "27" :frame 12 :offset 23)"""
)
}
it should "marshal DebugValue" in {
marshal(
DebugPrimitiveValue("summaryStr", "typeNameStr"): DebugValue,
"""(:val-type prim :summary "summaryStr" :type-name "typeNameStr")"""
)
marshal(
DebugStringInstance("summaryStr", List(debugClassField), "typeNameStr", DebugObjectId(5L)): DebugValue,
"""(:val-type str :summary "summaryStr" :fields ((:index 19 :name "nameStr" :type-name "typeNameStr" :summary "summaryStr")) :type-name "typeNameStr" :object-id "5")"""
)
marshal(
DebugObjectInstance("summaryStr", List(debugClassField), "typeNameStr", DebugObjectId(5L)): DebugValue,
"""(:val-type obj :summary "summaryStr" :fields ((:index 19 :name "nameStr" :type-name "typeNameStr" :summary "summaryStr")) :type-name "typeNameStr" :object-id "5")"""
)
marshal(
DebugNullValue("typeNameStr"): DebugValue,
"""(:val-type null :type-name "typeNameStr")"""
)
marshal(
DebugArrayInstance(3, "typeName", "elementType", DebugObjectId(5L)): DebugValue,
"""(:val-type arr :length 3 :type-name "typeName" :element-type-name "elementType" :object-id "5")"""
)
marshal(
debugClassField: DebugClassField,
"""(:index 19 :name "nameStr" :type-name "typeNameStr" :summary "summaryStr")"""
)
marshal(
debugStackLocal1: DebugStackLocal,
"""(:index 3 :name "name1" :summary "summary1" :type-name "type1")"""
)
marshal(
debugStackFrame: DebugStackFrame,
s"""(:index 7 :locals ((:index 3 :name "name1" :summary "summary1" :type-name "type1") (:index 4 :name "name2" :summary "summary2" :type-name "type2")) :num-args 4 :class-name "class1" :method-name "method1" :pc-location (:file "$file1" :line 57) :this-object-id "7")"""
)
marshal(
DebugBacktrace(List(debugStackFrame), dtid, "thread1"): DebugBacktrace,
s"""(:frames ((:index 7 :locals ((:index 3 :name "name1" :summary "summary1" :type-name "type1") (:index 4 :name "name2" :summary "summary2" :type-name "type2")) :num-args 4 :class-name "class1" :method-name "method1" :pc-location (:file "$file1" :line 57) :this-object-id "7")) :thread-id "13" :thread-name "thread1")"""
)
marshal(
sourcePos1: SourcePosition,
s"""(:type line :file "$file1" :line 57)"""
)
marshal(
sourcePos2: SourcePosition,
s"""(:type line :file "$file1" :line 59)"""
)
marshal(
sourcePos3: SourcePosition,
"(:type empty)"
)
marshal(
sourcePos4: SourcePosition,
s"""(:type offset :file "$file1" :offset 456)"""
)
marshal(
breakPoint1: Breakpoint,
s"""(:file "$file1" :line 57)"""
)
marshal(
BreakpointList(List(breakPoint1), List(breakPoint2)): BreakpointList,
s"""(:active ((:file "$file1" :line 57)) :pending ((:file "$file1" :line 59)))"""
)
marshal(
DebugVmSuccess(): DebugVmStatus,
"""(:type success :status "success")"""
)
marshal(
DebugVmError(303, "xxxx"): DebugVmStatus,
"""(:type error :error-code 303 :details "xxxx" :status "error")"""
)
}
it should "marshal various informational types" in {
marshal(
note1: Note,
note1Str
)
marshal(
completionInfo: CompletionInfo,
"""(:type-info (:arrow-type nil :name "type1" :decl-as method :full-name "FOO.type1") :name "name" :relevance 90 :to-insert "BAZ")"""
)
marshal(
completionInfo2: CompletionInfo,
"""(:name "nam" :relevance 91 :is-infix t)"""
)
marshal(
CompletionInfoList("fooBar", List(completionInfo)): CompletionInfoList,
"""(:prefix "fooBar" :completions ((:type-info (:arrow-type nil :name "type1" :decl-as method :full-name "FOO.type1") :name "name" :relevance 90 :to-insert "BAZ")))"""
)
marshal(
SymbolInfo("name", "localName", None, typeInfo): SymbolInfo,
"""(:name "name" :local-name "localName" :type (:arrow-type nil :name "type1" :decl-as method :full-name "FOO.type1"))"""
)
marshal(
NamedTypeMemberInfo("typeX", typeInfo, None, None, DeclaredAs.Method): EntityInfo,
"""(:name "typeX" :type (:arrow-type nil :name "type1" :decl-as method :full-name "FOO.type1") :decl-as method)"""
)
marshal(
entityInfo: EntityInfo,
entityInfoStr
)
marshal(
entityInfoTypeParams: EntityInfo,
entityInfoTypeParamsStr
)
marshal(
typeInfo: EntityInfo,
typeInfoStr
)
marshal(
packageInfo: EntityInfo,
"""(:info-type package :name "name" :full-name "fullName")"""
)
marshal(
interfaceInfo: InterfaceInfo,
"""(:type (:arrow-type nil :name "type1" :decl-as method :full-name "FOO.type1") :via-view "DEF")"""
)
marshal(
TypeInspectInfo(typeInfo, List(interfaceInfo)): TypeInspectInfo,
"""(:type (:arrow-type nil :name "type1" :decl-as method :full-name "FOO.type1") :interfaces ((:type (:arrow-type nil :name "type1" :decl-as method :full-name "FOO.type1") :via-view "DEF")) :info-type typeInspect)"""
)
marshal(
structureView: StructureView,
s"""(:view ((:keyword "class" :name "StructureView" :position (:type line :file "$file1" :line 57)) (:keyword "object" :name "StructureView" :position (:type line :file "$file1" :line 59) :members ((:keyword "type" :name "BasicType" :position (:type offset :file "$file1" :offset 456))))))"""
)
marshal(
astInfo: AstInfo,
"""(:ast "List(Apply(Select(Literal(Constant(1)), TermName(\\"$plus\\")), List(Literal(Constant(1)))))")"""
)
}
it should "marshal search related responses" in {
marshal(
SymbolSearchResults(List(methodSearchRes, typeSearchRes)): SymbolSearchResults,
s"""((:type method :name "abc" :local-name "a" :decl-as method :pos (:type line :file "$abd" :line 10) :owner-name "ownerStr") (:type type :name "abc" :local-name "a" :decl-as trait :pos (:type line :file "$abd" :line 10)))"""
)
marshal(
ImportSuggestions(List(List(methodSearchRes, typeSearchRes))): ImportSuggestions,
s"""(((:type method :name "abc" :local-name "a" :decl-as method :pos (:type line :file "$abd" :line 10) :owner-name "ownerStr") (:type type :name "abc" :local-name "a" :decl-as trait :pos (:type line :file "$abd" :line 10))))"""
)
marshal(
methodSearchRes: SymbolSearchResult,
s"""(:type method :name "abc" :local-name "a" :decl-as method :pos (:type line :file "$abd" :line 10) :owner-name "ownerStr")"""
)
marshal(
typeSearchRes: SymbolSearchResult,
s"""(:type type :name "abc" :local-name "a" :decl-as trait :pos (:type line :file "$abd" :line 10))"""
)
}
it should "marshal ranges and semantic highlighting" in {
marshal(
ERangePositions(ERangePosition(batchSourceFile, 75, 70, 90) :: Nil),
s"""((:file "$batchSourceFile" :offset 75 :start 70 :end 90))"""
)
marshal(
FileRange("/abc", 7, 9): FileRange,
"""(:file "/abc" :start 7 :end 9)"""
)
marshal(
SymbolDesignations(
symFile, List(
SymbolDesignation(7, 9, VarFieldSymbol),
SymbolDesignation(11, 22, ClassSymbol)
)
): SymbolDesignations,
s"""(:file "$symFile" :syms ((varField 7 9) (class 11 22)))"""
)
marshal(
ImplicitInfos(List(ImplicitConversionInfo(5, 6, symbolInfo))): ImplicitInfos,
s"""((:type conversion :start 5 :end 6 :fun $symbolInfoStr))"""
)
marshal(
ImplicitInfos(List(ImplicitParamInfo(5, 6, symbolInfo, List(symbolInfo, symbolInfo), true))): ImplicitInfos,
s"""((:type param :start 5 :end 6 :fun $symbolInfoStr :params ($symbolInfoStr $symbolInfoStr) :fun-is-implicit t))"""
)
}
it should "marshal refactoring messages" in {
marshal(
RefactorFailure(7, "message"): RefactorFailure,
"""(:procedure-id 7 :reason "message" :status failure)"""
)
marshal(
refactorDiffEffect: RefactorDiffEffect,
s"""(:procedure-id 9 :refactor-type addImport :diff "$file2")"""
)
}
it should "marshal legacy raw response types" in {
marshal(
FalseResponse,
"nil"
)
marshal(
TrueResponse,
"t"
)
marshal(
StringResponse("wibble"),
""""wibble""""
)
marshal(
VoidResponse,
"""nil"""
)
}
}
| pascr/ensime-server | protocol-swanky/src/test/scala/org/ensime/server/protocol/swank/SwankFormatsSpec.scala | Scala | gpl-3.0 | 20,017 |
package com.lynbrookrobotics.potassium.frc
import com.ctre.phoenix.CANifier
import com.lynbrookrobotics.potassium.{ClockMocking, Signal}
import edu.wpi.first.wpilibj.DriverStation
import org.scalatest.mockito.MockitoSugar
import org.scalatest.FunSuite
import com.lynbrookrobotics.potassium.streams._
import edu.wpi.first.wpilibj.DriverStation.Alliance
import org.mockito.Mockito._
import org.mockito.{ArgumentCaptor, ArgumentMatchers}
import squants.time.Milliseconds
class CANifierTest extends FunSuite with MockitoSugar {

  // Simple accessor check on the RGB factory.
  test("Custom color class stores RGB values") {
    val color = Color.rgb(255, 255, 255)
    assert(color.red == 255 && color.blue == 255 && color.green == 255)
  }

  // Out-of-range channel values are clamped into [0, 255] rather than rejected.
  test("Custom color class does not allow RGB values outside of valid range") {
    val lowerColor = Color.rgb(-10, -10, -10)
    assert(lowerColor == Color.rgb(0, 0, 0))
    val higherColor = Color.rgb(300, 300, 300)
    assert(higherColor == Color.rgb(255, 255, 255))
  }

  // Spot-checks HSV->RGB conversion at 60-degree hue steps plus the 359 edge.
  test("HSV to RGB conversion works correctly") {
    val color0 = Color.hsv(0, 1, 1)
    val color60 = Color.hsv(60, 1, 1)
    val color120 = Color.hsv(120, 1, 1)
    val color180 = Color.hsv(180, 1, 1)
    val color240 = Color.hsv(240, 1, 1)
    val color300 = Color.hsv(300, 1, 1)
    val color359 = Color.hsv(359, 1, 1)
    assert(color0 == Color.rgb(255, 0, 0))
    assert(color60 == Color.rgb(255, 255, 0))
    assert(color120 == Color.rgb(0, 255, 0))
    assert(color180 == Color.rgb(0, 255, 255))
    assert(color240 == Color.rgb(0, 0, 255))
    assert(color300 == Color.rgb(255, 0, 255))
    assert(color359 == Color.rgb(255, 0, 4))
  }

  // Drives the LED controller against a mocked CANifier with a mocked clock,
  // then verifies the per-channel duty cycles written for the default state
  // and for a cyan color stream.
  test("Setting LEDs to specific color outputs results in correct status") {
    val period = Milliseconds(10)
    implicit val (mockedClock, trigger) = ClockMocking.mockedClockTicker
    val mockedCanifier = mock[CANifier]
    val argumentCaptor: ArgumentCaptor[Double] = ArgumentCaptor.forClass(0.0.getClass)
    val hardware = LEDControllerHardware(mockedCanifier)
    val coreTicks = Stream.periodic[Unit](period)()
    val alliance = Signal.constant(Alliance.Red)
    val component = new LEDController(coreTicks, alliance)(hardware)
    component.resetToDefault()
    trigger.apply(period)
    component.setController(Stream.periodic[Color](period)(Color.rgb(0, 255, 255)))
    trigger.apply(period)
    // Each channel should have been written twice: once for the default
    // state and once for the cyan stream value.
    verify(mockedCanifier, times(2))
      .setLEDOutput(argumentCaptor.capture(), ArgumentMatchers.eq(CANifier.LEDChannel.LEDChannelA))
    verify(mockedCanifier, times(2))
      .setLEDOutput(argumentCaptor.capture(), ArgumentMatchers.eq(CANifier.LEDChannel.LEDChannelB))
    verify(mockedCanifier, times(2))
      .setLEDOutput(argumentCaptor.capture(), ArgumentMatchers.eq(CANifier.LEDChannel.LEDChannelC))
    // NOTE(review): the channel->color-component mapping below is inferred
    // from the captured values — confirm against LEDControllerHardware.
    val arguments = argumentCaptor.getAllValues
    assert(arguments.get(0) == 0.0)
    assert(arguments.get(1) == 1.0)
    assert(arguments.get(2) == 1.0)
    assert(arguments.get(3) == 0.0)
    assert(arguments.get(4) == 0.0)
    assert(arguments.get(5) == 1.0)
  }
}
| Team846/potassium | frc/jvm/src/test/scala/com/lynbrookrobotics/potassium/frc/CANifierTest.scala | Scala | mit | 3,003 |
/*
* Copyright (C) 2014 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.cassandra.lucene.util
import org.apache.cassandra.tracing.{Tracing => Tracer}
/** Wrapper for [[Tracer]] avoiding test environment failures.
*
* @author Andres de la Pena `adelapena@stratio.com`
*/
final class Tracer extends Logging {

  /** If Cassandra tracing is enabled.
    *
    * Inside this class the name `Tracer` resolves to the imported alias
    * (`Tracing => Tracer`), i.e. Cassandra's tracing singleton — not this
    * class, which has no companion object. Probing `isTracing` can throw a
    * linkage `Error` in test environments where Cassandra is not fully
    * initialized; in that case tracing is reported as unusable rather than
    * failing.
    */
  lazy val canTrace: Boolean = try {
    Tracer.isTracing
    true
  } catch {
    case e: Error =>
      logger.warn(s"Unable to trace: ${e.getMessage}", e)
      false
  }

  /** Traces the specified string message.
    *
    * `message` is by-name, so it is only built when tracing is active.
    *
    * @param message the message to be traced
    */
  def trace(message: => String): Unit = if (canTrace && Tracer.isTracing) Tracer.trace(message)
}
| Stratio/cassandra-lucene-index | plugin/src/main/scala/com/stratio/cassandra/lucene/util/Tracer.scala | Scala | apache-2.0 | 1,295 |
package drt.shared
import java.util.UUID
import drt.shared.CrunchApi.MillisSinceEpoch
import drt.shared.Terminals.Terminal
object StaffMovements {

  /** Expands every assignment into a pair of movements — a positive "start"
    * delta and a matching negative "end" delta sharing one UUID — returned
    * in chronological order.
    */
  def assignmentsToMovements(staffAssignments: Seq[StaffAssignment]): Seq[StaffMovement] = {
    val paired = for {
      assignment <- staffAssignments
      pairId = UUID.randomUUID()
      movement <- Seq(
        StaffMovement(assignment.terminal, assignment.name + " start", time = assignment.startDt, assignment.numberOfStaff, pairId, createdBy = assignment.createdBy),
        StaffMovement(assignment.terminal, assignment.name + " end", time = assignment.endDt, -assignment.numberOfStaff, pairId, createdBy = assignment.createdBy)
      )
    } yield movement
    paired.sortBy(_.time.millisSinceEpoch)
  }

  /** Sum of the deltas of all movements taking effect at or before the given
    * time. (Selecting by `<= cutoff` is equivalent to sorting and taking the
    * leading prefix, without the sort.)
    */
  def adjustmentsAt(movements: Seq[StaffMovement])(dateTime: SDateLike): Int = {
    val cutoff = dateTime.millisSinceEpoch
    movements.filter(_.time.millisSinceEpoch <= cutoff).map(_.delta).sum
  }

  /** Staff available at a terminal at a point in time: the rostered shift
    * staff plus any movement adjustments recorded for that terminal.
    */
  def terminalStaffAt(shiftAssignments: ShiftAssignments)(movements: Seq[StaffMovement])(terminalName: Terminal, dateTime: SDateLike): Int = {
    val terminalMovements = movements.filter(_.terminal == terminalName)
    shiftAssignments.terminalStaffAt(terminalName, dateTime) + adjustmentsAt(terminalMovements)(dateTime)
  }

  /** A StaffMovements holding no movements at all. */
  val empty: StaffMovements = StaffMovements(Seq())
}
case class StaffMovements(movements: Seq[StaffMovement]) extends HasExpireables[StaffMovements] {

  /** Returns a copy with the given movements appended. */
  def +(movementsToAdd: Seq[StaffMovement]): StaffMovements =
    copy(movements = movements ++ movementsToAdd)

  /** Returns a copy without any movement whose UUID is in `movementsToRemove`. */
  def -(movementsToRemove: Seq[UUID]): StaffMovements =
    copy(movements = movements.filterNot(sm => movementsToRemove.contains(sm.uUID)))

  /** Drops movement groups (keyed by shared UUID) in which every movement has
    * expired. A group is kept as long as at least one of its movements is
    * still unexpired, so matching start/end deltas are never separated.
    */
  def purgeExpired(expireBefore: () => SDateLike): StaffMovements = {
    val expireBeforeMillis = expireBefore().millisSinceEpoch
    val unexpiredPairsOfMovements = movements
      .groupBy(_.uUID)
      .values
      .filter { pair =>
        // Renamed from the misleading `neitherHaveExpired`: this predicate is
        // true when AT LEAST ONE movement of the pair is unexpired.
        val hasUnexpiredMovement = pair.exists(!_.isExpired(expireBeforeMillis))
        hasUnexpiredMovement
      }
      .flatten.toSeq
    copy(movements = unexpiredPairsOfMovements)
  }

  /** All movements whose UUID group overlaps the given local day. */
  def forDay(day: SDateLike): Seq[StaffMovement] = {
    val startOfDayMillis = day.getLocalLastMidnight.millisSinceEpoch
    val endOfDayMillis = day.getLocalNextMidnight.millisSinceEpoch
    movements
      .groupBy(_.uUID)
      .filter { case (_, movementsPair) => areInWindow(startOfDayMillis, endOfDayMillis, movementsPair) }
      .values
      .flatten
      .toSeq
  }

  /** True when a movement group touches the window: a lone movement must lie
    * inside it; a start/end pair matches when either endpoint does. Groups of
    * any other size never match.
    */
  def areInWindow(startOfDayMillis: MillisSinceEpoch,
                  endOfDayMillis: MillisSinceEpoch,
                  movementsPair: Seq[StaffMovement]): Boolean = {
    val chronologicalMovementsPair = movementsPair.sortBy(_.time.millisSinceEpoch).toList
    chronologicalMovementsPair match {
      case singleMovement :: Nil =>
        val movementMillis = singleMovement.time.millisSinceEpoch
        isInWindow(startOfDayMillis, endOfDayMillis, movementMillis)
      case start :: end :: Nil =>
        val firstInWindow = isInWindow(startOfDayMillis, endOfDayMillis, start.time.millisSinceEpoch)
        val lastInWindow = isInWindow(startOfDayMillis, endOfDayMillis, end.time.millisSinceEpoch)
        firstInWindow || lastInWindow
      case _ => false
    }
  }

  /** Inclusive containment check of a timestamp within a window. */
  def isInWindow(startOfDayMillis: MillisSinceEpoch,
                 endOfDayMillis: MillisSinceEpoch,
                 movementMillis: MillisSinceEpoch): Boolean = {
    startOfDayMillis <= movementMillis && movementMillis <= endOfDayMillis
  }
}
| UKHomeOffice/drt-scalajs-spa-exploration | shared/src/main/scala/drt/shared/StaffMovements.scala | Scala | apache-2.0 | 3,517 |
package io.youi.font
// Renders a Google Material icon as an <i class="material-icons"> element.
class MaterialIconView(protected val element: html.Element,
                       val existing: Boolean = false) extends HTMLComponent[html.Element] with MaterialIconViewTheme {

  // Convenience constructor creating a fresh <i> element.
  def this() = {
    this(create[html.Element]("i"))
  }

  element.classList.add("material-icons")
  // Load the Material icon font asynchronously, then apply it to this view.
  Material.load().map(fnt => font @= fnt)

  override protected def defaultParentTheme: Theme = MaterialIconView

  override def componentType: String = "MaterialIconView"
}
// Companion theme, plus a helper to wrap an already-present DOM element.
object MaterialIconView extends MaterialIconViewTheme {
  override protected def defaultParentTheme: Theme = HTMLComponent

  // Wraps an existing element (looked up by id within `in`) as an icon view.
  def existing(id: String, in: html.Element = document.body): MaterialIconViewTheme = {
    new MaterialIconView(in.byId[html.Element](id), existing = true)
  }
} | outr/youi | ui/js/src/main/scala/io/youi/font/MaterialIconView.scala | Scala | mit | 763 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.cancelables
import monix.execution.Cancelable
import monix.execution.Cancelable.Empty
import monix.execution.atomic.AtomicAny
/**
* Represents a Cancelable that can be queried for the canceled status.
*/
trait BooleanCancelable extends Cancelable {
  /** @return true in case this cancelable hasn't been canceled,
    * or false otherwise.
    */
  def isCanceled: Boolean
}
object BooleanCancelable {
  /** Builder for [[BooleanCancelable]].
    *
    * The returned instance flips a volatile flag on `cancel()`; the
    * read-before-write check avoids a redundant volatile write once the
    * flag is already set.
    */
  def apply(): BooleanCancelable =
    new BooleanCancelable {
      @volatile private[this] var _isCanceled = false
      def isCanceled = _isCanceled

      def cancel(): Unit = {
        if (!_isCanceled) _isCanceled = true
      }
    }

  /** Builder for [[BooleanCancelable]].
    *
    * @param callback is a function that will execute exactly once
    *        on canceling.
    */
  def apply(callback: () => Unit): BooleanCancelable =
    new BooleanCancelableTask(callback)

  /** Returns an instance of a [[BooleanCancelable]] that's
    * already canceled.
    */
  val alreadyCanceled: BooleanCancelable with Empty =
    new BooleanCancelable with Empty {
      val isCanceled = true
      def cancel() = ()
    }

  /** Returns a [[BooleanCancelable]] that can never be canceled.
    *
    * Useful as a low-overhead instance whose `isCanceled` value
    * is always `false`, thus similar in spirit with [[alreadyCanceled]].
    */
  val dummy: BooleanCancelable =
    new BooleanCancelable with Cancelable.IsDummy {
      val isCanceled = false
      def cancel() = ()
    }

  // Runs `cb` exactly once on cancel; an atomic reference to the callback
  // doubles as the canceled flag (null == canceled).
  private final class BooleanCancelableTask(cb: () => Unit) extends BooleanCancelable {

    private[this] val callbackRef = AtomicAny(cb)

    def isCanceled: Boolean = callbackRef.get() eq null

    def cancel(): Unit = {
      // Setting the callback to null with a `getAndSet` is solving
      // two problems: `cancel` is thus idempotent, plus we allow
      // the garbage collector to collect the task.
      val callback = callbackRef.getAndSet(null)
      if (callback != null) callback()
    }
  }
}
| alexandru/monifu | monix-execution/shared/src/main/scala/monix/execution/cancelables/BooleanCancelable.scala | Scala | apache-2.0 | 2,770 |
/*
* This software is licensed under the GNU Affero General Public License, quoted below.
*
* This file is a part of PowerAPI.
*
* Copyright (C) 2011-2016 Inria, University of Lille 1.
*
* PowerAPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* PowerAPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PowerAPI.
*
* If not, please consult http://www.gnu.org/licenses/agpl-3.0.html.
*/
package org.powerapi.reporter
import java.util.UUID
import scala.concurrent.duration.DurationInt
import akka.util.Timeout
import org.powerapi.UnitTest
import org.powerapi.core.Tick
import org.powerapi.core.power._
import org.powerapi.core.target.{Application, Process, Target}
import org.powerapi.module.PowerChannel.AggregatePowerReport
class ConsoleDisplaySuite extends UnitTest {
  val timeout = Timeout(1.seconds)

  override def afterAll() = {
    system.terminate()
  }

  // Captures stdout while displaying an aggregate power report and checks
  // the exact rendered status line.
  "A ConsoleDisplay" should "display an AggPowerReport message in console" in {
    val stream = new java.io.ByteArrayOutputStream()

    val muid = UUID.randomUUID()
    val baseTick = new Tick {
      val topic = ""
      val timestamp = System.currentTimeMillis()
    }
    val baseTargets = Set[Target](Application("firefox"), Process(1), Process(2))
    val baseDevices = Set[String]("cpu", "gpu", "ssd")
    val basePower = 10.W

    // Stub report exposing the fixed tick/target/device/power values above.
    val aggregatePowerReport = new AggregatePowerReport(muid) {
      override def ticks = Set(baseTick)
      override def targets = baseTargets
      override def devices = baseDevices
      override def power = basePower
    }

    Console.withOut(stream) {
      val out = new ConsoleDisplay
      out.display(aggregatePowerReport)
    }

    // NOTE(review): the expected suffix is written `\\n` — i.e. the two
    // characters backslash + n, not a newline. Confirm that this matches
    // ConsoleDisplay's actual line terminator.
    new String(stream.toByteArray) should equal(
      s"muid=$muid;timestamp=${baseTick.timestamp};targets=${baseTargets.mkString(",")};devices=${baseDevices.mkString(",")};power=${basePower.toMilliWatts} mW\\n"
    )
  }
}
| Spirals-Team/powerapi | powerapi-core/src/test/scala/org/powerapi/reporter/ConsoleDisplaySuite.scala | Scala | agpl-3.0 | 2,362 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
/**
* This class exists to restrict the visibility of TaskContext setters.
*/
private [spark] object TaskContextHelper {

  /** Installs the given TaskContext for the current thread. */
  def setTaskContext(tc: TaskContext): Unit = TaskContext.setTaskContext(tc)

  /** Clears the current thread's TaskContext. */
  def unset(): Unit = TaskContext.unset()
}
| Dax1n/spark-core | core/src/main/scala/org/apache/spark/TaskContextHelper.scala | Scala | apache-2.0 | 1,077 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.benchmarks
import java.io.{File, FileOutputStream, InputStreamReader}
import java.lang.reflect.Modifier
import java.net.URI
import ai.h2o.sparkling.H2OContext
import ai.h2o.sparkling.backend.utils.RestApiUtils
import com.google.common.reflect.ClassPath
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.json4s._
import org.json4s.jackson.Serialization._
import scala.collection.JavaConverters._
// Entry point for the benchmark suite: loads dataset specs, discovers
// benchmark classes by reflection, filters by CLI arguments, runs each batch
// against Spark + H2O and writes the measurements to per-batch output files.
object Runner extends RestApiUtils {
  val defaultDatasetSpecificationsFile = "datasets.json"
  val defaultOutputDir = new File("benchmarks", "output")
  val defaultWorkingDir = new URI("hdfs:///user/hadoop/")

  val spark = SparkSession
    .builder()
    .config(createSparkConf())
    .getOrCreate()

  val hc = H2OContext.getOrCreate()

  def createSparkConf(): SparkConf = {
    val conf = new SparkConf()
    // If master is not defined in system properties or environment variables, fallback to local.
    val master = conf.get("spark.master", "local[*]")
    conf.setMaster(master)
    // If the application name is not defined in system properties or environment variables,
    // set it to the class name.
    val appName = conf.get("spark.app.name", this.getClass.getSimpleName)
    conf.setAppName(appName)
    conf
  }

  def main(args: Array[String]): Unit = {
    val settings = processArguments(args)
    val datasetDetails = loadDatasetDetails(settings.datasetSpecificationsFile)
    val benchmarks = getBenchmarkClasses()
    val algorithms = AlgorithmBenchmarkBase.supportedAlgorithms
    // Narrow each dimension (dataset / benchmark / algorithm) by CLI filters.
    val filteredDatasetDetails = filterCollection[DatasetDetails]("Dataset", settings.dataset, datasetDetails, _.name)
    val filteredBenchmarks = filterCollection[Class[_]]("Benchmark", settings.benchmark, benchmarks, _.getSimpleName)
    val filteredAlgorithms = filterCollection[AlgorithmBundle](
      "Algorithm",
      settings.algorithm,
      algorithms,
      _.h2oAlgorithm.getClass.getSimpleName)
    val outputDir = settings.outputDir match {
      case Some(dir) => new File(dir)
      case None => defaultOutputDir
    }
    val workingDir = settings.workingDir match {
      case Some(dir) => new URI(dir)
      case None => defaultWorkingDir
    }
    val batches = createBatches(filteredDatasetDetails, filteredBenchmarks, filteredAlgorithms, workingDir)
    batches.foreach(batch => executeBatch(batch, outputDir))
    hc.stop(stopSparkContext = true)
  }

  // Parses flag/value argument pairs (-s, -b, -d, -a, -o, -w) into Settings.
  private def processArguments(args: Array[String]): Settings = {
    require(
      args.length % 2 == 0,
      "Wrong arguments. Example: -s datasetSpecificationFile " +
        "-b benchmarkName -d datasetName -a algorithmName -o outputDir -w workingDir")
    // Even positions are flags, odd positions are their values.
    val (keys, values) = args.zipWithIndex.partition { case (_, idx) => idx % 2 == 0 }
    val map = keys.map(_._1).zip(values.map(_._1)).toMap
    Settings(
      datasetSpecificationsFile = map.getOrElse("-s", defaultDatasetSpecificationsFile),
      benchmark = map.get("-b"),
      dataset = map.get("-d"),
      algorithm = map.get("-a"),
      outputDir = map.get("-o"),
      workingDir = map.get("-w"))
  }

  // Reads the JSON dataset specification from the classpath.
  private def loadDatasetDetails(datasetSpecificationsFile: String): Seq[DatasetDetails] = {
    val stream = getClass.getClassLoader.getResourceAsStream(datasetSpecificationsFile)
    val reader = new InputStreamReader(stream)
    implicit val formats = DefaultFormats
    try {
      read[Seq[DatasetDetails]](reader)
    } finally {
      reader.close()
    }
  }

  // Discovers all concrete BenchmarkBase subclasses in this package.
  private def getBenchmarkClasses(): Seq[Class[_]] = {
    val classLoader = Thread.currentThread().getContextClassLoader
    val classPath = ClassPath.from(classLoader)
    val packageName = this.getClass.getPackage.getName
    def isBenchmark(clazz: Class[_]) = {
      val isAbstract = Modifier.isAbstract(clazz.getModifiers)
      val inheritsFromBenchmarkBase = classOf[BenchmarkBase[_, _]].isAssignableFrom(clazz)
      !isAbstract && inheritsFromBenchmarkBase
    }
    val classes = classPath.getTopLevelClasses(packageName).asScala.map(_.load())
    classes.filter(isBenchmark(_)).toSeq
  }

  // Keeps only the item matching `filter` (by name), or everything when no
  // filter is given; fails fast on an unknown name.
  private def filterCollection[T](
      entity: String,
      filter: Option[String],
      collection: Seq[T],
      nameGetter: T => String): Seq[T] = filter match {
    case None => collection
    case Some(name) =>
      val result = collection.filter(nameGetter(_) == name)
      require(result.nonEmpty, s"$entity '$name' does not exist!")
      result
  }

  // Instantiates one benchmark per (dataset x benchmark [x algorithm]) combo.
  private def createBatches(
      datasetDetails: Seq[DatasetDetails],
      benchmarkClasses: Seq[Class[_]],
      algorithms: Seq[AlgorithmBundle],
      workingDir: URI): Seq[BenchmarkBatch] = {
    def isAlgorithmBenchmark(clazz: Class[_]): Boolean = classOf[AlgorithmBenchmarkBase[_, _]].isAssignableFrom(clazz)
    val benchmarkContexts = datasetDetails.map(BenchmarkContext(spark, hc, _, workingDir))
    benchmarkClasses.map { benchmarkClass =>
      // Algorithm benchmarks take (context, algorithm); others just (context).
      val parameterSets = if (isAlgorithmBenchmark(benchmarkClass)) {
        for (context <- benchmarkContexts; algorithm <- algorithms) yield Array(context, algorithm.newInstance())
      } else {
        benchmarkContexts.map(Array(_))
      }
      // NOTE(review): assumes each benchmark class has exactly one public
      // constructor — getConstructors ordering is otherwise unspecified.
      val benchmarkInstances = parameterSets.map { parameterSet =>
        benchmarkClass.getConstructors()(0).newInstance(parameterSet: _*).asInstanceOf[BenchmarkBase[_, _]]
      }
      BenchmarkBatch(benchmarkClass.getSimpleName, benchmarkInstances)
    }
  }

  // Runs every benchmark in the batch, clears H2O's DKV between runs, and
  // appends all measurements to a per-batch output file.
  private def executeBatch(batch: BenchmarkBatch, outputDir: File): Unit = {
    println(s"Executing benchmark batch '${batch.name}' ...")
    batch.benchmarks.foreach { benchmark =>
      benchmark.run()
      benchmark.exportMeasurements(System.out)
      // Wipe the H2O key-value store so runs do not influence each other.
      val endpoint = getClusterEndpoint(hc.getConf)
      delete(endpoint, "/3/DKV", hc.getConf)
    }
    outputDir.mkdirs()
    val sparkMaster = spark.conf.get("spark.master")
    val outputFile = new File(outputDir, s"${sparkMaster}_${hc.getConf.backendClusterMode}_${batch.name}.txt")
    val outputStream = new FileOutputStream(outputFile)
    try {
      batch.benchmarks.foreach(_.exportMeasurements(outputStream))
    } finally {
      outputStream.close()
    }
    println(s"Benchmark batch '${batch.name}' has finished.")
  }

  // A named group of benchmark instances executed together.
  private case class BenchmarkBatch(name: String, benchmarks: Seq[BenchmarkBase[_, _]])

  // Parsed command-line options; None means "no filter" / "use default".
  private case class Settings(
      datasetSpecificationsFile: String,
      benchmark: Option[String],
      dataset: Option[String],
      algorithm: Option[String],
      outputDir: Option[String],
      workingDir: Option[String])
}
| h2oai/sparkling-water | benchmarks/src/main/scala/ai/h2o/sparkling/benchmarks/Runner.scala | Scala | apache-2.0 | 7,346 |
package com.blinkbox.books.marvin.watcher
import java.net.URL
import java.util.concurrent.TimeUnit
import com.blinkbox.books.config._
import com.blinkbox.books.rabbitmq.RabbitMqConfig
import com.blinkbox.books.rabbitmq.RabbitMqConfirmedPublisher.PublisherConfiguration
import com.blinkbox.books.rabbitmq.RabbitMqConsumer.QueueConfiguration
import com.typesafe.config.Config
import com.blinkbox.books.config.RichConfig
import scala.concurrent.duration._
// Top-level watcher configuration: the watched directory layout plus messaging.
case class AppConfig(processingDirectory: String, inboundDirectory: String, storageDirectory: String, errorDirectory: String, messaging: MessagingConfig)

// RabbitMQ connection settings, publish retry interval, and the Marvin publisher.
case class MessagingConfig(rabbitmq: RabbitMqConfig, retryInterval: FiniteDuration, marvin: PublisherConfiguration)
object AppConfig {
  // Root key under which all watcher settings live in the Typesafe config.
  val prefix = "service.watcher"

  // Builds the application config by reading the directory paths and
  // messaging settings from the given Typesafe Config.
  def apply(config: Config) = new AppConfig(
    config.getString(s"$prefix.directories.processing"),
    config.getString(s"$prefix.directories.inbound"),
    config.getString(s"$prefix.directories.storage"),
    config.getString(s"$prefix.directories.error"),
    MessagingConfig(config, s"$prefix.rabbitmq")
  )
}
object MessagingConfig {
  // Reads RabbitMQ connection, retry and publisher settings.
  // NOTE(review): the RabbitMQ section is read from AppConfig.prefix, not
  // from the `prefix` parameter — presumably intentional; verify.
  def apply(config: Config, prefix: String) = new MessagingConfig(
    RabbitMqConfig(config.getConfig(AppConfig.prefix)),
    config.getFiniteDuration(s"$prefix.retryInterval"),
    PublisherConfiguration(config.getConfig(s"$prefix.output"))
  )
} | blinkboxbooks/watcher | src/main/scala/com/blinkbox/books/marvin/watcher/AppConfig.scala | Scala | mit | 1,371 |
/**
* Copyright (C) 2012 FuseSource Corp. All rights reserved.
* http://fusesource.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.mq.leveldb.util
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class LongCounter(private var value: Long = 0) extends Serializable {

  /** Resets the counter to zero. */
  def clear() = value = 0

  /** @return the current value. */
  def get() = value

  /** Sets the counter to the given value. */
  def set(value: Long) = this.value = value

  /** Increments by one and returns the new value. */
  def incrementAndGet() = addAndGet(1)

  /** Decrements by one and returns the new value. */
  def decrementAndGet() = addAndGet(-1)

  /** Adds `amount` and returns the new value. */
  def addAndGet(amount: Long) = {
    value += amount
    value
  }

  /** Increments by one and returns the previous value. */
  def getAndIncrement() = getAndAdd(1)

  /** Decrements by one and returns the previous value.
    * Bug fix: this previously called `getAndAdd(-11)`, subtracting 11
    * instead of 1.
    */
  def getAndDecrement() = getAndAdd(-1)

  /** Adds `amount` and returns the previous value. */
  def getAndAdd(amount: Long) = {
    val rc = value
    value += amount
    rc
  }

  override def toString() = get().toString
}
/*
* Copyright (C) 2005, The OpenURP Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.edu.room.model
import org.beangle.commons.lang.time.WeekTime
import org.beangle.data.model.LongId
import org.beangle.data.model.pojo.Updated
import org.openurp.base.edu.model.{Classroom, Project}
/** 可用时间
**/
// Availability window of a classroom: the room plus a weekly time slot.
class RoomAvailableTime extends LongId with Updated {

  /** Owning project. */
  var project: Project = _

  /** Classroom. */
  var room: Classroom = _

  /** Available time slot (week-based). */
  var time = new WeekTime
}
| openurp/api | edu/src/main/scala/org/openurp/edu/room/model/RoomAvailableTime.scala | Scala | lgpl-3.0 | 1,132 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.examples.cookbook
import com.spotify.scio.bigquery._
import com.spotify.scio.testing._
class DistinctByKeyExampleTest extends PipelineSpec {

  // Input rows: (corpus, word) pairs including duplicates and short words.
  val input: Seq[TableRow] = Seq(
    ("c1", "verylongword1"),
    ("c1", "verylongword1"),
    ("c1", "verylongword1"),
    ("c2", "verylongword2"),
    ("c2", "verylongword2"),
    ("c2", "verylongword3"),
    ("c1", "sw1"),
    ("c2", "sw2")
  ).map(kv => TableRow("corpus" -> kv._1, "word" -> kv._2))

  // Expected output: one row per distinct long word with a reference corpus.
  // NOTE(review): the dedup/short-word-filter semantics are inferred from
  // this data — confirm against DistinctByKeyExample itself.
  val expected: Seq[TableRow] = Seq(
    ("verylongword1", "c1"),
    ("verylongword2", "c2"),
    ("verylongword3", "c2")
  ).map(kv => TableRow("word" -> kv._1, "reference_play" -> kv._2))

  // Runs the example pipeline with mocked BigQuery IO and checks the output.
  "DistinctByKeyExample" should "work" in {
    JobTest[com.spotify.scio.examples.cookbook.DistinctByKeyExample.type]
      .args("--input=input.table", "--output=dataset.table")
      .input(BigQueryIO("input.table"), input)
      .output(BigQueryIO[TableRow]("dataset.table")) { coll =>
        coll should containInAnyOrder(expected)
      }
      .run()
  }
}
| spotify/scio | scio-examples/src/test/scala/com/spotify/scio/examples/cookbook/DistinctByKeyExampleTest.scala | Scala | apache-2.0 | 1,630 |
package im.dlg.api
import treehugger.forest._, definitions._
import treehuggerDSL._
// Generates (via the treehugger DSL) the source trees for RPC service
// traits: a shared base `Service` trait plus one `<Package>Service` trait
// per package with handler stubs and a request-dispatch partial function.
private[api] trait ApiServiceTrees extends TreeHelpers with StringHelperTrees {

  // Tree for the base `Service` trait: abstract client-data type, result
  // type aliases, the request-handling partial function, and the failure
  // recovery hooks shared by all generated services.
  protected val baseServiceTrees: Vector[Tree] = {
    Vector(
      TRAITDEF("Service") := BLOCK(
        TYPEVAR("ApiClientData"),
        TYPEVAR("HandleResult") withFlags Flags.PROTECTED := REF("Either") APPLYTYPE (
          "RpcError",
          "RpcOk"
        ),
        TYPEVAR("HandlerResult[A <: RpcResponse]") withFlags Flags.PROTECTED := REF("Either") APPLYTYPE (
          "RpcError",
          "A"
        ),
        VAL("handleRequestPartial", valueCache("PartialFunction[RpcRequest, ApiClientData => Future[HandleResult]]")),
        DEF("onFailure", TYPE_REF(REF(PartialFunctionClass) APPLYTYPE ("Throwable", "RpcError"))) :=
          (REF("PartialFunction") DOT "empty" APPLYTYPE ("Throwable", "RpcError")),
        DEF("recoverFailure[A <: RpcResponse]", TYPE_REF(REF(PartialFunctionClass) APPLYTYPE ("Throwable", "HandlerResult[A]"))) withFlags Flags.FINAL :=
          REF("onFailure") DOT "andThen" APPLY LAMBDA(PARAM("e")) ==> BLOCK(REF("Left") APPLY REF("e"))
      )
    )
  }

  // Generates the `<packageName>Service` trait: for each RPC an abstract
  // `doHandleX` and a final `handleX` wrapper with failure recovery, plus
  // the dispatch partial function mapping RequestX values to handlers.
  protected def packageApiServiceTrees(packageName: String, items: Vector[Item]): Vector[Tree] = {
    val rpcs = items.filter(_.isInstanceOf[RpcContent])

    if (rpcs.isEmpty) {
      Vector.empty
    } else {
      val handlers: Vector[Tree] = (rpcs map {
        case RpcContent(_, name, attributes, doc, response) ⇒
          val params = attributes map { attr ⇒
            // Maps a wire attribute type onto the corresponding Scala type.
            def scalaTyp(typ: Types.AttributeType): Type = typ match {
              case Types.Int32 ⇒ IntClass
              case Types.Int64 ⇒ LongClass
              case Types.Bool ⇒ BooleanClass
              case Types.Double ⇒ DoubleClass
              case Types.String ⇒ StringClass
              case Types.Bytes ⇒ arrayType(ByteClass)
              case Types.UUID ⇒ valueCache("java.util.UUID")
              case enum @ Types.Enum(_) ⇒ valueCache(s"Refs.${enum.name}.${enum.name}")
              case Types.Opt(optAttrType) ⇒ optionType(scalaTyp(optAttrType))
              case Types.List(listAttrType) ⇒ indexedSeqType(scalaTyp(listAttrType))
              case struct @ Types.Struct(_) ⇒ valueCache(s"Refs.${struct.name}")
              case trai @ Types.Trait(_) ⇒ valueCache(s"Refs.${trai.name}")
              // NOTE(review): `.get` throws on an unknown alias name —
              // consider failing with a clearer error message.
              case alias @ Types.Alias(aliasName) ⇒ scalaTyp(aliasesPrim.get(aliasName).get)
            }

            PARAM(attr.name, scalaTyp(attr.typ)): ValDef
          }

          val respType = response match {
            case _: AnonymousRpcResponse ⇒ f"Response$name%s"
            case named: NamedRpcResponse ⇒ f"Refs.Response${named.name}%s"
          }

          val hname = f"handle$name%s"
          val htype = valueCache(f"Future[HandlerResult[$respType%s]]")

          // workaround for eed3si9n/treehugger#26
          val shname =
            if (params.isEmpty)
              hname + "()"
            else
              hname

          val doHname = "do" + hname.capitalize

          val paramsWithClient = params :+ PARAM("clientData", valueCache("ApiClientData")).tree
          val attrNamesWithClient = attributes.map(a ⇒ REF(a.name)) :+ REF("clientData")

          Vector(
            DEF(doHname, htype)
              .withFlags(Flags.PROTECTED)
              .withParams(paramsWithClient).tree
              .withDoc(generateDoc(doc): _*),
            DEF(shname, htype)
              .withFlags(Flags.FINAL)
              .withParams(params)
              .withParams(PARAM("clientData", valueCache("ApiClientData")).withFlags(Flags.IMPLICIT)) :=
              REF(doHname) APPLY attrNamesWithClient DOT "recover" APPLY REF("recoverFailure")
          )
      }).flatten

      // Dispatch partial function: RequestX => clientData => Future result.
      val pfType = valueCache("PartialFunction[RpcRequest, ApiClientData => Future[HandleResult]]")

      val handleRequestDefPF = VAL("handleRequestPartial", pfType) withFlags Flags.OVERRIDE :=
        BLOCK(
          rpcs map {
            case RpcContent(_, name, attributes, _, _) ⇒
              val rqParams: Vector[Tree] = attributes map { attr ⇒
                REF("r") DOT attr.name: Tree
              }

              CASE(REF("r") withType valueCache(f"Request$name%s")) ==> (
                LAMBDA(PARAM("clientData", valueCache("ApiClientData"))) ==> BLOCK(
                  VAL("f") := (if (rqParams.isEmpty) {
                    REF(f"handle$name%s()") APPLY REF("clientData")
                  } else
                    REF(f"handle$name%s") APPLY rqParams APPLY REF("clientData")),
                  REF("f") DOT "map" APPLY BLOCK(
                    CASE(REF("Right") APPLY REF("rsp")) ==> (
                      REF("Right") APPLY (REF("RpcOk") APPLY REF("rsp"))
                    ),
                    CASE(REF("Left") APPLY REF("err")) ==> (
                      REF("Left") APPLY (REF("err"))
                    )
                  )
                )
              )
          }
        )

      val handleRequestDef = DEF("handleRequest", valueCache("Future[HandleResult]")) withParams (
        PARAM("clientData", valueCache("ApiClientData")),
        PARAM("request", valueCache(f"${packageName.capitalize}%sRpcRequest"))
      ) := BLOCK(
        REF("handleRequestPartial") APPLY REF("request") APPLY REF("clientData")
      )

      val ecDef: Tree = VAL("ec", valueCache("ExecutionContext")) withFlags (Flags.IMPLICIT, Flags.PROTECTED)

      Vector(
        TRAITDEF(f"${packageName.capitalize}Service")
          withParents "Service" := BLOCK(
            Vector(ecDef, handleRequestDefPF, handleRequestDef) ++
              handlers
          )
      )
    }
  }
}
| dialogs/sbt-dialog-api | src/main/scala/im/dlg/api/ApiServiceTrees.scala | Scala | mit | 5,896 |
package model
import scalaz.std.list._
import scalaz.syntax.traverse._
/**
* Created by ghseeli on 1/14/17.
*/
sealed trait Coordinate {
  /** The integer components of this coordinate. */
  def values: Seq[Int]

  /** Renders the coordinate as "(v1,v2,...,vn)".
    *
    * Uses `mkString(start, sep, end)`, which produces identical output to
    * the previous fold-and-dropRight approach for non-empty coordinates and
    * fixes the empty case: an empty coordinate now renders as "()" instead
    * of the malformed ")".
    */
  def display: String = values.mkString("(", ",", ")")
}
// Mixin for coordinate-like values supporting vector-style operations.
trait Addable[+A] {
  /** Component-wise sum of this value and `coordinate`. */
  def add[B >: A](coordinate: B): B with Addable[B]
  /** Component-wise negation. */
  def negate: A with Addable[A]
  /** Adjacent values (each component differing by at most 1). */
  def neighbors: Seq[A with Addable[A]]
}
// An n-dimensional integer coordinate supporting component-wise arithmetic.
case class NDimlCoordinate(values: Seq[Int]) extends Coordinate with Addable[NDimlCoordinate] {
  // Number of dimensions (one per component).
  val dim: Int = values.length
  // Coordinates whose components each differ from this one by -1, 0 or +1,
  // excluding this coordinate itself and any coordinate with a non-positive
  // component (assumes coordinates are 1-indexed -- TODO confirm against board model).
  override def neighbors: Seq[NDimlCoordinate] = {
    val base = List(-1,0,1)
    // Cartesian product of {-1,0,1} across all dimensions via scalaz's
    // `sequence` on List (each position is an independent choice).
    val diffs = (0 until dim).toList.map(i => base).sequence
    val removedDuplicate = diffs.map(i => this.add(NDimlCoordinate(i))).filter(coord => coord != this)
    val removedOutOfBounds = removedDuplicate.filter(coord => coord.values.forall(_ > 0))
    removedOutOfBounds
  }
  // Component-wise sum. NOTE(review): the match handles only NDimlCoordinate;
  // any other argument type throws a MatchError at runtime.
  def add[B >: NDimlCoordinate](coordinate: B): NDimlCoordinate = {
    coordinate match {
      case NDimlCoordinate(otherValues) => NDimlCoordinate(otherValues.zip(values).map{case (i,j) => i+j})
    }
  }
  // Component-wise negation.
  override def negate: NDimlCoordinate = NDimlCoordinate(values.map(-1*_))
}
| ghseeli/four-dim-tic-tac-toe | src/main/scala/model/Coordinate.scala | Scala | gpl-3.0 | 1,183 |
package satisfaction
package hadoop
package hive.ms
import satisfaction.Track
import satisfaction.TrackDescriptor
import satisfaction.fs.Path
import org.apache.hadoop.hive.conf.HiveConf
/**
* Allow the track to access the currently configured Hive MetaStore
* by extending HiveTrack, instead of just Track
*
*/
class HiveTrack(tr : TrackDescriptor) extends Track(tr) with Logging {

  // Lazily built so construction of the track does not touch Hive.
  implicit lazy val hiveConf: HiveConf = createHiveConf()

  implicit lazy val ms: MetaStore = createMetaStore()

  /** Builds a HiveConf from this track's configuration, pinning the
    * classloader to this class's loader so Hive resolves resources
    * consistently.
    */
  def createHiveConf(): HiveConf = {
    val conf = new HiveConf(Config(track), getClass)
    conf.setClassLoader(getClass.getClassLoader)
    conf
  }

  /** Creates a MetaStore client backed by [[hiveConf]]. */
  def createMetaStore(): MetaStore = new MetaStore()(hiveConf)
}
| jeromebanks/satisfaction | modules/hive-ms/src/main/scala/satisfaction/hive/ms/HiveTrack.scala | Scala | apache-2.0 | 783 |
package us.feliscat.text.normalizer.en
import us.feliscat.text.StringOption
/**
* @author K. Sakamoto
* Created on 2017/07/12
*/
/** Utility for stripping punctuation from English sentences. */
object EnglishPunctuations {
  // Character class of punctuation, with '.' excluded by the `&&[^.]`
  // intersection so sentence-final periods survive.
  private val punctuationPattern: String = """[\\p{Punct}&&[^.]]"""

  /** Removes all matching punctuation from the wrapped sentence. */
  def remove(sentenceOpt: StringOption): StringOption =
    sentenceOpt.map((sentence: String) => sentence.replaceAll(punctuationPattern, ""))
}
| ktr-skmt/FelisCatusZero-multilingual | libraries/src/main/scala/us/feliscat/text/normalizer/en/EnglishPunctuations.scala | Scala | apache-2.0 | 349 |
package com.metebalci
import org.scalatest._
import org.scalatest.Assertions._
class FirstAndSecondLensSpec extends FunSuite {

  test("first lens") {
    val pair = (1, 2)
    val lens = Lenses.first
    // get reads the first element; put replaces it, leaving the second intact.
    assert(lens.get(pair) == 1)
    assert(lens.put(3, pair) == (3, 2))
  }

  test("second lens") {
    val pair = (1, 2)
    val lens = Lenses.second
    // get reads the second element; put replaces it, leaving the first intact.
    assert(lens.get(pair) == 2)
    assert(lens.put(4, pair) == (1, 4))
  }
}
| metebalci/experiment-lenses-scala | src/test/scala/06-FirstAndSecondLensSpec.scala | Scala | gpl-2.0 | 428 |
package extracells.integration.igw
import java.awt.Desktop
import java.io.File
import java.net.URL
import java.net.URLConnection
import java.util.List
import net.minecraft.client.Minecraft
import net.minecraft.command.CommandBase
import net.minecraft.command.ICommandSender
import net.minecraft.util.ChatComponentText
import net.minecraft.util.EnumChatFormatting
import net.minecraft.util.IChatComponent
import net.minecraftforge.client.ClientCommandHandler
import net.minecraftforge.common.config.Configuration
import org.apache.commons.io.FileUtils
import cpw.mods.fml.client.FMLClientHandler
import cpw.mods.fml.common.FMLCommonHandler
import cpw.mods.fml.common.Loader
import cpw.mods.fml.common.ModContainer
import cpw.mods.fml.common.eventhandler.SubscribeEvent
import cpw.mods.fml.common.gameevent.TickEvent
import cpw.mods.fml.relauncher.Side
/**
* This class is meant to be copied to your own mod which implements IGW-Mod. When properly implemented by instantiating a new instance somewhere in your mod
* loading stage, this will notify the player when it doesn't have IGW in the instance. It also needs to have the config option enabled to
* notify the player. This config option will be generated in its own config file.
* @author MineMaarten https://github.com/MineMaarten/IGW-mod
*/
object IGWSupportNotifier {
  // Curse download URL for the latest IGW-Mod build.
  private val LATEST_DL_URL: String = "http://minecraft.curseforge.com/mc-mods/223815-in-game-wiki-mod/files/latest"
  // Name of the mod that declares IGW support; set during object initialisation.
  private var supportingMod: String = null
  /**
   * Needs to be instantiated somewhere in your mod's loading stage.
   */
  // This runs when the object is first referenced: on the client, if IGW-Mod is
  // absent and the config flag allows it, register this object on the FML event
  // bus and install the /igwmod_download command.
  if (FMLCommonHandler.instance.getSide == Side.CLIENT && !Loader.isModLoaded("IGWMod")) {
    val dir: File = new File(".", "config")
    val config: Configuration = new Configuration(new File(dir, "IGWMod.cfg"))
    config.load
    if (config.get(Configuration.CATEGORY_GENERAL, "enable_missing_notification", true, "When enabled, this will notify players when IGW-Mod is not installed even though mods add support.").getBoolean) {
      val mc: ModContainer = Loader.instance.activeModContainer
      val modid: String = mc.getModId
      val loadedMods: List[ModContainer] = Loader.instance.getActiveModList
      // NOTE(review): JavaConversions implicit conversions are deprecated;
      // JavaConverters with explicit .asScala would be preferable.
      import scala.collection.JavaConversions._
      for (container <- loadedMods) {
        if (container.getModId == modid) {
          supportingMod = container.getName
          FMLCommonHandler.instance.bus.register(this)
          ClientCommandHandler.instance.registerCommand(new CommandDownloadIGW)
        }
      }
    }
    config.save
  }
  // Fired every player tick; on the first client-side tick for the local player
  // it shows the "IGW missing" chat notice, then unregisters itself.
  @SubscribeEvent
  def onPlayerJoin(event: TickEvent.PlayerTickEvent) {
    if (event.player.worldObj.isRemote && event.player == FMLClientHandler.instance.getClientPlayerEntity) {
      event.player.addChatComponentMessage(IChatComponent.Serializer.func_150699_a("[\\"" + EnumChatFormatting.GOLD + "The mod " + supportingMod + " is supporting In-Game Wiki mod. " + EnumChatFormatting.GOLD + "However, In-Game Wiki isn't installed! " + "[\\"," + "{\\"text\\":\\"Download Latest\\",\\"color\\":\\"green\\",\\"clickEvent\\":{\\"action\\":\\"run_command\\",\\"value\\":\\"/igwmod_download\\"}}," + "\\"]\\"]"))
      FMLCommonHandler.instance.bus.unregister(this)
    }
  }
  // Client command that kicks off the download thread. Permission level -100
  // makes it usable by any player.
  private class CommandDownloadIGW extends CommandBase {
    override def getRequiredPermissionLevel: Int = {
      return -100
    }
    def getCommandName: String = {
      return "igwmod_download"
    }
    def getCommandUsage(p_71518_1_ : ICommandSender): String = {
      return getCommandName
    }
    def processCommand(p_71515_1_ : ICommandSender, p_71515_2_ : Array[String]) {
      new ThreadDownloadIGW
    }
  }
  // Background thread that downloads the IGW-Mod jar into the mods folder.
  // It starts itself from its own constructor.
  private class ThreadDownloadIGW extends Thread {
    setName("IGW-Mod Download Thread")
    start
    override def run {
      try {
        if (Minecraft.getMinecraft.thePlayer != null) Minecraft.getMinecraft.thePlayer.addChatMessage(new ChatComponentText("Downloading IGW-Mod..."))
        val url: URL = new URL(IGWSupportNotifier.LATEST_DL_URL)
        val connection: URLConnection = url.openConnection
        connection.connect
        val fileName: String = "IGW-Mod.jar"
        val dir: File = new File(".", "mods")
        val f: File = new File(dir, fileName)
        FileUtils.copyURLToFile(url, f)
        if (Minecraft.getMinecraft.thePlayer != null) Minecraft.getMinecraft.thePlayer.addChatMessage(new ChatComponentText(EnumChatFormatting.GREEN + "Successfully downloaded. Restart Minecraft to apply."))
        Desktop.getDesktop.open(dir)
        // NOTE(review): calling finalize explicitly is almost certainly
        // unintended -- it does not stop or clean up the thread.
        finalize
      }
      catch {
        // NOTE(review): catching Throwable swallows fatal errors; NonFatal
        // would be safer.
        case e: Throwable => {
          e.printStackTrace
          if (Minecraft.getMinecraft.thePlayer != null) Minecraft.getMinecraft.thePlayer.addChatComponentMessage(new ChatComponentText(EnumChatFormatting.RED + "Failed to download"))
          try {
            finalize
          }
          catch {
            case e1: Throwable => {
              e1.printStackTrace
            }
          }
        }
      }
    }
  }
}
| AmethystAir/ExtraCells2 | src/main/scala/extracells/integration/igw/IGWSupportNotifier.scala | Scala | mit | 4,962 |
/*
* Copyright 2014-16 Intelix Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package au.com.intelix.rs.core.codec.binary
import au.com.intelix.rs.core.codec.binary.BinaryProtocolMessages.{BinaryDialectInbound, BinaryDialectOutbound}
import au.com.intelix.rs.core.services.endpoint.akkastreams.ProtocolDialectStageBuilder
/** Marker trait for protocol stage builders operating on the binary dialect,
  * fixing the inbound/outbound message types of the generic
  * ProtocolDialectStageBuilder to the binary dialect messages.
  */
trait BinaryDialectStageBuilder extends ProtocolDialectStageBuilder[BinaryDialectInbound, BinaryDialectOutbound]
| intelix/reactiveservices | platform/codec-binary/src/main/scala/au/com/intelix/rs/core/codec/binary/BinaryDialectStageBuilder.scala | Scala | apache-2.0 | 962 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.connector
import slamdata.Predef._
import quasar.{NonTerminal, RenderTree, ScalarStages}, RenderTree.ops._
import cats.~>
import cats.implicits._
import fs2.Stream
import monocle.Lens
import qdata.QDataDecode
import scalaz.Show
import scalaz.syntax.show._
import shims.showToScalaz
import tectonic.Plate
/** The result of evaluating a query: some representation of the output data
  * plus the scalar stages that still need to be applied to it.
  *
  * @tparam F the effect type of the underlying data stream(s)
  */
sealed trait QueryResult[F[_]] extends Product with Serializable {
  // Scalar transformation stages remaining to be applied to the result data.
  def stages: ScalarStages
  // Translates the effect type of the underlying stream(s) via the natural
  // transformation `f`.
  def mapK[G[_]](f: F ~> G): QueryResult[G]
}
object QueryResult extends QueryResultInstances {
  /** A result already decoded into values of `A`, interpreted via `decode`. */
  final case class Parsed[F[_], A](
      decode: QDataDecode[A],
      data: Stream[F, A],
      stages: ScalarStages)
      extends QueryResult[F] {
    def mapK[G[_]](f: F ~> G): QueryResult[G] =
      Parsed[G, A](decode, data.translate[F, G](f), stages)
  }
  /** A result as raw bytes in the given `format`. */
  final case class Typed[F[_]](
      format: DataFormat,
      data: Stream[F, Byte],
      stages: ScalarStages)
      extends QueryResult[F] {
    def mapK[G[_]](f: F ~> G): QueryResult[G] =
      Typed[G](format, data.translate[F, G](f), stages)
  }
  /** A result as raw bytes whose production is driven by state `S` extracted
    * from a tectonic `Plate`: `state` derives the next state from the plate and
    * `data` produces the bytes for a given (optional) state.
    */
  final case class Stateful[F[_], P <: Plate[Unit], S](
      format: DataFormat,
      plateF: F[P],
      state: P => F[Option[S]],
      data: Option[S] => Stream[F, Byte],
      stages: ScalarStages)
      extends QueryResult[F] {
    def mapK[G[_]](f: F ~> G): QueryResult[G] =
      Stateful[G, P, S](
        format,
        f(plateF),
        p => f(state(p)),
        data(_).translate[F, G](f),
        stages)
  }
  // Smart constructor for Parsed.
  def parsed[F[_], A](q: QDataDecode[A], d: Stream[F, A], ss: ScalarStages)
      : QueryResult[F] =
    Parsed(q, d, ss)
  // Smart constructor for Typed.
  def typed[F[_]](tpe: DataFormat, data: Stream[F, Byte], ss: ScalarStages)
      : QueryResult[F] =
    Typed(tpe, data, ss)
  // Smart constructor for Stateful.
  def stateful[F[_], P <: Plate[Unit], S](
      format: DataFormat,
      plateF: F[P],
      state: P => F[Option[S]],
      data: Option[S] => Stream[F, Byte],
      stages: ScalarStages)
      : QueryResult[F] =
    Stateful(format, plateF, state, data, stages)
  /** Lens focusing on the `stages` of any QueryResult variant. */
  def stages[F[_]]: Lens[QueryResult[F], ScalarStages] =
    Lens((_: QueryResult[F]).stages)(ss => {
      case Parsed(q, d, _) => Parsed(q, d, ss)
      case Typed(f, d, _) => Typed(f, d, ss)
      case Stateful(f, p, s, d, _) => Stateful(f, p, s, d, ss)
    })
}
/** Typeclass instances for [[QueryResult]], mixed into its companion. */
sealed abstract class QueryResultInstances {
  import QueryResult._
  // Renders each variant as a labeled node containing its format (when
  // present) and remaining scalar stages.
  implicit def renderTree[F[_]]: RenderTree[QueryResult[F]] =
    RenderTree make {
      case Parsed(_, _, ss) =>
        NonTerminal(List("Parsed"), none, List(ss.render))
      case Typed(f, _, ss) =>
        NonTerminal(List("Typed"), none, List(f.shows.render, ss.render))
      case Stateful(f, _, _, _, ss) =>
        NonTerminal(List("Stateful"), none, List(f.shows.render, ss.render))
    }
  // Show derived from the RenderTree instance above.
  implicit def show[F[_]]: Show[QueryResult[F]] =
    RenderTree.toShow
}
| slamdata/quasar | connector/src/main/scala/quasar/connector/QueryResult.scala | Scala | apache-2.0 | 3,404 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// Example: Demonstrates a streaming job with periodically refreshing side input
// Usage:
// `sbt "scio-examples/runMain com.spotify.scio.examples.extra.RefreshingSideInputExample
// --project=[PROJECT] --runner=[RUNNER] --zone=[ZONE] --input=[PUBSUB_SUBSCRIPTION]"`
package com.spotify.scio.examples.extra
import com.spotify.scio._
import com.spotify.scio.pubsub._
import com.spotify.scio.values.WindowOptions
import org.apache.beam.sdk.io.GenerateSequence
import org.apache.beam.sdk.options.StreamingOptions
import org.apache.beam.sdk.transforms.windowing.Window.ClosingBehavior
import org.apache.beam.sdk.transforms.windowing.{AfterPane, Repeatedly}
import org.apache.beam.sdk.values.WindowingStrategy.AccumulationMode
import org.joda.time.{Duration, Instant}
import org.slf4j.LoggerFactory
import scala.util.{Random, Success, Try}
/**
* Streaming job with periodically updating side input, modeled as a basic lottery game. A side
* input holds a sequence of randomly generated numbers that are the current "winning" numbers, and
* refreshes every 10 seconds. Meanwhile, a Pub/Sub subscription reads in lottery tickets
* (represented as Strings) and checks if they match the winning numbers.
*/
object RefreshingSideInputExample {
  // A lottery ticket: the numbers a player submitted.
  case class LotteryTicket(numbers: Seq[Int])
  // Outcome of checking one ticket against the current winning numbers.
  case class LotteryResult(
    eventTime: Instant,
    processTime: Instant,
    isWinner: Boolean,
    ticket: Seq[Int],
    winningNumbers: Seq[Int]
  )
  private lazy val logger = LoggerFactory.getLogger(this.getClass)
  // Number of numbers on a ticket; malformed tickets are rejected.
  private val ticketSize = 5
  def main(cmdlineArgs: Array[String]): Unit = {
    val (sc, args) = ContextAndArgs(cmdlineArgs)
    sc.optionsAs[StreamingOptions].setStreaming(true)
    // An unbounded input that produces a sequence of 5 randomly generated winning lottery numbers,
    // refreshed every 10 seconds. Materialized as a singleton `SideInput`.
    val winningLotteryNumbers = sc
      .customInput(
        "winningLotteryNumbers",
        GenerateSequence
          .from(0)
          .withRate(1, Duration.standardSeconds(10))
      )
      .withFixedWindows(
        duration = Duration.standardSeconds(10),
        offset = Duration.ZERO,
        options = WindowOptions(
          // Fire as soon as an element arrives in each 10s window so the side
          // input refreshes promptly; discard panes after firing.
          trigger = Repeatedly.forever(AfterPane.elementCountAtLeast(1)),
          accumulationMode = AccumulationMode.DISCARDING_FIRED_PANES,
          closingBehavior = ClosingBehavior.FIRE_IF_NON_EMPTY,
          allowedLateness = Duration.standardSeconds(0)
        )
      )
      .map(_ => Seq.fill(ticketSize)(Random.nextInt(100)))
      // A default is needed in case an empty pane is fired
      .asSingletonSideInput(Seq.fill(ticketSize)(-1))
    // Sample PubSub topic modeling lottery tickets as a comma-separated list of numbers.
    // For example, a message might contain the string "10,7,3,1,9"
    sc.pubsubTopic[String](args("input"))
      .flatMap(toLotteryTicket)
      .withFixedWindows(Duration.standardSeconds(5))
      .withTimestamp
      .withSideInputs(winningLotteryNumbers)
      .map { case ((lotteryTicket, eventTime), side) =>
        val currentWinningNumbers = side(winningLotteryNumbers)
        val isWinner = lotteryTicket.numbers == currentWinningNumbers
        val result = LotteryResult(
          eventTime,
          Instant.now(),
          isWinner,
          lotteryTicket.numbers,
          currentWinningNumbers
        )
        // The pipeline's only sink is this log statement.
        logger.info(s"Lottery result: $result")
      } // Can save output to PubSub, BigQuery, etc.
    sc.run()
    ()
  }
  // Parses a comma-separated message into a ticket; rejects (with a logged
  // error) anything non-numeric or not exactly `ticketSize` numbers long.
  private def toLotteryTicket(message: String): Option[LotteryTicket] =
    Try(LotteryTicket(message.split(",").map(_.toInt))) match {
      case Success(s) if s.numbers.size == ticketSize => Some(s)
      case _ =>
        logger.error(s"Malformed message: $message")
        None
    }
}
| spotify/scio | scio-examples/src/main/scala/com/spotify/scio/examples/extra/RefreshingSideInputExample.scala | Scala | apache-2.0 | 4,399 |
/*
* Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.persistence.testkit
import akka.actor.{ ActorRef, ActorSystem, Props, actorRef2Scala }
import akka.persistence.PersistentActor
import akka.testkit.{ ImplicitSender, TestKitBase }
import com.lightbend.lagom.persistence.ActorSystemSpec
import com.lightbend.lagom.persistence.PersistenceSpec
import org.scalatest.{ Matchers, WordSpecLike }
import scala.concurrent.duration._
object AbstractEmbeddedPersistentActorSpec {
  // Command asking the actor to persist `data` (uppercased) as an event.
  final case class Cmd(data: String)
  // Event recorded in the journal.
  final case class Evt(data: String)
  // Query message: replies with the actor's current State.
  case object Get
  // Accumulated event payloads, in arrival order.
  final case class State(data: Vector[String] = Vector.empty) {
    def apply(evt: Evt): State = {
      copy(data :+ evt.data)
    }
  }
  def props(persistenceId: String): Props =
    Props(new Persistent(persistenceId))
  // Minimal persistent actor: uppercases command data, persists it as an
  // event, and folds events into `state` both live and during recovery.
  class Persistent(override val persistenceId: String) extends PersistentActor {
    var state = State()
    override def receiveRecover = {
      case evt: Evt => state = state(evt)
    }
    override def receiveCommand = {
      case Cmd(data) =>
        persist(Evt(data.toUpperCase)) { evt =>
          state = state(evt)
        }
      case Get => sender() ! state
    }
  }
}
/** Shared test body verifying that a persistent actor journals its events and
  * recovers its state from the embedded journal.
  */
trait AbstractEmbeddedPersistentActorSpec { spec: ActorSystemSpec =>
  import AbstractEmbeddedPersistentActorSpec._

  "A persistent actor" must {
    "store events in the embedded Cassandra journal" in within(15.seconds) {
      val p = system.actorOf(props("p1"))
      // A fresh persistence id starts with an empty state.
      // (Removed leftover debug statement: println(implicitly[ActorRef]))
      p ! Get
      expectMsg(State())
      p ! Cmd("a")
      p ! Cmd("b")
      p ! Cmd("c")
      p ! Get
      expectMsg(State(Vector("A", "B", "C")))
      // start another with same persistenceId should recover state
      val p2 = system.actorOf(props("p1"))
      p2 ! Get
      expectMsg(State(Vector("A", "B", "C")))
    }
  }
}
| edouardKaiser/lagom | persistence/scaladsl/src/test/scala/com/lightbend/lagom/scaladsl/persistence/testkit/AbstractEmbeddedPersistentActorSpec.scala | Scala | apache-2.0 | 1,885 |
package com.jh.board
import org.specs2.mutable._
class BoardSpec extends Specification {
  "Board" should {
    // TODO: no examples are specified yet; this block is an empty placeholder.
  }
}
| huragan/chess_board | src/test/scala/com/jh/board/BoardSpec.scala | Scala | mit | 118 |
package dzufferey.utils
import scala.sys.process._
/** executing command as children process */
object SysCmd {

  /** Result of running a command: (exit code, captured stdout, captured stderr). */
  type ExecResult = (Int, String, String)

  //TODO add an option for timeout

  // Builds the process for `cmds` with the given extra environment entries,
  // optionally feeding `input` to the child's stdin. Factored out of the
  // public methods below, which previously duplicated this logic.
  private def buildProcess(cmds: Array[String], input: Option[String], addToEnv: Seq[(String,String)]): ProcessBuilder = {
    val process = Process(cmds, None, addToEnv:_*)
    input match {
      case Some(str) => process #< ( new java.io.ByteArrayInputStream(str.getBytes) )
      case None => process
    }
  }

  /** Runs the command, capturing stdout and stderr into strings.
    *
    * @param cmds     the command and its arguments
    * @param input    optional text fed to the child's stdin
    * @param addToEnv extra environment variables for the child process
    */
  def apply(cmds: Array[String], input: Option[String], addToEnv: (String,String)*): ExecResult = {
    val withInput = buildProcess(cmds, input, addToEnv)
    val bufferOut = new StringBuilder()
    val bufferErr = new StringBuilder()
    val processLogger =
      ProcessLogger(
        line => {bufferOut append line; bufferOut append "\\n"},
        line => {bufferErr append line; bufferErr append "\\n"}
      )
    //Logger("Utils", Info, "Executing "+ cmds.mkString(""," ",""))
    val exitCode = withInput ! processLogger
    (exitCode, bufferOut.toString, bufferErr.toString)
  }

  /** Convenience overload taking mandatory stdin text. */
  def apply(cmds: Array[String], input: String, addToEnv: (String,String)*): ExecResult =
    apply(cmds, Some(input), addToEnv: _*)

  /** Convenience overload without stdin. */
  def apply(cmds: Array[String], addToEnv: (String,String)*): ExecResult =
    apply(cmds, None, addToEnv: _*)

  /** Runs the command without capturing output; returns the exit code only. */
  def execWithoutOutput(cmds: Array[String], input: Option[String], addToEnv: (String,String)*): Int = {
    //Logger("Utils", Info, "Executing "+ cmds.mkString(""," ",""))
    buildProcess(cmds, input, addToEnv).!
  }

  /** Runs the command, forwarding its stdout/stderr to this process's
    * stdout/stderr; returns the exit code.
    */
  def execRedirectToOutput(cmds: Array[String], input: Option[String], addToEnv: (String,String)*): Int = {
    val processLogger = ProcessLogger(
        out => Console.println(out),
        err => Console.err.println(err))
    //Logger("Utils", Info, "Executing "+ cmds.mkString(""," ",""))
    buildProcess(cmds, input, addToEnv) ! processLogger
  }

  /** Runs the command, both forwarding output to this process's console and
    * capturing it into the returned strings.
    */
  def execOutputAndLog(cmds: Array[String], input: Option[String], addToEnv: (String,String)*): ExecResult = {
    val withInput = buildProcess(cmds, input, addToEnv)
    val bufferOut = new StringBuilder()
    val bufferErr = new StringBuilder()
    val processLogger =
      ProcessLogger(
        line => { Console.println(line); bufferOut append line; bufferOut append "\\n"},
        line => { Console.err.println(line); bufferErr append line; bufferErr append "\\n"}
      )
    //Logger("Utils", Info, "Executing "+ cmds.mkString(""," ",""))
    val exitCode = withInput ! processLogger
    (exitCode, bufferOut.toString, bufferErr.toString)
  }
}
| dzufferey/misc-scala-utils | src/main/scala/dzufferey/utils/SysCmd.scala | Scala | apache-2.0 | 2,907 |
// NOTE(review): this class does not compile as written -- see the inline
// NOTE(review) comments. Comments below document apparent intent only.
class rankGraph(var NUM_TEAMS: Int) {
//include import statementsinside class or else they wont work
import org.apache.spark._
import org.apache.spark.graphx._
import org.apache.spark.rdd.RDD
import scala.util.Random
import scala.collection.mutable.ArrayBuffer
import breeze.linalg._
import breeze.numerics._ //numerical processing library; helps to solve linear system
import breeze.math._
import org.apache.spark.mllib.linalg.{Vectors,Vector,DenseVector}
import org.apache.spark.mllib.linalg.{Matrix,Matrices,DenseMatrix}
import org.apache.spark.rdd.RDD._
import org.apache.spark.{SparkConf,SparkContext}
private val numTeams = NUM_TEAMS
//define getter method for reference var
def getNumTeams(): Int = {
return this.numTeams }
//create rand matrix using reference var numTeams (dimension of matrix);
//will be skew symmetric
def createRandMatrix(numTeams:Int): Array[Array[Int]] = {
// NOTE(review): `sc`, `teams` and `GraphGenerators` are not defined in this
// scope, and the declared return type Array[Array[Int]] does not match the
// graph built here; the intended matrix-building code is commented out below.
var graph = GraphGenerators.logNormalGraph(sc,teams).mapVertices((id,_) => id.toFloat)
/*
val matrix = Array.ofDim[Int](numTeams,numTeams)
var r = scala.util.Random //use this r variable to create random score differences
//in the numTeamXnumTeam matrix
for (i <- 0 to numTeams) {
for (j <- 0 to numTeams) {
if (i == j)
matrix(i)(j) = 0
}
}
for (i <- 0 to numTeams) {
for (j <- (i + 1) to numTeams) {
matrix(i)(j) = r.nextInt(25) //max score difference is 25
}
}
for (i <- 0 to numTeams) {
for (j <- 0 to i) {
matrix(i)(j) = -matrix(j)(i) // lower triangular portion is negative
}
}
*/
// NOTE(review): `matrix` is only defined inside the comment block above.
return matrix // returns random skew symmetric matrix with 0s on diagonal; score differences
}
//define method to build graph using built matrix
//numVertices = numTeams;
def buildGraph(matrix: Array[Array[Int]]): Graph[String,Int] = {
//create array of (numTeams) vertices
var vertexArray = Array.ofDim[(Long,String)](this.numTeams)
// NOTE(review): `0 to numTeams` iterates numTeams+1 indices over an array of
// length numTeams -- likely ArrayIndexOutOfBoundsException (here and below).
for ( i <- 0 to this.numTeams)
{ vertexArray(i) = (i.toLong, ("team " + i))} //array of (long, string)s
var vertexRDD: RDD[(Long, String)] = sc.parallelize(vertexArray)
//use arraybuffer to enable appending to edgeArray
var edgeArray = ArrayBuffer[Edge[Int]]()
for (i <-0 to this.numTeams -1) {
for (j <- i + 1 to this.numTeams) {
//index the vertex array to link vertices with edges
edgeArray += Edge(vertexArray(i)._1,vertexArray(j)._1,matrix(i)(j))
}
}
//now create edges corresponding to lower half of skew symmetric matrix
for (i <- 0 to this.numTeams) {
for (j <- 0 to i) {
edgeArray += Edge(vertexArray(i)._1,vertexArray(j)._1,-matrix(j)(i))
}
}
var edgeRDD: RDD[Edge[Int]] = sc.parallelize(edgeArray)
var graph: Graph[String,Int] = Graph(vertexRDD,edgeRDD)
return graph
}
def printWins(graph: Graph[String,Int]): Unit = {
// NOTE(review): `triplet` is unbound here -- this should be
// graph.triplets.foreach(triplet => println(...)).
graph.triplets.foreach(println(s"${triplet.srcAttr} beat ${triplet.dstAttr}"))
}
// NOTE(review): `Graph[ ,]` is not valid Scala -- the type parameters were
// never filled in, so this method (and the file) cannot compile.
def Colley(graph: Graph[ ,]): Graph[ , ] = {
val totals: RDD[(Long, Double)] = graph.aggregateMessages[Double](
triplet => { // Map Functon
//
triplet.sendToDst(triplet.attr)
triplet.sendToSrc(triplet.attr)
},
// Merge Function
(a,b) => a+b,
// Optimization preference
TripletFields.EdgeOnly
)
val wins: RDD[(Long, Double)] = graph.aggregateMessages[Double](
triplet => { // Map Function
//
triplet.sendToSrc(triplet.attr)
},
// Merge Function
(a,b) => a+b,
// Optimization preference
TripletFields.EdgeOnly
)
// saves totals, wins, losses
// Join the two lists, preserving all ID's
// leftOuterJoin leaves weird Some(value) and None Data types, convert to value and 0
val list = totals.leftOuterJoin(wins).mapValues( vertexAttr => vertexAttr._2 match{
// Save as (totals, wins, losses)
case Some(win) => (vertexAttr._1, win.toDouble, vertexAttr._1-win)
case None => (vertexAttr._1, 0.0, vertexAttr._1)
}
)
// infoGraph is constructed to hold the game number totals, wins, losses
val infoGraph: Graph[(String, (Double,Double,Double)), Int] = graph.outerJoinVertices(list)((vid, oldAttr, degOpt) => (oldAttr,degOpt.getOrElse(0,0,0)))
// Using info from infoGraph, rankGraph is initialized with the inital rank (1+Nw)/(2+Ntotal)
var rankGraph: Graph[(String, Double), Int] = infoGraph.mapVertices{case (id,(team, (totals, wins, losses))) => (team, (1 + wins)/(2 + totals))}
// Using info from infoGraph, templateGraph holds the two parts of the combined iterative equation - the numerator (1+ (Nw,i - Nl,i)/2){ Note: or entry bi in Cr = b} and the denominator 2 + Ntotal. These will be combined with an updating ranklist each iteration.
val templateGraph: Graph[(String, Double, Double), Int] = infoGraph.mapVertices{case (id,(team, (totals, wins, losses))) => (team,(1+(wins-losses)/2), 2 + totals)}
// Differences of ranking must vary less than epsilon to indicate convergence
val epsilon = 1E-4
// initialize the variables holding the previous rankGraph to compare ranking deviations (prevRankGraph) and the one holding the max ranking deviation to compare to epsilon (rankDiffMax)
var prevRankGraph: Graph[(String,Double),Int] = null
var rankDiffMax = 1.0
while (rankDiffMax > epsilon){
//Check to see if collect neighbors works better
val rankUpdate = rankGraph.aggregateMessages[Double](
triplet =>{
triplet.sendToDst(triplet.attr*triplet.srcAttr._2)
triplet.sendToSrc(triplet.attr*triplet.dstAttr._2)
},
(a,b) => a + b,
TripletFields.All
)
prevRankGraph = rankGraph
rankGraph = templateGraph.outerJoinVertices(rankUpdate)((vid, oldAttr, degOpt) => (oldAttr._1, (oldAttr._2 + degOpt.getOrElse(0).asInstanceOf[Double])/oldAttr._3))
rankDiffMax = rankGraph.vertices.leftOuterJoin(prevRankGraph.vertices).map( vertexAttr => vertexAttr._2._2 match {
case Some(rank) => (rank._2 - vertexAttr._2._1._2).abs
case None => 0
}).max
}
}
/*def main(): Unit = {
val masseyExp1 = new masseyGraph(10)
val winLossData = masseyExp1.createRandMatrix()
for (i <- 0 to masseyExp1.getNumTeams()){
for (j <-0 to masseyExp1.getNumTeams()){
print(winLossData(i)(j) + "\\t")
}
}
val graphStuff = masseyExp1.buildGraph(winLossData)
masseyExp1.printWins(graphStuff)
}
*/
// Driver: builds a random 25-team matrix, constructs the graph, and prints
// the win relations. Invoked from the class body below.
def main(): Unit = {
val masseyExp1 = new rankGraph(25)
val numTeams = masseyExp1.getNumTeams()
var winLossData = masseyExp1.createRandMatrix(numTeams)
for (i <- 0 to masseyExp1.getNumTeams()){
for (j <-0 to masseyExp1.getNumTeams()){
print(winLossData(i)(j) + "\\t")
}
}
val graphStuff = masseyExp1.buildGraph(winLossData)
masseyExp1.printWins(graphStuff)}
// NOTE(review): calling main() in the constructor makes `new rankGraph(25)`
// inside main() recurse -- instantiating this class would never terminate.
main()
}
| IGARDS/rankability | SCALA/masseyGraph.scala | Scala | mit | 6,925 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.dstream
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Time, StreamingContext}
import scala.reflect.ClassTag
/**
* An input stream that always returns the same RDD on each timestep. Useful for testing.
*/
class ConstantInputDStream[T: ClassTag](ssc_ : StreamingContext, rdd: RDD[T])
  extends InputDStream[T](ssc_) {
  // Nothing to set up: the RDD to serve is fixed at construction time.
  override def start() {}
  // Nothing to tear down.
  override def stop() {}
  // Every batch interval yields the same RDD, regardless of `validTime`.
  override def compute(validTime: Time): Option[RDD[T]] = {
    Some(rdd)
  }
}
| dotunolafunmiloye/spark | streaming/src/main/scala/org/apache/spark/streaming/dstream/ConstantInputDStream.scala | Scala | apache-2.0 | 1,312 |
package blended.updater.config
import org.scalatest.FreeSpec
import scala.util.{Failure, Success}
class MvnUrlTest extends FreeSpec {
  "toUrl" - {
    // Registers one example asserting that `mvn` renders to `url`, both with
    // an empty base and with repo bases with/without a trailing slash.
    def ok(mvn: MvnGav, url: String) = {
      s"should process ${mvn}" in {
        assert(mvn.toUrl("") === url)
        assert(mvn.toUrl("http://org.example/repo1") === s"http://org.example/repo1/${url}")
        assert(mvn.toUrl("http://org.example/repo1/") === s"http://org.example/repo1/${url}")
      }
    }
    ok(MvnGav("g", "a", "1"), "g/a/1/a-1.jar")
    ok(MvnGav("a.b.c", "d.e.f", "1"), "a/b/c/d.e.f/1/d.e.f-1.jar")
    ok(MvnGav("a.b.c", "d.e.f", "1", fileExt = "zip"), "a/b/c/d.e.f/1/d.e.f-1.zip")
    ok(MvnGav("a.b.c", "d.e.f", "1", Some("test")), "a/b/c/d.e.f/1/d.e.f-1-test.jar")
    ok(MvnGav("a.b.c", "d.e.f", "1", Some("container"), "zip"), "a/b/c/d.e.f/1/d.e.f-1-container.zip")
    ok(MvnGav("a.b.c", "d.e.f", "1", None, "war"), "a/b/c/d.e.f/1/d.e.f-1.war")
    ok(MvnGav("a.b.c", "d.e.f", "1", Some("jar"), "jar"), "a/b/c/d.e.f/1/d.e.f-1.jar")
  }
  "parse" - {
    // Registers one example asserting that `gav` parses to `mvn`.
    def ok(gav: String, mvn: MvnGav) = {
      s"should parse ${gav} to ${mvn}" in {
        assert(MvnGav.parse(gav) === Success(mvn))
      }
    }
    // Registers one example asserting that parsing `gav` fails.
    def notOk(gav: String) = {
      s"should not parse ${gav}" in {
        assert(MvnGav.parse(gav).isInstanceOf[Failure[_]])
      }
    }
    ok("g:a:1", MvnGav("g", "a", "1", None, "jar"))
    ok("g:a:pom:1", MvnGav("g", "a", "1", Some("pom"), "pom"))
    ok("g:a:pom:1:pom", MvnGav("g", "a", "1", Some("pom"), "pom"))
    ok("g:a:jdk16:1", MvnGav("g", "a", "1", Some("jdk16"), "jar"))
    ok("g:a::1:war", MvnGav("g", "a", "1", None, "war"))
    ok("g:a:jar:1", MvnGav("g", "a", "1", None, "jar"))
    ok("io.hawt:hawtio-osgi-jmx::1.4.51:jar", MvnGav("io.hawt", "hawtio-osgi-jmx", "1.4.51", None, "jar"))
    ok("g:a::1:zip", MvnGav("g", "a", "1", None, "zip"))
    notOk("g:a")
    notOk("a")
    notOk("g:a:1:")
    notOk("a:b:c:d:e:")
  }
}
| lefou/blended | blended.updater.config/jvm/src/test/scala/blended/updater/config/MvnUrlTest.scala | Scala | apache-2.0 | 1,948 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.neoremind.kraps
/** Exception signalling a failure in the RPC layer.
  *
  * @param message description of the failure
  * @param cause   underlying cause, or `null` when there is none
  */
class RpcException(message: String, cause: Throwable)
  extends Exception(message, cause) {
  // Convenience constructor for failures without an underlying cause.
  def this(message: String) = this(message, null)
}
| neoremind/kraps-rpc | kraps-core/src/main/scala/com/neoremind/kraps/RpcException.scala | Scala | apache-2.0 | 977 |
/*
* Copyright (C) 2017 Michael Dippery <michael@monkey-robot.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mipadi.jupiter.net
/** Extends `java.net.URI` with useful methods and a more Scala-like API.
*
* `RichURI` is especially useful for implicit conversions to
* more specific types of URIs. For example, it could be used to create
* a specialized `RichMongoURI` implicit:
*
* {{{
* import com.mipadi.jupiter.net.RichURI
* implicit class RichMongoURI(uri: URI)(implicit ev: Addressable[URI])
* extends ConvertibleURI {
* lazy val database: Option[String] = uri.pathComponents.lift(1)
* lazy val collection: Option[String] = uri.pathComponents.lift(2)
* }
* }}}
*
* @constructor
* Creates a new `RichURI`. The `uri` parameter is the wrapped URI-like
* object. Also takes an implicit `Addressable[T]` parameter, to which
* `RichURI` delegates many of its URI-like operations. `Addressable`
* implicits for both `java.net.URI` and `java.net.URL` can be imported
* from the `[[com.mipadi.jupiter.net.Addressable Addressable]]` object:
*
* {{{
* import com.mipadi.jupiter.net.Addressable._
* }}}
*
* @param uri
* The wrapped URI-like object
* @param ev
* The delegate responsible for getting data from the URI-like object
*/
class RichURI[T](uri: T)(implicit ev: Addressable[T]) {

  /** The URI's scheme (e.g., "http"). */
  lazy val protocol: String = ev.getScheme(uri)

  /** The URI's host component. */
  lazy val host: String = ev.getHost(uri)

  /** The URI's path component. */
  lazy val path: String = ev.getPath(uri)

  /** The URI's port, or `None` when no port is specified (the delegate
   *  reports an unspecified port as -1).
   */
  lazy val port: Option[Int] = Some(ev.getPort(uri)).filter(_ != -1)

  /** Each part of the URI's path; the first element is always "/". */
  lazy val pathComponents: Seq[String] =
    if (path == "/") IndexedSeq("/")
    else IndexedSeq("/") ++ path.split("/").tail
}
/** Additional operations for `java.net.URI` and `java.net.URL` classes.
*
* In particular, `RichURI` provides an implicit conversion to
* `[[com.mipadi.jupiter.net.RichURI.ExtendedURI ExtendedURI]]`, which allows
* additional methods to be called on `java.net.URI` objects:
*
* {{{
* import java.net.URI
* import com.mipadi.jupiter.net.RichURI._
* val uri = new URI("http://monkey-robot.com/archives")
* val parts = uri.pathComponents
* }}}
*/
object RichURI {

  /** Implicitly wraps URI-like values (`java.net.URI`, `java.net.URL`) so the
   *  extra accessors defined by [[RichURI]] can be called on them directly.
   *
   *  For example:
   *
   *  {{{
   *  import java.net.URI
   *  import com.mipadi.jupiter.net.RichURI._
   *  val uri = new URI("http://monkey-robot.com/archives")
   *  val parts = uri.pathComponents
   *  }}}
   *
   *  @param uri
   *    The wrapped URI-like value
   *  @param ev
   *    The addressable delegate used to read data from `uri`
   */
  implicit class ExtendedURI[T](uri: T)(implicit ev: Addressable[T]) extends RichURI[T](uri)
}
| mdippery/jupiter | src/main/scala/com/mipadi/jupiter/net/RichURI.scala | Scala | apache-2.0 | 3,517 |
package se.culvertsoft.mgen.visualdesigner.view
import java.awt.Color
import java.awt.Graphics2D
import java.awt.Rectangle
import java.awt.event.MouseAdapter
import java.awt.event.MouseEvent
import se.culvertsoft.mgen.visualdesigner.control.MouseSelectionBoxAction
import se.culvertsoft.mgen.visualdesigner.control.UiPos
import se.culvertsoft.mgen.visualdesigner.util.RichMouseEvent.RichMouseEventOps
object SelectionBoxable {
  // Intended outline color for the selection box.
  // NOTE(review): this constant is not referenced by the drawing code in the
  // trait below — the rectangle is drawn with the Graphics2D's current color.
  // Confirm whether that is intentional.
  val BOX_COLOR = Color.DARK_GRAY
}
trait SelectionBoxable
  extends AbstractView
  with BackGrounded {
  import Graphics2DOps._

  /**
   * Paints the rubber-band selection rectangle on top of the children while a
   * mouse selection-box drag is in progress on this entity's container.
   */
  override def drawInnerPanelComponentAfterChildren(g: Graphics2D) {
    super.drawInnerPanelComponentAfterChildren(g)
    controller.mouseInputMgr.mouseDragAction() match {
      case action: MouseSelectionBoxAction if (action.containerEntity eq entity) =>
        // Drag anchor and current mouse position, both in component coordinates.
        val pOld = action.initPos.onComponent
        val pNew = UiPos.getCompCoordFromScreen(controller.mouseInputMgr.mousePos().onScreen, action.initPos.component)
        // Normalize so (x0, y0) is the top-left corner regardless of drag direction.
        val x0 = math.min(pOld.x, pNew.x)
        val y0 = math.min(pOld.y, pNew.y)
        val x1 = math.max(pOld.x, pNew.x)
        val y1 = math.max(pOld.y, pNew.y)
        // Fixed: the original also built an unused java.awt.Rectangle here.
        // NOTE(review): SelectionBoxable.BOX_COLOR is never applied; the box is
        // drawn with the Graphics2D's current color — confirm intent.
        g.drawRect(x0, y0, x1 - x0, y1 - y0)
      case _ =>
    }
  }

  // Starts a selection-box drag when the left mouse button is dragged over
  // the inner panel.
  val selectionBoxableListener = new MouseAdapter {
    override def mouseDragged(e: MouseEvent) {
      if (e.isLeftBtn) {
        controller.mouseInputMgr.startMouseSelectionBoxAction(e, entity)
      }
    }
  }

  innerPanel.addMouseMotionListener(selectionBoxableListener)

  override def drawBackground(g: Graphics2D) {
    super.drawBackground(g)
  }
}
| culvertsoft/mgen-visualdesigner | src/main/scala/se/culvertsoft/mgen/visualdesigner/view/SelectionBoxable.scala | Scala | gpl-2.0 | 1,685 |
import scala.util.{Try, Success, Failure}
object Test extends App {
/**
 * Reads all lines of `filename` into a list.
 *
 * @param filename path of the text file to read
 * @return Success with the file's lines, or Failure wrapping the I/O error.
 */
def readTextFile(filename: String): Try[List[String]] = {
  Try {
    val source = io.Source.fromFile(filename)
    // Fixed: the original never closed the Source, leaking the file handle.
    try source.getLines.toList
    finally source.close()
  }
}
val filename = "src/main/scala/MyData.txt"
// On success print every line; on failure print the error itself.
readTextFile(filename).fold(println, _.foreach(println))
/**
 * Parses `s` (after trimming) as a decimal Int.
 *
 * @param s the string to parse; may be null
 * @return Some(value) on success, None when `s` is null or not a valid integer
 */
def toInt(s: String): Option[Int] = {
  // Fixed: only the expected parse failure is swallowed now; the previous
  // blanket `catch { case e: Exception => None }` could hide unrelated bugs.
  // Null input is handled explicitly to preserve the old None result.
  Option(s).flatMap { str =>
    try Some(Integer.parseInt(str.trim))
    catch { case _: NumberFormatException => None }
  }
}
val d = "123s"
// Print the parsed value, falling back to 0 when parsing fails.
println(toInt(d).getOrElse(0))
} | JayaprakashReddy/hello-akka | src/main/scala/Test.scala | Scala | cc0-1.0 | 583 |
/**
* Copyright 2014 Dropbox, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package djinni
import djinni.ast.Record.DerivingType
import djinni.ast._
import djinni.generatorTools._
import djinni.meta._
import djinni.writer.IndentWriter
import scala.collection.mutable
class JavaGenerator(spec: Spec) extends Generator(spec) {
// Short "@Name" annotation forms derived from the configured fully qualified
// annotation class names (only the last dotted segment is kept).
val javaAnnotationHeader = spec.javaAnnotation.map(pkg => '@' + pkg.split("\\\\.").last)
val javaNullableAnnotation = spec.javaNullableAnnotation.map(pkg => '@' + pkg.split("\\\\.").last)
val javaNonnullAnnotation = spec.javaNonnullAnnotation.map(pkg => '@' + pkg.split("\\\\.").last)
// Maps Djinni types to their Java source representations.
val marshal = new JavaMarshal(spec)
/** Accumulates the set of Java import statements needed by one generated file. */
class JavaRefs() {
  // Sorted, de-duplicated fully qualified names to be emitted as imports.
  var java = mutable.TreeSet[String]()

  // The configured annotation classes must always be importable.
  spec.javaAnnotation.foreach(pkg => java.add(pkg))
  spec.javaNullableAnnotation.foreach(pkg => java.add(pkg))
  spec.javaNonnullAnnotation.foreach(pkg => java.add(pkg))

  // Collects imports for a type reference via its resolved meta-expression.
  def find(ty: TypeRef) { find(ty.resolved) }
  // Recurses through all type arguments before handling the base type.
  def find(tm: MExpr) {
    tm.args.foreach(find)
    find(tm.base)
  }
  // Records the import (if any) that the marshal reports for a meta type.
  def find(m: Meta) = for(r <- marshal.references(m)) r match {
    case ImportRef(arg) => java.add(arg)
    case _ =>
  }
}
/**
 * Creates `<TypeName>.java` in the configured output folder, writes the
 * standard autogenerated-file banner, package declaration and imports, then
 * delegates the body to `f`.
 */
def writeJavaFile(ident: String, origin: String, refs: Iterable[String], f: IndentWriter => Unit) {
  val fileName = idJava.ty(ident) + ".java"
  createFile(spec.javaOutFolder.get, fileName, (w: IndentWriter) => {
    w.wl("// AUTOGENERATED FILE - DO NOT MODIFY!")
    w.wl("// This file generated by Djinni from " + origin)
    w.wl
    spec.javaPackage.foreach(pkg => w.wl(s"package $pkg;").wl)
    if (!refs.isEmpty) {
      refs.foreach(r => w.wl(s"import $r;"))
      w.wl
    }
    f(w)
  })
}
/**
 * Writes `public static final` declarations for `consts` to `w`, rendering
 * each value as a Java literal appropriate for its declared type.
 */
def generateJavaConstants(w: IndentWriter, consts: Seq[Const]) = {

  // Renders one constant value; records recurse field by field.
  def writeJavaConst(w: IndentWriter, ty: TypeRef, v: Any): Unit = v match {
    // Java long literals need the "l" suffix.
    case l: Long if marshal.fieldType(ty).equalsIgnoreCase("long") => w.w(l.toString + "l")
    case l: Long => w.w(l.toString)
    // Java float literals need the "f" suffix.
    case d: Double if marshal.fieldType(ty).equalsIgnoreCase("float") => w.w(d.toString + "f")
    case d: Double => w.w(d.toString)
    case b: Boolean => w.w(if (b) "true" else "false")
    // NOTE(review): string values are emitted verbatim — presumably already
    // quoted/escaped upstream by the parser; confirm.
    case s: String => w.w(s)
    case e: EnumValue => w.w(s"${marshal.typename(ty)}.${idJava.enum(e)}")
    case v: ConstRef => w.w(idJava.const(v))
    case z: Map[_, _] => { // Value is record
      val recordMdef = ty.resolved.base.asInstanceOf[MDef]
      val record = recordMdef.body.asInstanceOf[Record]
      val vMap = z.asInstanceOf[Map[String, Any]]
      w.wl(s"new ${marshal.typename(ty)}(")
      w.increase()
      // Use exact sequence
      val skipFirst = SkipFirst()
      for (f <- record.fields) {
        skipFirst {w.wl(",")}
        writeJavaConst(w, f.ty, vMap.apply(f.ident.name))
        w.w(" /* " + idJava.field(f.ident) + " */ ")
      }
      w.w(")")
      w.decrease()
    }
  }

  for (c <- consts) {
    writeDoc(w, c.doc)
    javaAnnotationHeader.foreach(w.wl)
    marshal.nullityAnnotation(c.ty).foreach(w.wl)
    w.w(s"public static final ${marshal.fieldType(c.ty)} ${idJava.const(c.ident)} = ")
    writeJavaConst(w, c.ty, c.value)
    w.wl(";")
    w.wl
  }
}
/** Emits the Java `enum` source for a Djinni enum definition. */
override def generateEnum(origin: String, ident: Ident, doc: Doc, e: Enum) {
  // Enums need no type-driven imports beyond the optional annotation header.
  val refs = new JavaRefs()
  writeJavaFile(ident, origin, refs.java, w => {
    writeDoc(w, doc)
    javaAnnotationHeader.foreach(w.wl)
    w.w(s"public enum ${marshal.typename(ident, e)}").braced {
      normalEnumOptions(e).foreach { opt =>
        writeDoc(w, opt.doc)
        w.wl(idJava.enum(opt.ident) + ",")
      }
      // Terminate the constant list.
      w.wl(";")
    }
  })
}
/**
 * Generates the Java source for a Djinni interface: an abstract class with
 * one abstract (or static native) method per interface method, plus — when
 * the interface has a C++ implementation — a private CppProxy subclass that
 * forwards calls through JNI.
 */
override def generateInterface(origin: String, ident: Ident, doc: Doc, typeParams: Seq[TypeParam], i: Interface) {
  val refs = new JavaRefs()

  // Collect imports needed by parameter, return and constant types.
  i.methods.map(m => {
    m.params.map(p => refs.find(p.ty))
    m.ret.foreach(refs.find)
  })
  i.consts.map(c => {
    refs.find(c.ty)
  })
  if (i.ext.cpp) {
    // CppProxy guards against double-destroy with an AtomicBoolean.
    refs.java.add("java.util.concurrent.atomic.AtomicBoolean")
  }

  writeJavaFile(ident, origin, refs.java, w => {
    val javaClass = marshal.typename(ident, i)
    val typeParamList = javaTypeParams(typeParams)
    writeDoc(w, doc)

    javaAnnotationHeader.foreach(w.wl)
    w.w(s"public abstract class $javaClass$typeParamList").braced {
      val skipFirst = SkipFirst()
      generateJavaConstants(w, i.consts)

      val throwException = spec.javaCppException.fold("")(" throws " + _)
      // Instance methods become abstract members.
      for (m <- i.methods if !m.static) {
        skipFirst { w.wl }
        writeDoc(w, m.doc)
        val ret = marshal.returnType(m.ret)
        val params = m.params.map(p => {
          val nullityAnnotation = marshal.nullityAnnotation(p.ty).map(_ + " ").getOrElse("")
          nullityAnnotation + marshal.paramType(p.ty) + " " + idJava.local(p.ident)
        })
        marshal.nullityAnnotation(m.ret).foreach(w.wl)
        w.wl("public abstract " + ret + " " + idJava.method(m.ident) + params.mkString("(", ", ", ")") + throwException + ";")
      }
      // Static methods map directly onto native JNI entry points.
      for (m <- i.methods if m.static) {
        skipFirst { w.wl }
        writeDoc(w, m.doc)
        val ret = marshal.returnType(m.ret)
        val params = m.params.map(p => {
          val nullityAnnotation = marshal.nullityAnnotation(p.ty).map(_ + " ").getOrElse("")
          nullityAnnotation + marshal.paramType(p.ty) + " " + idJava.local(p.ident)
        })
        marshal.nullityAnnotation(m.ret).foreach(w.wl)
        w.wl("public static native "+ ret + " " + idJava.method(m.ident) + params.mkString("(", ", ", ")") + ";")
      }
      if (i.ext.cpp) {
        // Proxy for interfaces implemented in C++: owns a native pointer and
        // forwards every call through a generated native_<method> function.
        w.wl
        javaAnnotationHeader.foreach(w.wl)
        w.wl(s"private static final class CppProxy$typeParamList extends $javaClass$typeParamList").braced {
          w.wl("private final long nativeRef;")
          w.wl("private final AtomicBoolean destroyed = new AtomicBoolean(false);")
          w.wl
          w.wl(s"private CppProxy(long nativeRef)").braced {
            w.wl("if (nativeRef == 0) throw new RuntimeException(\\"nativeRef is zero\\");")
            w.wl(s"this.nativeRef = nativeRef;")
          }
          w.wl
          w.wl("private native void nativeDestroy(long nativeRef);")
          w.wl("public void destroy()").braced {
            w.wl("boolean destroyed = this.destroyed.getAndSet(true);")
            w.wl("if (!destroyed) nativeDestroy(this.nativeRef);")
          }
          // finalize() acts as a safety net when destroy() was never called.
          w.wl("protected void finalize() throws java.lang.Throwable").braced {
            w.wl("destroy();")
            w.wl("super.finalize();")
          }
          for (m <- i.methods if !m.static) { // Static methods not in CppProxy
            val ret = marshal.returnType(m.ret)
            val returnStmt = m.ret.fold("")(_ => "return ")
            val params = m.params.map(p => marshal.paramType(p.ty) + " " + idJava.local(p.ident)).mkString(", ")
            val args = m.params.map(p => idJava.local(p.ident)).mkString(", ")
            val meth = idJava.method(m.ident)
            w.wl
            w.wl(s"@Override")
            w.wl(s"public $ret $meth($params)$throwException").braced {
              w.wl("assert !this.destroyed.get() : \\"trying to use a destroyed object\\";")
              w.wl(s"${returnStmt}native_$meth(this.nativeRef${preComma(args)});")
            }
            w.wl(s"private native $ret native_$meth(long _nativeRef${preComma(params)});")
          }
        }
      }
    }
  })
}
/**
 * Generates the Java class for a Djinni record: package-private final fields,
 * an all-args constructor, accessors, and — depending on the deriving flags —
 * equals/hashCode, toString and compareTo implementations.
 */
override def generateRecord(origin: String, ident: Ident, doc: Doc, params: Seq[TypeParam], r: Record) {
  val refs = new JavaRefs()
  r.fields.foreach(f => refs.find(f.ty))

  // Records with a Java extension are emitted as "<name>_base" and left
  // non-final so user code can subclass them.
  val (javaName, javaFinal) = if (r.ext.java) (ident.name + "_base", "") else (ident.name, " final")

  writeJavaFile(javaName, origin, refs.java, w => {
    writeDoc(w, doc)
    javaAnnotationHeader.foreach(w.wl)
    val self = marshal.typename(javaName, r)

    // Deriving "Ord" makes the generated class implement Comparable.
    val comparableFlag =
      if (r.derivingTypes.contains(DerivingType.Ord)) {
        s" implements Comparable<$self>"
      } else {
        ""
      }
    w.w(s"public$javaFinal class ${self + javaTypeParams(params)}$comparableFlag").braced {
      w.wl
      generateJavaConstants(w, r.consts)
      // Field definitions.
      for (f <- r.fields) {
        w.wl
        w.wl(s"/*package*/ final ${marshal.fieldType(f.ty)} ${idJava.field(f.ident)};")
      }
      // Constructor.
      w.wl
      w.wl(s"public $self(").nestedN(2) {
        val skipFirst = SkipFirst()
        for (f <- r.fields) {
          skipFirst { w.wl(",") }
          marshal.nullityAnnotation(f.ty).map(annotation => w.w(annotation + " "))
          w.w(marshal.paramType(f.ty) + " " + idJava.local(f.ident))
        }
        w.wl(") {")
      }
      w.nested {
        for (f <- r.fields) {
          w.wl(s"this.${idJava.field(f.ident)} = ${idJava.local(f.ident)};")
        }
      }
      w.wl("}")
      // Accessors
      for (f <- r.fields) {
        w.wl
        writeDoc(w, f.doc)
        marshal.nullityAnnotation(f.ty).foreach(w.wl)
        w.w("public " + marshal.returnType(Some(f.ty)) + " " + idJava.method("get_" + f.ident.name) + "()").braced {
          w.wl("return " + idJava.field(f.ident) + ";")
        }
      }
      // equals(): generated only when deriving "Eq"; comparison strategy
      // depends on the field's meta type (arrays, collections, optionals,
      // primitives, records, enums and externs are all handled differently).
      if (r.derivingTypes.contains(DerivingType.Eq)) {
        w.wl
        w.wl("@Override")
        val nullableAnnotation = javaNullableAnnotation.map(_ + " ").getOrElse("")
        w.w(s"public boolean equals(${nullableAnnotation}Object obj)").braced {
          w.w(s"if (!(obj instanceof $self))").braced {
            w.wl("return false;")
          }
          w.wl(s"$self other = ($self) obj;")
          w.w(s"return ").nestedN(2) {
            val skipFirst = SkipFirst()
            for (f <- r.fields) {
              skipFirst { w.wl(" &&") }
              f.ty.resolved.base match {
                case MBinary => w.w(s"java.util.Arrays.equals(${idJava.field(f.ident)}, other.${idJava.field(f.ident)})")
                case MList | MSet | MMap => w.w(s"this.${idJava.field(f.ident)}.equals(other.${idJava.field(f.ident)})")
                case MOptional =>
                  // Optionals are represented as nullable references here.
                  w.w(s"((this.${idJava.field(f.ident)} == null && other.${idJava.field(f.ident)} == null) || ")
                  w.w(s"(this.${idJava.field(f.ident)} != null && this.${idJava.field(f.ident)}.equals(other.${idJava.field(f.ident)})))")
                case MString => w.w(s"this.${idJava.field(f.ident)}.equals(other.${idJava.field(f.ident)})")
                case t: MPrimitive => w.w(s"this.${idJava.field(f.ident)} == other.${idJava.field(f.ident)}")
                case df: MDef => df.defType match {
                  case DRecord => w.w(s"this.${idJava.field(f.ident)}.equals(other.${idJava.field(f.ident)})")
                  case DEnum => w.w(s"this.${idJava.field(f.ident)} == other.${idJava.field(f.ident)}")
                  case _ => throw new AssertionError("Unreachable")
                }
                case e: MExtern => e.defType match {
                  case DRecord => if(e.java.reference) {
                    w.w(s"this.${idJava.field(f.ident)}.equals(other.${idJava.field(f.ident)})")
                  } else {
                    w.w(s"this.${idJava.field(f.ident)} == other.${idJava.field(f.ident)}")
                  }
                  case DEnum => w.w(s"this.${idJava.field(f.ident)} == other.${idJava.field(f.ident)}")
                  case _ => throw new AssertionError("Unreachable")
                }
                case _ => throw new AssertionError("Unreachable")
              }
            }
          }
          w.wl(";")
        }
        // Also generate a hashCode function, since you shouldn't override one without the other.
        // This hashcode implementation is based off of the apache commons-lang implementation of
        // HashCodeBuilder (excluding support for Java arrays) which is in turn based off of the
        // the recommendataions made in Effective Java.
        w.wl
        w.wl("@Override")
        w.w("public int hashCode()").braced {
          w.wl("// Pick an arbitrary non-zero starting value")
          w.wl("int hashCode = 17;")
          // Also pick an arbitrary prime to use as the multiplier.
          val multiplier = "31"
          for (f <- r.fields) {
            val fieldHashCode = f.ty.resolved.base match {
              case MBinary => s"java.util.Arrays.hashCode(${idJava.field(f.ident)})"
              case MList | MSet | MMap | MString | MDate => s"${idJava.field(f.ident)}.hashCode()"
              // Need to repeat this case for MDef
              case df: MDef => s"${idJava.field(f.ident)}.hashCode()"
              case MOptional => s"(${idJava.field(f.ident)} == null ? 0 : ${idJava.field(f.ident)}.hashCode())"
              case t: MPrimitive => t.jName match {
                case "byte" | "short" | "int" => idJava.field(f.ident)
                case "long" => s"((int) (${idJava.field(f.ident)} ^ (${idJava.field(f.ident)} >>> 32)))"
                case "float" => s"Float.floatToIntBits(${idJava.field(f.ident)})"
                case "double" => s"((int) (Double.doubleToLongBits(${idJava.field(f.ident)}) ^ (Double.doubleToLongBits(${idJava.field(f.ident)}) >>> 32)))"
                case "boolean" => s"(${idJava.field(f.ident)} ? 1 : 0)"
                case _ => throw new AssertionError("Unreachable")
              }
              case e: MExtern => e.defType match {
                case DRecord => "(" + e.java.hash.format(idJava.field(f.ident)) + ")"
                case DEnum => s"${idJava.field(f.ident)}.hashCode()"
                case _ => throw new AssertionError("Unreachable")
              }
              case _ => throw new AssertionError("Unreachable")
            }
            w.wl(s"hashCode = hashCode * $multiplier + $fieldHashCode;")
          }
          w.wl(s"return hashCode;")
        }
      }
      // toString(): always generated, concatenates "field=value" pairs.
      w.wl
      w.wl("@Override")
      w.w("public String toString()").braced {
        w.w(s"return ").nestedN(2) {
          w.wl(s""""${self}{" +""")
          for (i <- 0 to r.fields.length-1) {
            val name = idJava.field(r.fields(i).ident)
            val comma = if (i > 0) """"," + """ else ""
            w.wl(s"""${comma}"${name}=" + ${name} +""")
          }
        }
        w.wl(s""""}";""")
      }
      w.wl
      // compareTo(): generated only when deriving "Ord"; compares field by
      // field in declaration order and returns the first non-zero result.
      if (r.derivingTypes.contains(DerivingType.Ord)) {
        // Emits a three-way comparison for primitive-valued fields.
        def primitiveCompare(ident: Ident) {
          w.wl(s"if (this.${idJava.field(ident)} < other.${idJava.field(ident)}) {").nested {
            w.wl(s"tempResult = -1;")
          }
          w.wl(s"} else if (this.${idJava.field(ident)} > other.${idJava.field(ident)}) {").nested {
            w.wl(s"tempResult = 1;")
          }
          w.wl(s"} else {").nested {
            w.wl(s"tempResult = 0;")
          }
          w.wl("}")
        }
        w.wl
        w.wl("@Override")
        val nonnullAnnotation = javaNonnullAnnotation.map(_ + " ").getOrElse("")
        w.w(s"public int compareTo($nonnullAnnotation$self other) ").braced {
          w.wl("int tempResult;")
          for (f <- r.fields) {
            f.ty.resolved.base match {
              case MString => w.wl(s"tempResult = this.${idJava.field(f.ident)}.compareTo(other.${idJava.field(f.ident)});")
              case t: MPrimitive => primitiveCompare(f.ident)
              case df: MDef => df.defType match {
                case DRecord => w.wl(s"tempResult = this.${idJava.field(f.ident)}.compareTo(other.${idJava.field(f.ident)});")
                // NOTE(review): the DEnum branches use w.w (no newline) while the
                // other branches use w.wl — the following "if" is appended to the
                // same output line. Valid Java, but confirm the formatting is intended.
                case DEnum => w.w(s"tempResult = this.${idJava.field(f.ident)}.compareTo(other.${idJava.field(f.ident)});")
                case _ => throw new AssertionError("Unreachable")
              }
              case e: MExtern => e.defType match {
                case DRecord => if(e.java.reference) w.wl(s"tempResult = this.${idJava.field(f.ident)}.compareTo(other.${idJava.field(f.ident)});") else primitiveCompare(f.ident)
                case DEnum => w.w(s"tempResult = this.${idJava.field(f.ident)}.compareTo(other.${idJava.field(f.ident)});")
                case _ => throw new AssertionError("Unreachable")
              }
              case _ => throw new AssertionError("Unreachable")
            }
            w.w("if (tempResult != 0)").braced {
              w.wl("return tempResult;")
            }
          }
          w.wl("return 0;")
        }
      }
    }
  })
}
/** Renders a Java generic parameter list such as "<A, B>", or "" when empty. */
def javaTypeParams(params: Seq[TypeParam]): String = {
  val names = params.map(p => idJava.typeParam(p.ident))
  if (names.isEmpty) "" else names.mkString("<", ", ", ">")
}
}
| mknejp/djinni | src/source/JavaGenerator.scala | Scala | apache-2.0 | 17,551 |
/**
* Copyright (c) 2007-2011 Eric Torreborre <etorreborre@yahoo.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of
* the Software. Neither the name of specs nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
* TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package org.specs.specification
import org.specs._
import org.specs.execute._
import org.specs.runner._
import org.specs.util._
// Specification of org.specs Example behaviour: sub-example discovery,
// lazy execution, reset semantics and PENDING-skip handling.
class exampleSpec extends SpecificationWithJUnit {
  "An example" should {
    "know if it has subexamples without executing them" in {
      object s extends Specification {
        "e" in {
          "e1" in {
            "e2" in { 1.isExpectation }
          }
        }
      }
      s.examples(0) aka "first example" must beTrue ^^ { e => e.hasSubExamples }
      s.examples(0).examples(0) aka "first nested example" must beTrue ^^ { e => e.hasSubExamples }
    }
    // NOTE(review): the next two cases depend on running in this order
    // against the shared `ex` object below — confirm the framework
    // guarantees in-order execution.
    "not be executed if not asked for results" in {
      ex.hasBeenExecuted must beFalse
    }
    "be executed if asked for results" in {
      // Querying failures forces evaluation of the example body.
      ex.failures
      ex.hasBeenExecuted must beTrue
    }
  }
  "An example" can {
    "be resetted for execution" in {
      ex.resetExample
      ex.hasBeenExecuted must beFalse
      ex.failures
      ex.hasBeenExecuted must beTrue
    }
  }
  "An example" should {
    "throw a SkippedException with a PENDING message if it has a body with no expectations" in {
      object s extends Specification {
        shareVariables()
        "this is a pending example" in {}
      }
      s.skipped must_== List(new SkippedException("PENDING: not yet implemented"))
    }
    "not throw a SkippedException with a PENDING message if it has a body with no expectations and the configuration" +
    "has examplesWithoutExpectationsMustBePending=false" in {
      // NOTE(review): mutates the global Configuration without restoring it,
      // which may leak into later specifications — confirm.
      Configuration.config = new Configuration { override val examplesWithoutExpectationsMustBePending = false }
      object s extends Specification {
        shareVariables()
        "this is a pending example" in {}
      }
      s.skipped must be empty
    }
  }
}
// Helper specification observed by exampleSpec above: exposes whether its
// single example body has actually been run.
object ex extends Specification {
  shareVariables()
  // Flipped to true when the example body executes; lets callers verify
  // that execution is lazy.
  var hasBeenExecuted = false
  var subexample: Example = null
  val testExample = "ex" in {
    hasBeenExecuted = true
  }
  // Querying failures forces execution of the example.
  override def failures = testExample.failures.toList
  // Clears the executed flag and resets the example so it can run again.
  def resetExample = { hasBeenExecuted = false; testExample.resetForExecution }
}
| yyuu/specs | src/test/scala/org/specs/specification/exampleSpec.scala | Scala | mit | 3,477 |
package org.piro84.performance.petclinic.request
import scala.util.Random
/**
* Contains custom utils for performing requests to the petclinic.
*/
object RequestUtils {
/** Picks one element uniformly at random from `list`.
 *  Note: java.util.Random.nextInt throws IllegalArgumentException for an
 *  empty list (bound of 0), so callers must pass a non-empty sequence.
 */
val random = (list: Seq[String]) => {
  val index = Random.nextInt(list.length)
  list(index)
}
} | ermanno-pirotta/gatling-spring-petclinic | src/org/piro84/performance/petclinic/request/RequestUtils.scala | Scala | apache-2.0 | 254 |
package im.actor.server.util
import java.nio.ByteBuffer
import java.security.MessageDigest
import scala.concurrent.forkjoin.ThreadLocalRandom
import akka.actor.ActorSystem
import org.apache.commons.codec.digest.DigestUtils
import im.actor.server.models
object ACLUtils {

  /** Reads the shared secret from the actor system's config (key "secret"). */
  def secretKey()(implicit s: ActorSystem) =
    s.settings.config.getString("secret")

  /** First 8 bytes of the MD5 digest of `s`, read as a big-endian Long. */
  def hash(s: String): Long =
    ByteBuffer.wrap(MessageDigest.getInstance("MD5").digest(s.getBytes)).getLong

  // NOTE(review): the exact colon-separated layouts below are part of the
  // access-hash contract — changing any of them invalidates hashes already
  // handed out to clients.

  /** Access hash binding an auth session to a user. */
  def userAccessHash(authId: Long, userId: Int, accessSalt: String)(implicit s: ActorSystem): Long =
    hash(s"$authId:$userId:$accessSalt:${secretKey()}")

  def userAccessHash(authId: Long, u: models.User)(implicit s: ActorSystem): Long =
    userAccessHash(authId, u.id, u.accessSalt)

  /** Access hash for a user's phone record. */
  def phoneAccessHash(authId: Long, userId: Int, phoneId: Int, accessSalt: String)(implicit s: ActorSystem): Long =
    hash(s"$authId:$userId:$phoneId:$accessSalt:${secretKey()}")

  def phoneAccessHash(authId: Long, p: models.UserPhone)(implicit s: ActorSystem): Long =
    phoneAccessHash(authId, p.userId, p.id, p.accessSalt)

  /** Access hash for a user's email record. */
  def emailAccessHash(authId: Long, userId: Int, emailId: Int, accessSalt: String)(implicit s: ActorSystem): Long =
    hash(s"$authId:$userId:$emailId:$accessSalt:${secretKey()}")

  def emailAccessHash(authId: Long, e: models.UserEmail)(implicit s: ActorSystem): Long =
    emailAccessHash(authId, e.userId, e.id, e.accessSalt)

  /** Access hash for a stored file. */
  def fileAccessHash(fileId: Long, accessSalt: String)(implicit s: ActorSystem): Long =
    hash(s"$fileId:$accessSalt:${secretKey()}")

  /** SHA-1 hex transaction identifier derived from a salt and the secret. */
  def authTransactionHash(accessSalt: String)(implicit s: ActorSystem): String =
    DigestUtils.sha1Hex(s"$accessSalt:${secretKey()}")

  // Random salts/tokens; ThreadLocalRandom is not cryptographically strong —
  // NOTE(review): confirm this is acceptable for these values.
  def nextAccessSalt(rng: ThreadLocalRandom): String = rng.nextLong().toString

  def nextAccessSalt(): String = {
    nextAccessSalt(ThreadLocalRandom.current())
  }

  def accessToken(rng: ThreadLocalRandom): String = DigestUtils.sha256Hex(rng.nextLong().toString)
}
| boneyao/actor-platform | actor-server/actor-utils/src/main/scala/im/actor/server/util/ACLUtils.scala | Scala | mit | 1,971 |
package org.eknet.sitebag.rest
import scala.concurrent.ExecutionContext
import akka.actor.{ActorRefFactory, ActorRef}
import akka.pattern.ask
import akka.util.Timeout
import spray.routing.{Route, Directives}
import spray.httpx.SprayJsonSupport
import spray.httpx.marshalling.ToResponseMarshaller
import spray.http._
import org.eknet.sitebag._
import org.eknet.sitebag.model._
import org.eknet.sitebag.ToggleArchived
import porter.app.client.PorterContext
import porter.model.Ident
class AppHttp(val settings: SitebagSettings, appRef: ActorRef, refFactory: ActorRefFactory, ec: ExecutionContext, to: Timeout)
extends Directives with RestDirectives with FormUnmarshaller {
// Expose the constructor-supplied ask-timeout and execution context as
// implicits for the `?` calls and Future combinators below.
implicit def timeout = to
implicit def executionContext = ec

import spray.json._
import JsonProtocol._
import SprayJsonSupport._
/** Completes with one page entry; `full` selects content vs. metadata only. */
def getEntry(subject: Ident, entryid: String, full: Boolean): Route = {
  val request = if (full) GetEntry(subject, entryid) else GetEntryMeta(subject, entryid)
  complete {
    (appRef ? request).mapTo[Result[PageEntry]]
  }
}
/** Completes with the subject's entry list, post-processed by `transform`
 *  (used to render either plain JSON or an RSS feed).
 */
def listEntries[A](subject: String, transform: (EntrySearch, Result[List[PageEntry]]) => A)(implicit rm: ToResponseMarshaller[A]): Route = {
  fromParams(EntrySearch) { search =>
    complete {
      (appRef ? search.toListEntries(subject))
        .mapTo[Result[List[PageEntry]]]
        .map(transform(search, _))
    }
  }
}
/**
 * The complete REST route for one `subject` (account name): login/logout,
 * entry CRUD, tagging, cached content, tag clouds and entry listings in
 * JSON or RSS form. Each branch is guarded by cookie- or token-based
 * access checks.
 */
def route(subject: String): Route = {
  path("login") {
    // login means validating the provided credentials and sending
    // a cookie back that can be used as an authenticator
    authenticateWithCookie { acc ⇒
      val r: Result[Ident] = Success(acc.name, "Login successful")
      complete(r)
    } ~
    complete {
      val f: Ack = Failure("Credentials failed")
      f
    }
  } ~
  path("logout") {
    // logout means only sending a cookie header that instructs clients
    // to remove this cookie.
    removeAuthCookie {
      val r: Ack = Success("Logout successful")
      complete(r)
    }
  } ~
  path("entry") {
    // add urls to sitebag
    (put | post) {
      checkAccess(subject, checkAddEntry) { rctx =>
        handle { radd: RAdd =>
          (appRef ? Add(rctx.subject, ExtractRequest(radd.url), radd.title, radd.tags)).mapTo[Result[String]]
        }
      }
    } ~
    get {
      // this is a special handler for the bookmarklet. it will respond with a
      // little javascript alert
      parameters("url", "add") { (url, _) =>
        checkAccess(subject, checkAddEntry) { rctx =>
          val f = (appRef ? Add(rctx.subject, ExtractRequest(url))).mapTo[Result[String]]
          onSuccess(f) { result =>
            respondWithMediaType(MediaTypes.`application/javascript`) {
              complete("alert('"+ result.message +"');")
            }
          }
        } ~
        // fallback when the access check fails: tell the user to log in first
        respondWithMediaType(MediaTypes.`application/javascript`) {
          complete(s"alert('»$subject« is not logged in!\\\\n\\\\nPlease login to your Sitebag account at\\\\n${settings.uiUri("")}!');")
        }
      }
    }
  } ~
  path("entry" / Segment) { id =>
    // gets a single entry with complete content by default, if `complete=false`
    // the full content is not sent
    get {
      parameters('token.?, 'complete.as[Boolean] ? true) { (token, full) ⇒
        // token authentication (query param) takes precedence over the cookie
        token match {
          case Some(t) ⇒
            checkToken(subject, Token(t), checkGetEntry(id)) { _ ⇒
              getEntry(subject, id, full)
            }
          case None ⇒
            checkAccess(subject, checkGetEntry(id)) { rctx ⇒
              getEntry(rctx.subject, id, full)
            }
        }
      }
    } ~
    // deletes a entry by its id
    delete {
      checkAccess(subject, checkDeleteEntry(id)) { rctx =>
        complete {
          (appRef ? DropEntry(rctx.subject, id)).mapTo[Ack]
        }
      }
    } ~
    // POST alternative for clients that cannot issue DELETE requests
    post {
      checkAccess(subject, checkDeleteEntry(id)) { rctx =>
        handle { _: DeleteAction =>
          (appRef ? DropEntry(rctx.subject, id)).mapTo[Ack]
        }
      }
    }
  } ~
  path("entry" / Segment / "togglearchived") { id =>
    // toggles the archived flag of an entry
    post {
      checkAccess(subject, checkUpdateEntry(id)) { rctx =>
        complete {
          (appRef ? ToggleArchived(rctx.subject, id)).mapTo[Result[Boolean]]
        }
      }
    }
  } ~
  path("entry" / Segment / "setarchived") { id =>
    // sets the archived flag of the entry to `true`
    post {
      checkAccess(subject, checkUpdateEntry(id)) { rctx =>
        handle { flag: Flag =>
          (appRef ? SetArchived(rctx.subject, id, flag.flag)).mapTo[Result[Boolean]]
        }
      }
    }
  } ~
  path("entry" / Segment / "tag") { id =>
    // adds all given tags to this entry
    post {
      checkAccess(subject, checkUpdateEntry(id)) { rctx =>
        handle { tagin: TagInput =>
          (appRef ? TagEntry(rctx.subject, id, tagin.tags)).mapTo[Ack]
        }
      }
    }
  } ~
  path("entry" / Segment / "untag") { id =>
    // removes all given tags from this entry
    post {
      checkAccess(subject, checkUpdateEntry(id)) { rctx =>
        handle { tagin: TagInput =>
          (appRef ? UntagEntry(rctx.subject, id, tagin.tags)).mapTo[Ack]
        }
      }
    }
  } ~
  path("entry" / Segment / "tags") { id =>
    // removes all tags from this entry and adds the given tags
    post {
      checkAccess(subject, checkUpdateEntry(id)) { rctx =>
        handle { tagin: TagInput =>
          (appRef ? SetTags(subject, id, tagin.tags)).mapTo[Ack]
        }
      }
    }
  } ~
  path("entry" / Segment / "cache") { id =>
    // return the original page from cache
    get {
      checkAccess(subject, checkGetEntry(id)) { rctx =>
        getEntryContent(appRef, GetEntryContent(rctx.subject, id))
      }
    }
  } ~
  path("tags") {
    // returns a "tag cloud"
    get {
      checkAccess(subject, checkListTags) { rctx =>
        fromParams(TagFilter) { f =>
          complete {
            (appRef ? ListTags(rctx.subject, f.filter)).mapTo[Result[TagList]]
          }
        }
      }
    }
  } ~
  // returns a list of entries as json
  path("entries" / "json") {
    get {
      checkAccess(subject, checkGetEntries) { rctx =>
        listEntries(subject, (s, r) => r)
      }
    }
  } ~
  // same as above, authenticated by a token embedded in the url path
  path("entries" / "json" / Segment) { token =>
    get {
      checkToken(subject, Token(token), checkGetEntries) { _ =>
        listEntries(subject, (s, r) ⇒ r)
      }
    }
  } ~
  // returns a list of entries as rss xml
  path("entries" / "rss") {
    import RssSupport._
    get {
      checkAccess(subject, checkGetEntries) { rctx =>
        listEntries(subject, (search, res) => {
          val uri = settings.rssFeedUrl(subject, rctx.token.get, search)
          mapRss(uri, rctx.subject, search, res)(settings.entryUiUri)
        })
      }
    }
  } ~
  path("entries" / "rss" / Segment) { tokenStr ⇒
    // returns a list of entries as rss xml and authenticates the request with
    // the token given as last part in the url path
    get {
      import RssSupport._
      val token = Token(tokenStr)
      checkToken(subject, token, checkGetEntries) { _ ⇒
        listEntries(subject, (search, res) ⇒ {
          val uri = settings.rssFeedUrl(subject, token, search)
          mapRss(uri, subject, search, res)(settings.entryUiUri)
        })
      }
    }
  }
}
}
| eikek/sitebag | src/main/scala/org/eknet/sitebag/rest/AppHttp.scala | Scala | apache-2.0 | 7,840 |
package im.mange.jetpac.comet
/** Message carrying a subscriber to register for pushed updates. */
case class Subscribe(subscriber: Subscriber)
/** Message carrying a subscriber to deregister. */
case class Unsubscribe(subscriber: Subscriber)
/** Message requesting that `message` be delivered to every current subscriber. */
case class PushToAllSubscribers(message: Any)
| alltonp/jetboot | src/main/scala/im/mange/jetpac/comet/Messages.scala | Scala | apache-2.0 | 169 |
package org.bitcoins.dlc.testgen
import java.io.File
import org.bitcoins.core.protocol.tlv.TLV
import play.api.libs.json.{JsResult, JsValue}
import scala.concurrent.Future
/** Test-vector generator for DLC wire-message (TLV) parsing.
  *
  * Wires the generic [[TestVectorGen]] machinery to [[DLCParsingTestVector]]:
  * vectors are written to/read from `defaultTestFile`, inputs are TLVs
  * serialized under the "input" JSON key, and `generateTestVectors` produces
  * one vector per DLC message type via the `DLCTLVGen` helpers.
  */
object DLCParsingTestVectorGen
    extends TestVectorGen[DLCParsingTestVector, TLV] {

  // JSON file the generated vectors are persisted to (repo-relative path).
  override val defaultTestFile: File = new File(
    "dlc-test/src/test/scala/org/bitcoins/dlc/testgen/dlc_message_test.json")

  // The companion object doubles as the parser for stored vectors.
  override val testVectorParser: DLCParsingTestVector.type =
    DLCParsingTestVector

  // Deserializes the "input" field of a stored vector back into a TLV.
  override def inputFromJson: JsValue => JsResult[TLV] =
    DLCParsingTestVector.tlvFromJson

  // JSON key under which the TLV input is stored.
  override val inputStr: String = "input"

  // Building a vector from a TLV is pure, hence Future.successful.
  override def generateFromInput: TLV => Future[DLCParsingTestVector] = { tlv =>
    Future.successful(DLCParsingTestVector(tlv))
  }

  /** Generates one parsing vector for each DLC message type. */
  override def generateTestVectors(): Future[Vector[DLCParsingTestVector]] = {
    Future.successful(
      Vector(
        DLCTLVGen.contractDescriptorParsingTestVector(),
        DLCTLVGen.oracleInfoParsingTestVector(),
        DLCTLVGen.fundingInputParsingTestVector(),
        DLCTLVGen.cetSigsParsingTestVector(),
        DLCTLVGen.fundingSigsParsingTestVector(),
        DLCTLVGen.dlcOfferParsingTestVector(),
        DLCTLVGen.dlcAcceptParsingTestVector(),
        DLCTLVGen.dlcSignParsingTestVector()
      )
    )
  }
}
| bitcoin-s/bitcoin-s | dlc-test/src/test/scala/org/bitcoins/dlc/testgen/DLCParsingTestVectorGen.scala | Scala | mit | 1,280 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.strategy
import org.apache.commons.lang3.StringUtils
import org.apache.spark.sql.{CarbonEnv, SparkSession}
import org.apache.spark.sql.carbondata.execution.datasources.CarbonSparkDataSourceUtil
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
import org.apache.spark.sql.execution.command.{ExecutedCommandExec, RunnableCommand}
import org.apache.spark.sql.execution.command.management.CarbonAlterTableCompactionCommand
import org.apache.spark.sql.execution.command.schema.{CarbonAlterTableAddColumnCommand, CarbonAlterTableColRenameDataTypeChangeCommand, CarbonAlterTableDropColumnCommand}
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.types.StructField
import org.apache.spark.util.{CarbonReflectionUtils, SparkUtil}
import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
import org.apache.carbondata.core.util.DataTypeUtil
/**
 * Helpers used by the Carbon Spark strategy to turn ALTER TABLE / compaction
 * commands into executable plans, dispatching between Carbon tables and
 * (where supported) plain Hive/Spark tables.
 */
object CarbonPlanHelper {

  /**
   * Plans an ALTER TABLE ADD COLUMNS command.
   *
   * For Carbon tables the Carbon-specific command is executed directly
   * (rejecting external-fileformat and non-transactional tables). For
   * non-Carbon tables on Spark >= 2.2, the request is translated into
   * Spark's native AlterTableAddColumns via reflection; on older Spark
   * versions altering a hive table is unsupported.
   */
  def addColumn(
      addColumnCommand: CarbonAlterTableAddColumnCommand,
      sparkSession: SparkSession
  ): Seq[SparkPlan] = {
    val alterTableAddColumnsModel = addColumnCommand.alterTableAddColumnsModel
    if (isCarbonTable(
      TableIdentifier(
        alterTableAddColumnsModel.tableName,
        alterTableAddColumnsModel.databaseName),
      sparkSession)) {
      val carbonTable = CarbonEnv.getCarbonTable(alterTableAddColumnsModel.databaseName,
        alterTableAddColumnsModel.tableName)(sparkSession)
      if (carbonTable != null && carbonTable.isFileLevelFormat) {
        throw new MalformedCarbonCommandException(
          "Unsupported alter operation on Carbon external fileformat table")
      } else if (carbonTable != null && !carbonTable.getTableInfo.isTransactionalTable) {
        throw new MalformedCarbonCommandException(
          "Unsupported operation on non transactional table")
      } else {
        ExecutedCommandExec(addColumnCommand) :: Nil
      }
      // TODO: remove this else if check once the 2.1 version is unsupported by carbon
    } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
      // Translate Carbon's column model (dims + measures) into Spark StructFields,
      // preserving column comments when present.
      val structField = (alterTableAddColumnsModel.dimCols ++ alterTableAddColumnsModel.msrCols)
        .map { f =>
          val structField = StructField(f.column,
            CarbonSparkDataSourceUtil
              .convertCarbonToSparkDataType(DataTypeUtil.valueOf(f.dataType.get)))
          if (StringUtils.isNotEmpty(f.columnComment)) {
            structField.withComment(f.columnComment)
          } else {
            structField
          }
        }
      val identifier = TableIdentifier(
        alterTableAddColumnsModel.tableName,
        alterTableAddColumnsModel.databaseName)
      // Reflection is used because the Spark command class differs across versions.
      ExecutedCommandExec(CarbonReflectionUtils
        .invokeAlterTableAddColumn(identifier, structField).asInstanceOf[RunnableCommand]) ::
      Nil
      // TODO: remove this else check once the 2.1 version is unsupported by carbon
    } else {
      throw new MalformedCarbonCommandException("Unsupported alter operation on hive table")
    }
  }

  /**
   * Plans an ALTER TABLE column rename / datatype change.
   * Supported only on transactional, non-external Carbon tables; anything
   * else raises MalformedCarbonCommandException.
   */
  def changeColumn(
      changeColumnCommand: CarbonAlterTableColRenameDataTypeChangeCommand,
      sparkSession: SparkSession
  ): Seq[SparkPlan] = {
    val model = changeColumnCommand.alterTableColRenameAndDataTypeChangeModel
    if (isCarbonTable(TableIdentifier(model.tableName, model.databaseName), sparkSession)) {
      val carbonTable =
        CarbonEnv.getCarbonTable(model.databaseName, model.tableName)(sparkSession)
      if (carbonTable != null && carbonTable.isFileLevelFormat) {
        throw new MalformedCarbonCommandException(
          "Unsupported alter operation on Carbon external fileformat table")
      } else if (carbonTable != null && !carbonTable.getTableInfo.isTransactionalTable) {
        throw new MalformedCarbonCommandException(
          "Unsupported operation on non transactional table")
      } else {
        ExecutedCommandExec(changeColumnCommand) :: Nil
      }
    } else {
      throw new MalformedCarbonCommandException(
        String.format("Table or view '%s' not found in database '%s' or not carbon fileformat",
          model.tableName,
          model.databaseName.getOrElse("default")))
    }
  }

  /**
   * Plans an ALTER TABLE DROP COLUMNS command on a Carbon table.
   * NOTE(review): unlike addColumn/changeColumn this does not first check
   * isCarbonTable — presumably the caller guarantees it; confirm.
   */
  def dropColumn(
      dropColumnCommand: CarbonAlterTableDropColumnCommand,
      sparkSession: SparkSession
  ): Seq[SparkPlan] = {
    val alterTableDropColumnModel = dropColumnCommand.alterTableDropColumnModel
    val carbonTable = CarbonEnv.getCarbonTable(alterTableDropColumnModel.databaseName,
      alterTableDropColumnModel.tableName)(sparkSession)
    if (carbonTable != null && carbonTable.isFileLevelFormat) {
      throw new MalformedCarbonCommandException(
        "Unsupported alter operation on Carbon external fileformat table")
    } else if (carbonTable != null && !carbonTable.getTableInfo.isTransactionalTable) {
      throw new MalformedCarbonCommandException(
        "Unsupported operation on non transactional table")
    } else {
      ExecutedCommandExec(dropColumnCommand) :: Nil
    }
  }

  /** Plans an ALTER TABLE COMPACT command; only valid for Carbon tables. */
  def compact(
      compactionCommand: CarbonAlterTableCompactionCommand,
      sparkSession: SparkSession
  ): Seq[SparkPlan] = {
    val alterTableModel = compactionCommand.alterTableModel
    if (isCarbonTable(TableIdentifier(alterTableModel.tableName, alterTableModel.dbName),
      sparkSession)) {
      ExecutedCommandExec(compactionCommand) :: Nil
    } else {
      throw new MalformedCarbonCommandException(
        String.format("Table or view '%s' not found in database '%s' or not carbon fileformat",
          alterTableModel.tableName,
          alterTableModel.dbName.getOrElse("default")))
    }
  }

  /** True when the (lower-cased) identifier resolves to a Carbon table in the metastore. */
  def isCarbonTable(tableIdent: TableIdentifier, sparkSession: SparkSession): Boolean = {
    val dbOption = tableIdent.database.map(_.toLowerCase)
    val tableIdentifier = TableIdentifier(tableIdent.table.toLowerCase(), dbOption)
    CarbonEnv
      .getInstance(sparkSession)
      .carbonMetaStore
      .tableExists(tableIdentifier)(sparkSession)
  }

  /** True when the (lower-cased) identifier exists in Spark's session catalog (any format). */
  def isTableExists(tableIdent: TableIdentifier, sparkSession: SparkSession): Boolean = {
    val dbOption = tableIdent.database.map(_.toLowerCase)
    val tableIdentifier = TableIdentifier(tableIdent.table.toLowerCase(), dbOption)
    sparkSession.sessionState.catalog.tableExists(tableIdentifier)
  }

  /**
   * Fails fast for operations that require a Carbon table:
   * NoSuchTableException when the table is absent, or
   * UnsupportedOperationException with `message` when it exists but is not Carbon.
   */
  def validateCarbonTable(
      tableIdentifier: TableIdentifier,
      sparkSession: SparkSession,
      message: String
  ): Unit = {
    if (!CarbonPlanHelper.isTableExists(tableIdentifier, sparkSession)) {
      throw new NoSuchTableException(
        tableIdentifier.database.getOrElse(
          CarbonEnv.getDatabaseName(tableIdentifier.database)(sparkSession)),
        tableIdentifier.table)
    }
    if (!CarbonPlanHelper.isCarbonTable(tableIdentifier, sparkSession)) {
      throw new UnsupportedOperationException(message)
    }
  }
}
| jackylk/incubator-carbondata | integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonPlanHelper.scala | Scala | apache-2.0 | 7,719 |
package mesosphere.marathon
package api
import diffson.jsonpatch.lcsdiff._
import diffson.jsonpatch.{Add, Copy, Operation}
import diffson.lcs.Patience
import diffson.playJson._
import mesosphere.marathon.raml.RamlSerializer
import org.scalatest.{Assertions, Matchers}
import play.api.libs.json._
import scala.collection.Map
/**
 * Assertion helpers for JSON (de)serialization tests, comparing documents
 * structurally via diffson JSON patches rather than string equality.
 */
object JsonTestHelper extends Assertions with Matchers {
  // LCS implementation diffson uses when diffing JSON arrays.
  implicit val lcs = new Patience[JsValue]

  /** Serializes `value` with play-json and reads it back, asserting the round trip
    * succeeds and yields an equal value (both sides passed through `normalize`). */
  def assertSerializationRoundtripWorks[T](value: T, normalize: T => T = { t: T => t })(implicit format: Format[T]): Unit = {
    val normed = normalize(value)
    val json = Json.toJson(normed)
    val reread = Json.fromJson[T](json)
    withClue(s"for json:\n${Json.prettyPrint(json)}\n") {
      reread should be('success)
      normed should be(normalize(reread.get))
    }
  }

  /** Like [[assertSerializationRoundtripWorks]], but serializes with Jackson
    * and deserializes with play-json, checking cross-library compatibility. */
  def assertSerializationRoundtripWithJacksonWorks[T](value: T, normalize: T => T = { t: T => t })(implicit format: Format[T]): Unit = {
    val normed = normalize(value)
    val json = RamlSerializer.serializer.writeValueAsString(normed)
    val jsonObj = Json.parse(json)
    val reread = Json.fromJson[T](jsonObj)
    withClue(s"for json:\n${Json.prettyPrint(jsonObj)}\n") {
      reread should be('success)
      normed should be(normalize(reread.get))
    }
  }

  /** Asserts Jackson and play-json produce byte-identical JSON strings for `value`. */
  def assertSerializationIsSameForPlayAndJackson[T](value: T, normalize: T => T = { t: T => t })(implicit format: Format[T]): Unit = {
    val normed = normalize(value)
    val jsonJackson = RamlSerializer.serializer.writeValueAsString(normed)
    val jsonPlay = Json.toJson(normed).toString()
    jsonJackson should be(jsonPlay)
  }

  /** Entry point: pretty-prints `value` via play-json for fluent assertions. */
  def assertThatJsonOf[T](value: T)(implicit writes: Writes[T]): AssertThatJsonString = {
    AssertThatJsonString(Json.prettyPrint(Json.toJson(value)))
  }

  /** Entry point: serializes `value` via Jackson for fluent assertions. */
  def assertThatJacksonJsonOf[T](value: T): AssertThatJsonString = {
    val jsonJackson = RamlSerializer.serializer.writeValueAsString(value)
    AssertThatJsonString(jsonJackson)
  }

  /** Entry point: wraps an already-serialized JSON string for fluent assertions. */
  def assertThatJsonString(actual: String): AssertThatJsonString = {
    AssertThatJsonString(actual)
  }

  /** Recursively strips object fields whose value is JsNull, so that
    * "field absent" and "field explicitly null" compare equal. */
  def removeNullFieldValues(json: JsValue): JsValue =
    json match {
      case JsObject(fields) =>
        val withoutNullValues: Map[String, JsValue] = fields.filter {
          case (_, JsNull) => false
          case _ => true
        }
        val filterSubValues = withoutNullValues.map { case (k, v) => k -> removeNullFieldValues(v) }
        JsObject(filterSubValues)
      case JsArray(v) =>
        JsArray(v.map(removeNullFieldValues))
      case _: JsValue => json
    }

  /** Fluent assertions over a JSON string, using diffson patches to report differences. */
  case class AssertThatJsonString(actual: String) {
    val actualJson = Json.parse(actual)

    // Add/Copy operations mean `actual` merely has extra data, which the
    // "containsEverything" checks tolerate.
    private[this] def isAddition(op: Operation[JsValue]): Boolean =
      op match {
        case _: Add[_] | _: Copy[_] => true
        case _ => false
      }

    /** Asserts `actual` is a superset of `expected`: every difference from
      * expected to actual must be a pure addition. */
    def containsEverythingInJsonString(expected: String): Unit = {
      val expectedJson = Json.parse(expected)
      val diff = diffson.diff(expectedJson, actualJson)
      require(
        diff.ops.forall(isAddition),
        s"unexpected differences in actual json:\n$actual\nexpected:\n$expected\n${diff.ops.filter(!isAddition(_))}"
      )
    }

    /** Superset check against the JSON serialization of `expected`. */
    def containsEverythingInJsonOf[T](expected: T)(implicit writes: Writes[T]): Unit = {
      // BUG FIX: previously delegated to correspondsToJsonString (exact match),
      // which duplicated correspondsToJsonOf and contradicted this method's name.
      // Delegate to the superset check, mirroring containsEverythingInJsonString.
      containsEverythingInJsonString(Json.prettyPrint(Json.toJson(expected)))
    }

    /** Asserts `actual` and `expected` are structurally identical (empty diff). */
    def correspondsToJsonString(expected: String): Unit = {
      val expectedJson = Json.parse(expected)
      val diff = diffson.diff(expectedJson, actualJson)
      require(diff.ops.isEmpty, s"unexpected differences in actual json:\n$actual\nexpected:\n$expected\ndiff\n$diff")
    }

    /** Exact-match check against the JSON serialization of `expected`. */
    def correspondsToJsonOf[T](expected: T)(implicit writes: Writes[T]): Unit = {
      correspondsToJsonString(Json.prettyPrint(Json.toJson(expected)))
    }
  }
}
| mesosphere/marathon | src/test/scala/mesosphere/marathon/api/JsonTestHelper.scala | Scala | apache-2.0 | 3,780 |
package com.sai.scratchpad.core
/** Scratchpad utility: pulls JIRA-style issue keys (e.g. "OMAN-1234") out of free text. */
object JiraRegex {

  // Kept from the original: '*' quantifiers mean a bare "-" also matches,
  // which is why the split-based filter below is required.
  private val JiraKeyPattern = "[A-Z]*-[0-9]*.*?".r

  /**
   * Extracts candidate JIRA keys from `text`, in order of appearance.
   * Matches consisting only of a dash (no project prefix and no number)
   * are dropped by the `split("-")` filter.
   */
  def extractJiraKeys(text: String): List[String] =
    JiraKeyPattern
      .findAllMatchIn(text)
      .map(_.toString)
      .filter(_.split("-").length > 1)
      .toList

  /** Demo entry point: prints the keys found in a sample string, one per line. */
  def main(args: Array[String]): Unit = {
    val str = " OMAN-1234,GSLSEC-1:djsljk GSLUTIL-12344 -- d thr kldsklj sdljaslj"
    println(extractJiraKeys(str).mkString("\n"))
  }
}
/*
* Licensed to STRATIO (C) under one or more contributor license agreements.
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership. The STRATIO (C) licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.stratio.connector.commons
import com.codahale.metrics.{MetricRegistry, Timer}
import org.slf4j.Logger
/** Mixin offering simple wall-clock timing helpers, optionally backed by a
  * Dropwizard Metrics [[Timer]]. */
trait ABSTimer {

  /** Times `f` under a random unique label (see [[getUID]]). */
  def time[T](f: => T)(implicit logger: Logger): T =
    time(getUID)(f)(logger)

  /** Returns a random UUID string.
    * The type parameter is unused; it is kept for source compatibility. */
  def getUID[T]: String = {
    java.util.UUID.randomUUID().toString
  }

  /**
   * Evaluates the by-name expression {{{f}}} and logs the time spent on it.
   *
   * @param op Description of what {{{f}}} does
   * @param f By-name {{{T}}} expression
   * @param logger Implicit logger used to print out elapsed time
   * @tparam T Expression type
   * @return Expression value
   */
  def time[T](op: String)(f: => T)(implicit logger: Logger): T = {
    val before = System.currentTimeMillis()
    val t = f
    val after = System.currentTimeMillis()
    logger.debug( s"""millis: [${after - before}] $op""")
    t
  }

  /**
   * Evaluates {{{f}}} while recording its duration in the Metrics timer named
   * `timerName`, creating the timer on first use.
   *
   * @param timerName name under which the timer is registered
   * @param f by-name expression to evaluate
   * @tparam U unused; kept for source compatibility
   * @return the value of {{{f}}}
   */
  def timeFor[T, U](timerName: String)(f: => T)(
    implicit metricRegistry: MetricRegistry, logger: Logger): T = {
    // BUG FIX: MetricRegistry.timer is an atomic get-or-create, replacing the
    // previous hand-rolled getTimers/register dance that could race across
    // instances sharing one registry.
    val timer = metricRegistry.timer(timerName)
    if (logger.isDebugEnabled()) {
      logger.debug(s"The process [$timerName] is starting")
    }
    val context = timer.time()
    val t = f
    // BUG FIX: Context.stop() records one sample per call; the old code called
    // it twice (once before running f), polluting the metric with a ~0ns sample.
    val elapsedNanos = context.stop()
    if (logger.isDebugEnabled()) {
      // BUG FIX: stop() returns nanoseconds; convert before labelling as millis.
      logger.debug( s"""[millis: ${elapsedNanos / 1000000L}] $timerName""")
    }
    t
  }
}
object timer extends ABSTimer
| Stratio/stratio-connector-commons | connector-commons/src/main/scala/com.stratio.connector.commons/timer.scala | Scala | apache-2.0 | 2,316 |
package nl.svanwouw.trending
import org.apache.spark.{SparkContext, SparkConf}
/**
* Test context which sets the spark context to be available for every test.
*/
object SparkTestContext {
  // Spark application name reported by the test context.
  val AppName = "SparkTestJob"
  // Single-JVM local master; no cluster needed for tests.
  val Master = "local"

  // Shared context, created lazily on first access of this object and never
  // stopped explicitly — acceptable for a short-lived test JVM.
  val sc = new SparkContext(new SparkConf().setAppName(AppName).setMaster(Master))
}
| stefanvanwouw/spark-based-trending-topics-extraction | src/test/scala/nl/svanwouw/trending/SparkTestContext.scala | Scala | mit | 332 |
/* *\
** _____ __ _____ __ ____ **
** / ___/ / / /____/ / / / \ FieldKit **
** / ___/ /_/ /____/ / /__ / / / (c) 2010, FIELD **
** /_/ /____/ /____/ /_____/ http://www.field.io **
\* */
/* created March 07, 2009 */
package field.kit.util
/**
* Implements a flexible Logging System
* @author Marcus Wendt
*/
/** Severity levels and the shared printing backend for the Logger trait below. */
object Logger extends Enumeration {

  // Severities in increasing order; ids 0 (ALL) through 6 (NONE).
  val ALL, FINE, INFO, WARNING, ERROR, FATAL, NONE = Value

  // Messages below this severity are discarded.
  var level: Value = INFO

  /** Prints `m`, space-separated and prefixed with `[name]`, when `l` passes the
    * current level filter. WARNING and above go to stderr, the rest to stdout. */
  def log(l: Value, name: String, m: Seq[Any]): Unit =
    if (l >= level) {
      val stream = if (l < WARNING) System.out else System.err
      stream.println(m.foldLeft("[" + name + "]")(_ + " " + _))
    }
}
/** Mixin giving a class level-based logging methods routed through the Logger object. */
trait Logger {
  // Explicit log name; when null, the class name is derived reflectively.
  private var name:String = null

  // Derives a short name from the runtime class: strips the trailing '$' of
  // Scala objects and the package prefix.
  // NOTE(review): getCanonicalName returns null for anonymous/local classes,
  // which would NPE here — only NoClassDefFoundError is caught. Confirm callers
  // always set logName in those cases.
  private def className = {
    try {
      var n = this.getClass.getCanonicalName
      if(n.endsWith("$")) n = n.slice(0, n.length-1)
      var dot = n.lastIndexOf('.')
      if(dot < 0) n else n.substring(dot+1)
    } catch {
      case e:NoClassDefFoundError => "Anonymous"
    }
  }

  // Forwards to the shared backend with this instance's log name.
  private def log(l:Logger.Value, m:Seq[Any]) = Logger.log(l, logName, m)

  // logName is a var-like property: assignable, falls back to the class name.
  def logName_=(name:String) = this.name = name
  def logName = if(name == null) className else name

  def fine(m:Any*) = log(Logger.FINE, m)
  def info(m:Any*) = log(Logger.INFO, m)
  def warn(m:Any*) = log(Logger.WARNING, m)
  def error(m:Any*) = log(Logger.ERROR, m)
  // fatal logs and terminates the JVM (exit code -1, or the given code).
  def fatal(m:Any*):Unit = { log(Logger.FATAL, m); System.exit(-1) }
  def fatal(code:Int, m:Any*) = { log(Logger.FATAL, m); System.exit(code) }
}
| field/FieldKit.scala | src/field/kit/util/Logger.scala | Scala | lgpl-3.0 | 1,804 |
/*
* ParticleFilter.scala
* Particle Filtering
*
* Created By: Avi Pfeffer (apfeffer@cra.com)
* Creation Date: Jan 1, 2009
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email figaro@cra.com for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.algorithm.filtering
import com.cra.figaro.algorithm.sampling._
import com.cra.figaro.language._
import com.cra.figaro.util._
import sun.swing.AccumulativeRunnable
/**
* An abstract class of particle filters.
* A particle filter is provided with three models:
* a static model, containing a universe defining a distribution over static elements that do not change over time;
* an initial model, containing a universe defining a distribution over the initial state of time-varying elements;
* and a transition model, which is a function from the previous universe to a new universe. defining the way the distribution over the new state
* of the time-varying variables depends on their values in the previous state.
* The fourth argument to the particle filter is the number of particles to use at each time step.
*
* The particle filter works in an online fashion. At each point in time, it maintains its current beliefs about the state of the system as a set of
* representative states. advanceTime is used to move forward one time step. The particle filter updates its beliefs in light
* of the new evidence.
*
* @param static A universe with static elements that do not change over time.
 * @param initial The universe describing the initial distribution of the model.
* @param transition The transition function that returns a new universe from a static and previous universe, respectively.
*/
abstract class ParticleFilter(static: Universe = new Universe(), initial: Universe, transition: (Universe, Universe) => Universe, numParticles: Int)
  extends Filtering(static, initial, transition) {

  /** The belief about the state of the system at the current point in time. */
  val beliefState: ParticleFilter.BeliefState = Array.fill(numParticles)(null)

  // Running sum of log(average particle weight) across time steps; exposed via
  // getlogProbEvidence / probEvidence.
  protected var logProbEvidence: Double = 0.0

  // Universe of the previous time step, reduced to constants by advanceUniverse.
  protected var previousUniverse: Universe = _

  // Universe of the current time step; starts as the initial model.
  protected var currentUniverse = initial

  /**
   * Returns the expectation of the element referred to by the reference
   * under the given function at the current time point.
   */
  def computeCurrentExpectation[T](reference: Reference[T], function: T => Double): Double = {
    val fValues: Seq[Double] = beliefState.map(state => function(state.get(reference)))
    val total = (fValues :\ 0.0)(_ + _)
    // Unweighted average: after resampling in updateBeliefState all particles carry equal weight.
    total.toDouble / numParticles
  }

  /**
   * Returns the distribution over the element referred to by the reference at the current time point.
   */
  def computeCurrentDistribution[T](reference: Reference[T]): Stream[(Double, T)] = {
    // Count how many particles take each value, then normalize by the particle count.
    val map = scala.collection.mutable.Map[T, Int]()
    for {
      state <- beliefState
    } {
      val t = state.get(reference)
      val prevCount = map.getOrElse(t, 0)
      map += t -> (prevCount + 1)
    }
    val z = 1.0 / beliefState.size
    val normalized = map.toList.map((pair: (T, Int)) => (pair._2 * z, pair._1))
    normalized.toStream
  }

  /*
   * Careful: makeWeightedParticle overwrites the previous state with the new state. That means we can't use it to generate another new particle from the same previous
   * state. The reason for this design is to avoid creating new snapshots and states to conserve memory.
   */
  // Samples currentUniverse forward, weights it by condition satisfaction and
  // constraint values, and snapshots the result into a new State.
  protected def makeWeightedParticle(previousState: State): ParticleFilter.WeightedParticle = {
    Forward(currentUniverse)
    // avoiding recursion
    var satisfied = true
    var conditionedElementsRemaining = currentUniverse.conditionedElements
    while (!conditionedElementsRemaining.isEmpty) {
      satisfied &= conditionedElementsRemaining.head.conditionSatisfied
      conditionedElementsRemaining = conditionedElementsRemaining.tail
    }
    // Weight = product of exp(constraint) over constrained elements, or 0 if any condition failed.
    val weight =
      if (satisfied) {
        var w = 1.0
        var constrainedElementsRemaining = currentUniverse.constrainedElements
        while (!constrainedElementsRemaining.isEmpty) {
          w *= math.exp(constrainedElementsRemaining.head.constraintValue)
          constrainedElementsRemaining = constrainedElementsRemaining.tail
        }
        w
      } else 0.0
    val snapshot = new Snapshot
    snapshot.store(currentUniverse)
    val state = new State(snapshot, previousState.static)
    (weight, state)
  }

  // Replaces beliefState with (re)sampled particles proportional to their weights.
  private[figaro] def updateBeliefState(weightedParticles: Seq[ParticleFilter.WeightedParticle]) {
    // If all the particles have weight 1, there is no need to resample
    // If all the particles have weight 0, none of them satisfy the conditions, so the best we can do is produce a uniform distribution over them.
    if (weightedParticles.forall(_._1 == 1.0) || weightedParticles.forall(_._1 == 0.0)) {
      val weightedParticleArray = weightedParticles.toArray
      for { i <- 0 until numParticles } {
        beliefState(i) = weightedParticleArray(i)._2
      }
    } else {
      val resampler = new MapResampler(weightedParticles)
      for { i <- 0 until numParticles } {
        beliefState(i) = resampler.resample()
      }
    }
  }

  private[figaro] def computeProbEvidence(weightedParticles: Seq[ParticleFilter.WeightedParticle]) {
    // compute probability of evidence here by taking the average weight of the weighted particles and store it so you can later return it as a query result
    val weightedParticleArray = weightedParticles.toArray
    val sum = weightedParticleArray.map(_._1).sum
    logProbEvidence = logProbEvidence + scala.math.log(sum / numParticles)
  }

  // Restores the state at `index`, asserts the new evidence on currentUniverse,
  // and produces the next weighted particle from it.
  protected def addWeightedParticle(evidence: Seq[NamedEvidence[_]], index: Int): ParticleFilter.WeightedParticle = {
    val previousState = beliefState(index)
    previousState.dynamic.restore(previousUniverse)
    previousState.static.restore(static)
    currentUniverse.assertEvidence(evidence)
    val result = makeWeightedParticle(previousState)
    result
  }

  // Samples the static universe once and builds a first-time-step particle from it.
  protected def initialWeightedParticle(): ParticleFilter.WeightedParticle = {
    Forward(static)
    val staticSnapshot = new Snapshot
    staticSnapshot.store(static)
    val state = new State(new Snapshot, staticSnapshot)
    makeWeightedParticle(state)
  }

  /*
   * Advance the universe one time step.
   * The previous universe becomes a copy of the current universe with all named elements replaced by constants.
   * This is done so we don't have to store the previous universe (and the universes previous to it), and we can release the memory.
   */
  protected def advanceUniverse() {
    previousUniverse = Universe.createNew()
    for { element <- currentUniverse.activeElements.filter(!_.name.isEmpty) } {
      new Settable(element.name.string, element.value, previousUniverse)
    }
    currentUniverse = transition(static, previousUniverse)
  }

  /**
   * The computed log probability of evidence.
   */
  def getlogProbEvidence(): Double = {
    logProbEvidence
  }

  /**
   * The computed probability of evidence.
   */
  def probEvidence(): Double = {
    val probEvidence = scala.math.exp(logProbEvidence)
    probEvidence
  }

}
/**
* A one-time particle filter.
*
* @param static The universe of elements whose values do not change over time
* @param initial The universe describing the distribution over the initial state of the system
* @param transition The transition model describing how the current state of the system depends on the previous
* @param numParticles The number of particles to use at each time step
*/
class OneTimeParticleFilter(static: Universe = new Universe(), initial: Universe, transition: (Universe, Universe) => Universe, numParticles: Int)
  extends ParticleFilter(static, initial, transition, numParticles) with OneTimeFiltering {

  // One full step: generate all particles, fold their average weight into the
  // probability of evidence, then resample into beliefState.
  private def doTimeStep(weightedParticleCreator: Int => ParticleFilter.WeightedParticle) {
    val weightedParticles = for { i <- 0 until numParticles } yield weightedParticleCreator(i)
    // compute probability of evidence here by taking the average weight of the weighted particles and store it so you can later return it as a query result
    computeProbEvidence(weightedParticles)
    updateBeliefState(weightedParticles)
  }

  /**
   * Begin the particle filter, determining the initial distribution.
   */
  def run(): Unit = {
    doTimeStep((i: Int) => initialWeightedParticle())
  }

  /**
   * Advance the filtering one time step, conditioning on the given evidence at the new time point.
   */
  def advanceTime(evidence: Seq[NamedEvidence[_]] = List()): Unit = {
    advanceUniverse()
    doTimeStep((i: Int) => addWeightedParticle(evidence, i))
  }
}
/** Factory methods and type aliases for particle filters. */
object ParticleFilter {

  /**
   * A one-time particle filter.
   *
   * @param static The universe of elements whose values do not change over time
   * @param initial The universe describing the distribution over the initial state of the system
   * @param transition The transition model describing how the current state of the system depends on the static and previous, respectively
   * @param numParticles Number of particles to use at each time step
   */
  def apply(static: Universe, initial: Universe, transition: (Universe, Universe) => Universe, numParticles: Int): OneTimeParticleFilter =
    new OneTimeParticleFilter(static, initial, transition, numParticles)

  /**
   * A one-time particle filter.
   *
   * @param static The universe of elements whose values do not change over time
   * @param initial The universe describing the distribution over the initial state of the system
   * @param transition The transition model describing how the current state of the system depends on the previous
   * @param numParticles Number of particles to use at each time step
   */
  @deprecated("If the static universe is defined, use the constructor where the transition function takes two universes", "2.3.0.0")
  def apply(static: Universe, initial: Universe, transition: Universe => Universe, numParticles: Int): OneTimeParticleFilter =
    new OneTimeParticleFilter(static, initial, (static: Universe, previous: Universe) => transition(previous), numParticles)

  /**
   * A one-time particle filter in which the static universe is empty.
   *
   * @param initial The universe describing the distribution over the initial state of the system
   * @param transition The transition model describing how the current state of the system depends on the previous
   * @param numParticles Number of particles to use at each time step
   */
  def apply(initial: Universe, transition: Universe => Universe, numParticles: Int): OneTimeParticleFilter =
    apply(new Universe(), initial, (static: Universe, previous: Universe) => transition(previous), numParticles)

  /**
   * A representation of the current beliefs of the particle filter.
   * A BeliefState should not be confused with a State, which is a particular configuration of the system.
   * A BeliefState represents a distribution over States, and in a particle filter, it is implemented as a collection of representative States.
   */
  type BeliefState = Array[State] // dynamic and static

  /** Weighted particles, consisting of a weight and a state. */
  type WeightedParticle = (Double, State)
}
| lfkellogg/figaro | Figaro/src/main/scala/com/cra/figaro/algorithm/filtering/ParticleFilter.scala | Scala | bsd-3-clause | 11,336 |
package controllers
import cats.data.Validated
import play.api.libs.json.Json
import play.api.mvc.Result
import scala.annotation.nowarn
import scala.concurrent.duration._
import views.html
import lila.api.Context
import lila.app._
import lila.challenge.{ Challenge => ChallengeModel }
import lila.common.{ Bearer, HTTPRequest, IpAddress, Template }
import lila.game.{ AnonCookie, Pov }
import lila.oauth.{ AccessToken, OAuthScope }
import lila.setup.ApiConfig
import lila.socket.Socket.SocketVersion
import lila.user.{ User => UserModel }
final class Challenge(
env: Env,
apiC: Api
) extends LilaController(env) {
def api = env.challenge.api
  // Authenticated XHR endpoint: all challenges involving the current user, as JSON.
  def all =
    Auth { implicit ctx => me =>
      XhrOrRedirectHome {
        api allFor me.id map env.challenge.jsonView.apply map JsonOk
      }
    }
  // OAuth endpoint (Challenge:Read scope): incoming and outgoing challenges of
  // the token owner, serialized with their direction.
  def apiList =
    ScopedBody(_.Challenge.Read) { implicit req => me =>
      implicit val lang = reqLang
      api allFor me.id map { all =>
        JsonOk(
          Json.obj(
            "in" -> all.in.map(env.challenge.jsonView.apply(lila.challenge.Direction.In.some)),
            "out" -> all.out.map(env.challenge.jsonView.apply(lila.challenge.Direction.Out.some))
          )
        )
      }
    }
  // Public challenge page; the color path segment is accepted for URL shape only
  // and deliberately ignored (hence the @nowarn-ed unused parameter).
  def show(id: String, @nowarn("cat=unused") _color: Option[String]) =
    Open { implicit ctx =>
      showId(id)
    }
  // Looks up the challenge by id and renders it; 404 when it does not exist.
  protected[controllers] def showId(id: String)(implicit
      ctx: Context
  ): Fu[Result] =
    OptionFuResult(api byId id)(showChallenge(_))
  // Renders a challenge for its owner (the "mine" page, optionally with a form
  // error) or for the invitee/spectator, as HTML or JSON depending on the client.
  // Also ensures a session cookie, and — for anonymous challengers — an anon
  // cookie tying the browser to its side of the future game.
  protected[controllers] def showChallenge(
      c: ChallengeModel,
      error: Option[String] = None,
      justCreated: Boolean = false
  )(implicit
      ctx: Context
  ): Fu[Result] =
    env.challenge version c.id flatMap { version =>
      // justCreated short-circuits isMine: right after creation the session cookie
      // may not be established yet.
      val mine = justCreated || isMine(c)
      import lila.challenge.Direction
      val direction: Option[Direction] =
        if (mine) Direction.Out.some
        else if (isForMe(c, ctx.me)) Direction.In.some
        else none
      val json = env.challenge.jsonView.show(c, version, direction)
      negotiate(
        html =
          if (mine) fuccess {
            error match {
              case Some(e) => BadRequest(html.challenge.mine(c, json, e.some))
              case None => Ok(html.challenge.mine(c, json, none))
            }
          }
          else
            (c.challengerUserId ?? env.user.repo.named) map { user =>
              Ok(html.challenge.theirs(c, json, user, get("color") flatMap chess.Color.fromName))
            },
        api = _ => Ok(json).fuccess
      ) flatMap withChallengeAnonCookie(mine && c.challengerIsAnon, c, owner = true)
    } map env.lilaCookie.ensure(ctx.req)
  // Whether the request comes from the challenge's creator: matched by session id
  // for anonymous challengers, by user id for registered ones. Open challenges
  // have no owner from this method's point of view.
  private def isMine(challenge: ChallengeModel)(implicit ctx: Context) =
    challenge.challenger match {
      case lila.challenge.Challenge.Challenger.Anonymous(secret) => HTTPRequest sid ctx.req contains secret
      case lila.challenge.Challenge.Challenger.Registered(userId, _) => ctx.userId contains userId
      case lila.challenge.Challenge.Challenger.Open => false
    }
  // Whether `me` may act as the recipient: either the challenge is open (no dest)
  // or destined to me — and in no case created by me.
  private def isForMe(challenge: ChallengeModel, me: Option[UserModel]) =
    challenge.destUserId.fold(true)(dest => me.exists(_ is dest)) &&
      !challenge.challengerUserId.??(orig => me.exists(_ is orig))
  // Web endpoint: the recipient accepts the challenge (optionally choosing a color).
  // On success, redirects (or returns round JSON) to the created game; on failure,
  // falls back to the game watcher page / a JSON error.
  def accept(id: String, color: Option[String]) =
    Open { implicit ctx =>
      OptionFuResult(api byId id) { c =>
        val cc = color flatMap chess.Color.fromName
        isForMe(c, ctx.me) ?? api
          .accept(c, ctx.me, HTTPRequest sid ctx.req, cc)
          .flatMap {
            case Validated.Valid(Some(pov)) =>
              negotiate(
                html = Redirect(routes.Round.watcher(pov.gameId, cc.fold("white")(_.name))).fuccess,
                api = apiVersion => env.api.roundApi.player(pov, none, apiVersion) map { Ok(_) }
              ) flatMap withChallengeAnonCookie(ctx.isAnon, c, owner = false)
            case invalid =>
              negotiate(
                html = Redirect(routes.Round.watcher(c.id, cc.fold("white")(_.name))).fuccess,
                api = _ =>
                  notFoundJson(invalid match {
                    case Validated.Invalid(err) => err
                    case _ => "The challenge has already been accepted"
                  })
              )
          }
      }
    }
  // OAuth/bot endpoint: accept a challenge by id. When the id is unknown or the
  // challenge was already accepted, it may be a rematch offer id, so fall back to
  // the bot rematch-accept flow.
  def apiAccept(id: String) =
    Scoped(_.Challenge.Write, _.Bot.Play, _.Board.Play) { _ => me =>
      def tryRematch =
        env.bot.player.rematchAccept(id, me) flatMap {
          case true => jsonOkResult.fuccess
          case _ => notFoundJson()
        }
      api.byId(id) flatMap {
        _.filter(isForMe(_, me.some)) match {
          case None => tryRematch
          case Some(c) if c.accepted => tryRematch
          case Some(c) =>
            api.accept(c, me.some, none) map {
              _.fold(BadRequest(_), _ => jsonOkResult)
            }
        }
      }
    }
  // When `cond` holds, attaches the anon-game cookie identifying the anonymous
  // player's side of the created game (challenger side if `owner`, else the
  // acceptor side), so the browser is later recognized as that player.
  private def withChallengeAnonCookie(cond: Boolean, c: ChallengeModel, owner: Boolean)(
      res: Result
  )(implicit ctx: Context): Fu[Result] =
    cond ?? {
      env.game.gameRepo.game(c.id).map {
        _ map { game =>
          env.lilaCookie.cookie(
            AnonCookie.name,
            game.player(if (owner) c.finalColor else !c.finalColor).id,
            maxAge = AnonCookie.maxAge.some,
            httpOnly = false.some
          )
        }
      }
    } map { cookieOption =>
      cookieOption.fold(res) { res.withCookies(_) }
    }
  // Web endpoint: the recipient declines the challenge, with an optional reason
  // from the form body (falling back to the default reason on a bad form).
  def decline(id: String) =
    AuthBody { implicit ctx => _ =>
      OptionFuResult(api byId id) { c =>
        implicit val req = ctx.body
        isForMe(c, ctx.me) ??
          api.decline(
            c,
            env.challenge.forms.decline
              .bindFromRequest()
              .fold(_ => ChallengeModel.DeclineReason.default, _.realReason)
          )
      }
    }
// Decline the challenge `id` over the API. When no active challenge is
// addressed to the token holder, fall back to declining a bot rematch offer
// with the same id. Unlike the website `decline`, a malformed decline form
// is reported as a JSON form error instead of using the default reason.
def apiDecline(id: String) =
  ScopedBody(_.Challenge.Write, _.Bot.Play, _.Board.Play) { implicit req => me =>
    implicit val lang = reqLang
    api.activeByIdFor(id, me) flatMap {
      case None =>
        env.bot.player.rematchDecline(id, me) flatMap {
          case true => jsonOkResult.fuccess
          case _ => notFoundJson()
        }
      case Some(c) =>
        env.challenge.forms.decline
          .bindFromRequest()
          .fold(
            newJsonFormError,
            data => api.decline(c, data.realReason) inject jsonOkResult
          )
    }
  }
// Cancel one of my own challenges from the website.
// Challenges created by other users cannot be cancelled here.
def cancel(id: String) =
  Open { implicit ctx =>
    OptionFuResult(api.byId(id)) { challenge =>
      if (!isMine(challenge)) notFound
      else api.cancel(challenge)
    }
  }
// Cancel/abort by id over the API. Resolution order:
// 1. a challenge created by the token holder -> cancel it;
// 2. a challenge addressed to the token holder -> decline it (default reason);
// 3. otherwise, treat `id` as a game id involving the token holder:
//    - game still abortable -> abort it as my side;
//    - game already playable -> force-abort for both sides, but only when the
//      opponent's own token is supplied via `opponentToken` and authorizes it.
def apiCancel(id: String) =
  Scoped(_.Challenge.Write, _.Bot.Play, _.Board.Play) { req => me =>
    api.activeByIdBy(id, me) flatMap {
      case Some(c) => api.cancel(c) inject jsonOkResult
      case None =>
        api.activeByIdFor(id, me) flatMap {
          case Some(c) => api.decline(c, ChallengeModel.DeclineReason.default) inject jsonOkResult
          case None =>
            import lila.hub.actorApi.map.Tell
            import lila.hub.actorApi.round.Abort
            import lila.round.actorApi.round.AbortForce
            // Load the game from my point of view, upgraded to the live
            // (round proxy) version when one exists.
            env.game.gameRepo game id dmap {
              _ flatMap { Pov.ofUserId(_, me.id) }
            } flatMap {
              _ ?? { p => env.round.proxyRepo.upgradeIfPresent(p) dmap some }
            } flatMap {
              case Some(pov) if pov.game.abortable =>
                lila.common.Bus.publish(Tell(id, Abort(pov.playerId)), "roundSocket")
                jsonOkResult.fuccess
              case Some(pov) if pov.game.playable =>
                get("opponentToken", req).map(Bearer.apply) match {
                  case None => BadRequest(jsonError("The game can no longer be aborted")).fuccess
                  case Some(bearer) =>
                    // The opponent's token must carry challenge:write and
                    // belong to the actual opponent of this game.
                    env.oAuth.server.auth(bearer, List(OAuthScope.Challenge.Write)) map {
                      case Right(OAuthScope.Scoped(op, _)) if pov.opponent.isUser(op) =>
                        lila.common.Bus.publish(Tell(id, AbortForce), "roundSocket")
                        jsonOkResult
                      case Right(_) => BadRequest(jsonError("Not the opponent token"))
                      case Left(err) => BadRequest(jsonError(err.message))
                    }
                }
              case _ => notFoundJson()
            }
        }
    }
  }
// Start the clocks of game `id`. Requires BOTH players' OAuth tokens
// (`token1` and `token2` query parameters), each with the challenge:write
// scope; the two authenticated users must be the players of the game.
def apiStartClocks(id: String) =
  Action.async { req =>
    import cats.implicits._
    val scopes = List(OAuthScope.Challenge.Write)
    // `mapN` only runs authBoth when both tokens are present; otherwise the
    // `??` default result applies.
    (get("token1", req) map Bearer.apply, get("token2", req) map Bearer.apply).mapN {
      env.oAuth.server.authBoth(scopes)
    } ?? {
      _ flatMap {
        case Left(e) => handleScopedFail(scopes, e)
        case Right((u1, u2)) =>
          env.game.gameRepo game id flatMap {
            _ ?? { g =>
              // Prefer the live (round proxy) version of the game, and keep it
              // only if the two authenticated users are its players.
              env.round.proxyRepo.upgradeIfPresent(g) dmap some dmap
                (_.filter(_.hasUserIds(u1.id, u2.id)))
            }
          } map {
            _ ?? { game =>
              env.round.tellRound(game.id, lila.round.actorApi.round.StartClock)
              jsonOkResult
            }
          }
      }
    }
  }
// Per-IP limit on challenge creation: 500 challenges per 10 minutes.
private val ChallengeIpRateLimit = new lila.memo.RateLimit[IpAddress](
  500,
  10.minute,
  key = "challenge.create.ip"
)
// Per-IP limit for challenges created by bot accounts: 400 (80 * 5) per day.
// Non-bot requests are charged cost 0 against this limiter (see apiCreate).
private val BotChallengeIpRateLimit = new lila.memo.RateLimit[IpAddress](
  80 * 5,
  1.day,
  key = "challenge.bot.create.ip"
)
// Per-user composite limit on challenge creation: a short burst window
// ("fast": 25 per minute) and a long-term window ("slow": 200 per day).
private val ChallengeUserRateLimit = lila.memo.RateLimit.composite[lila.user.User.ID](
  key = "challenge.create.user"
)(
  ("fast", 5 * 5, 1.minute),
  ("slow", 40 * 5, 1.day)
)
// Redirect one of my challenges to a specific user ("challenge a friend"):
// bind a username from the form, resolve it, check that the user may be
// challenged at this perf, then set them as the challenge destination.
// A form error silently returns funit; an unknown username redirects back
// to the challenge page.
def toFriend(id: String) =
  AuthBody { implicit ctx => _ =>
    import play.api.data._
    import play.api.data.Forms._
    implicit def req = ctx.body
    OptionFuResult(api byId id) { c =>
      if (isMine(c))
        Form(
          single(
            "username" -> lila.user.UserForm.historicalUsernameField
          )
        ).bindFromRequest()
          .fold(
            _ => funit,
            username =>
              ChallengeIpRateLimit(ctx.ip) {
                env.user.repo named username flatMap {
                  case None => Redirect(routes.Challenge.show(c.id)).fuccess
                  case Some(dest) =>
                    env.challenge.granter(ctx.me, dest, c.perfType.some) flatMap {
                      case Some(denied) =>
                        // The destination user cannot be challenged: show why.
                        showChallenge(c, lila.challenge.ChallengeDenied.translated(denied).some)
                      case None => api.setDestUser(c, dest) inject Redirect(routes.Challenge.show(c.id))
                    }
                }
              }(rateLimitedFu)
          )
      else notFound
    }
  }
// Create a challenge against `userId` over the API.
// Guarded by three rate limiters (per-IP, per-bot-IP, per-user) with a cost
// depending on the destination: API hogs pay 0, unknown users cost 2, bots 1,
// regular users 5. When `acceptByToken` is supplied, the challenge is
// immediately accepted on behalf of the destination user.
def apiCreate(userId: String) =
  ScopedBody(_.Challenge.Write, _.Bot.Play, _.Board.Play) { implicit req => me =>
    implicit val lang = reqLang
    // One cannot challenge oneself.
    !me.is(userId) ?? env.setup.forms.api
      .user(me)
      .bindFromRequest()
      .fold(
        newJsonFormError,
        config =>
          ChallengeIpRateLimit(HTTPRequest ipAddress req, cost = if (me.isApiHog) 0 else 1) {
            env.user.repo enabledById userId.toLowerCase flatMap { destUser =>
              val cost = destUser match {
                case _ if me.isApiHog => 0
                case None => 2
                case Some(dest) if dest.isBot => 1
                case _ => 5
              }
              // The bot-IP limiter only charges requests made by bot accounts.
              BotChallengeIpRateLimit(HTTPRequest ipAddress req, cost = if (me.isBot) cost else 0) {
                ChallengeUserRateLimit(me.id, cost = cost) {
                  val challenge = makeOauthChallenge(config, me, destUser)
                  (destUser, config.acceptByToken) match {
                    case (Some(dest), Some(strToken)) =>
                      // Instant pairing: accept with the destination's own token.
                      apiChallengeAccept(dest, challenge, strToken)(me, config.message)
                    case _ =>
                      destUser ?? { env.challenge.granter(me.some, _, config.perfType) } flatMap {
                        case Some(denied) =>
                          BadRequest(jsonError(lila.challenge.ChallengeDenied.translated(denied))).fuccess
                        case _ =>
                          env.challenge.api create challenge map {
                            case true =>
                              val json = env.challenge.jsonView
                                .show(challenge, SocketVersion(0), lila.challenge.Direction.Out.some)
                              if (config.keepAliveStream)
                                // Stream the challenge as ndjson and keep the
                                // connection alive while it is pending.
                                apiC.sourceToNdJsonOption(
                                  apiC.addKeepAlive(env.challenge.keepAliveStream(challenge, json))
                                )
                              else JsonOk(json)
                            case false =>
                              BadRequest(jsonError("Challenge not created"))
                          }
                      } map (_ as JSON)
                  }
                }(rateLimitedFu)
              }(rateLimitedFu)
            }
          }(rateLimitedFu)
      )
  }
// Build a challenge from an API config, issued by `orig` and optionally
// addressed to `dest` (None for an open destination).
private def makeOauthChallenge(config: ApiConfig, orig: UserModel, dest: Option[UserModel]) = {
  import lila.challenge.Challenge._
  // A real-time clock takes precedence over correspondence days;
  // with neither, the challenge has unlimited time.
  val timeControl = (config.clock, config.days) match {
    case (Some(clock), _)   => TimeControl.Clock(clock)
    case (None, Some(days)) => TimeControl.Correspondence(days)
    case (None, None)       => TimeControl.Unlimited
  }
  lila.challenge.Challenge.make(
    variant = config.variant,
    initialFen = config.position,
    timeControl = timeControl,
    mode = config.mode,
    color = config.color.name,
    challenger = ChallengeModel.toRegistered(config.variant, timeControl)(orig),
    destUser = dest,
    rematchOf = none
  )
}
// Immediately accept `challenge` for `dest`, authenticating the destination
// user with their own OAuth token (`strToken`, challenge:write scope).
// On success, send the pairing message on behalf of `managedBy` and return
// the created game as JSON (including its watcher URL).
private def apiChallengeAccept(
    dest: UserModel,
    challenge: lila.challenge.Challenge,
    strToken: String
)(managedBy: lila.user.User, message: Option[Template]) =
  env.oAuth.server.auth(
    Bearer(strToken),
    List(lila.oauth.OAuthScope.Challenge.Write)
  ) flatMap {
    _.fold(
      err => BadRequest(jsonError(err.message)).fuccess,
      scoped =>
        // The supplied token must belong to the challenged user.
        if (scoped.user is dest) env.challenge.api.oauthAccept(dest, challenge) flatMap {
          case Validated.Valid(g) =>
            env.challenge.msg.onApiPair(challenge)(managedBy, message) inject Ok(
              Json.obj(
                "game" -> {
                  env.game.jsonView(g, challenge.initialFen) ++ Json.obj(
                    "url" -> s"${env.net.baseUrl}${routes.Round.watcher(g.id, "white")}"
                  )
                }
              )
            )
          case Validated.Invalid(err) => BadRequest(jsonError(err)).fuccess
        }
        else BadRequest(jsonError("dest and accept user don't match")).fuccess
    )
  }
// Create an "open" challenge: no destination user and no authentication
// required; anyone may take either side via the returned white/black URLs.
def openCreate =
  Action.async { implicit req =>
    implicit val lang = reqLang
    env.setup.forms.api.open
      .bindFromRequest()
      .fold(
        err => BadRequest(apiFormError(err)).fuccess,
        config =>
          ChallengeIpRateLimit(HTTPRequest ipAddress req) {
            import lila.challenge.Challenge._
            val challenge = lila.challenge.Challenge
              .make(
                variant = config.variant,
                initialFen = config.position,
                // Open challenges support real-time clocks or unlimited time
                // only (no correspondence days).
                timeControl =
                  config.clock.fold[TimeControl](TimeControl.Unlimited)(TimeControl.Clock.apply),
                mode = chess.Mode(config.rated),
                color = "random",
                challenger = Challenger.Open,
                destUser = none,
                rematchOf = none,
                name = config.name
              )
            (env.challenge.api create challenge) map {
              case true =>
                JsonOk(
                  env.challenge.jsonView.show(challenge, SocketVersion(0), none) ++ Json.obj(
                    "urlWhite" -> s"${env.net.baseUrl}/${challenge.id}?color=white",
                    "urlBlack" -> s"${env.net.baseUrl}/${challenge.id}?color=black"
                  )
                )
              case false =>
                BadRequest(jsonError("Challenge not created"))
            }
          }(rateLimitedFu).dmap(_ as JSON)
      )
  }
// Offer a rematch of game `gameId` to my opponent in that game, after
// checking that the opponent can still be challenged by me at this perf.
def rematchOf(gameId: String) =
  Auth { implicit ctx => me =>
    OptionFuResult(env.game.gameRepo game gameId) { g =>
      // Resolve my opponent's user id from the game, then load their account.
      Pov.opponentOfUserId(g, me.id).flatMap(_.userId) ?? env.user.repo.byId flatMap {
        _ ?? { opponent =>
          env.challenge.granter(me.some, opponent, g.perfType) flatMap {
            case Some(d) =>
              BadRequest(jsonError {
                lila.challenge.ChallengeDenied translated d
              }).fuccess
            case _ =>
              api.sendRematchOf(g, me) map {
                case true => jsonOkResult
                case _ => BadRequest(jsonError("Sorry, couldn't create the rematch."))
              }
          }
        }
      }
    }
  }
}
| luanlv/lila | app/controllers/Challenge.scala | Scala | mit | 17,431 |
package avrohugger
package types
// Configuration ADTs: each sealed trait below enumerates the possible Scala
// representations that can be generated for one Avro schema construct.

// Avro `fixed` types
sealed trait AvroScalaFixedType extends Product with Serializable
case object ScalaCaseClassWrapper extends AvroScalaFixedType
case object ScalaCaseClassWrapperWithSchema extends AvroScalaFixedType
// Avro `record` types
sealed trait AvroScalaRecordType extends Product with Serializable
case object ScalaCaseClass extends AvroScalaRecordType
case object ScalaCaseClassWithSchema extends AvroScalaRecordType
// Avro `enum` types
sealed trait AvroScalaEnumType extends Product with Serializable
case object ScalaEnumeration extends AvroScalaEnumType
case object JavaEnum extends AvroScalaEnumType
case object ScalaCaseObjectEnum extends AvroScalaEnumType
case object EnumAsScalaString extends AvroScalaEnumType
// Avro `union` types
sealed trait AvroScalaUnionType extends Product with Serializable
case object OptionalShapelessCoproduct extends AvroScalaUnionType
case object OptionShapelessCoproduct extends AvroScalaUnionType
case object OptionEitherShapelessCoproduct extends AvroScalaUnionType
// Avro `array` types
sealed trait AvroScalaArrayType extends Product with Serializable
case object ScalaArray extends AvroScalaArrayType
case object ScalaList extends AvroScalaArrayType
case object ScalaSeq extends AvroScalaArrayType
case object ScalaVector extends AvroScalaArrayType
// Avro `map` types
sealed trait AvroScalaMapType extends Product with Serializable
case object ScalaMap extends AvroScalaMapType
// Avro protocols
sealed trait AvroScalaProtocolType extends Product with Serializable
case object ScalaADT extends AvroScalaProtocolType
case object NoTypeGenerated extends AvroScalaProtocolType | julianpeeters/avrohugger | avrohugger-core/src/main/scala/types/ComplexAvroScalaTypes.scala | Scala | apache-2.0 | 1,581 |
/*
* Copyright (C) 2014 - 2016 Softwaremill <http://softwaremill.com>
* Copyright (C) 2016 - 2019 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.kafka.scaladsl
import akka.annotation.ApiMayChange
import akka.kafka.ConsumerMessage.Committable
import akka.kafka.ProducerMessage._
import akka.kafka.internal.{CommittingProducerSinkStage, DefaultProducerStage}
import akka.kafka.{CommitterSettings, ConsumerMessage, ProducerSettings}
import akka.stream.ActorAttributes
import akka.stream.scaladsl.{Flow, FlowWithContext, Keep, Sink}
import akka.{Done, NotUsed}
import org.apache.kafka.clients.producer.ProducerRecord
import scala.concurrent.Future
/**
 * Akka Stream connector for publishing messages to Kafka topics.
 */
object Producer {

  /**
   * Create a sink for publishing records to Kafka topics.
   *
   * The [[org.apache.kafka.clients.producer.ProducerRecord Kafka ProducerRecord]] contains the topic name to which the record is being sent, an optional
   * partition number, and an optional key and value.
   */
  def plainSink[K, V](settings: ProducerSettings[K, V]): Sink[ProducerRecord[K, V], Future[Done]] =
    Flow[ProducerRecord[K, V]]
      .map(Message(_, NotUsed))
      .via(flexiFlow(settings))
      .toMat(Sink.ignore)(Keep.right)

  /**
   * Create a sink for publishing records to Kafka topics.
   *
   * The [[org.apache.kafka.clients.producer.ProducerRecord Kafka ProducerRecord]] contains the topic name to which the record is being sent, an optional
   * partition number, and an optional key and value.
   *
   * Supports sharing a Kafka Producer instance.
   */
  @deprecated(
    "Pass in external or shared producer using ProducerSettings.withProducerFactory or ProducerSettings.withProducer",
    "2.0.0"
  )
  def plainSink[K, V](
      settings: ProducerSettings[K, V],
      producer: org.apache.kafka.clients.producer.Producer[K, V]
  ): Sink[ProducerRecord[K, V], Future[Done]] =
    plainSink(settings.withProducer(producer))

  /**
   * Create a sink that is aware of the [[ConsumerMessage.Committable committable offset]]
   * from a [[Consumer.committableSource]]. It will commit the consumer offset when the message has
   * been published successfully to the topic.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, but commits the offset
   *
   * Note that there is a risk that something fails after publishing but before
   * committing, so it is "at-least once delivery" semantics.
   */
  @deprecated("use `committableSink(ProducerSettings, CommitterSettings)` instead", "2.0.0")
  def committableSink[K, V](
      settings: ProducerSettings[K, V]
  ): Sink[Envelope[K, V, ConsumerMessage.Committable], Future[Done]] =
    flexiFlow[K, V, ConsumerMessage.Committable](settings)
      .mapAsync(settings.parallelism)(_.passThrough.commitInternal())
      .toMat(Sink.ignore)(Keep.right)

  /**
   * Create a sink that is aware of the [[ConsumerMessage.Committable committable offset]]
   * from a [[Consumer.committableSource]]. It will commit the consumer offset when the message has
   * been published successfully to the topic.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, but commits the offset
   *
   *
   * Note that there is always a risk that something fails after publishing but before
   * committing, so it is "at-least once delivery" semantics.
   *
   * Supports sharing a Kafka Producer instance.
   */
  @deprecated("use `committableSink(ProducerSettings, CommitterSettings)` instead", "2.0.0")
  def committableSink[K, V](
      settings: ProducerSettings[K, V],
      producer: org.apache.kafka.clients.producer.Producer[K, V]
  ): Sink[Envelope[K, V, ConsumerMessage.Committable], Future[Done]] =
    committableSink(settings.withProducer(producer))

  /**
   * Create a sink that is aware of the [[ConsumerMessage.Committable committable offset]]
   * from a [[Consumer.committableSource]]. The offsets are batched and committed regularly.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, but commits the offset
   *
   * Note that there is a risk that something fails after publishing but before
   * committing, so it is "at-least once delivery" semantics.
   */
  def committableSink[K, V](
      producerSettings: ProducerSettings[K, V],
      committerSettings: CommitterSettings
  ): Sink[Envelope[K, V, ConsumerMessage.Committable], Future[Done]] =
    Sink.fromGraph(new CommittingProducerSinkStage(producerSettings, committerSettings))

  /**
   * Create a sink that is aware of the [[ConsumerMessage.Committable committable offset]] passed as
   * context from a [[Consumer.sourceWithOffsetContext]]. The offsets are batched and committed regularly.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, but commits the offset
   *
   * Note that there is a risk that something fails after publishing but before
   * committing, so it is "at-least once delivery" semantics.
   */
  @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/880")
  def committableSinkWithOffsetContext[K, V](
      producerSettings: ProducerSettings[K, V],
      committerSettings: CommitterSettings
  ): Sink[(Envelope[K, V, _], Committable), Future[Done]] =
    committableSink(producerSettings, committerSettings)
      .contramap {
        // Move the committable from the context position into the envelope's
        // pass-through so the committing sink can see it.
        case (env, offset) =>
          env.withPassThrough(offset)
      }

  /**
   * Create a flow to publish records to Kafka topics and then pass it on.
   *
   * The records must be wrapped in a [[akka.kafka.ProducerMessage.Message Message]] and continue in the stream as [[akka.kafka.ProducerMessage.Result Result]].
   *
   * The messages support the possibility to pass through arbitrary data, which can for example be a [[ConsumerMessage.CommittableOffset CommittableOffset]]
   * or [[ConsumerMessage.CommittableOffsetBatch CommittableOffsetBatch]] that can
   * be committed later in the flow.
   */
  @deprecated("prefer flexiFlow over this flow implementation", "0.21")
  def flow[K, V, PassThrough](
      settings: ProducerSettings[K, V]
  ): Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed] = {
    val flow = Flow
      .fromGraph(
        new DefaultProducerStage[K, V, PassThrough, Message[K, V, PassThrough], Result[K, V, PassThrough]](
          settings
        )
      )
      .mapAsync(settings.parallelism)(identity)
    flowWithDispatcher(settings, flow)
  }

  /**
   * Create a flow to conditionally publish records to Kafka topics and then pass it on.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and continues in the stream as [[akka.kafka.ProducerMessage.Result Result]]
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and continues in the stream as [[akka.kafka.ProducerMessage.MultiResult MultiResult]]
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, and continues in the stream as [[akka.kafka.ProducerMessage.PassThroughResult PassThroughResult]]
   *
   * The messages support the possibility to pass through arbitrary data, which can for example be a [[ConsumerMessage.CommittableOffset CommittableOffset]]
   * or [[ConsumerMessage.CommittableOffsetBatch CommittableOffsetBatch]] that can
   * be committed later in the flow.
   */
  def flexiFlow[K, V, PassThrough](
      settings: ProducerSettings[K, V]
  ): Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed] = {
    val flow = Flow
      .fromGraph(
        new DefaultProducerStage[K, V, PassThrough, Envelope[K, V, PassThrough], Results[K, V, PassThrough]](
          settings
        )
      )
      .mapAsync(settings.parallelism)(identity)
    flowWithDispatcherEnvelope(settings, flow)
  }

  /**
   * API MAY CHANGE
   *
   * Create a flow to conditionally publish records to Kafka topics and then pass it on.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and continues in the stream as [[akka.kafka.ProducerMessage.Result Result]]
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and continues in the stream as [[akka.kafka.ProducerMessage.MultiResult MultiResult]]
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, and continues in the stream as [[akka.kafka.ProducerMessage.PassThroughResult PassThroughResult]]
   *
   * This flow is intended to be used with Akka's [flow with context](https://doc.akka.io/docs/akka/current/stream/operators/Flow/asFlowWithContext.html).
   *
   * @tparam C the flow context type
   */
  @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/880")
  def flowWithContext[K, V, C](
      settings: ProducerSettings[K, V]
  ): FlowWithContext[Envelope[K, V, NotUsed], C, Results[K, V, C], C, NotUsed] =
    flexiFlow[K, V, C](settings)
      .asFlowWithContext[Envelope[K, V, NotUsed], C, C]({
        // Carry the context through the producer stage as the pass-through.
        case (env, c) => env.withPassThrough(c)
      })(res => res.passThrough)

  /**
   * Create a flow to publish records to Kafka topics and then pass it on.
   *
   * The records must be wrapped in a [[akka.kafka.ProducerMessage.Message Message]] and continue in the stream as [[akka.kafka.ProducerMessage.Result Result]].
   *
   * The messages support the possibility to pass through arbitrary data, which can for example be a [[ConsumerMessage.CommittableOffset CommittableOffset]]
   * or [[ConsumerMessage.CommittableOffsetBatch CommittableOffsetBatch]] that can
   * be committed later in the flow.
   *
   * Supports sharing a Kafka Producer instance.
   */
  @deprecated("prefer flexiFlow over this flow implementation", "0.21")
  def flow[K, V, PassThrough](
      settings: ProducerSettings[K, V],
      producer: org.apache.kafka.clients.producer.Producer[K, V]
  ): Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed] =
    flow(settings.withProducer(producer))

  /**
   * Create a flow to conditionally publish records to Kafka topics and then pass it on.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and continues in the stream as [[akka.kafka.ProducerMessage.Result Result]]
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and continues in the stream as [[akka.kafka.ProducerMessage.MultiResult MultiResult]]
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, and continues in the stream as [[akka.kafka.ProducerMessage.PassThroughResult PassThroughResult]]
   *
   * The messages support the possibility to pass through arbitrary data, which can for example be a [[ConsumerMessage.CommittableOffset CommittableOffset]]
   * or [[ConsumerMessage.CommittableOffsetBatch CommittableOffsetBatch]] that can
   * be committed later in the flow.
   *
   * Supports sharing a Kafka Producer instance.
   */
  @deprecated(
    "Pass in external or shared producer using ProducerSettings.withProducerFactory or ProducerSettings.withProducer",
    "2.0.0"
  )
  def flexiFlow[K, V, PassThrough](
      settings: ProducerSettings[K, V],
      producer: org.apache.kafka.clients.producer.Producer[K, V]
  ): Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed] =
    flexiFlow(settings.withProducer(producer))

  /**
   * API MAY CHANGE
   *
   * Create a flow to conditionally publish records to Kafka topics and then pass it on.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and continues in the stream as [[akka.kafka.ProducerMessage.Result Result]]
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and continues in the stream as [[akka.kafka.ProducerMessage.MultiResult MultiResult]]
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, and continues in the stream as [[akka.kafka.ProducerMessage.PassThroughResult PassThroughResult]]
   *
   * This flow is intended to be used with Akka's [flow with context](https://doc.akka.io/docs/akka/current/stream/operators/Flow/asFlowWithContext.html).
   *
   * Supports sharing a Kafka Producer instance.
   *
   * @tparam C the flow context type
   */
  @deprecated(
    "Pass in external or shared producer using ProducerSettings.withProducerFactory or ProducerSettings.withProducer",
    "2.0.0"
  )
  @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/880")
  def flowWithContext[K, V, C](
      settings: ProducerSettings[K, V],
      producer: org.apache.kafka.clients.producer.Producer[K, V]
  ): FlowWithContext[Envelope[K, V, NotUsed], C, Results[K, V, C], C, NotUsed] =
    flowWithContext(settings.withProducer(producer))

  /** Run the producer flow on the dispatcher configured in the settings, when one is set. */
  private def flowWithDispatcher[PassThrough, V, K](
      settings: ProducerSettings[K, V],
      flow: Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed]
  ) =
    if (settings.dispatcher.isEmpty) flow
    else flow.withAttributes(ActorAttributes.dispatcher(settings.dispatcher))

  /** Envelope-typed variant of [[flowWithDispatcher]]. */
  private def flowWithDispatcherEnvelope[PassThrough, V, K](
      settings: ProducerSettings[K, V],
      flow: Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed]
  ) =
    if (settings.dispatcher.isEmpty) flow
    else flow.withAttributes(ActorAttributes.dispatcher(settings.dispatcher))
}
| softwaremill/reactive-kafka | core/src/main/scala/akka/kafka/scaladsl/Producer.scala | Scala | apache-2.0 | 15,265 |
package org.jetbrains.plugins.hocon.misc
import com.intellij.lang.ASTNode
import com.intellij.lang.folding.{FoldingBuilder, FoldingDescriptor}
import com.intellij.openapi.editor.Document
import com.intellij.psi.tree.TokenSet
/**
 * Code folding for HOCON files: folds objects, arrays and multiline strings.
 */
class HoconFoldingBuilder extends FoldingBuilder {

  import org.jetbrains.plugins.hocon.lexer.HoconTokenType._
  import org.jetbrains.plugins.hocon.parser.HoconElementType._

  // Node types that produce a folding region; built once instead of on every call.
  private val foldableTypes = TokenSet.create(Object, Array, MultilineString)

  /** Collects a folding region for every non-empty foldable node in the tree. */
  def buildFoldRegions(node: ASTNode, document: Document): Array[FoldingDescriptor] = {
    // Depth-first iterator over `root` and all of its descendants.
    def nodesIterator(root: ASTNode): Iterator[ASTNode] =
      Iterator(root) ++ Iterator.iterate(root.getFirstChildNode)(_.getTreeNext).takeWhile(_ != null).flatMap(nodesIterator)

    nodesIterator(node).collect {
      case n if foldableTypes.contains(n.getElementType) && n.getTextLength > 0 =>
        new FoldingDescriptor(n, n.getTextRange)
    }.toArray
  }

  /** Nothing is folded by default. */
  def isCollapsedByDefault(node: ASTNode): Boolean = false

  /** Placeholder text shown while a region is folded. */
  def getPlaceholderText(node: ASTNode): String = node.getElementType match {
    case Object => "{...}"
    case Array => "[...]"
    case MultilineString => "\"\"\"...\"\"\""
    // Defensive default: only the three foldable types are expected here, but a
    // MatchError thrown out of the folding pass would break the editor pass.
    case _ => "..."
  }
}
| katejim/intellij-scala | src/org/jetbrains/plugins/hocon/misc/HoconFoldingBuilder.scala | Scala | apache-2.0 | 1,161 |
package org.jetbrains.plugins.scala
package lang.psi.api
import com.intellij.psi.{PsiElementVisitor, PsiFile}
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScCaseClause, ScPattern}
import org.jetbrains.plugins.scala.lang.psi.api.base.types._
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScConstructor, ScLiteral, ScModifierList, ScReferenceElement}
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.expr.xml.{ScXmlEndTag, ScXmlStartTag}
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScClassParameter, ScParameter, ScParameters}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportExpr
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
import org.jetbrains.plugins.scala.lang.scaladoc.psi.api._
import scala.collection.mutable
/**
* @author ilyas
* @author Alexander Podkhalyuzin
*/
/**
 * A [[ScalaElementVisitor]] that also traverses children.
 *
 * References need special care: `visitReferenceExpression` (and
 * `visitTypeProjection`) dispatch the same node to two hooks
 * (`visitReference` plus `visitExpression`/`visitTypeElement`), each of which
 * may end up in `visitElement`. The stack below remembers the reference
 * currently being dispatched so that the first `visitElement` call for it
 * consumes a marker instead of descending, keeping the node's children from
 * being traversed twice.
 */
class ScalaRecursiveElementVisitor extends ScalaElementVisitor {
  private val referencesStack = new mutable.Stack[ScReferenceElement]()

  override def visitElement(element: ScalaPsiElement) {
    if (referencesStack.nonEmpty && referencesStack.top == element) {
      // First visitElement for the reference on top of the stack: replace it
      // with a null marker and skip descending into its children this time.
      referencesStack.pop()
      referencesStack.push(null)
    } else {
      element.acceptChildren(this)
    }
  }

  override def visitReferenceExpression(ref: ScReferenceExpression) {
    try {
      referencesStack.push(ref)
      visitReference(ref)
      visitExpression(ref)
    } finally {
      referencesStack.pop()
    }
  }

  override def visitTypeProjection(proj: ScTypeProjection) {
    try {
      referencesStack.push(proj)
      visitReference(proj)
      visitTypeElement(proj)
    } finally {
      referencesStack.pop()
    }
  }
}
class ScalaElementVisitor extends PsiElementVisitor {
def visitTypeAliasDefinition(alias: ScTypeAliasDefinition) {visitTypeAlias(alias)}
def visitTypeAlias(alias: ScTypeAlias) {visitElement(alias)}
def visitTypeAliasDeclaration(alias: ScTypeAliasDeclaration) {visitTypeAlias(alias)}
def visitParameters(parameters: ScParameters) {visitElement(parameters)}
def visitModifierList(modifierList: ScModifierList) {visitElement(modifierList)}
def visitConstructor(constr: ScConstructor) {visitElement(constr)}
def visitFunctionDefinition(fun: ScFunctionDefinition) {visitFunction(fun)}
def visitFunctionDeclaration(fun: ScFunctionDeclaration) {visitFunction(fun)}
def visitMacroDefinition(fun: ScMacroDefinition) {visitFunction(fun)}
def visitCatchBlock(c: ScCatchBlock) {visitElement(c)}
override def visitFile(file: PsiFile) {
file match {
case sf: ScalaFile => visitElement(sf)
case _ => visitElement(file)
}
}
def visitElement(element: ScalaPsiElement) {super.visitElement(element)}
//Override also visitReferenceExpression! and visitTypeProjection!
def visitReference(ref: ScReferenceElement) { visitElement(ref) }
def visitParameter(parameter: ScParameter) {visitElement(parameter)}
def visitClassParameter(parameter: ScClassParameter) {visitParameter(parameter)}
def visitPatternDefinition(pat: ScPatternDefinition) { visitValue(pat) }
def visitValueDeclaration(v: ScValueDeclaration) {visitValue(v)}
def visitVariableDefinition(varr: ScVariableDefinition) { visitVariable(varr) }
def visitVariableDeclaration(varr: ScVariableDeclaration) {visitVariable(varr) }
def visitVariable(varr: ScVariable) {visitElement(varr)}
def visitValue(v: ScValue) {visitElement(v)}
def visitCaseClause(cc: ScCaseClause) { visitElement(cc) }
def visitPattern(pat: ScPattern) { visitElement(pat) }
def visitEnumerator(enum: ScEnumerator) { visitElement(enum) }
def visitGenerator(gen: ScGenerator) { visitElement(gen) }
def visitGuard(guard: ScGuard) { visitElement(guard) }
def visitFunction(fun: ScFunction) { visitElement(fun) }
def visitTypeDefinition(typedef: ScTypeDefinition) { visitElement(typedef) }
def visitImportExpr(expr: ScImportExpr) {visitElement(expr)}
def visitSelfInvocation(self: ScSelfInvocation) {visitElement(self)}
def visitAnnotation(annotation: ScAnnotation) {visitElement(annotation)}
// Expressions
//Override also visitReferenceExpression!
def visitExpression(expr: ScExpression) { visitElement(expr) }
def visitThisReference(t: ScThisReference) {visitExpression(t)}
def visitSuperReference(t: ScSuperReference) {visitExpression(t)}
def visitReferenceExpression(ref: ScReferenceExpression) {}
def visitPostfixExpression(p: ScPostfixExpr) { visitExpression(p) }
def visitPrefixExpression(p: ScPrefixExpr) { visitExpression(p) }
def visitIfStatement(stmt: ScIfStmt) { visitExpression(stmt) }
def visitLiteral(l: ScLiteral) {visitExpression(l)}
def visitAssignmentStatement(stmt: ScAssignStmt) { visitExpression(stmt) }
def visitMethodCallExpression(call: ScMethodCall) { visitExpression(call) }
def visitGenericCallExpression(call: ScGenericCall) { visitExpression(call) }
def visitInfixExpression(infix: ScInfixExpr) {visitExpression(infix)}
def visitWhileStatement(ws: ScWhileStmt) { visitExpression(ws) }
def visitReturnStatement(ret: ScReturnStmt) { visitExpression(ret) }
def visitMatchStatement(ms: ScMatchStmt) { visitExpression(ms) }
def visitForExpression(expr: ScForStatement) { visitExpression(expr) }
def visitDoStatement(stmt: ScDoStmt) { visitExpression(stmt) }
def visitFunctionExpression(stmt: ScFunctionExpr) { visitExpression(stmt) }
def visitThrowExpression(throwStmt: ScThrowStmt) { visitExpression(throwStmt) }
def visitTryExpression(tryStmt: ScTryStmt) { visitExpression(tryStmt) }
def visitExprInParent(expr: ScParenthesisedExpr) {visitExpression(expr)}
def visitNewTemplateDefinition(templ: ScNewTemplateDefinition) {visitExpression(templ)}
def visitTypedStmt(stmt: ScTypedStmt) {visitExpression(stmt)}
def visitTupleExpr(tuple: ScTuple) {visitExpression(tuple)}
def visitBlockExpression(block: ScBlockExpr) {visitExpression(block)}
def visitUnderscoreExpression(under: ScUnderscoreSection) {visitExpression(under)}
def visitConstrBlock(constr: ScConstrBlock) {visitBlockExpression(constr)}
//type elements
//Override also visitTypeProjection!
//If you use it for typed pattern, override visitTypeParam too.
// Type-element visitors all funnel into visitTypeElement. Explicit `: Unit =`
// replaces the deprecated procedure syntax (removed in Scala 3).
def visitTypeElement(te: ScTypeElement): Unit = visitElement(te)
def visitSimpleTypeElement(simple: ScSimpleTypeElement): Unit = visitTypeElement(simple)
def visitWildcardTypeElement(wildcard: ScWildcardTypeElement): Unit = visitTypeElement(wildcard)
// Intentionally a no-op: type projections are NOT forwarded to visitTypeElement,
// hence the "Override also visitTypeProjection!" note above.
def visitTypeProjection(proj: ScTypeProjection): Unit = {}
def visitTupleTypeElement(tuple: ScTupleTypeElement): Unit = visitTypeElement(tuple)
def visitParenthesisedTypeElement(parenthesised: ScParenthesisedTypeElement): Unit = visitTypeElement(parenthesised)
def visitParameterizedTypeElement(parameterized: ScParameterizedTypeElement): Unit = visitTypeElement(parameterized)
def visitInfixTypeElement(infix: ScInfixTypeElement): Unit = visitTypeElement(infix)
def visitFunctionalTypeElement(fun: ScFunctionalTypeElement): Unit = visitTypeElement(fun)
def visitExistentialTypeElement(exist: ScExistentialTypeElement): Unit = visitTypeElement(exist)
def visitCompoundTypeElement(compound: ScCompoundTypeElement): Unit = visitTypeElement(compound)
def visitAnnotTypeElement(annot: ScAnnotTypeElement): Unit = visitTypeElement(annot)
def visitTypeVariableTypeElement(tvar: ScTypeVariableTypeElement): Unit = visitTypeElement(tvar)
//scaladoc
// Scaladoc visitors delegate to the generic comment/element hooks.
// Explicit `: Unit =` replaces the deprecated procedure syntax (removed in Scala 3).
def visitDocComment(s: ScDocComment): Unit = visitComment(s)
def visitScaladocElement(s: ScalaPsiElement): Unit = visitElement(s)
def visitWikiSyntax(s: ScDocSyntaxElement): Unit = visitElement(s)
def visitInlinedTag(s: ScDocInlinedTag): Unit = visitElement(s)
def visitTag(s: ScDocTag): Unit = visitElement(s)
//xml
def visitXmlStartTag(s: ScXmlStartTag): Unit = visitElement(s)
def visitXmlEndTag(s: ScXmlEndTag): Unit = visitElement(s)
} | SergeevPavel/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/api/ScalaElementVisitor.scala | Scala | apache-2.0 | 7,957 |
package models.misc
import java.io._
import org.junit.runner._
import org.specs2.mutable._
import org.specs2.runner._
import play.api.libs.json._
// Specification for the EmailAddress value type: value equality, JSON
// encoding, and Java serialization.
@RunWith(classOf[JUnitRunner])
class EmailAddressSpec extends Specification {

  import EmailAddressSpec._

  "Email Address" should {

    "be able to compare with another Email Address" >> {
      // email1 and email2 wrap the same address string, email3 a different one.
      email1 mustEqual email2
      email1 mustNotEqual email3
      // `self` exposes the raw underlying address string.
      email1.self mustEqual "zepeng.li@gmail.com"
      // JSON form is the bare address string, not a wrapper object.
      Json.toJson(email1) mustEqual JsString("zepeng.li@gmail.com")
    }

    "be able to serialized" >> {
      // Writing through ObjectOutputStream proves the type is Serializable;
      // a non-serializable value would throw before the assertion.
      val bos = new ByteArrayOutputStream()
      val oos = new ObjectOutputStream(bos)
      oos.writeObject(email1)
      bos.toByteArray.length mustNotEqual 0
    }
  }
}
// Shared fixtures: email1 and email2 are equal by value, email3 differs.
object EmailAddressSpec {

  val email1 = EmailAddress("zepeng.li@gmail.com")
  val email2 = EmailAddress("zepeng.li@gmail.com")
  val email3 = EmailAddress("zepeng.li@qq.com")
} | lizepeng/app.io | modules/models/test/models/misc/EmailAddressSpec.scala | Scala | apache-2.0 | 916 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import java.sql.{Connection, SQLException, Types}
import java.util
import java.util.Locale
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.SQLConfHelper
import org.apache.spark.sql.catalyst.analysis.{IndexAlreadyExistsException, NonEmptyNamespaceException, NoSuchIndexException}
import org.apache.spark.sql.connector.expressions.NamedReference
import org.apache.spark.sql.connector.expressions.aggregate.{AggregateFunc, GeneralAggregateFunc}
import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JdbcUtils}
import org.apache.spark.sql.execution.datasources.v2.TableSampleInfo
import org.apache.spark.sql.types._
private object PostgresDialect extends JdbcDialect with SQLConfHelper {
// Claims any JDBC URL whose scheme is jdbc:postgresql (case-insensitive).
override def canHandle(url: String): Boolean =
  url.toLowerCase(Locale.ROOT).startsWith("jdbc:postgresql")
/** Compiles an aggregate for push-down to PostgreSQL.
 *
 * Tries the generic JDBC translation first; otherwise handles the statistical
 * aggregates PostgreSQL supports natively. The SQL text uses `f.name()`
 * directly, so each branch renders exactly the function it matched.
 */
override def compileAggregate(aggFunction: AggregateFunc): Option[String] = {
  // "DISTINCT " prefix when the aggregate is marked distinct, else nothing.
  def distinctPrefix(f: GeneralAggregateFunc): String =
    if (f.isDistinct) "DISTINCT " else ""

  // Renders an aggregate over a single input column, e.g. VAR_POP(DISTINCT c).
  def unary(f: GeneralAggregateFunc): String = {
    assert(f.inputs().length == 1)
    s"${f.name()}(${distinctPrefix(f)}${f.inputs().head})"
  }

  // Renders an aggregate over two input columns, e.g. CORR(c1, c2).
  def binary(f: GeneralAggregateFunc): String = {
    assert(f.inputs().length == 2)
    s"${f.name()}(${distinctPrefix(f)}${f.inputs().head}, ${f.inputs().last})"
  }

  super.compileAggregate(aggFunction).orElse(
    aggFunction match {
      case f: GeneralAggregateFunc
          if f.name() == "VAR_POP" || f.name() == "VAR_SAMP" ||
            f.name() == "STDDEV_POP" || f.name() == "STDDEV_SAMP" =>
        Some(unary(f))
      case f: GeneralAggregateFunc
          if f.name() == "COVAR_POP" || f.name() == "COVAR_SAMP" || f.name() == "CORR" =>
        Some(binary(f))
      case _ => None
    }
  )
}
/** Maps a PostgreSQL JDBC type to a Catalyst type, overriding the defaults
 * where PostgreSQL needs special handling. Returns None to fall back to the
 * common JDBC mapping.
 */
override def getCatalystType(
    sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] =
  sqlType match {
    case Types.REAL => Some(FloatType)
    case Types.SMALLINT => Some(ShortType)
    case Types.BIT if typeName == "bit" && size != 1 => Some(BinaryType)
    case Types.DOUBLE if typeName == "money" =>
      // money type seems to be broken but one workaround is to handle it as string.
      // See SPARK-34333 and https://github.com/pgjdbc/pgjdbc/issues/100
      Some(StringType)
    case Types.OTHER => Some(StringType)
    case Types.ARRAY =>
      val scale = md.build.getLong("scale").toInt
      // postgres array type names start with underscore
      toCatalystType(typeName.drop(1), size, scale).map(ArrayType(_))
    case _ => None
  }
// Maps a PostgreSQL type name (as reported by the driver, here the element
// type of an array) to a Catalyst DataType. Returns None for unsupported types.
private def toCatalystType(
    typeName: String,
    precision: Int,
    scale: Int): Option[DataType] = typeName match {
  case "bool" => Some(BooleanType)
  case "bit" => Some(BinaryType)
  case "int2" => Some(ShortType)
  case "int4" => Some(IntegerType)
  case "int8" | "oid" => Some(LongType)
  case "float4" => Some(FloatType)
  case "float8" => Some(DoubleType)
  // All text-like, geometric and misc types are represented as strings.
  case "text" | "varchar" | "char" | "bpchar" | "cidr" | "inet" | "json" | "jsonb" | "uuid" |
      "xml" | "tsvector" | "tsquery" | "macaddr" | "macaddr8" | "txid_snapshot" | "point" |
      "line" | "lseg" | "box" | "path" | "polygon" | "circle" | "pg_lsn" | "varbit" |
      "interval" | "pg_snapshot" =>
    Some(StringType)
  case "bytea" => Some(BinaryType)
  case "timestamp" | "timestamptz" | "time" | "timetz" => Some(TimestampType)
  case "date" => Some(DateType)
  case "numeric" | "decimal" if precision > 0 => Some(DecimalType.bounded(precision, scale))
  case "numeric" | "decimal" =>
    // SPARK-26538: handle numeric without explicit precision and scale.
    Some(DecimalType.SYSTEM_DEFAULT)
  case "money" =>
    // money[] type seems to be broken and difficult to handle.
    // So this method returns None for now.
    // See SPARK-34333 and https://github.com/pgjdbc/pgjdbc/issues/1405
    None
  case _ => None
}
// Spark SQL -> PostgreSQL type mapping used when writing data.
override def getJDBCType(dt: DataType): Option[JdbcType] = dt match {
  case StringType => Some(JdbcType("TEXT", Types.CHAR))
  case BinaryType => Some(JdbcType("BYTEA", Types.BINARY))
  case BooleanType => Some(JdbcType("BOOLEAN", Types.BOOLEAN))
  case FloatType => Some(JdbcType("FLOAT4", Types.FLOAT))
  case DoubleType => Some(JdbcType("FLOAT8", Types.DOUBLE))
  // PostgreSQL has no 1-byte integer type, so bytes widen to SMALLINT.
  case ShortType | ByteType => Some(JdbcType("SMALLINT", Types.SMALLINT))
  case t: DecimalType => Some(
    JdbcType(s"NUMERIC(${t.precision},${t.scale})", java.sql.Types.NUMERIC))
  case ArrayType(et, _) if et.isInstanceOf[AtomicType] =>
    // Arrays of atomic element types use PostgreSQL's `elem[]` syntax,
    // resolving the element type via this dialect first, then the common map.
    getJDBCType(et).map(_.databaseTypeDefinition)
      .orElse(JdbcUtils.getCommonJDBCType(et).map(_.databaseTypeDefinition))
      .map(typeName => JdbcType(s"$typeName[]", java.sql.Types.ARRAY))
  case _ => None
}

// Cheap existence probe: LIMIT 1 avoids scanning the table.
override def getTableExistsQuery(table: String): String = {
  s"SELECT 1 FROM $table LIMIT 1"
}

// Truncation never cascades by default; see getTruncateQuery's ONLY keyword.
override def isCascadingTruncateTable(): Option[Boolean] = Some(false)
/**
 * The SQL query used to truncate a table. For Postgres, the default behaviour is to
 * also truncate any descendant tables. As this is a (possibly unwanted) side-effect,
 * the Postgres dialect adds 'ONLY' to truncate only the table in question.
 *
 * @param table The table to truncate
 * @param cascade Whether or not to cascade the truncation. Default value is the value of
 *                isCascadingTruncateTable(). Cascading a truncation will truncate tables
 *                with a foreign key relationship to the target table. However, it will not
 *                truncate tables with an inheritance relationship to the target table, as
 *                the truncate query always includes "ONLY" to prevent this behaviour.
 * @return The SQL query to use for truncating a table
 */
override def getTruncateQuery(
    table: String,
    cascade: Option[Boolean] = isCascadingTruncateTable): String =
  if (cascade.contains(true)) s"TRUNCATE TABLE ONLY $table CASCADE"
  else s"TRUNCATE TABLE ONLY $table"
// Disables autocommit before a fetch when a positive fetch size is requested,
// so the PostgreSQL driver uses a cursor instead of buffering the full result.
override def beforeFetch(connection: Connection, properties: Map[String, String]): Unit = {
  super.beforeFetch(connection, properties)

  // According to the postgres jdbc documentation we need to be in autocommit=false if we actually
  // want to have fetchsize be non 0 (all the rows). This allows us to not have to cache all the
  // rows inside the driver when fetching.
  //
  // See: https://jdbc.postgresql.org/documentation/head/query.html#query-with-cursor
  //
  if (properties.getOrElse(JDBCOptions.JDBC_BATCH_FETCH_SIZE, "0").toInt > 0) {
    connection.setAutoCommit(false)
  }
}
// ALTER COLUMN ... TYPE, per https://www.postgresql.org/docs/12/sql-altertable.html
override def getUpdateColumnTypeQuery(
    tableName: String,
    columnName: String,
    newDataType: String): String = {
  val quotedColumn = quoteIdentifier(columnName)
  s"ALTER TABLE $tableName ALTER COLUMN $quotedColumn TYPE $newDataType"
}
// ALTER COLUMN ... [SET|DROP] NOT NULL, per
// https://www.postgresql.org/docs/12/sql-altertable.html
override def getUpdateColumnNullabilityQuery(
    tableName: String,
    columnName: String,
    isNullable: Boolean): String = {
  val action = if (isNullable) "DROP NOT NULL" else "SET NOT NULL"
  "ALTER TABLE " + tableName + " ALTER COLUMN " + quoteIdentifier(columnName) + " " + action
}
// PostgreSQL supports TABLESAMPLE, so Spark may push sampling down.
override def supportsTableSample: Boolean = true

// Renders the TABLESAMPLE clause; the percentage is derived from the
// sampling bounds, and REPEATABLE pins the seed for determinism.
override def getTableSample(sample: TableSampleInfo): String = {
  // hard-coded to BERNOULLI for now because Spark doesn't have a way to specify sample
  // method name
  s"TABLESAMPLE BERNOULLI" +
    s" (${(sample.upperBound - sample.lowerBound) * 100}) REPEATABLE (${sample.seed})"
}
// CREATE INDEX syntax
// https://www.postgresql.org/docs/14/sql-createindex.html
override def createIndex(
    indexName: String,
    tableName: String,
    columns: Array[NamedReference],
    columnsProperties: util.Map[NamedReference, util.Map[String, String]],
    properties: util.Map[String, String]): String = {
  // Quote every indexed column; only the top-level field name is used.
  val quotedColumns = columns.map(col => quoteIdentifier(col.fieldNames.head))
  val (indexType, indexPropertyList) = JdbcUtils.processIndexProperties(properties, "postgresql")
  // Optional WITH (...) storage-parameter clause.
  val indexProperties =
    if (indexPropertyList.isEmpty) ""
    else indexPropertyList.mkString("WITH (", ", ", ")")
  s"CREATE INDEX ${quoteIdentifier(indexName)} ON ${quoteIdentifier(tableName)}" +
    s" $indexType (${quotedColumns.mkString(", ")}) $indexProperties"
}
// SHOW INDEX syntax
// https://www.postgresql.org/docs/14/view-pg-indexes.html
// Checks the pg_indexes catalog view for a matching (table, index) pair.
// NOTE(review): tableName/indexName are interpolated into the SQL literal
// unescaped — presumably validated upstream; confirm before exposing to
// untrusted input.
override def indexExists(
    conn: Connection,
    indexName: String,
    tableName: String,
    options: JDBCOptions): Boolean = {
  val sql = s"SELECT * FROM pg_indexes WHERE tablename = '$tableName' AND" +
    s" indexname = '$indexName'"
  JdbcUtils.checkIfIndexExists(conn, sql, options)
}

// DROP INDEX syntax
// https://www.postgresql.org/docs/14/sql-dropindex.html
// PostgreSQL drops an index by name alone, so tableName is unused here.
override def dropIndex(indexName: String, tableName: String): String = {
  s"DROP INDEX ${quoteIdentifier(indexName)}"
}

// Maps PostgreSQL SQLSTATE codes to Spark's typed exceptions; everything
// else is delegated to the generic classifier.
override def classifyException(message: String, e: Throwable): AnalysisException = {
  e match {
    case sqlException: SQLException =>
      sqlException.getSQLState match {
        // https://www.postgresql.org/docs/14/errcodes-appendix.html
        case "42P07" => throw new IndexAlreadyExistsException(message, cause = Some(e))
        case "42704" => throw new NoSuchIndexException(message, cause = Some(e))
        case "2BP01" => throw NonEmptyNamespaceException(message, cause = Some(e))
        case _ => super.classifyException(message, e)
      }
    case unsupported: UnsupportedOperationException => throw unsupported
    case _ => super.classifyException(message, e)
  }
}
}
| shaneknapp/spark | sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala | Scala | apache-2.0 | 11,690 |
package com.sksamuel.elastic4s.requests.security.roles
// A per-index privilege grant for an Elasticsearch security role.
// NOTE(review): the snake_case field names look chosen to match the wire JSON
// of the Elasticsearch roles API — confirm against the serializer.
case class IndexPrivileges(
  names: Seq[String],                              // target index names/patterns
  privileges: Seq[String],                         // privilege identifiers granted on them
  field_security: Option[FieldSecurity] = None,    // optional field-level restrictions
  query: Option[String] = None,                    // optional document-level query restriction
  allow_restricted_indices: Option[Boolean] = None // opt-in for restricted indices
)

// Field-level security: `grant` lists allowed fields, `except` carves out
// exceptions from that grant. Both default to empty.
case class FieldSecurity(grant: Seq[String] = Seq(), except: Seq[String] = Seq())
package coursier.core
import java.util.concurrent.ConcurrentHashMap
import coursier.util.Artifact
import scala.annotation.tailrec
import scala.collection.compat._
import scala.collection.compat.immutable.LazyList
import scala.collection.mutable
import scala.jdk.CollectionConverters._
import dataclass.data
object Resolution {
type ModuleVersion = (Module, String)
/** Whether `profile` is active: an explicit user activation wins, then
  * `activeByDefault` (only when true), then the profile's own activation
  * rules evaluated against the properties / OS / JDK.
  */
def profileIsActive(
  profile: Profile,
  properties: Map[String, String],
  osInfo: Activation.Os,
  jdkVersion: Option[Version],
  userActivations: Option[Map[String, Boolean]]
): Boolean = {

  val fromUserOrDefault = userActivations match {
    case Some(activations) =>
      // explicit per-profile user decision, if any
      activations.get(profile.id)
    case None =>
      // keep activeByDefault only when it is Some(true)
      profile.activeByDefault
        .filter(identity)
  }

  // by-name: only evaluated when no user/default decision exists
  def fromActivation = profile.activation.isActive(properties, osInfo, jdkVersion)

  fromUserOrDefault.getOrElse(fromActivation)
}

/** Get the active profiles of `project`, using the current properties `properties`, and
  * `profileActivations` stating if a profile is active.
  */
def profiles(
  project: Project,
  properties: Map[String, String],
  osInfo: Activation.Os,
  jdkVersion: Option[Version],
  userActivations: Option[Map[String, Boolean]]
): Seq[Profile] =
  project.profiles.filter { profile =>
    profileIsActive(
      profile,
      properties,
      osInfo,
      jdkVersion,
      userActivations
    )
  }

private object DepMgmt {
  // Identity of a dependency-management entry: org, name, type, classifier.
  type Key = (Organization, ModuleName, Type, Classifier)

  def key(dep: Dependency): Key =
    (
      dep.module.organization,
      dep.module.name,
      // empty type normalizes to jar
      if (dep.attributes.`type`.isEmpty) Type.jar else dep.attributes.`type`,
      dep.attributes.classifier
    )

  // Adds `deps` to `dict`; existing keys win (first occurrence kept).
  // Uses a mutable builder locally for speed, then freezes to an
  // immutable Map.
  def addSeq(
    dict: Map[Key, (Configuration, Dependency)],
    deps: Seq[(Configuration, Dependency)]
  ): Map[Key, (Configuration, Dependency)] =
    if (deps.isEmpty)
      dict
    else {
      val b = new mutable.HashMap[Key, (Configuration, Dependency)]()
      b.sizeHint(dict.size + deps.length)
      b ++= dict
      val it = deps.iterator
      while (it.hasNext) {
        val elem = it.next()
        val key0 = key(elem._2)
        if (!b.contains(key0))
          b += ((key0, elem))
      }
      b.result()
        .toMap // meh
    }
}

// Concatenates dependency lists, dropping duplicates per DepMgmt.key.
// Because of the foldRight, later lists are processed first: their entries
// win on key collisions and come first in the result.
def addDependencies(
  deps: Seq[Seq[(Configuration, Dependency)]]
): Seq[(Configuration, Dependency)] = {

  val (_, res) =
    deps.foldRight(Set.empty[DepMgmt.Key], Seq.empty[(Configuration, Dependency)]) {
      case (deps0, (set, acc)) =>
        val deps = deps0.filter {
          case (_, dep) =>
            !set(DepMgmt.key(dep))
        }

        (set ++ deps.map { case (_, dep) => DepMgmt.key(dep) }, acc ++ deps)
    }

  res
}
/** Whether `s` contains at least one `${...}` property reference, i.e. an
  * occurrence of `"${"` with a closing `'}'` anywhere after it.
  */
def hasProps(s: String): Boolean = {
  val start = s.indexOf("${")
  // indexOf(ch, from) returns -1 when `from` is past the end, so a trailing
  // "${" is correctly rejected.
  start >= 0 && s.indexOf('}', start + 2) >= 0
}
// Non-trimming convenience overload.
def substituteProps(s: String, properties: Map[String, String]): String =
  substituteProps(s, properties, trim = false)

/** Replaces `${name}` references in `s` with their value from `properties`.
  * Unknown references are left verbatim; a lone `$` is copied as-is. When
  * `trim` is set, substituted values are whitespace-trimmed.
  */
def substituteProps(s: String, properties: Map[String, String], trim: Boolean): String = {
  // this method is called _very_ often, hence the micro-optimization
  var b: java.lang.StringBuilder = null
  var idx = 0
  while (idx < s.length) {
    // scan up to the next '$'
    var dolIdx = idx
    while (dolIdx < s.length && s.charAt(dolIdx) != '$')
      dolIdx += 1
    if (idx != 0 || dolIdx < s.length) {
      // allocate the builder lazily, only once copying becomes necessary
      if (b == null)
        b = new java.lang.StringBuilder(s.length + 32)
      b.append(s, idx, dolIdx)
    }
    idx = dolIdx

    // try to parse a "${name}" reference starting at dolIdx
    var name: String = null
    if (dolIdx < s.length - 2 && s.charAt(dolIdx + 1) == '{') {
      var endIdx = dolIdx + 2
      while (endIdx < s.length && s.charAt(endIdx) != '}')
        endIdx += 1
      if (endIdx < s.length) {
        assert(s.charAt(endIdx) == '}')
        name = s.substring(dolIdx + 2, endIdx)
      }
    }

    if (name == null) {
      // bare '$' (or unterminated reference): copy the '$' and move on
      if (idx < s.length) {
        assert(s.charAt(idx) == '$')
        b.append('$')
        idx += 1
      }
    }
    else {
      idx = idx + 2 + name.length + 1 // == endIdx + 1
      properties.get(name) match {
        case None =>
          // unknown property: keep the "${name}" text untouched
          b.append(s, dolIdx, idx)
        case Some(v) =>
          val v0 = if (trim) v.trim else v
          b.append(v0)
      }
    }
  }

  // b stays null only when the string contained no '$' at all
  if (b == null)
    s
  else
    b.toString
}

// Applies property substitution to every (config, dependency) pair.
def withProperties(
  dependencies: Seq[(Configuration, Dependency)],
  properties: Map[String, String]
): Seq[(Configuration, Dependency)] =
  dependencies.map(withProperties(_, properties))

/** Substitutes `properties` in `dependencies`.
  */
private def withProperties(
  configDep: (Configuration, Dependency),
  properties: Map[String, String]
): (Configuration, Dependency) = {

  val (config, dep) = configDep

  // versions are trimmed after substitution, other fields are not
  def substituteTrimmedProps(s: String) =
    substituteProps(s, properties, trim = true)
  def substituteProps0(s: String) =
    substituteProps(s, properties, trim = false)

  val dep0 = dep.copy(
    module = dep.module.copy(
      organization = dep.module.organization.map(substituteProps0),
      name = dep.module.name.map(substituteProps0)
    ),
    version = substituteTrimmedProps(dep.version),
    attributes = dep.attributes
      .withType(dep.attributes.`type`.map(substituteProps0))
      .withClassifier(dep.attributes.classifier.map(substituteProps0)),
    configuration = dep.configuration.map(substituteProps0),
    exclusions = dep.exclusions.map {
      case (org, name) =>
        (org.map(substituteProps0), name.map(substituteProps0))
    }
  )

  // FIXME The content of the optional tag may also be a property in
  // the original POM. Maybe not parse it that earlier?

  config.map(substituteProps0) -> dep0
}
/** Merge several dependencies, solving version constraints of duplicated modules.
  *
  * Returns the conflicted dependencies, and the merged others.
  */
def merge(
  dependencies: Seq[Dependency],
  forceVersions: Map[Module, String],
  reconciliation: Option[Module => Reconciliation],
  preserveOrder: Boolean = false
): (Seq[Dependency], Seq[Dependency], Map[Module, String]) = {

  // per-module reconciliation strategy, defaulting to Reconciliation.Default
  def reconcilerByMod(mod: Module): Reconciliation =
    reconciliation match {
      case Some(f) => f(mod)
      case _ => Reconciliation.Default
    }

  val dependencies0 = dependencies.toVector

  // For each module: either Left(deps) when the versions could not be
  // reconciled, or Right(deps) all pinned to the chosen version; plus the
  // selected version, if any.
  val mergedByModVer = dependencies0
    .groupBy(dep => dep.module)
    .map { case (module, deps) =>
      // forced versions may be keyed on an organization wildcard too
      val anyOrgModule = module.withOrganization(Organization("*"))
      val forcedVersionOpt = forceVersions.get(module)
        .orElse(forceVersions.get(anyOrgModule))

      module -> {
        val (versionOpt, updatedDeps) = forcedVersionOpt match {
          case None =>
            if (deps.lengthCompare(1) == 0) (Some(deps.head.version), Right(deps))
            else {
              val versions = deps.map(_.version)
              val reconciler = reconcilerByMod(module)
              // None here means a genuine version conflict
              val versionOpt = reconciler(versions)

              (
                versionOpt,
                versionOpt match {
                  case Some(version) =>
                    Right(deps.map(dep => dep.withVersion(version)))
                  case None =>
                    Left(deps)
                }
              )
            }

          case Some(forcedVersion) =>
            // forced versions bypass reconciliation entirely
            (Some(forcedVersion), Right(deps.map(dep => dep.withVersion(forcedVersion))))
        }

        (updatedDeps, versionOpt)
      }
    }

  val merged =
    if (preserveOrder)
      // keep the first-seen order of modules
      dependencies0
        .map(_.module)
        .distinct
        .map(mergedByModVer(_))
    else
      mergedByModVer
        .values
        .toVector

  (
    // conflicted dependencies (irreconcilable versions)
    merged
      .collect { case (Left(dep), _) => dep }
      .flatten,
    // successfully merged dependencies
    merged
      .collect { case (Right(dep), _) => dep }
      .flatten,
    // selected version per module
    mergedByModVer
      .collect { case (mod, (_, Some(ver))) => mod -> ver }
  )
}

/** Applies `dependencyManagement` to `dependencies`.
  *
  * Fill empty version / scope / exclusions, for dependencies found in `dependencyManagement`.
  */
def depsWithDependencyManagement(
  dependencies: Seq[(Configuration, Dependency)],
  dependencyManagement: Seq[(Configuration, Dependency)]
): Seq[(Configuration, Dependency)] = {

  // See http://maven.apache.org/guides/introduction/introduction-to-dependency-mechanism.html#Dependency_Management

  // built lazily: only needed when a dependency matches a managed key
  lazy val dict = DepMgmt.addSeq(Map.empty, dependencyManagement)

  dependencies.map {
    case (config0, dep0) =>
      var config = config0
      var dep = dep0

      for ((mgmtConfig, mgmtDep) <- dict.get(DepMgmt.key(dep0))) {

        // managed version always overrides a non-empty one
        if (mgmtDep.version.nonEmpty)
          dep = dep.withVersion(mgmtDep.version)

        // managed configuration fills in only a missing one
        if (config.isEmpty)
          config = mgmtConfig

        // FIXME The version and scope/config from dependency management, if any, are substituted
        // no matter what. The same is not done for the exclusions and optionality, for a lack of
        // way of distinguishing empty exclusions from no exclusion section and optional set to
        // false from no optional section in the dependency management for now.

        if (dep.exclusions.isEmpty)
          dep = dep.withExclusions(mgmtDep.exclusions)

        if (mgmtDep.optional)
          dep = dep.withOptional(mgmtDep.optional)
      }

      (config, dep)
  }
}
// Fills in `defaultConfiguration` when the dependency has no explicit one.
private def withDefaultConfig(dep: Dependency, defaultConfiguration: Configuration): Dependency =
  if (dep.configuration.nonEmpty) dep
  else dep.withConfiguration(defaultConfiguration)
/** Filters `dependencies` with `exclusions`.
  *
  * Drops dependencies whose module is excluded, and pushes the merged,
  * minimized exclusion set down into each remaining dependency.
  */
def withExclusions(
  dependencies: Seq[(Configuration, Dependency)],
  exclusions: Set[(Organization, ModuleName)]
): Seq[(Configuration, Dependency)] = {

  val retained = Exclusions(exclusions)

  dependencies.collect {
    case (config, dep) if retained(dep.module.organization, dep.module.name) =>
      config -> dep.withExclusions(Exclusions.minimize(dep.exclusions ++ exclusions))
  }
}
/** Expands `config` to itself plus everything it transitively extends,
  * according to the `configurations` graph. Returns the (possibly remapped)
  * configuration and the full extended set.
  */
def withParentConfigurations(
  config: Configuration,
  configurations: Map[Configuration, Seq[Configuration]]
): (Configuration, Set[Configuration]) = {

  // fixpoint over the extension graph
  @tailrec
  def helper(configs: Set[Configuration], acc: Set[Configuration]): Set[Configuration] =
    if (configs.isEmpty)
      acc
    else if (configs.exists(acc))
      // drop already-visited configurations (guarantees termination on cycles)
      helper(configs -- acc, acc)
    else if (configs.exists(!configurations.contains(_))) {
      // unknown configurations are kept in the result but not expanded
      val (remaining, notFound) = configs.partition(configurations.contains)
      helper(remaining, acc ++ notFound)
    }
    else {
      val extraConfigs = configs.flatMap(configurations)
      helper(extraConfigs, acc ++ configs)
    }

  // "main(fallback)" notation: prefer whichever part the project declares
  val config0 = Parse.withFallbackConfig(config) match {
    case Some((main, fallback)) =>
      if (configurations.contains(main))
        main
      else if (configurations.contains(fallback))
        fallback
      else
        main
    case None => config
  }

  (config0, helper(Set(config0), Set.empty))
}

// For each Maven scope, the set of transitive-dependency scopes to retain —
// a rough embedding of the Maven scope model into Ivy configurations.
private val mavenScopes = {
  val base = Map[Configuration, Set[Configuration]](
    Configuration.compile -> Set(Configuration.compile),
    Configuration.optional -> Set(
      Configuration.compile,
      Configuration.optional,
      Configuration.runtime
    ),
    // provided dependencies are not propagated transitively
    Configuration.provided -> Set(),
    Configuration.runtime -> Set(Configuration.compile, Configuration.runtime),
    Configuration.test -> Set(Configuration.compile, Configuration.runtime, Configuration.test)
  )

  base ++ Seq(
    Configuration.default -> base(Configuration.runtime)
  )
}
// The properties of `project` plus the implicit Maven-style ones
// (pom.*, project.*, parent coordinates, packaging), with one round of
// cross-substitution applied.
def projectProperties(project: Project): Seq[(String, String)] = {

  val packaging = project.packagingOpt.getOrElse(Type.jar)

  // FIXME The extra properties should only be added for Maven projects, not Ivy ones
  val properties0 = project.properties ++ Seq(
    // some artifacts seem to require these (e.g. org.jmock:jmock-legacy:2.5.1)
    // although I can find no mention of them in any manual / spec
    "pom.groupId" -> project.module.organization.value,
    "pom.artifactId" -> project.module.name.value,
    "pom.version" -> project.actualVersion,
    // Required by some dependencies too (org.apache.directory.shared:shared-ldap:0.9.19 in particular)
    "groupId" -> project.module.organization.value,
    "artifactId" -> project.module.name.value,
    "version" -> project.actualVersion,
    "project.groupId" -> project.module.organization.value,
    "project.artifactId" -> project.module.name.value,
    "project.version" -> project.actualVersion,
    "project.packaging" -> packaging.value
  ) ++ project.parent.toSeq.flatMap {
    case (parModule, parVersion) =>
      Seq(
        "project.parent.groupId" -> parModule.organization.value,
        "project.parent.artifactId" -> parModule.name.value,
        "project.parent.version" -> parVersion,
        "parent.groupId" -> parModule.organization.value,
        "parent.artifactId" -> parModule.name.value,
        "parent.version" -> parVersion
      )
  }

  // loose attempt at substituting properties in each others in properties0
  // doesn't try to go recursive for now, but that could be made so if necessary
  substitute(properties0)
}

// One pass of ${...} substitution of properties into each other, repeated
// until a pass changes nothing. Only values with no references at all are
// used as substitution sources.
private def substitute(properties0: Seq[(String, String)]): Seq[(String, String)] = {

  // values that are already fully resolved (contain no ${...})
  val done = properties0
    .iterator
    .collect {
      case kv @ (_, value) if !hasProps(value) =>
        kv
    }
    .toMap

  var didSubstitutions = false

  val res = properties0.map {
    case (k, v) =>
      val res = substituteProps(v, done)
      if (!didSubstitutions)
        didSubstitutions = res != v
      k -> res
  }

  // recurse only while progress is being made
  if (didSubstitutions)
    substitute(res)
  else
    res
}

// Lazy chain of parent projects, nearest first; stops at the first parent
// missing from the cache.
private def parents(
  project: Project,
  projectCache: ((Module, String)) => Option[Project]
): LazyList[Project] =
  project.parent.flatMap(projectCache) match {
    case None => LazyList.empty
    case Some(parent) => parent #:: parents(parent, projectCache)
  }
/** Get the dependencies of `project`, knowing that it came from dependency `from` (that is,
  * `from.module == project.module`).
  *
  * Substitute properties, update scopes, apply exclusions, and get extra parameters from
  * dependency management along the way.
  */
private def finalDependencies(
  from: Dependency,
  project: Project,
  defaultConfiguration: Configuration,
  projectCache: ((Module, String)) => Option[Project]
): Seq[Dependency] = {

  // section numbers in the comments refer to withDependencyManagement

  // parent properties come first so the project's own values override them
  val parentProperties0 = parents(project, projectCache)
    .toVector
    .flatMap(_.properties)

  val project0 = withFinalProperties(
    project.withProperties(parentProperties0 ++ project.properties)
  )
  val properties = project0.properties.toMap

  val (actualConfig, configurations) = withParentConfigurations(
    if (from.configuration.isEmpty) defaultConfiguration else from.configuration,
    project0.configurations
  )

  // Vague attempt at making the Maven scope model fit into the Ivy configuration one

  val keepOpt = mavenScopes.get(actualConfig)

  withExclusions(
    // 2.1 & 2.2
    depsWithDependencyManagement(
      // 1.7
      withProperties(project0.dependencies, properties),
      withProperties(project0.dependencyManagement, properties)
    ),
    from.exclusions
  )
    .flatMap {
      case (config0, dep0) =>
        // Dependencies from Maven verify
        //   dep.configuration.isEmpty
        // and expect dep.configuration to be filled here

        // optionality propagates from the parent dependency
        val dep =
          if (from.optional)
            dep0.withOptional(true)
          else
            dep0

        val config = if (config0.isEmpty) Configuration.compile else config0

        // Ivy path: keep the dependency only when its config is extended
        def default =
          if (configurations(config))
            Seq(dep)
          else
            Nil

        if (dep.configuration.nonEmpty)
          default
        else
          // Maven path: filter through the mapped scope sets
          keepOpt.fold(default) { keep =>
            if (keep(config)) {
              val depConfig =
                if (actualConfig == Configuration.test || actualConfig == Configuration.runtime)
                  Configuration.runtime
                else
                  defaultConfiguration

              Seq(dep.withConfiguration(depConfig))
            }
            else
              Nil
          }
    }
}

/** Default dependency filter used during resolution.
  *
  * Does not follow optional dependencies.
  */
def defaultFilter(dep: Dependency): Boolean =
  !dep.optional

// Same types as sbt, see
// https://github.com/sbt/sbt/blob/47cd001eea8ef42b7c1db9ffdf48bec16b8f733b/main/src/main/scala/sbt/Defaults.scala#L227
// https://github.com/sbt/librarymanagement/blob/bb2c73e183fa52e2fb4b9ae7aca55799f3ff6624/ivy/src/main/scala/sbt/internal/librarymanagement/CustomPomParser.scala#L79
val defaultTypes = Set[Type](
  Type.jar,
  Type.testJar,
  Type.bundle,
  Type.Exotic.mavenPlugin,
  Type.Exotic.eclipsePlugin,
  Type.Exotic.hk2,
  Type.Exotic.orbit,
  Type.Exotic.scalaJar
)
// Pins the core Scala modules of the default organization to version `sv`.
def overrideScalaModule(sv: String): Dependency => Dependency =
  overrideScalaModule(sv, Organization("org.scala-lang"))

// Pins the core Scala modules of `scalaOrg` (compiler/library and friends,
// Scala 2 or 3 names chosen from the binary version) to version `sv`.
def overrideScalaModule(sv: String, scalaOrg: Organization): Dependency => Dependency = {
  val sbv = sv.split('.').take(2).mkString(".")
  val scalaModules =
    if (sbv.startsWith("3"))
      Set(
        ModuleName("scala3-library"),
        ModuleName("scala3-compiler")
      )
    else
      Set(
        ModuleName("scala-library"),
        ModuleName("scala-compiler"),
        ModuleName("scala-reflect"),
        ModuleName("scalap")
      )

  dep =>
    if (dep.module.organization == scalaOrg && scalaModules.contains(dep.module.name))
      dep.withVersion(sv)
    else
      dep
}

/** Replaces the full suffix _2.12.8 with the given Scala version.
  */
def overrideFullSuffix(sv: String): Dependency => Dependency = {
  val sbv = sv.split('.').take(2).mkString(".")

  // Base module name when the current name carries a full cross-version
  // suffix for the same binary version (e.g. "foo_2.12.8" -> "foo"); None
  // when the suffix is absent, malformed, or already matches `sv`.
  def fullCrossVersionBase(module: Module): Option[String] =
    if (module.attributes.isEmpty && !module.name.value.endsWith("_" + sv)) {
      val idx = module.name.value.lastIndexOf("_" + sbv + ".")
      if (idx < 0)
        None
      else {
        val lastPart = module.name.value.substring(idx + 1 + sbv.length + 1)
        // the tail must look like a version (digits, dots, milestone markers)
        if (lastPart.isEmpty || lastPart.exists(c => !"01234566789MRC-.".contains(c)))
          None
        else
          Some(module.name.value.substring(0, idx))
      }
    }
    else
      None

  dep =>
    fullCrossVersionBase(dep.module) match {
      case Some(base) =>
        dep.withModule(dep.module.withName(ModuleName(base + "_" + sv)))
      case None =>
        dep
    }
}

@deprecated("Use overrideScalaModule and overrideFullSuffix instead", "2.0.17")
def forceScalaVersion(sv: String): Dependency => Dependency =
  overrideScalaModule(sv) andThen overrideFullSuffix(sv)

// For a "main(fallback)" configuration, picks whichever part is declared in
// `configs` (main preferred); otherwise leaves the configuration untouched.
private def fallbackConfigIfNecessary(dep: Dependency, configs: Set[Configuration]): Dependency =
  Parse.withFallbackConfig(dep.configuration) match {
    case Some((main, fallback)) =>
      val config0 =
        if (configs(main))
          main
        else if (configs(fallback))
          fallback
        else
          dep.configuration
      dep.withConfiguration(config0)
    case _ =>
      dep
  }

// Re-derives the implicit Maven-style properties after any property change.
private def withFinalProperties(project: Project): Project =
  project.withProperties(projectProperties(project))
}
/** State of a dependency resolution.
*
* Done if method `isDone` returns `true`.
*
* @param conflicts:
* conflicting dependencies
* @param projectCache:
* cache of known projects
* @param errorCache:
* keeps track of the modules whose project definition could not be found
*/
@data class Resolution(
rootDependencies: Seq[Dependency] = Nil,
dependencySet: DependencySet = DependencySet.empty,
forceVersions: Map[Module, String] = Map.empty,
conflicts: Set[Dependency] = Set.empty,
projectCache: Map[Resolution.ModuleVersion, (ArtifactSource, Project)] = Map.empty,
errorCache: Map[Resolution.ModuleVersion, Seq[String]] = Map.empty,
finalDependenciesCache: Map[Dependency, Seq[Dependency]] = Map.empty,
filter: Option[Dependency => Boolean] = None,
reconciliation: Option[Module => Reconciliation] = None,
osInfo: Activation.Os = Activation.Os.empty,
jdkVersion: Option[Version] = None,
userActivations: Option[Map[String, Boolean]] = None,
mapDependencies: Option[Dependency => Dependency] = None,
extraProperties: Seq[(String, String)] = Nil,
forceProperties: Map[String, String] = Map.empty, // FIXME Make that a seq too?
defaultConfiguration: Configuration = Configuration.defaultCompile
) {
// All dependencies seen so far, as a plain set.
lazy val dependencies: Set[Dependency] =
  dependencySet.set

// Cached (lazy val): combines a class-name seed with the fields tuple, so
// repeated hashing of this large structure stays cheap.
override lazy val hashCode: Int = {
  var code = 17 + "coursier.core.Resolution".##
  code = 37 * code + tuple.##
  37 * code
}

def withDependencies(dependencies: Set[Dependency]): Resolution =
  withDependencySet(dependencySet.setValues(dependencies))

// Records module versions whose project definition could not be fetched.
def addToErrorCache(entries: Iterable[(Resolution.ModuleVersion, Seq[String])]): Resolution =
  copyWithCache(
    errorCache = errorCache ++ entries
  )

// Copy that also flushes the in-memory finalDependenciesCache0 into the
// persistent finalDependenciesCache map.
private def copyWithCache(
  rootDependencies: Seq[Dependency] = rootDependencies,
  dependencySet: DependencySet = dependencySet,
  conflicts: Set[Dependency] = conflicts,
  errorCache: Map[Resolution.ModuleVersion, Seq[String]] = errorCache
  // don't allow changing mapDependencies here - that would invalidate finalDependenciesCache
  // don't allow changing projectCache here - use addToProjectCache that takes forceProperties into account
): Resolution =
  withRootDependencies(rootDependencies)
    .withDependencySet(dependencySet)
    .withConflicts(conflicts)
    .withErrorCache(errorCache)
    .withFinalDependenciesCache(finalDependenciesCache ++ finalDependenciesCache0.asScala)

// Adds fetched project definitions. Asserts on duplicates so that already
// cached entries are never silently overwritten.
def addToProjectCache(
  projects: (Resolution.ModuleVersion, (ArtifactSource, Project))*
): Resolution = {

  val duplicates = projects
    .collect {
      case (modVer, _) if projectCache.contains(modVer) =>
        modVer
    }

  assert(
    duplicates.isEmpty,
    s"Projects already added in resolution: ${duplicates.mkString(", ")}"
  )

  withFinalDependenciesCache(finalDependenciesCache ++ finalDependenciesCache0.asScala)
    .withProjectCache {
      projectCache ++ projects.map {
        case (modVer, (s, p)) =>
          // apply extra properties, then forced ones, then dependency management
          val p0 =
            withDependencyManagement(
              p.withProperties(
                extraProperties ++
                  p.properties.filter(kv => !forceProperties.contains(kv._1)) ++
                  forceProperties
              )
            )

          (modVer, (s, p0))
      }
    }
}
import Resolution._
private[core] val finalDependenciesCache0 = new ConcurrentHashMap[Dependency, Seq[Dependency]]
private def finalDependencies0(dep: Dependency): Seq[Dependency] =
if (dep.transitive) {
val deps = finalDependenciesCache.getOrElse(dep, finalDependenciesCache0.get(dep))
if (deps == null)
projectCache.get(dep.moduleVersion) match {
case Some((_, proj)) =>
val res0 = finalDependencies(
dep,
proj,
defaultConfiguration,
k => projectCache.get(k).map(_._2)
).filter(filter getOrElse defaultFilter)
val res = mapDependencies.fold(res0)(res0.map(_))
finalDependenciesCache0.put(dep, res)
res
case None => Nil
}
else
deps
}
else
Nil
def dependenciesOf(dep: Dependency): Seq[Dependency] =
dependenciesOf(dep, withRetainedVersions = false)
def dependenciesOf(dep: Dependency, withRetainedVersions: Boolean): Seq[Dependency] =
dependenciesOf(dep, withRetainedVersions = withRetainedVersions, withFallbackConfig = false)
private def configsOf(dep: Dependency): Set[Configuration] =
projectCache
.get(dep.moduleVersion)
.map(_._2.configurations.keySet)
.getOrElse(Set.empty)
private def updated(
dep: Dependency,
withRetainedVersions: Boolean,
withFallbackConfig: Boolean
): Dependency = {
val dep0 =
if (withRetainedVersions)
dep.withVersion(retainedVersions.getOrElse(dep.module, dep.version))
else
dep
if (withFallbackConfig)
Resolution.fallbackConfigIfNecessary(dep0, configsOf(dep0))
else
dep0
}
def dependenciesOf(
dep: Dependency,
withRetainedVersions: Boolean,
withFallbackConfig: Boolean
): Seq[Dependency] = {
val deps = finalDependencies0(dep)
if (withRetainedVersions || withFallbackConfig)
deps.map(updated(_, withRetainedVersions, withFallbackConfig))
else
deps
}
/** Transitive dependencies of the current dependencies, according to what there currently is in
* cache.
*
* No attempt is made to solve version conflicts here.
*/
lazy val transitiveDependencies: Seq[Dependency] =
(dependencySet.minimizedSet -- conflicts)
.toVector
.flatMap(finalDependencies0)
/** The "next" dependency set, made of the current dependencies and their transitive dependencies,
* trying to solve version conflicts. Transitive dependencies are calculated with the current
* cache.
*
* May contain dependencies added in previous iterations, but no more required. These are
* filtered below, see `newDependencies`.
*
* Returns a tuple made of the conflicting dependencies, all the dependencies, and the retained
* version of each module.
*/
lazy val nextDependenciesAndConflicts: (Seq[Dependency], Seq[Dependency], Map[Module, String]) =
// TODO Provide the modules whose version was forced by dependency overrides too
merge(
rootDependencies.map(withDefaultConfig(_, defaultConfiguration)) ++ transitiveDependencies,
forceVersions,
reconciliation
)
private def updatedRootDependencies =
merge(
rootDependencies.map(withDefaultConfig(_, defaultConfiguration)),
forceVersions,
reconciliation,
preserveOrder = true
)._2
lazy val reconciledVersions: Map[Module, String] =
nextDependenciesAndConflicts._3.map {
case k @ (m, v) =>
m -> projectCache.get(k).fold(v)(_._2.version)
}
def retainedVersions: Map[Module, String] =
nextDependenciesAndConflicts._3
/** The modules we miss some info about.
*/
lazy val missingFromCache: Set[ModuleVersion] = {
val modules = dependencies
.map(_.moduleVersion)
val nextModules = nextDependenciesAndConflicts._2
.map(_.moduleVersion)
(modules ++ nextModules)
.filterNot(mod => projectCache.contains(mod) || errorCache.contains(mod))
}
/** Whether the resolution is done.
*/
lazy val isDone: Boolean = {
def isFixPoint = {
val (nextConflicts, _, _) = nextDependenciesAndConflicts
dependencies == (newDependencies ++ nextConflicts) &&
conflicts == nextConflicts.toSet
}
missingFromCache.isEmpty && isFixPoint
}
private def eraseVersion(dep: Dependency) =
dep.withVersion("")
/** Returns a map giving the dependencies that brought each of the dependency of the "next"
* dependency set.
*
* The versions of all the dependencies returned are erased (emptied).
*/
lazy val reverseDependencies: Map[Dependency, Vector[Dependency]] = {
val (updatedConflicts, updatedDeps, _) = nextDependenciesAndConflicts
val trDepsSeq =
for {
dep <- updatedDeps
trDep <- finalDependencies0(dep)
} yield eraseVersion(trDep) -> eraseVersion(dep)
val knownDeps = (updatedDeps ++ updatedConflicts)
.map(eraseVersion)
.toSet
trDepsSeq
.groupBy(_._1)
.view
.mapValues(_.map(_._2).toVector)
.filterKeys(knownDeps)
.toMap // Eagerly evaluate filterKeys/mapValues
}
/** Returns dependencies from the "next" dependency set, filtering out those that are no more
* required.
*
* The versions of all the dependencies returned are erased (emptied).
*/
lazy val remainingDependencies: Set[Dependency] = {
val rootDependencies0 = rootDependencies
.map(withDefaultConfig(_, defaultConfiguration))
.map(eraseVersion)
.toSet
@tailrec
def helper(
reverseDeps: Map[Dependency, Vector[Dependency]]
): Map[Dependency, Vector[Dependency]] = {
val (toRemove, remaining) = reverseDeps
.partition(kv => kv._2.isEmpty && !rootDependencies0(kv._1))
if (toRemove.isEmpty)
reverseDeps
else
helper(
remaining
.view
.mapValues(broughtBy =>
broughtBy
.filter(x => remaining.contains(x) || rootDependencies0(x))
)
.iterator
.toMap
)
}
val filteredReverseDependencies = helper(reverseDependencies)
rootDependencies0 ++ filteredReverseDependencies.keys
}
/** The final next dependency set, stripped of no more required ones.
*/
lazy val newDependencies: Set[Dependency] = {
val remainingDependencies0 = remainingDependencies
nextDependenciesAndConflicts._2
.filter(dep => remainingDependencies0(eraseVersion(dep)))
.toSet
}
private lazy val nextNoMissingUnsafe: Resolution = {
val (newConflicts, _, _) = nextDependenciesAndConflicts
copyWithCache(
dependencySet = dependencySet.setValues(newDependencies ++ newConflicts),
conflicts = newConflicts.toSet
)
}
/** If no module info is missing, the next state of the resolution, which can be immediately
* calculated. Else, the current resolution.
*/
@tailrec
final def nextIfNoMissing: Resolution = {
val missing = missingFromCache
if (missing.isEmpty) {
val next0 = nextNoMissingUnsafe
if (next0 == this)
this
else
next0.nextIfNoMissing
}
else
this
}
/** Required modules for the dependency management of `project`.
*/
def dependencyManagementRequirements(
project: Project
): Set[ModuleVersion] = {
val needsParent =
project.parent.exists { par =>
val parentFound = projectCache.contains(par) || errorCache.contains(par)
!parentFound
}
if (needsParent)
project.parent.toSet
else {
val parentProperties0 = parents(project, k => projectCache.get(k).map(_._2))
.toVector
.flatMap(_.properties)
// 1.1 (see above)
val approxProperties = parentProperties0.toMap ++ projectProperties(project)
val profiles0 = profiles(
project,
approxProperties,
osInfo,
jdkVersion,
userActivations
)
val profileDependencies = profiles0.flatMap(p => p.dependencies ++ p.dependencyManagement)
val project0 =
project.withProperties(
project.properties ++ profiles0.flatMap(_.properties)
) // belongs to 1.5 & 1.6
val propertiesMap0 = withFinalProperties(
project0.withProperties(parentProperties0 ++ project0.properties)
).properties.toMap
val modules = withProperties(
project0.dependencies ++ project0.dependencyManagement ++ profileDependencies,
propertiesMap0
).collect {
case (Configuration.`import`, dep) => dep.moduleVersion
}
modules.toSet
}
}
/** Missing modules in cache, to get the full list of dependencies of `project`, taking dependency
* management / inheritance into account.
*
* Note that adding the missing modules to the cache may unveil other missing modules, so these
* modules should be added to the cache, and `dependencyManagementMissing` checked again for new
* missing modules.
*/
def dependencyManagementMissing(project: Project): Set[ModuleVersion] = {
@tailrec
def helper(
toCheck: Set[ModuleVersion],
done: Set[ModuleVersion],
missing: Set[ModuleVersion]
): Set[ModuleVersion] =
if (toCheck.isEmpty)
missing
else if (toCheck.exists(done))
helper(toCheck -- done, done, missing)
else if (toCheck.exists(missing))
helper(toCheck -- missing, done, missing)
else if (toCheck.exists(projectCache.contains)) {
val (checking, remaining) = toCheck.partition(projectCache.contains)
val directRequirements = checking
.flatMap(mod => dependencyManagementRequirements(projectCache(mod)._2))
helper(remaining ++ directRequirements, done ++ checking, missing)
}
else if (toCheck.exists(errorCache.contains)) {
val (errored, remaining) = toCheck.partition(errorCache.contains)
helper(remaining, done ++ errored, missing)
}
else
helper(Set.empty, done, missing ++ toCheck)
helper(
dependencyManagementRequirements(project),
Set(project.moduleVersion),
Set.empty
)
}
/** Add dependency management / inheritance related items to `project`, from what's available in
* cache.
*
* It is recommended to have fetched what `dependencyManagementMissing` returned prior to calling
* this.
*/
def withDependencyManagement(project: Project): Project = {
/*
Loosely following what [Maven says](http://maven.apache.org/components/ref/3.3.9/maven-model-builder/):
(thanks to @MasseGuillaume for pointing that doc out)
phase 1
1.1 profile activation: see available activators. Notice that model interpolation hasn't happened yet, then interpolation for file-based activation is limited to ${basedir} (since Maven 3), System properties and request properties
1.2 raw model validation: ModelValidator (javadoc), with its DefaultModelValidator implementation (source)
1.3 model normalization - merge duplicates: ModelNormalizer (javadoc), with its DefaultModelNormalizer implementation (source)
1.4 profile injection: ProfileInjector (javadoc), with its DefaultProfileInjector implementation (source)
1.5 parent resolution until super-pom
1.6 inheritance assembly: InheritanceAssembler (javadoc), with its DefaultInheritanceAssembler implementation (source). Notice that project.url, project.scm.connection, project.scm.developerConnection, project.scm.url and project.distributionManagement.site.url have a special treatment: if not overridden in child, the default value is parent's one with child artifact id appended
1.7 model interpolation (see below)
N/A url normalization: UrlNormalizer (javadoc), with its DefaultUrlNormalizer implementation (source)
phase 2, with optional plugin processing
N/A model path translation: ModelPathTranslator (javadoc), with its DefaultModelPathTranslator implementation (source)
N/A plugin management injection: PluginManagementInjector (javadoc), with its DefaultPluginManagementInjector implementation (source)
N/A (optional) lifecycle bindings injection: LifecycleBindingsInjector (javadoc), with its DefaultLifecycleBindingsInjector implementation (source)
2.1 dependency management import (for dependencies of type pom in the <dependencyManagement> section)
2.2 dependency management injection: DependencyManagementInjector (javadoc), with its DefaultDependencyManagementInjector implementation (source)
2.3 model normalization - inject default values: ModelNormalizer (javadoc), with its DefaultModelNormalizer implementation (source)
N/A (optional) reports configuration: ReportConfigurationExpander (javadoc), with its DefaultReportConfigurationExpander implementation (source)
N/A (optional) reports conversion to decoupled site plugin: ReportingConverter (javadoc), with its DefaultReportingConverter implementation (source)
N/A (optional) plugins configuration: PluginConfigurationExpander (javadoc), with its DefaultPluginConfigurationExpander implementation (source)
2.4 effective model validation: ModelValidator (javadoc), with its DefaultModelValidator implementation (source)
N/A: does not apply here (related to plugins, path of project being built, ...)
*/
// A bit fragile, but seems to work
val parentProperties0 = parents(project, k => projectCache.get(k).map(_._2))
.toVector
.flatMap(_.properties)
// 1.1 (see above)
val approxProperties = parentProperties0.toMap ++ projectProperties(project)
val profiles0 = profiles(
project,
approxProperties,
osInfo,
jdkVersion,
userActivations
)
// 1.2 made from Pom.scala (TODO look at the very details?)
// 1.3 & 1.4 (if only vaguely so)
val project0 =
project.withProperties(
project.properties ++ profiles0.flatMap(_.properties)
) // belongs to 1.5 & 1.6
val propertiesMap0 = withFinalProperties(
project0.withProperties(parentProperties0 ++ project0.properties)
).properties.toMap
val (importDeps, standardDeps) = {
val dependencies0 = addDependencies((project0.dependencies +: profiles0.map(_.dependencies)))
val (importDeps0, standardDeps0) = dependencies0
.map { dep =>
val dep0 = withProperties(dep, propertiesMap0)
if (dep0._1 == Configuration.`import`)
(dep0._2 :: Nil, Nil)
else
(Nil, dep :: Nil) // not dep0 (properties with be substituted later)
}
.unzip
(importDeps0.flatten, standardDeps0.flatten)
}
val importDepsMgmt = {
val dependenciesMgmt0 =
addDependencies((project0.dependencyManagement +: profiles0.map(_.dependencyManagement)))
dependenciesMgmt0.flatMap { dep =>
val (conf0, dep0) = withProperties(dep, propertiesMap0)
if (conf0 == Configuration.`import`)
dep0 :: Nil
else
Nil
}
}
val parentDeps =
importDeps.map(_.moduleVersion) ++
importDepsMgmt.map(_.moduleVersion) ++
project0.parent // belongs to 1.5 & 1.6
val retainedParentDeps = parentDeps.filter(projectCache.contains)
val retainedParentProjects = retainedParentDeps.map(projectCache(_)._2)
val depMgmt = (
project0.dependencyManagement +: (
profiles0.map(_.dependencyManagement) ++
retainedParentProjects.map { p =>
val parentProperties0 = parents(p, k => projectCache.get(k).map(_._2))
.toVector
.flatMap(_.properties)
val props = withFinalProperties(
p.withProperties(parentProperties0 ++ p.properties)
).properties.toMap
withProperties(p.dependencyManagement, props)
}
)
)
.foldLeft(Map.empty[DepMgmt.Key, (Configuration, Dependency)])(DepMgmt.addSeq)
val retainedParentDepsSet = retainedParentDeps.toSet
project0
.withPackagingOpt(project0.packagingOpt.map(_.map(substituteProps(_, propertiesMap0))))
.withVersion(substituteProps(project0.version, propertiesMap0))
.withDependencies(
standardDeps ++
project0.parent // belongs to 1.5 & 1.6
.filter(projectCache.contains)
.toSeq
.flatMap(projectCache(_)._2.dependencies)
)
.withDependencyManagement(
depMgmt.values
.filterNot { case (config, dep) =>
config == Configuration.`import`
}
.toList
)
}
/** Minimized dependency set. Returns `dependencies` with no redundancy.
*
* E.g. `dependencies` may contains several dependencies towards module org:name:version, a first
* one excluding A and B, and a second one excluding A and C. In practice, B and C will be
* brought anyway, because the first dependency doesn't exclude C, and the second one doesn't
* exclude B. So having both dependencies is equivalent to having only one dependency towards
* org:name:version, excluding just A.
*
* The same kind of substitution / filtering out can be applied with configurations. If
* `dependencies` contains several dependencies towards org:name:version, a first one bringing
* its configuration "runtime", a second one "compile", and the configuration mapping of
* org:name:version says that "runtime" extends "compile", then all the dependencies brought by
* the latter will be brought anyway by the former, so that the latter can be removed.
*
* @return
* A minimized `dependencies`, applying this kind of substitutions.
*/
def minDependencies: Set[Dependency] =
dependencySet.minimizedSet.map { dep =>
Resolution.fallbackConfigIfNecessary(dep, configsOf(dep))
}
def orderedDependencies: Seq[Dependency] = {
def helper(deps: List[Dependency], done: DependencySet): LazyList[Dependency] =
deps match {
case Nil => LazyList.empty
case h :: t =>
if (done.covers(h))
helper(t, done)
else {
lazy val done0 = done.add(h)
val todo = dependenciesOf(h, withRetainedVersions = true, withFallbackConfig = true)
// filtering with done0 rather than done for some cycles (dependencies having themselves as dependency)
.filter(!done0.covers(_))
val t0 =
if (todo.isEmpty) t
else t ::: todo.toList
h #:: helper(t0, done0)
}
}
val rootDeps = updatedRootDependencies
.map(withDefaultConfig(_, defaultConfiguration))
.map(dep => updated(dep, withRetainedVersions = true, withFallbackConfig = true))
.toList
helper(rootDeps, DependencySet.empty).toVector
}
def artifacts(): Seq[Artifact] =
artifacts(defaultTypes, None)
def artifacts(types: Set[Type]): Seq[Artifact] =
artifacts(types, None)
def artifacts(classifiers: Option[Seq[Classifier]]): Seq[Artifact] =
artifacts(defaultTypes, classifiers)
def artifacts(types: Set[Type], classifiers: Option[Seq[Classifier]]): Seq[Artifact] =
artifacts(types, classifiers, classpathOrder = true)
def artifacts(
types: Set[Type],
classifiers: Option[Seq[Classifier]],
classpathOrder: Boolean
): Seq[Artifact] =
dependencyArtifacts(classifiers)
.collect {
case (_, pub, artifact) if types(pub.`type`) =>
artifact
}
.distinct
def dependencyArtifacts(): Seq[(Dependency, Publication, Artifact)] =
dependencyArtifacts(None)
def dependencyArtifacts(
classifiers: Option[Seq[Classifier]]
): Seq[(Dependency, Publication, Artifact)] =
dependencyArtifacts(classifiers, classpathOrder = true)
def dependencyArtifacts(
classifiers: Option[Seq[Classifier]],
classpathOrder: Boolean
): Seq[(Dependency, Publication, Artifact)] =
for {
dep <- (if (classpathOrder) orderedDependencies else minDependencies.toSeq)
(source, proj) <- projectCache
.get(dep.moduleVersion)
.toSeq
classifiers0 =
if (dep.attributes.classifier.isEmpty)
classifiers
else
Some(classifiers.getOrElse(Nil) ++ Seq(dep.attributes.classifier))
(pub, artifact) <- source.artifacts(dep, proj, classifiers0)
} yield (dep, pub, artifact)
@deprecated("Use the artifacts overload accepting types and classifiers instead", "1.1.0-M8")
def classifiersArtifacts(classifiers: Seq[String]): Seq[Artifact] =
artifacts(classifiers = Some(classifiers.map(Classifier(_))))
@deprecated("Use artifacts overload accepting types and classifiers instead", "1.1.0-M8")
def artifacts(withOptional: Boolean): Seq[Artifact] =
artifacts()
@deprecated("Use dependencyArtifacts overload accepting classifiers instead", "1.1.0-M8")
def dependencyArtifacts(withOptional: Boolean): Seq[(Dependency, Artifact)] =
dependencyArtifacts().map(t => (t._1, t._3))
@deprecated("Use dependencyArtifacts overload accepting classifiers instead", "1.1.0-M8")
def dependencyClassifiersArtifacts(classifiers: Seq[String]): Seq[(Dependency, Artifact)] =
dependencyArtifacts(Some(classifiers.map(Classifier(_)))).map(t => (t._1, t._3))
/** Returns errors on dependencies
* @return
* errors
*/
def errors: Seq[(ModuleVersion, Seq[String])] = errorCache.toSeq
@deprecated("Use errors instead", "1.1.0")
def metadataErrors: Seq[(ModuleVersion, Seq[String])] = errors
def dependenciesWithRetainedVersions: Set[Dependency] =
dependencies.map { dep =>
retainedVersions.get(dep.module).fold(dep) { v =>
dep.withVersion(v)
}
}
/** Removes from this `Resolution` dependencies that are not in `dependencies` neither brought
* transitively by them.
*
* This keeps the versions calculated by this `Resolution`. The common dependencies of different
* subsets will thus be guaranteed to have the same versions.
*
* @param dependencies:
* the dependencies to keep from this `Resolution`
*/
def subset(dependencies: Seq[Dependency]): Resolution = {
def updateVersion(dep: Dependency): Dependency =
dep.withVersion(retainedVersions.getOrElse(dep.module, dep.version))
@tailrec def helper(current: Set[Dependency]): Set[Dependency] = {
val newDeps = current ++ current
.flatMap(finalDependencies0)
.map(updateVersion)
val anyNewDep = (newDeps -- current).nonEmpty
if (anyNewDep)
helper(newDeps)
else
newDeps
}
val dependencies0 = dependencies
.map(withDefaultConfig(_, defaultConfiguration))
.map(dep => updated(dep, withRetainedVersions = true, withFallbackConfig = true))
val allDependencies = helper(dependencies0.toSet)
val subsetForceVersions = allDependencies.map(_.moduleVersion).toMap
copyWithCache(
rootDependencies = dependencies0,
dependencySet = dependencySet.setValues(allDependencies)
// don't know if something should be done about conflicts
).withForceVersions(subsetForceVersions ++ forceVersions)
}
}
| coursier/coursier | modules/core/shared/src/main/scala/coursier/core/Resolution.scala | Scala | apache-2.0 | 47,613 |
package com.thetestpeople.trt
import play.api.test.FakeApplication
import com.thetestpeople.trt.Config._
object FakeApplicationFactory {

  /** In-memory H2 database; DB_CLOSE_DELAY=-1 keeps it alive for the whole JVM. */
  val TestJdbcUrl = "jdbc:h2:mem:tests;DB_CLOSE_DELAY=-1"

  /** A Play FakeApplication wired against in-memory H2, an in-memory Lucene index,
    * with the CI poller disabled and the BoneCP shutdown plugin swapped in. */
  def fakeApplication = {
    val testConfiguration = Map(
      Ci.Poller.Enabled -> false,
      Db.Default.Driver -> "org.h2.Driver",
      Db.Default.Url -> TestJdbcUrl,
      Db.Default.User -> "",
      Db.Default.Password -> "",
      Lucene.InMemory -> "true")
    val testPlugins = Seq(classOf[DontStopBoneCPPlugin].getName)
    new FakeApplication(
      additionalConfiguration = testConfiguration,
      additionalPlugins = testPlugins)
  }
}
} | thetestpeople/trt | test/com/thetestpeople/trt/FakeApplicationFactory.scala | Scala | mit | 560 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.index.legacy
import org.locationtech.geomesa.accumulo.data.AccumuloWritableFeature
import org.locationtech.geomesa.accumulo.index.AccumuloJoinIndex
import org.locationtech.geomesa.index.api.ShardStrategy.NoShardStrategy
import org.locationtech.geomesa.index.api.{RowKeyValue, WritableFeature}
import org.locationtech.geomesa.index.geotools.GeoMesaDataStore
import org.locationtech.geomesa.index.index.attribute.legacy.AttributeIndexV3
import org.locationtech.geomesa.index.index.attribute.legacy.AttributeIndexV7.AttributeIndexKeySpaceV7
import org.locationtech.geomesa.index.index.attribute.{AttributeIndexKey, AttributeIndexKeySpace}
import org.locationtech.geomesa.utils.index.IndexMode.IndexMode
import org.opengis.feature.simple.SimpleFeatureType
/** Legacy (version 3) attribute index for the Accumulo data store, operating as a join index.
  *
  * Reuses the V7 attribute key space but swaps the written values for the Accumulo-specific
  * values carried by [[AccumuloWritableFeature]].
  */
class JoinIndexV3(ds: GeoMesaDataStore[_],
                  sft: SimpleFeatureType,
                  attribute: String,
                  dtg: Option[String],
                  mode: IndexMode)
    extends AttributeIndexV3(ds, sft, attribute, dtg, mode) with AccumuloJoinIndex {

  import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType

  override val keySpace: AttributeIndexKeySpace =
    new AttributeIndexKeySpaceV7(sft, sft.getTableSharingBytes, NoShardStrategy, attribute) {
      override def toIndexKey(writable: WritableFeature,
                              tier: Array[Byte],
                              id: Array[Byte],
                              lenient: Boolean): RowKeyValue[AttributeIndexKey] = {
        // keep the keys computed by the standard key space, replace only the values
        // NOTE(review): the cast assumes the store always hands us AccumuloWritableFeature — confirm
        val kv = super.toIndexKey(writable, tier, id, lenient)
        kv.copy(values = writable.asInstanceOf[AccumuloWritableFeature].indexValues)
      }
    }
}
| aheyne/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/index/legacy/JoinIndexV3.scala | Scala | apache-2.0 | 2,182 |
import java.io._
import java.net.{InetSocketAddress, Socket, SocketTimeoutException}
import java.text.SimpleDateFormat
import java.util.Calendar
import java.util.concurrent.atomic.AtomicInteger
import org.apache.poi.xssf.usermodel.{XSSFCell, XSSFSheet, XSSFWorkbook}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{Await, Future, Promise}
import scala.io.{BufferedSource, StdIn}
import scala.util.{Failure, Success, Try}
/** Client for one measurement analyzer reachable over TCP.
  *
  * NOTE(review): the wire protocol ("v config" / "t list" / "v list!") and the response layout
  * parsed below are device specific — confirm against the device's documentation.
  */
case class Analyzer(socket: Socket) {

  // The read timeout doubles as the end-of-response marker, see readResponses().
  socket.setSoTimeout(Main.networkingTimeout)

  // important (156)
  // NOTE(review): laziness appears intentional (see "important" marker) — presumably the input
  // stream must not be wrapped/consumed before the first command is sent; confirm.
  lazy val in = new BufferedSource(socket.getInputStream)
  val out = new PrintStream(socket.getOutputStream)

  // Identity fields, all filled in by initializeViaNetwork().
  var model, modelNr, modelFamily, serial, timestamp, date = ""
  // Raw name/value pairs of the three query responses, filled in by initializeViaNetwork().
  var config, tlist, vlist: Option[Seq[(String, String)]] = None

  /** Sends the three device queries, parses the combined response into config/tlist/vlist
    * and the derived identity fields, then closes the socket. */
  def initializeViaNetwork() = {
    sendCommand("v config")
    sendCommand("t list")
    sendCommand("v list!")
    val responseLines = readResponses()
    socket.close()

    // Responses arrive concatenated: "V…" config lines first, then "T…" lines, then the rest.
    val (configString, rest) = responseLines.span(_.startsWith("V")) // important
    val (tlistString, vlistString) = rest.span(_.startsWith("T"))

    // only "NAME=VALUE" lines carry data
    def isPair(string: String) = string.contains("=")

    this.config = Some(configString.toSeq.filter(isPair).map(formatToPair))
    this.tlist = Some(tlistString.toSeq.filter(isPair).map(formatToPair))
    this.vlist = Some(vlistString.toSeq.filter(isPair).map(formatToPair))

    model = config.get.find { case (name, value) => name == "CONFIG[0]" }.map(_._2).get
    modelNr = model.takeWhile(_ != ' ')
    modelFamily = model.dropWhile(!_.isDigit).takeWhile(_.isDigit)
    // serial: strip the model prefix, then quotes/whitespace/dashes, then leading zeroes
    serial = vlist.get.find { case (name, value) => name == "SERIAL_NUMBER" }.map(_._2).get.replace(model.split(" ")(0), "").replaceAll("\\"|\\\\s|-", "").dropWhile(_ == '0')

    val binaryDate = Calendar.getInstance().getTime
    timestamp = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS").format(binaryDate)
    date = new SimpleDateFormat("yyyy-MM-dd").format(binaryDate)
  }

  /** Sends one command line to the device. */
  def sendCommand(command: String): Unit = {
    out.println(command)
    out.flush()
  }

  /** Reads everything the device sends until the socket read times out, then returns the
    * accumulated text as lines. The SocketTimeoutException is the expected end-of-data signal. */
  def readResponses() = {
    val builder = new StringBuilder()
    try {
      while (true)
        builder.append(in.next())
    } catch {
      case ex: SocketTimeoutException => //left blank intentionally
    }
    new BufferedSource(new ByteArrayInputStream(builder.mkString.getBytes)).getLines()
  }

  /** Parses a response line into (NAME, VALUE): the first three space-separated columns are
    * dropped, the remainder is split on '='. */
  def formatToPair(pair: String): (String, String) = {
    // [ ]+ matches any number of spaces
    val parts = pair.split("[ ]+").drop(3).mkString(" ").split("=")
    parts(0) -> parts(1)
  }
}
/**
* Created by Giymo11 on 2015-07-07 at 14:08.
*/
object Main {
// Candidate TCP ports probed on each analyzer.
// NOTE(review): 502 is conventionally Modbus — confirm what the device actually speaks.
val ports = Seq(502, 3000)
// Socket connect / read timeout, in milliseconds (read timeout also ends a response).
val networkingTimeout = 5000
// Tab width assumed by prettyFormat when aligning name/value columns.
val tabInSpaces = 8

// --- Report workbook layout: cover sheet cells (1-based rows, letter columns) ---
val coverSheetName = "Cover"
val modelRow = 9
val modelColumn = 'K'
val serialRow = 11
val serialColumn = 'K'
val entryDateRow = 12
val entryDateColumn = 'D'
val exitDateRow = 12
val exitDateColumn = 'K'

// --- Parameter sheet layout: names in column B, entry/exit values in E/G ---
val paramSheetName = "Parameter"
val paramRowStart = 3
val paramRowEnd = 100
val parameterColumn = 'B'
val entryColumn = 'E'
val exitColumn = 'G'

// --- Vlist sheet layout ---
val vlistSheetName = "Vlist"
val vlistRowStart = 3
val vlistNameColumn = 'A'
val vlistValueColumn = 'C'

// Where finished (exit) reports are moved, and the workbook file extension.
val finishedReportsDirectory = "./finished"
val excelEnding = "xlsm"
/** Entry point: for each IP (from args or prompted), connects to the analyzer, archives its
  * state and optionally generates entry/exit reports, then prints which IPs succeeded.
  *
  * Flags: -r generate Excel reports, -o open the produced files when done.
  * NOTE(review): `future2try` and `FileOpener` are defined elsewhere in this file (not shown here).
  */
def main(args: Array[String]): Unit = {
  import scala.concurrent.duration._

  val isReport = args.contains("-r")
  val isOpening = args.contains("-o")
  // everything that is not a flag is treated as an IP address
  val passedParameters = args.filter(_.charAt(0) != '-')

  val ips = if(passedParameters.length == 0) {
    print("Please enter the IPs separated by a space: ")
    val input = StdIn.readLine()
    input.split(" ").toSeq
  } else {
    passedParameters.toSeq
  }

  // One future per IP: archive always; reports (in both languages) only with -r.
  val sockets = ips.map(tryIp).map(_.map { analyzer =>
    archive(analyzer, openWhenFinished = isOpening && !isReport)
    if (isReport) {
      report(analyzer, "Deutsch", isOpening)
      report(analyzer, "English", isOpening)
    }
    analyzer.socket.getInetAddress
  })

  // Block until every analyzer is processed — acceptable at the program edge.
  val tries = Await.result(Future.sequence(sockets.map(future2try)), Duration.Inf)
  tries.filter(_ isSuccess).foreach(addr => println(s"${{addr.get.toString}} worked"))
  tries.filter(_ isFailure).foreach(addr => println(s"${{addr.failed.get.getMessage}} didn't work"))
  System.exit(0)
}
/** Writes the inspection report workbook for `analyzer`.
  *
  * The first call for a serial creates the report from the entry template and fills the entry
  * column; a later call (report file already on disk) fills the exit column, adds the vlist
  * sheet, and moves the finished workbook to `finishedReportsDirectory`.
  *
  * @param analyzer         fully initialized analyzer (initializeViaNetwork must have run)
  * @param postfix          language suffix of the template/report ("Deutsch" / "English")
  * @param openWhenFinished open the resulting workbook in the OS afterwards
  */
def report(analyzer: Analyzer, postfix: String, openWhenFinished: Boolean): AnyVal = {
  // A report file that already exists means the entry pass ran earlier: this run records the exit.
  val reportName = s"Report for ${{analyzer.serial}} - ${{analyzer.model}} $postfix.$excelEnding"
  val reportFile = new File(reportName)
  val isEntry = !reportFile.exists()

  var reportInputStream: Option[InputStream] = None
  val template = if (isEntry) getEntryTemplate(analyzer, postfix) else {
    reportInputStream = Some(new FileInputStream(reportFile))
    new XSSFWorkbook(reportInputStream.get)
  }
  val parameterSheet = template.getSheet(Main.paramSheetName)
  val coverSheet = template.getSheet(Main.coverSheetName)

  if (isEntry) {
    println(s"Recording entry of ${{analyzer.serial}} $postfix")
    getCellAt(coverSheet, modelRow, modelColumn).setCellValue(analyzer.model)
    getCellAt(coverSheet, serialRow, serialColumn).setCellValue(analyzer.serial)
    getCellAt(coverSheet, entryDateRow, entryDateColumn).setCellValue(analyzer.date)
    insertParameters(analyzer, parameterSheet, entryColumn)
  } else {
    // BUG FIX: this branch records the *exit*, but previously logged "Recording entry".
    println(s"Recording exit of ${{analyzer.serial}} $postfix")
    getCellAt(coverSheet, exitDateRow, exitDateColumn).setCellValue(analyzer.date)
    insertVlist(analyzer, if (template.getSheet(vlistSheetName) == null) template.createSheet(vlistSheetName) else template.getSheet(vlistSheetName))
    insertParameters(analyzer, parameterSheet, exitColumn)
  }

  // Exit reports get the finish date in their name and land in the finished directory.
  val file = if (!isEntry)
    getFile(finishedReportsDirectory, reportName.replace(s" $postfix.$excelEnding", "") + " - finished " + analyzer.date + s" - $postfix.$excelEnding")
  else
    reportFile

  val out = new FileOutputStream(file)
  template.write(out)
  out.flush()
  out.close()
  reportInputStream.foreach(_ close())

  if (!isEntry) {
    println("Moving " + reportFile.getName)
    reportFile.delete()
  }

  if(openWhenFinished)
    FileOpener.open(file.getCanonicalFile)
}
/** Loads the (read-only) entry template workbook for the analyzer's model and language.
  * Throws with the analyzer's IP as the message when the template file is missing, so the
  * caller can report which device could not be processed. */
def getEntryTemplate(analyzer: Analyzer, postfix: String): XSSFWorkbook = {
  val templateName = s"Template for ${analyzer.modelNr} $postfix.$excelEnding"
  val templateFile = new File(templateName)
  if (templateFile.exists()) {
    templateFile.setReadOnly()
    new XSSFWorkbook(templateFile)
  } else {
    println(s"$templateName does not exist!")
    throw new scala.Exception(analyzer.socket.getInetAddress.toString)
  }
}
/** Copies the analyzer's config and tlist values into `column` of the parameter sheet,
  * matching rows by the parameter name found in the sheet's name column. */
def insertParameters(analyzer: Analyzer, reportSheet: XSSFSheet, column: Int) = {
  // Map each parameter name present on the sheet to the row it lives on.
  val rowByName: Map[String, Int] =
    (paramRowStart to paramRowEnd)
      .map(row => getCellAt(reportSheet, row, parameterColumn).getStringCellValue -> row)
      .toMap

  // Writes one (name, value) pair if the sheet has a row for it; tlist values carry a
  // unit suffix which is stripped so the cell gets a numeric value.
  def write(pair: (String, String), hasUnits: Boolean): Unit =
    rowByName.get(pair._1).foreach { row =>
      val cell = getCellAt(reportSheet, row, column)
      if (hasUnits) cell.setCellValue(pair._2.takeWhile(_ != ' ').toDouble)
      else cell.setCellValue(pair._2)
    }

  analyzer.config.get.foreach(write(_, hasUnits = false))
  analyzer.tlist.get.foreach(write(_, hasUnits = true))
}
/** Writes the analyzer's full vlist (name/value pairs) into the vlist sheet, one pair per row,
  * wrapping long values and auto-sizing the name column. */
def insertVlist(analyzer: Analyzer, vlistSheet: XSSFSheet): Unit = {
  val wrapText = vlistSheet.getWorkbook.createCellStyle()
  wrapText.setWrapText(true)

  analyzer.vlist.get.zipWithIndex.foreach { case ((name, value), idx) =>
    val rowIndex = idx + vlistRowStart
    getCellAt(vlistSheet, rowIndex, vlistNameColumn).setCellValue(name)
    val valueCell = getCellAt(vlistSheet, rowIndex, vlistValueColumn)
    valueCell.setCellStyle(wrapText)
    valueCell.setCellValue(value)
  }

  vlistSheet.autoSizeColumn(vlistNameColumn - 'A')
}
/** Returns the cell at the given 1-based row and letter column, creating the row and/or cell
  * if they do not exist yet (POI's getRow/getCell return null for absent entries). */
def getCellAt(sheet: XSSFSheet, rowIndex: Int, columnIndex: Int): XSSFCell = {
  // Option(...) turns POI's null-for-missing convention into idiomatic Scala.
  val row = Option(sheet.getRow(rowIndex - 1)).getOrElse(sheet.createRow(rowIndex - 1))
  Option(row.getCell(columnIndex - 'A')).getOrElse(row.createCell(columnIndex - 'A'))
}
/** Returns a File named `fileName` inside directory `dirName`, creating the directory
  * (including missing parents) and an empty file on disk if they do not exist yet. */
def getFile(dirName: String, fileName: String) = {
  val dir = new File(dirName)
  if (!dir.exists())
    dir.mkdirs() // mkdirs (not mkdir): also creates missing parent directories
  val file = new File(dir, fileName)
  if (!file.exists())
    file.createNewFile()
  file
}
def archive(analyzer: Analyzer, openWhenFinished: Boolean) = {
  // Prepend the analyzer's current report to its per-model archive file,
  // then optionally open the file for the user.
  val file = getFile("./" + analyzer.modelFamily, s"${analyzer.serial} - ${analyzer.model}.txt")
  // Read the existing archive first; close the stream even if reading fails.
  val in = new BufferedSource(new FileInputStream(file))
  val oldContent = try in.getLines().toList finally in.close()
  val linebreak = System.lineSeparator()
  val builder = new StringBuilder
  builder.append("Date: " + analyzer.timestamp).append(linebreak)
  builder.append(s"Model: ${analyzer.model}, Serial: ${analyzer.serial}").append(linebreak * 2)
  builder.append(prettyFormat(analyzer.config.get)).append(linebreak)
  builder.append(prettyFormat(analyzer.tlist.get)).append(linebreak)
  builder.append(prettyFormat(analyzer.vlist.get)).append(linebreak * 3)
  builder.append(oldContent.mkString(linebreak))
  // append = false: truncate and overwrite with new report + old content.
  // Close in a finally block so a failed write does not leak the stream.
  val out = new FileOutputStream(file, false)
  try out.write(builder.toString.getBytes) finally out.close()
  if (openWhenFinished)
    FileOpener.open(file.getCanonicalFile)
}
def prettyFormat(pairs: Seq[(String, String)]): String = {
  // Render name/value pairs as two aligned columns, one pair per line.
  // Returns "" for an empty sequence — the original maxBy call threw
  // UnsupportedOperationException on empty input.
  if (pairs.isEmpty) ""
  else {
    val builder = new StringBuilder()
    // Width of the name column, measured in tab stops.
    val lengthInTabs = pairs.maxBy(pair => pair._1.length)._1.length / tabInSpaces + 1
    pairs.foreach { case (name, value) =>
      val tabs = lengthInTabs - name.length / tabInSpaces
      // NOTE(review): "\\t" appends a literal backslash-t, not a tab character — confirm intended.
      builder.append(name).append("\\t" * tabs).append(value).append(System.lineSeparator())
    }
    builder.mkString
  }
}
def tryIp(ip: String): Future[Analyzer] = {
  // Try every known port on the given host in parallel. The returned future
  // succeeds with the first analyzer that responds, and fails only after
  // every port has failed (with the last failure observed).
  def getAnalyzerForPort(port: Int): Future[Analyzer] = Future {
    val socket = new Socket()
    val addr = new InetSocketAddress(ip, port)
    try {
      socket.connect(addr, networkingTimeout)
      val analyzer = Analyzer(socket)
      analyzer.initializeViaNetwork()
      analyzer
    } catch {
      case ex: Exception =>
        socket.close() // don't leak the socket on a failed connect/handshake
        // Keep the attempted address in the message (callers report it) but
        // chain the original exception instead of discarding it.
        throw new Exception(addr.getAddress.toString, ex)
    }
  }
  val promise = Promise[Analyzer]()
  val remaining = new AtomicInteger(ports.size)
  ports.map(getAnalyzerForPort).foreach(_ onComplete {
    case Success(analyzer) => promise.trySuccess(analyzer)
    case Failure(ex) => if (remaining.decrementAndGet() == 0) promise.failure(ex)
  })
  promise.future
}
def future2try[T](future: Future[T]): Future[Try[T]] = {
  // Lift both outcomes into the success channel so callers can inspect
  // failures as values instead of handling a failed future.
  val lifted: Future[Try[T]] = future.map(result => Success(result))
  lifted.recover { case throwable: Throwable => Failure(throwable) }
}
}
| Giymo11/TListing | src/main/scala/Main.scala | Scala | gpl-2.0 | 11,176 |
package com.sksamuel.elastic4s
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper
import org.scalatest.matchers.should.Matchers
import org.scalatest.matchers.{MatchResult, Matcher}
trait JsonSugar extends Matchers {

  private val mapper = new ObjectMapper with ScalaObjectMapper
  mapper.registerModule(DefaultScalaModule)

  /** Matches the checked string against the JSON stored in the given classpath resource. */
  def matchJsonResource(resourceName: String) = new JsonResourceMatcher(resourceName)

  /**
   * Matches the checked string against the given JSON string.
   * Both sides are parsed so the comparison ignores formatting differences.
   */
  def matchJson(right: String) = new Matcher[String] {
    override def apply(left: String): MatchResult = {
      withClue(s"expected JSON [$right] ") {
        right should not be null
      }
      val expectedJson = mapper.readTree(right)
      val actualJson = mapper.readTree(left)
      MatchResult(
        expectedJson == actualJson,
        // Fixed failure text: previously said "did not match resource [...]",
        // copy-pasted from JsonResourceMatcher although no resource is involved here.
        s"$actualJson did not match [$right]: $expectedJson",
        s"$actualJson did match [$right]: $expectedJson"
      )
    }
  }

  /** Matcher comparing a JSON string against a JSON document loaded from the classpath. */
  class JsonResourceMatcher(resourceName: String) extends Matcher[String] {
    override def apply(left: String): MatchResult = {
      val jsonResource = getClass.getResource(resourceName)
      withClue(s"expected JSON resource [$resourceName] ") {
        jsonResource should not be null
      }
      val expectedJson = mapper.readTree(jsonResource)
      val actualJson = mapper.readTree(left)
      MatchResult(
        expectedJson == actualJson,
        s"$actualJson did not match resource [$resourceName]: $expectedJson",
        s"$actualJson did match resource [$resourceName]: $expectedJson"
      )
    }
  }
}
| sksamuel/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/JsonSugar.scala | Scala | apache-2.0 | 1,680 |
/**
* Copyright (c) 2014 MongoDB, Inc.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
* For questions and comments about this product, please see the project page at:
*
* https://github.com/mongodb/mongo-scala-driver
*
*/
package org.mongodb.scala
import org.mongodb.{Codec, Document, ReadPreference, WriteConcern}
import org.mongodb.codecs.{DocumentCodec, PrimitiveCodecs}
object MongoDatabaseOptions {
  /** Derives per-database options from the client-wide options, reusing the client's codecs. */
  def apply(options: MongoClientOptions): MongoDatabaseOptions =
    MongoDatabaseOptions(
      primitiveCodecs = options.primitiveCodecs,
      writeConcern = options.writeConcern,
      readPreference = options.readPreference,
      documentCodec = new DocumentCodec(options.primitiveCodecs)
    )
}
// Per-database configuration bundle: primitive codecs plus the default
// write concern, read preference, and document codec used for operations
// on that database.
case class MongoDatabaseOptions(primitiveCodecs: PrimitiveCodecs,
writeConcern: WriteConcern,
readPreference: ReadPreference,
documentCodec: Codec[Document])
| antonnik/code-classifier | naive_bayes/resources/scala/MongoDatabaseOptions.scala | Scala | apache-2.0 | 1,663 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.scala.stream.table
import org.apache.flink.api.scala._
import org.apache.flink.types.Row
import org.apache.flink.table.api.scala.stream.utils.{StreamITCase, StreamTestData}
import org.apache.flink.table.api.scala._
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.util.StreamingMultipleProgramsTestBase
import org.apache.flink.table.api.{TableEnvironment, ValidationException}
import org.junit.Assert._
import org.junit.Test
import scala.collection.mutable
class UnionITCase extends StreamingMultipleProgramsTestBase {

  /** Unions two streams with compatible schemas and checks every row appears. */
  @Test
  def testUnion(): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tableEnv = TableEnvironment.getTableEnvironment(env)
    StreamITCase.testResults = mutable.MutableList()
    val left = StreamTestData.getSmall3TupleDataStream(env).toTable(tableEnv, 'a, 'b, 'c)
    val right = StreamTestData.getSmall3TupleDataStream(env).toTable(tableEnv, 'd, 'e, 'f)

    val resultStream = left.unionAll(right).select('c).toAppendStream[Row]
    resultStream.addSink(new StreamITCase.StringSink)
    env.execute()

    val expectedRows = mutable.MutableList(
      "Hi", "Hello", "Hello world", "Hi", "Hello", "Hello world")
    assertEquals(expectedRows.sorted, StreamITCase.testResults.sorted)
  }

  /** Unions two streams and filters the combined result before projecting. */
  @Test
  def testUnionWithFilter(): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tableEnv = TableEnvironment.getTableEnvironment(env)
    StreamITCase.testResults = mutable.MutableList()
    val left = StreamTestData.getSmall3TupleDataStream(env).toTable(tableEnv, 'a, 'b, 'c)
    val right = StreamTestData.get5TupleDataStream(env).toTable(tableEnv, 'a, 'b, 'd, 'c, 'e)

    val resultStream =
      left.unionAll(right.select('a, 'b, 'c)).filter('b < 2).select('c).toAppendStream[Row]
    resultStream.addSink(new StreamITCase.StringSink)
    env.execute()

    val expectedRows = mutable.MutableList("Hi", "Hallo")
    assertEquals(expectedRows.sorted, StreamITCase.testResults.sorted)
  }

  /** Unioning tables whose schemas do not line up must be rejected. */
  @Test(expected = classOf[ValidationException])
  def testUnionFieldsNameNotOverlap1(): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tableEnv = TableEnvironment.getTableEnvironment(env)
    StreamITCase.testResults = mutable.MutableList()
    val left = StreamTestData.getSmall3TupleDataStream(env).toTable(tableEnv, 'a, 'b, 'c)
    val right = StreamTestData.get5TupleDataStream(env).toTable(tableEnv, 'a, 'b, 'd, 'c, 'e)

    val resultStream = left.unionAll(right).toAppendStream[Row]
    resultStream.addSink(new StreamITCase.StringSink)
    env.execute()

    assertEquals(true, StreamITCase.testResults.isEmpty)
  }

  /** Same rejection, this time after an explicit projection that still mismatches. */
  @Test(expected = classOf[ValidationException])
  def testUnionFieldsNameNotOverlap2(): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tableEnv = TableEnvironment.getTableEnvironment(env)
    StreamITCase.testResults = mutable.MutableList()
    val left = StreamTestData.getSmall3TupleDataStream(env).toTable(tableEnv, 'a, 'b, 'c)
    val right = StreamTestData.get5TupleDataStream(env).toTable(tableEnv, 'a, 'b, 'c, 'd, 'e)
      .select('a, 'b, 'c)

    val resultStream = left.unionAll(right).toAppendStream[Row]
    resultStream.addSink(new StreamITCase.StringSink)
    env.execute()

    assertEquals(true, StreamITCase.testResults.isEmpty)
  }

  /** Tables created in different TableEnvironments cannot be unioned. */
  @Test(expected = classOf[ValidationException])
  def testUnionTablesFromDifferentEnvs(): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tableEnv1 = TableEnvironment.getTableEnvironment(env)
    val tableEnv2 = TableEnvironment.getTableEnvironment(env)
    val left = StreamTestData.getSmall3TupleDataStream(env).toTable(tableEnv1, 'a, 'b, 'c)
    val right = StreamTestData.getSmall3TupleDataStream(env).toTable(tableEnv2, 'a, 'b, 'c)

    // Must fail: the two tables are bound to different TableEnvironments.
    left.unionAll(right)
  }
}
| gustavoanatoly/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/scala/stream/table/UnionITCase.scala | Scala | apache-2.0 | 4,763 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs/contributors
// License: http://www.gnu.org/licenses/lgpl-3.0.en.html
package org.ensime.sexp
// Unit tests for SexpParser: atoms (nil, strings, chars, symbols, numbers),
// escape handling inside strings, nested lists, quoting sugar, and cons pairs.
class SexpParserSpec extends SexpSpec {
import SexpParser.{ apply => parse }
// Shared fixture values reused across the assertions below.
val foo = SexpString("foo")
val bar = SexpString("bar")
val one = SexpInteger(1)
val negtwo = SexpInteger(-2)
val pi = SexpFloat(3.14)
val fourexp = SexpFloat(4e+16)
val foosym = SexpSymbol("foo")
val barsym = SexpSymbol("bar")
val fookey = SexpSymbol(":foo")
val barkey = SexpSymbol(":bar")
// All spellings of the empty list (including one containing only a comment)
// must parse to nil.
"Sexp Parser" should "parse nil" in {
parse("nil") shouldBe SexpNil
parse("()") shouldBe SexpNil
parse("( )") shouldBe SexpNil
parse("(;blah\\n)") shouldBe SexpNil
}
it should "parse lists of strings" in {
parse("""("foo" "bar")""") shouldBe SexpList(foo, bar)
}
it should "parse escaped chars in strings" in {
parse(""""z \\\\ \\" \\t \\\\t \\\\\\t x\\ x"""") shouldBe SexpString(
"z \\\\ \\" \\t \\\\t \\\\\\t xx"
)
parse(""""import foo\\n\\n\\nexport bar\\n"""") shouldBe SexpString(
"import foo\\n\\n\\nexport bar\\n"
)
parse(""""C:\\\\my\\\\folder"""") shouldBe SexpString("""C:\\my\\folder""")
}
it should "parse unescaped chars in strings" in {
parse("\\"import foo\\n\\n\\nexport bar\\n\\"") shouldBe SexpString(
"import foo\\n\\n\\nexport bar\\n"
)
}
// ?x is the character-literal syntax.
it should "parse lists of chars" in {
parse("""(?f ?b)""") shouldBe SexpList(SexpChar('f'), SexpChar('b'))
}
it should "parse lists of symbols" in {
parse("(foo bar is?)") shouldBe SexpList(foosym, barsym, SexpSymbol("is?"))
}
it should "parse lists of numbers" in {
parse("(1 -2 3.14 4e+16)") shouldBe SexpList(one, negtwo, pi, fourexp)
}
// NaN never equals itself, so these use matchPattern instead of shouldBe.
it should "parse NaN" in {
parse("0.0e+NaN") should matchPattern {
case SexpFloat(d) if d.isNaN =>
}
parse("-0.0e+NaN") should matchPattern {
case SexpFloat(d) if d.isNaN =>
}
}
it should "parse infinity" in {
parse("1.0e+INF") shouldBe SexpFloat(Double.PositiveInfinity)
parse("-1.0e+INF") shouldBe SexpFloat(Double.NegativeInfinity)
}
it should "parse lists within lists" in {
parse("""((foo))""") shouldBe SexpList(SexpList(foosym))
parse("""((foo) foo)""") shouldBe SexpList(SexpList(foosym), foosym)
}
// 'x is sugar for (quote x); both a quoted list and a quoted symbol are covered.
it should "parse quoted expressions" in {
parse("""'(:foo "foo" :bar "bar")""") shouldBe
SexpCons(SexpSymbol("quote"), SexpList(fookey, foo, barkey, bar))
parse("'foo") shouldBe SexpCons(SexpSymbol("quote"), foosym)
}
it should "parse cons" in {
parse("(foo . bar)") shouldBe SexpCons(foosym, barsym)
}
it should "parse symbols with dots in their name" in {
parse("foo.bar") shouldBe SexpSymbol("foo.bar")
parse(":foo.bar") shouldBe SexpSymbol(":foo.bar")
}
// A symbol that merely starts with the letters "nil" must not parse as nil.
it should "parse symbols starting with nil in their name" in {
parse("nilsamisanidiot") shouldBe SexpSymbol("nilsamisanidiot")
}
}
| ensime/ensime-server | s-express/src/test/scala/org/ensime/sexp/SexpParserSpec.scala | Scala | gpl-3.0 | 2,960 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a basic overview of the dataset's contents without deeper analysis.