| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
/*-------------------------------------------------------------------------*\
** ScalaCheck **
** Copyright (c) 2007-2014 Rickard Nilsson. All rights reserved. **
** http://www.scalacheck.org **
** **
** This software is released under the terms of the Revised BSD License. **
** There is NO WARRANTY. See the file LICENSE for the full text. **
\*------------------------------------------------------------------------ */
package org.scalacheck.util
import collection._
trait Buildable[T,C[_]] {
def builder: mutable.Builder[T,C[T]]
def fromIterable(it: Traversable[T]): C[T] = {
val b = builder
b ++= it
b.result()
}
}
trait Buildable2[T,U,C[_,_]] {
def builder: mutable.Builder[(T,U),C[T,U]]
def fromIterable(it: Traversable[(T,U)]): C[T,U] = {
val b = builder
b ++= it
b.result()
}
}
object Buildable {
import generic.CanBuildFrom
implicit def buildableCanBuildFrom[T, C[_]](implicit c: CanBuildFrom[C[_], T, C[T]]) =
new Buildable[T, C] {
def builder = c.apply
}
import java.util.ArrayList
implicit def buildableArrayList[T] = new Buildable[T,ArrayList] {
def builder = new mutable.Builder[T,ArrayList[T]] {
val al = new ArrayList[T]
def +=(x: T) = {
al.add(x)
this
}
def clear() = al.clear()
def result() = al
}
}
}
object Buildable2 {
implicit def buildableMutableMap[T,U] = new Buildable2[T,U,mutable.Map] {
def builder = mutable.Map.newBuilder
}
implicit def buildableImmutableMap[T,U] = new Buildable2[T,U,immutable.Map] {
def builder = immutable.Map.newBuilder
}
implicit def buildableMap[T,U] = new Buildable2[T,U,Map] {
def builder = Map.newBuilder
}
implicit def buildableImmutableSortedMap[T: Ordering, U] = new Buildable2[T,U,immutable.SortedMap] {
def builder = immutable.SortedMap.newBuilder
}
implicit def buildableSortedMap[T: Ordering, U] = new Buildable2[T,U,SortedMap] {
def builder = SortedMap.newBuilder
}
}
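
// Illustrative usage sketch (not part of the original file): with the implicit instances
// defined above in scope, fromIterable materializes any Traversable into the target
// collection type; here a java.util.ArrayList built through buildableArrayList.
object BuildableUsageSketch {
  import Buildable._
  def toArrayList(xs: Seq[Int]): java.util.ArrayList[Int] =
    buildableArrayList[Int].fromIterable(xs)
}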
| slothspot/scala | src/partest-extras/scala/org/scalacheck/util/Buildable.scala | Scala | bsd-3-clause | 2,206 |
package tu.coreservice.action.way2think.correlation
import tu.coreservice.action.way2think.Way2Think
import tu.model.knowledge.communication.{ContextHelper, ShortTermMemory}
import tu.model.knowledge.{Resource, Constant}
import tu.model.knowledge.annotator.AnnotatedNarrative
import tu.model.knowledge.domain.{Concept, ConceptNetwork}
import tu.exception.UnexpectedException
import tu.model.knowledge.narrative.Narrative
/**
* @author max talanov
* date 2012-09-10
* time: 11:23 PM
*/
class CorrelationWay2Think extends Way2Think {
/**
* Way2Think interface.
* @param inputContext ShortTermMemory of all inbound parameters.
* @return outputContext
*/
def apply(inputContext: ShortTermMemory) = {
try {
inputContext.findByName(Constant.LINK_PARSER_RESULT_NAME) match {
case Some(narrative: AnnotatedNarrative) => {
inputContext.simulationModel match {
case Some(model: ConceptNetwork) => {
inputContext.simulationResult match {
case Some(result: ConceptNetwork) => {
val tripleResult: Option[Triple[List[Concept], List[Concept], List[Concept]]] = this.apply(narrative, result, model)
tripleResult match {
case Some(tr: Triple[List[Concept], List[Concept], List[Concept]]) => {
val updatedSimulationResult = ConceptNetwork(result.nodes ::: tr._1, this.getClass.getName + "Simulation" + Constant.RESULT)
val context = ContextHelper(List[Resource](), updatedSimulationResult, this.getClass.getName + "ShortTermMemory" + Constant.RESULT)
if (tr._2.size > 0) {
val updatedSimulationModel = ConceptNetwork(model.nodes ::: tr._2, this.getClass.getName + "Model" + Constant.RESULT)
context.simulationModel = Some(updatedSimulationModel)
this.setResultsToReport(context, tr._2)
}
if (tr._3.size > 0) {
context.notUnderstoodConcepts = tr._3
}
context
}
case None => {
throw new UnexpectedException("$No_matches_detected_in_domain_model")
}
}
}
case None => {
val tripleResult: Option[Triple[List[Concept], List[Concept], List[Concept]]] = this.apply(narrative, model)
tripleResult match {
case Some(tr: Triple[List[Concept], List[Concept], List[Concept]]) => {
val updatedSimulationResult = ConceptNetwork(tr._1, this.getClass.getName + "Training" + Constant.RESULT)
val context = ContextHelper(List[Resource](), updatedSimulationResult, this.getClass.getName + "ShortTermMemory" + Constant.RESULT)
if (tr._2.size > 0) {
val updatedSimulationModel = ConceptNetwork(model.nodes ::: tr._2, this.getClass.getName + "Model" + Constant.RESULT)
context.simulationModel = Some(updatedSimulationModel)
this.setResultsToReport(context, tr._2)
}
if (tr._3.size > 0) {
context.notUnderstoodConcepts = tr._3
}
context
}
case None => {
throw new UnexpectedException("$No_matches_detected_in_domain_model")
}
}
}
}
}
case None => {
throw new UnexpectedException("$No_domain_model_specified")
}
}
}
case None => {
throw new UnexpectedException("$Context_lastResult_is_None")
}
}
} catch {
case e: ClassCastException => {
throw new UnexpectedException("$Context_lastResult_is_not_expectedType " + e.getMessage)
}
}
}
/**
* Searches for mapping paths from simulation result to domainModel via clarification.
* @param clarification AnnotatedNarrative of clarification
* @param simulationResult description of current situation as ConceptNetwork
* @param domainModel overall domain model to be used to analyse current situation as ConceptNetwork.
* @return shortest maps, domainModel concepts List, notUnderstood concepts List.
*/
def apply(clarification: AnnotatedNarrative, simulationResult: ConceptNetwork, domainModel: ConceptNetwork):
Option[Triple[List[Concept], List[Concept], List[Concept]]] = {
val s = new Correlation()
s.apply(clarification, simulationResult, domainModel)
}
/**
* Searches for mapping paths from simulation result to domainModel via clarification.
* @param clarification AnnotatedNarrative of clarification
* @param domainModel overall domain model to be used to analyse current situation as ConceptNetwork.
* @return shortest maps, domainModel concepts List, notUnderstood concepts List.
*/
def apply(clarification: AnnotatedNarrative, domainModel: ConceptNetwork):
Option[Triple[List[Concept], List[Concept], List[Concept]]] = {
val s = new Correlation()
s.apply(clarification, domainModel)
}
def start() = false
def stop() = false
}
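
// Hypothetical usage sketch (not part of the original file): the way-to-think is applied to
// a ShortTermMemory that already carries the link-parser result, the simulation model and,
// optionally, a previous simulation result; it returns the updated context, or throws
// UnexpectedException when a required input is missing.
object CorrelationWay2ThinkSketch {
  def run(memory: ShortTermMemory) = new CorrelationWay2Think().apply(memory)
}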
| keskival/2 | coreservice.action.way2think/src/main/scala/tu/coreservice/action/way2think/correlation/CorrelationWay2Think.scala | Scala | gpl-3.0 | 5,419 |
/*
* Copyright (C) 2005, The OpenURP Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.code.person.model
import org.beangle.data.model.annotation.code
import org.openurp.code.CodeBean
/**
* Blood type
* See Ministry of Education standard JY/T 1001, section 4.5.14
*/
@code("industry")
class BloodType extends CodeBean
/**
* Cause of difficulty
* See Ministry of Education standard JY/T 1001, section 4.2.23
*/
@code("industry")
class DifficultyCause extends CodeBean
/**
* Degree of difficulty
* See Ministry of Education standard JY/T 1001, section 4.2.22
*/
@code("industry")
class DifficultyDegree extends CodeBean
/**
* Family category
* See Ministry of Education standard JY/T 1001, section 4.2.19
*/
@code("industry")
class FamilyCategory extends CodeBean
/**
* Household registration type
* See national standard GA 324.1-2001, Table C.3
*/
@code("industry")
class HouseholdType extends CodeBean
/**
* Passport type
* See GA 59.7 Codes for foreign-related information management, Part 7: passport document type codes
*/
@code("industry")
class PassportType extends CodeBean
/**
* Chinese visa type
* GA/T 704.8-2007
*/
@code("industry")
class VisaType extends CodeBean
| openurp/api | code/src/main/scala/org/openurp/code/person/model/industry.scala | Scala | lgpl-3.0 | 1,703 |
package spinoco.fs2.cassandra.sample
import java.net.InetAddress
import java.util.UUID
import fs2.Chunk
import shapeless.tag
import shapeless.tag._
import spinoco.fs2.cassandra.CType.{Ascii, Type1}
import scala.concurrent.duration.FiniteDuration
case class SimpleTableRow(
intColumn: Int
, longColumn: Long
, stringColumn: String
, asciiColumn: String @@ Ascii
, floatColumn: Float
, doubleColumn: Double
, bigDecimalColumn: BigDecimal
, bigIntColumn: BigInt
, blobColumn: Chunk[Byte]
, uuidColumn: UUID
, timeUuidColumn: UUID @@ Type1
, durationColumn: FiniteDuration
, inetAddressColumn: InetAddress
, enumColumn: TestEnumeration.Value
)
object SimpleTableRow {
import com.datastax.driver.core.utils.UUIDs.timeBased
val simpleInstance = SimpleTableRow(
intColumn = 1
, longColumn = 2
, stringColumn = "varchar string"
, asciiColumn = tag[Ascii]("ascii string")
, floatColumn = 1.1f
, doubleColumn = 2.2d
, bigDecimalColumn = BigDecimal(0.3d)
, bigIntColumn = BigInt(3)
, blobColumn = Chunk.bytes(Array.emptyByteArray)
, uuidColumn = timeBased
, timeUuidColumn = tag[Type1](timeBased)
, durationColumn = FiniteDuration(1,"s")
, inetAddressColumn = InetAddress.getLocalHost
, enumColumn = TestEnumeration.One
)
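
// Illustrative note (not part of the original file): the shapeless @@ tags used above are
// compile-time only; tag[Ascii]("ascii string") is still a plain String at runtime, but its
// static type String @@ Ascii is what lets the schema layer choose the CQL ascii column type
// (and Type1 the timeuuid type) instead of the defaults.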
}
| Spinoco/fs2-cassandra | test/src/test/scala/spinoco/fs2/cassandra/sample/SimpleTableRow.scala | Scala | mit | 1,314 |
package io.udash.web.guide.views.frontend.demos
import io.udash._
import io.udash.bootstrap.utils.BootstrapStyles
import io.udash.bootstrap.form.UdashInputGroup
import io.udash.web.commons.views.Component
import io.udash.web.guide.styles.partials.GuideStyles
import scalatags.JsDom
class MultiSelectDemoComponent extends Component {
import JsDom.all._
sealed trait Fruit
case object Apple extends Fruit
case object Orange extends Fruit
case object Banana extends Fruit
val favoriteFruits: SeqProperty[Fruit] = SeqProperty[Fruit](Apple, Banana)
val favoriteFruitsStrings = favoriteFruits.transform(
(f: Fruit) => f.toString,
(s: String) => s match {
case "Apple" => Apple
case "Orange" => Orange
case "Banana" => Banana
}
)
override def getTemplate: Modifier = div(id := "multi-select-demo", GuideStyles.frame, GuideStyles.useBootstrap)(
form(BootstrapStyles.containerFluid)(
div(BootstrapStyles.Grid.row)(
div(
selector()
),
br(),
div(
selector()
)
)
)
)
def selector() =
UdashInputGroup()(
UdashInputGroup.prependText("Fruits:"),
UdashInputGroup.select(
Select(
favoriteFruitsStrings, Seq(Apple, Orange, Banana).map(_.toString).toSeqProperty
)(Select.defaultLabel, BootstrapStyles.Form.control).render
),
UdashInputGroup.appendText(span(cls := "multi-select-demo-fruits")(bind(favoriteFruits)))
).render
}
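
// Illustrative note (not part of the original demo): favoriteFruitsStrings is a two-way
// transformed view of favoriteFruits, so a change made through either property (for example
// the Select widget writing "Orange" into the string view) is converted and written back to
// the underlying SeqProperty[Fruit], and vice versa.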
| UdashFramework/udash-guide | guide/src/main/scala/io/udash/web/guide/views/frontend/demos/MultiSelectDemoComponent.scala | Scala | gpl-3.0 | 1,504 |
package skuber.json.ext
/**
* @author David O'Riordan
*
* Implicit JSON formatters for the extensions API objects
*/
import java.awt.font.ImageGraphicAttribute
import skuber._
import skuber.ext.Ingress.Backend
import skuber.ext._
import play.api.libs.json._
import play.api.libs.functional.syntax._
import skuber.json.format._ // reuse some core formatters
package object format {
// Scale formatters
implicit val scaleStatusFormat: Format[Scale.Status] = (
(JsPath \ "replicas").formatMaybeEmptyInt() and
(JsPath \ "selector").formatMaybeEmptyString() and
(JsPath \ "targetSelector").formatNullable[String]
)(Scale.Status.apply _, unlift(Scale.Status.unapply))
implicit val scaleSpecFormat: Format[Scale.Spec] = Json.format[Scale.Spec]
implicit val scaleFormat: Format[Scale] = Json.format[Scale]
// SubresourceReference formatter
implicit val subresFmt: Format[SubresourceReference] = (
(JsPath \ "kind").formatMaybeEmptyString() and
(JsPath \ "name").formatMaybeEmptyString() and
(JsPath \ "apiVersion").formatMaybeEmptyString() and
(JsPath \ "subresource").formatMaybeEmptyString()
)(SubresourceReference.apply _, unlift(SubresourceReference.unapply))
// HorizontalPodAutoscaler formatters
implicit val cpuTUFmt: Format[CPUTargetUtilization] = Json.format[CPUTargetUtilization]
implicit val hpasSpecFmt: Format[HorizontalPodAutoscaler.Spec] = Json.format[HorizontalPodAutoscaler.Spec]
implicit val hpasStatusFmt: Format[HorizontalPodAutoscaler.Status] = Json.format[HorizontalPodAutoscaler.Status]
implicit val hpasFmt: Format[HorizontalPodAutoscaler] = Json.format[HorizontalPodAutoscaler]
// DaemonSet formatters
implicit val daemonsetStatusFmt: Format[DaemonSet.Status] = Json.format[DaemonSet.Status]
implicit val daemonsetSpecFmt: Format[DaemonSet.Spec] = (
(JsPath \ "selector").formatNullableLabelSelector and
(JsPath \ "template").formatNullable[Pod.Template.Spec]
)(DaemonSet.Spec.apply, unlift(DaemonSet.Spec.unapply))
implicit lazy val daemonsetFmt: Format[DaemonSet] = (
objFormat and
(JsPath \ "spec").formatNullable[DaemonSet.Spec] and
(JsPath \ "status").formatNullable[DaemonSet.Status]
) (DaemonSet.apply _, unlift(DaemonSet.unapply))
// Deployment formatters
implicit val depStatusFmt: Format[Deployment.Status] = (
(JsPath \ "replicas").formatMaybeEmptyInt() and
(JsPath \ "updatedReplicas").formatMaybeEmptyInt() and
(JsPath \ "availableReplicas").formatMaybeEmptyInt() and
(JsPath \ "unavailableReplicas").formatMaybeEmptyInt() and
(JsPath \ "observedGeneration").formatMaybeEmptyInt()
)(Deployment.Status.apply _, unlift(Deployment.Status.unapply))
implicit val rollingUpdFmt: Format[Deployment.RollingUpdate] = (
(JsPath \ "maxUnavailable").formatMaybeEmptyIntOrString(Left(1)) and
(JsPath \ "maxSurge").formatMaybeEmptyIntOrString(Left(1))
)(Deployment.RollingUpdate.apply _, unlift(Deployment.RollingUpdate.unapply))
implicit val depStrategyFmt: Format[Deployment.Strategy] = (
(JsPath \ "type").formatEnum(Deployment.StrategyType, Some(Deployment.StrategyType.RollingUpdate)) and
(JsPath \ "rollingUpdate").formatNullable[Deployment.RollingUpdate]
)(Deployment.Strategy.apply _, unlift(Deployment.Strategy.unapply))
implicit val depSpecFmt: Format[Deployment.Spec] = (
(JsPath \ "replicas").formatNullable[Int] and
(JsPath \ "selector").formatNullableLabelSelector and
(JsPath \ "template").formatNullable[Pod.Template.Spec] and
(JsPath \ "strategy").formatNullable[Deployment.Strategy] and
(JsPath \ "minReadySeconds").formatMaybeEmptyInt()
)(Deployment.Spec.apply _, unlift(Deployment.Spec.unapply))
implicit lazy val depFormat: Format[Deployment] = (
objFormat and
(JsPath \ "spec").formatNullable[Deployment.Spec] and
(JsPath \ "status").formatNullable[Deployment.Status]
) (Deployment.apply _, unlift(Deployment.unapply))
implicit val replsetSpecFormat: Format[ReplicaSet.Spec] = (
(JsPath \ "replicas").formatNullable[Int] and
(JsPath \ "selector").formatNullableLabelSelector and
(JsPath \ "template").formatNullable[Pod.Template.Spec]
)(ReplicaSet.Spec.apply _, unlift(ReplicaSet.Spec.unapply))
implicit val replsetStatusFormat = Json.format[ReplicaSet.Status]
implicit lazy val replsetFormat: Format[ReplicaSet] = (
objFormat and
(JsPath \ "spec").formatNullable[ReplicaSet.Spec] and
(JsPath \ "status").formatNullable[ReplicaSet.Status]
) (ReplicaSet.apply _, unlift(ReplicaSet.unapply))
implicit val ingressBackendFmt: Format[Ingress.Backend] = Json.format[Ingress.Backend]
implicit val ingressPathFmt: Format[Ingress.Path] = (
(JsPath \ "path").formatMaybeEmptyString() and
(JsPath \ "backend").format[Backend]
) (Ingress.Path.apply _, unlift(Ingress.Path.unapply))
implicit val ingressHttpRuledFmt: Format[Ingress.HttpRule] = Json.format[Ingress.HttpRule]
implicit val ingressRuleFmt: Format[Ingress.Rule] = Json.format[Ingress.Rule]
implicit val ingressTLSFmt: Format[Ingress.TLS] = Json.format[Ingress.TLS]
implicit val ingressSpecFormat: Format[Ingress.Spec] = (
(JsPath \ "backend").formatNullable[Ingress.Backend] and
(JsPath \ "rules").formatMaybeEmptyList[Ingress.Rule] and
(JsPath \ "tls").formatMaybeEmptyList[Ingress.TLS]
)(Ingress.Spec.apply _, unlift(Ingress.Spec.unapply))
implicit val ingrlbingFormat: Format[Ingress.Status.LoadBalancer.Ingress] =
Json.format[Ingress.Status.LoadBalancer.Ingress]
implicit val ingrlbFormat: Format[Ingress.Status.LoadBalancer] =
Json.format[Ingress.Status.LoadBalancer]
implicit val ingressStatusFormat = Json.format[Ingress.Status]
implicit lazy val ingressFormat: Format[Ingress] = (
objFormat and
(JsPath \ "spec").formatNullable[Ingress.Spec] and
(JsPath \ "status").formatNullable[Ingress.Status]
) (Ingress.apply _, unlift(Ingress.unapply))
implicit val deplListFmt: Format[DeploymentList] = KListFormat[Deployment].apply(DeploymentList.apply _,unlift(DeploymentList.unapply))
implicit val daesetListFmt: Format[DaemonSetList] = KListFormat[DaemonSet].apply(DaemonSetList.apply _,unlift(DaemonSetList.unapply))
implicit val hpasListFmt: Format[HorizontalPodAutoscalerList] = KListFormat[HorizontalPodAutoscaler].apply(HorizontalPodAutoscalerList.apply _,unlift(HorizontalPodAutoscalerList.unapply))
implicit val replsetListFmt: Format[ReplicaSetList] = KListFormat[ReplicaSet].apply(ReplicaSetList.apply _,unlift(ReplicaSetList.unapply))
implicit val ingressListFmt: Format[IngressList] = KListFormat[Ingress].apply(IngressList.apply _,unlift(IngressList.unapply))
}
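
// Illustrative usage sketch (not part of the original file): with the implicit formats defined
// above in scope, extensions-group resources round-trip through Play JSON like the core skuber
// types. The helper below only shows the call shape.
object ExtFormatUsageSketch {
  import play.api.libs.json.Json
  import skuber.json.ext.format._
  def roundTrip(d: Deployment): Deployment =
    Json.toJson(d).as[Deployment]
}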
| coryfklein/skuber | client/src/main/scala/skuber/json/ext/format/package.scala | Scala | apache-2.0 | 6,782 |
package maastar.game
import maastar.agent.Agent
class State(
desc: String = "Default",
jointActionTransitions: Map[Map[Agent, Action], Transition] = null
) {
def getJointActionTransition(jointAction: Map[Agent, Action]): Transition = {
jointActionTransitions(jointAction)
}
override def toString(): String = {
"\"" + desc + "\""
}
}
| nwertzberger/maastar | src/main/scala/maastar/game/State.scala | Scala | apache-2.0 | 376 |
package com.owtelse
import org.scalacheck._
/**
* Created by IntelliJ IDEA.
* User: robertk
*/
object DefaultScalaCheckTest extends Properties("String") {
import Prop.forAll
val propReverseList = forAll { l: List[String] => l.reverse.reverse == l }
val propConcatString = forAll { (s1: String, s2: String) =>
(s1 + s2).endsWith(s2)
}
propReverseList.check
val x = propConcatString.check
}
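
// Illustrative note (not part of the original file): properties on a Properties("String")
// object are usually registered through `property`, e.g.
//   property("reverseList")  = forAll { l: List[String] => l.reverse.reverse == l }
//   property("concatString") = forAll { (a: String, b: String) => (a + b).endsWith(b) }
// so that ScalaCheck's runner can discover them; the explicit .check calls above simply run
// each property once when the object is initialized and print the result.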
| karlroberts/release-o-matic | src/test/scala/com/owtelse/DefaultScalaCheckTest.scala | Scala | bsd-3-clause | 419 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.physical.batch
import org.apache.flink.table.CalcitePair
import org.apache.flink.table.functions.UserDefinedFunction
import org.apache.flink.table.plan.cost.{FlinkCost, FlinkCostFactory}
import org.apache.flink.table.plan.nodes.physical.batch.OverWindowMode.OverWindowMode
import org.apache.flink.table.plan.util.RelExplainUtil
import org.apache.calcite.plan._
import org.apache.calcite.rel._
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.Window.Group
import org.apache.calcite.rel.core.{AggregateCall, Window}
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.rex.RexLiteral
import org.apache.calcite.sql.fun.SqlLeadLagAggFunction
import org.apache.calcite.tools.RelBuilder
import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer
/**
* Batch physical RelNode for sort-based over [[Window]].
*/
class BatchExecOverAggregate(
cluster: RelOptCluster,
relBuilder: RelBuilder,
traitSet: RelTraitSet,
inputRel: RelNode,
outputRowType: RelDataType,
inputRowType: RelDataType,
windowGroupToAggCallToAggFunction: Seq[
(Window.Group, Seq[(AggregateCall, UserDefinedFunction)])],
grouping: Array[Int],
orderKeyIndices: Array[Int],
orders: Array[Boolean],
nullIsLasts: Array[Boolean],
logicWindow: Window)
extends SingleRel(cluster, traitSet, inputRel)
with BatchPhysicalRel {
private lazy val modeToGroupToAggCallToAggFunction:
Seq[(OverWindowMode, Window.Group, Seq[(AggregateCall, UserDefinedFunction)])] =
splitOutOffsetOrInsensitiveGroup()
lazy val aggregateCalls: Seq[AggregateCall] =
windowGroupToAggCallToAggFunction.flatMap(_._2).map(_._1)
def getGrouping: Array[Int] = grouping
override def deriveRowType: RelDataType = outputRowType
override def copy(traitSet: RelTraitSet, inputs: java.util.List[RelNode]): RelNode = {
new BatchExecOverAggregate(
cluster,
relBuilder,
traitSet,
inputs.get(0),
outputRowType,
inputRowType,
windowGroupToAggCallToAggFunction,
grouping,
orderKeyIndices,
orders,
nullIsLasts,
logicWindow)
}
override def computeSelfCost(planner: RelOptPlanner, mq: RelMetadataQuery): RelOptCost = {
// sort is done in the last sort operator.
val inputRows = mq.getRowCount(getInput())
if (inputRows == null) {
return null
}
val cpu = FlinkCost.FUNC_CPU_COST * inputRows *
modeToGroupToAggCallToAggFunction.flatMap(_._3).size
val averageRowSize: Double = mq.getAverageRowSize(this)
val memCost = averageRowSize
val costFactory = planner.getCostFactory.asInstanceOf[FlinkCostFactory]
costFactory.makeCost(mq.getRowCount(this), cpu, 0, 0, memCost)
}
override def explainTerms(pw: RelWriter): RelWriter = {
val partitionKeys: Array[Int] = grouping
val groups = modeToGroupToAggCallToAggFunction.map(_._2)
val constants: Seq[RexLiteral] = logicWindow.constants
val writer = super.explainTerms(pw)
.itemIf("partitionBy", RelExplainUtil.fieldToString(partitionKeys, inputRowType),
partitionKeys.nonEmpty)
.itemIf("orderBy",
RelExplainUtil.collationToString(groups.head.orderKeys, inputRowType),
orderKeyIndices.nonEmpty)
var offset = inputRowType.getFieldCount
groups.zipWithIndex.foreach { case (group, index) =>
val namedAggregates = generateNamedAggregates(group)
val select = RelExplainUtil.overAggregationToString(
inputRowType,
outputRowType,
constants,
namedAggregates,
outputInputName = false,
rowTypeOffset = offset)
offset += namedAggregates.size
val windowRange = RelExplainUtil.windowRangeToString(logicWindow, group)
writer.item("window#" + index, select + windowRange)
}
writer.item("select", getRowType.getFieldNames.mkString(", "))
}
private def generateNamedAggregates(
groupWindow: Group): Seq[CalcitePair[AggregateCall, String]] = {
val aggregateCalls = groupWindow.getAggregateCalls(logicWindow)
for (i <- 0 until aggregateCalls.size())
yield new CalcitePair[AggregateCall, String](aggregateCalls.get(i), "windowAgg$" + i)
}
private[flink] def splitOutOffsetOrInsensitiveGroup()
: Seq[(OverWindowMode, Window.Group, Seq[(AggregateCall, UserDefinedFunction)])] = {
def compareTo(o1: Window.RexWinAggCall, o2: Window.RexWinAggCall): Boolean = {
val allowsFraming1 = o1.getOperator.allowsFraming
val allowsFraming2 = o2.getOperator.allowsFraming
if (!allowsFraming1 && !allowsFraming2) {
o1.getOperator.getClass == o2.getOperator.getClass
} else {
allowsFraming1 == allowsFraming2
}
}
def inferGroupMode(group: Window.Group): OverWindowMode = {
val aggCall = group.aggCalls(0)
if (aggCall.getOperator.allowsFraming()) {
if (group.isRows) OverWindowMode.Row else OverWindowMode.Range
} else {
if (aggCall.getOperator.isInstanceOf[SqlLeadLagAggFunction]) {
OverWindowMode.Offset
} else {
OverWindowMode.Insensitive
}
}
}
def createNewGroup(
group: Window.Group,
aggCallsBuffer: Seq[(Window.RexWinAggCall, (AggregateCall, UserDefinedFunction))])
: (OverWindowMode, Window.Group, Seq[(AggregateCall, UserDefinedFunction)]) = {
val newGroup = new Window.Group(
group.keys,
group.isRows,
group.lowerBound,
group.upperBound,
group.orderKeys,
aggCallsBuffer.map(_._1))
val mode = inferGroupMode(newGroup)
(mode, group, aggCallsBuffer.map(_._2))
}
val windowGroupInfo =
ArrayBuffer[(OverWindowMode, Window.Group, Seq[(AggregateCall, UserDefinedFunction)])]()
windowGroupToAggCallToAggFunction.foreach { case (group, aggCallToAggFunction) =>
var lastAggCall: Window.RexWinAggCall = null
val aggCallsBuffer =
ArrayBuffer[(Window.RexWinAggCall, (AggregateCall, UserDefinedFunction))]()
group.aggCalls.zip(aggCallToAggFunction).foreach { case (aggCall, aggFunction) =>
if (lastAggCall != null && !compareTo(lastAggCall, aggCall)) {
windowGroupInfo.add(createNewGroup(group, aggCallsBuffer))
aggCallsBuffer.clear()
}
aggCallsBuffer.add((aggCall, aggFunction))
lastAggCall = aggCall
}
if (aggCallsBuffer.nonEmpty) {
windowGroupInfo.add(createNewGroup(group, aggCallsBuffer))
aggCallsBuffer.clear()
}
}
windowGroupInfo
}
}
object OverWindowMode extends Enumeration {
type OverWindowMode = Value
val Row: OverWindowMode = Value
val Range: OverWindowMode = Value
// A special kind of window, used when the aggregate function is LEAD or LAG.
val Offset: OverWindowMode = Value
val Insensitive: OverWindowMode = Value
}
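
// Illustrative note (not part of the original file): splitOutOffsetOrInsensitiveGroup above
// uses these modes to break one logical over-window group into homogeneous physical groups:
// framing aggregates (e.g. SUM) become Row or Range groups depending on group.isRows,
// LEAD/LAG calls become Offset groups, and other non-framing aggregates (e.g. RANK) become
// Insensitive groups, so that each group can be executed by a suitable physical operator.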
| ueshin/apache-flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/nodes/physical/batch/BatchExecOverAggregate.scala | Scala | apache-2.0 | 7,751 |
package com.github.lomeo.future
import scala.concurrent.Future
import cats.data.OptionT
import cats.instances.FutureInstances
import org.scalatest._
class FutureOptionTest extends AsyncFlatSpec with Matchers with FutureInstances {
import implicits._
behavior of "FutureOption.getOrElse"
it should "return value wrapped with Some" in {
Future(Option(42)).getOrElse(0).map(n => assert(n === 42))
}
it should "return default value if it evaluates to None" in {
Future(Option.empty[Int]).getOrElse(0).map(n => assert(n === 0))
}
it should "fail if original future is failed" in {
recoverToSucceededIf[IllegalStateException] {
Future[Option[Int]](throw new IllegalStateException).getOrElse(0)
}
}
behavior of "FutureOption.orElse"
it should "return value wrapped with Some" in {
Future(Option(42)).orElse(Future(0)).map(n => assert(n === 42))
}
it should "return default value if it evaluates to None" in {
Future(Option.empty[Int]).orElse(Future(0)).map(n => assert(n === 0))
}
it should "fail if original future is failed" in {
recoverToSucceededIf[IllegalStateException] {
Future[Option[Int]](throw new IllegalStateException).orElse(Future(0))
}
}
behavior of "OptionT"
it should "return value wrapped with Some" in {
OptionT(Future(Option(42))).getOrElse(0).map(n => assert(n === 42))
OptionT(Future(Option(42))).getOrElseF(Future(0)).map(n => assert(n === 42))
}
it should "return default value if it evaluates to None" in {
OptionT(Future(Option.empty[Int])).getOrElse(0).map(n => assert(n === 0))
OptionT(Future(Option.empty[Int])).getOrElseF(Future(0)).map(n => assert(n === 0))
}
it should "fail if original future is failed" in {
recoverToSucceededIf[IllegalStateException] {
OptionT(Future[Option[Int]](throw new IllegalStateException)).getOrElse(0)
}
recoverToSucceededIf[IllegalStateException] {
OptionT(Future[Option[Int]](throw new IllegalStateException)).getOrElseF(Future(0))
}
}
behavior of "MyOptionT"
implicit val futureMonad = new MyMonad[Future] {
def pure[A](x: A): Future[A] =
Future.successful(x)
def map[A, B](m: Future[A])(f: A => B): Future[B] =
m.map(f)
def flatMap[A, B](m: Future[A])(f: A => Future[B]): Future[B] =
m.flatMap(f)
}
it should "map value as usual" in {
new MyOptionT(Future(Option(42))).map(_ / 2).getOrElse(0).map(n => assert(n === 21))
new MyOptionT(Future(Option.empty[Int])).map(_ / 2).getOrElse(0).map(n => assert(n === 0))
}
it should "flatMap value as usual" in {
new MyOptionT(Future(Option(42))).flatMap(x => new MyOptionT(Future(Option(x / 2)))).getOrElse(0).map(n => assert(n === 21))
new MyOptionT(Future(Option.empty[Int])).flatMap(x => new MyOptionT(Future(Option(x / 2)))).getOrElse(0).map(n => assert(n === 0))
}
it should "return value wrapped with Some" in {
new MyOptionT(Future(Option(42))).getOrElse(0).map(n => assert(n === 42))
new MyOptionT(Future(Option(42))).orElse(Future(0)).map(n => assert(n === 42))
}
it should "return default value if it evaluates to None" in {
new MyOptionT(Future(Option.empty[Int])).getOrElse(0).map(n => assert(n === 0))
new MyOptionT(Future(Option.empty[Int])).orElse(Future(0)).map(n => assert(n === 0))
}
it should "fail if original future is failed" in {
recoverToSucceededIf[IllegalStateException] {
new MyOptionT(Future[Option[Int]](throw new IllegalStateException)).getOrElse(0)
}
recoverToSucceededIf[IllegalStateException] {
new MyOptionT(Future[Option[Int]](throw new IllegalStateException)).orElse(Future(0))
}
}
}
| lomeo/futures-examples | src/test/scala/com/github/lomeo/future/FutureOptionTest.scala | Scala | mit | 3,941 |
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3
package scalapb.docs.person
object PersonProto extends _root_.scalapb.GeneratedFileObject {
lazy val dependencies: Seq[_root_.scalapb.GeneratedFileObject] = Seq.empty
lazy val messagesCompanions
: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] =
Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]](
scalapb.docs.person.Person
)
private lazy val ProtoBytes: _root_.scala.Array[Byte] =
scalapb.Encoding.fromBase64(
scala.collection.immutable
.Seq(
"""CgxwZXJzb24ucHJvdG8SDHNjYWxhcGIuZG9jcyLvAgoGUGVyc29uEh0KBG5hbWUYASABKAlCCeI/BhIEbmFtZVIEbmFtZRIaC
gNhZ2UYAiABKAVCCOI/BRIDYWdlUgNhZ2USSgoJYWRkcmVzc2VzGAMgAygLMhwuc2NhbGFwYi5kb2NzLlBlcnNvbi5BZGRyZXNzQ
g7iPwsSCWFkZHJlc3Nlc1IJYWRkcmVzc2VzGqQBCgdBZGRyZXNzElUKDGFkZHJlc3NfdHlwZRgBIAEoDjIgLnNjYWxhcGIuZG9jc
y5QZXJzb24uQWRkcmVzc1R5cGVCEOI/DRILYWRkcmVzc1R5cGVSC2FkZHJlc3NUeXBlEiMKBnN0cmVldBgCIAEoCUIL4j8IEgZzd
HJlZXRSBnN0cmVldBIdCgRjaXR5GAMgASgJQgniPwYSBGNpdHlSBGNpdHkiNwoLQWRkcmVzc1R5cGUSEwoESE9NRRAAGgniPwYSB
EhPTUUSEwoEV09SSxABGgniPwYSBFdPUktiBnByb3RvMw=="""
)
.mkString
)
lazy val scalaDescriptor: _root_.scalapb.descriptors.FileDescriptor = {
val scalaProto = com.google.protobuf.descriptor.FileDescriptorProto.parseFrom(ProtoBytes)
_root_.scalapb.descriptors.FileDescriptor
.buildFrom(scalaProto, dependencies.map(_.scalaDescriptor))
}
lazy val javaDescriptor: com.google.protobuf.Descriptors.FileDescriptor = {
val javaProto = com.google.protobuf.DescriptorProtos.FileDescriptorProto.parseFrom(ProtoBytes)
com.google.protobuf.Descriptors.FileDescriptor.buildFrom(
javaProto,
_root_.scala.Array(
)
)
}
@deprecated(
"Use javaDescriptor instead. In a future version this will refer to scalaDescriptor.",
"ScalaPB 0.5.47"
)
def descriptor: com.google.protobuf.Descriptors.FileDescriptor = javaDescriptor
}
| scalapb/ScalaPB | docs/src/main/scala/generated/scalapb/docs/person/PersonProto.scala | Scala | apache-2.0 | 2,084 |
package com.twitter.finagle.serverset2
import com.twitter.conversions.time._
import com.twitter.finagle.serverset2.client._
import com.twitter.finagle.service.Backoff
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.finagle.zookeeper.ZkInstance
import com.twitter.io.Buf
import com.twitter.util._
import org.junit.runner.RunWith
import org.scalatest.concurrent.Eventually._
import org.scalatest.junit.JUnitRunner
import org.scalatest.time._
import org.scalatest.{BeforeAndAfter, FunSuite}
@RunWith(classOf[JUnitRunner])
class ZkSessionEndToEndTest extends FunSuite with BeforeAndAfter {
val zkTimeout = 100.milliseconds
val retryStream = new RetryStream(Backoff.const(zkTimeout))
@volatile var inst: ZkInstance = _
def toSpan(d: Duration): Span = Span(d.inNanoseconds, Nanoseconds)
implicit val patienceConfig =
PatienceConfig(timeout = toSpan(1.second), interval = toSpan(zkTimeout))
/* This can be useful if you want to retain ZK logging output for debugging.
val app = new org.apache.log4j.ConsoleAppender
app.setTarget(org.apache.log4j.ConsoleAppender.SYSTEM_ERR)
app.setLayout(new org.apache.log4j.SimpleLayout)
app.activateOptions()
org.apache.log4j.Logger.getRootLogger().addAppender(app)
*/
before {
inst = new ZkInstance
inst.start()
}
after {
inst.stop()
}
// COORD-339
if (!sys.props.contains("SKIP_FLAKY")) test("Session expiration 2") {
implicit val timer = new MockTimer
val connected: (WatchState => Boolean) = {
case WatchState.SessionState(SessionState.SyncConnected) => true
case _ => false
}
val notConnected: (WatchState => Boolean) = w => !connected(w)
val session1 = ZkSession.retrying(
retryStream,
() => ZkSession(retryStream, inst.zookeeperConnectString, statsReceiver = NullStatsReceiver)
)
@volatile var states = Seq.empty[SessionState]
val state = session1 flatMap { session1 =>
session1.state
}
state.changes.register(Witness({ ws: WatchState =>
ws match {
case WatchState.SessionState(s) => states = s +: states
case _ =>
}
}))
Await.result(state.changes.filter(connected).toFuture())
val cond = state.changes.filter(notConnected).toFuture()
val session2 = {
val z = Var.sample(session1)
val p = new Array[Byte](z.sessionPasswd.length)
z.sessionPasswd.write(p, 0)
ClientBuilder()
.hosts(inst.zookeeperConnectString)
.sessionTimeout(zkTimeout)
.sessionId(z.sessionId)
.password(Buf.ByteArray.Owned(p))
.reader()
}
Await.result(session2.state.changes.filter(connected).toFuture())
session2.value.close()
Await.result(cond)
Await.result(state.changes.filter(connected).toFuture())
assert(
states == Seq(
SessionState.SyncConnected,
SessionState.Expired,
SessionState.Disconnected,
SessionState.SyncConnected
)
)
}
// COORD-339
if (!sys.props.contains("SKIP_FLAKY")) test("ZkSession.retrying") {
implicit val timer = new MockTimer
val watch = Stopwatch.start()
val varZkSession = ZkSession.retrying(
retryStream,
() => ZkSession(retryStream, inst.zookeeperConnectString, statsReceiver = NullStatsReceiver)
)
val varZkState = varZkSession flatMap { _.state }
@volatile var zkStates = Seq[(SessionState, Duration)]()
varZkState.changes.register(Witness({ ws: WatchState =>
ws match {
case WatchState.SessionState(state) =>
zkStates = (state, watch()) +: zkStates
case _ =>
}
}))
@volatile var sessions = Seq[ZkSession]()
varZkSession.changes.register(Witness({ s: ZkSession =>
sessions = s +: sessions
}))
// Wait for the initial connect.
eventually {
assert(
Var.sample(varZkState) ==
WatchState.SessionState(SessionState.SyncConnected)
)
assert(sessions.size == 1)
}
val session1 = Var.sample(varZkSession)
// Hijack the session by reusing its id and password.
val session2 = {
val p = new Array[Byte](session1.sessionPasswd.length)
session1.sessionPasswd.write(p, 0)
ClientBuilder()
.hosts(inst.zookeeperConnectString)
.sessionTimeout(zkTimeout)
.sessionId(session1.sessionId)
.password(Buf.ByteArray.Owned(p))
.reader()
}
val connected = new Promise[Unit]
val closed = new Promise[Unit]
session2.state.changes.register(Witness({ ws: WatchState =>
ws match {
case WatchState.SessionState(SessionState.SyncConnected) =>
connected.setDone(); ()
case WatchState.SessionState(SessionState.Disconnected) =>
closed.setDone(); ()
case _ => ()
}
}))
Await.ready(connected)
Await.ready(session2.value.close())
// This will expire the session.
val session1Expired =
session1.state.changes.filter(_ == WatchState.SessionState(SessionState.Expired)).toFuture()
val zkConnected =
varZkState.changes.filter(_ == WatchState.SessionState(SessionState.SyncConnected)).toFuture()
Await.ready(session1.getData("/sadfads"))
Await.ready(session1Expired)
Await.ready(zkConnected)
eventually {
assert(
(zkStates map { case (s, _) => s }).reverse ==
Seq(
SessionState.SyncConnected,
SessionState.Disconnected,
SessionState.Expired,
SessionState.SyncConnected
)
)
}
assert(sessions.size == 2)
}
}
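
// Illustrative note (not part of the original file): both tests force session expiration by
// opening a second ZooKeeper client with the first client's session id and password and then
// closing it; closing that hijacked session invalidates the original one, which is what drives
// the Expired -> SyncConnected state transitions asserted above.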
| mkhq/finagle | finagle-serversets/src/test/scala/com/twitter/finagle/serverset2/ZkSessionEndToEndTest.scala | Scala | apache-2.0 | 5,596 |
/* - Coeus web framework -------------------------
*
* Licensed under the Apache License, Version 2.0.
*
* Author: Spiros Tzavellas
*/
package com.tzavellas.coeus.mvc
import java.util.Locale
import org.junit.Test
import org.junit.Assert._
import org.springframework.mock.web.{ MockHttpServletRequest, MockServletContext }
import com.tzavellas.coeus.bind.ConverterRegistry
import com.tzavellas.coeus.i18n.locale.FixedLocaleResolver
import com.tzavellas.coeus.test.Assertions._
import scope.{ ApplicationScope, AbstractScopedContainerTest }
class WebRequestTest extends AbstractScopedContainerTest {
val mockContext = new MockServletContext
val mock = new MockHttpServletRequest("GET", "/index")
val request = new WebRequest(application = new ApplicationScope(mockContext),
servletRequest = mock,
pathContext = null,
localeResolver = new FixedLocaleResolver(Locale.US),
converters = ConverterRegistry.defaultConverters,
messages = null)
val attributes = request
def setAttributeToMock(name: String, value: Any) {
mock.setAttribute(name, value)
}
@Test
def no_sesion_is_created_if_it_does_not_exist() {
assertEquals(None, request.existingSession)
}
@Test
def create_and_get_an_existing_session() {
val session = request.session
assertNotNull(session)
assertSame(session, request.existingSession.get)
}
@Test
def reseting_session_creates_a_new_and_invalidates_the_old() {
val oldSession = request.session
oldSession("attr") = "some value"
val newSession = request.resetSession()
assertNotSame(oldSession, newSession)
assertSame(newSession, request.session)
assertTrue(oldSession.attributes.isEmpty)
}
@Test
def getting_the_application_scope_does_not_create_a_session() {
assertEquals(None, request.existingSession)
assertNotNull(request.application)
assertEquals(None, request.existingSession)
}
@Test
def get_the_http_method() {
assertEquals("GET", request.method)
}
@Test
def a_request_is_not_ajax_when_X_Requested_With_is_missing() {
assertFalse(request.isAjax)
}
@Test
def a_request_is_not_ajax_when_X_Requested_With_is_not_XMLHttpRequest() {
mock.addHeader("X-Requested-With", "a browser")
assertFalse(request.isAjax)
}
@Test
def a_request_is_ajax_when_X_Requested_With_is_XMLHttpRequest() {
mock.addHeader("X-Requested-With", "XMLHttpRequest")
assertTrue(request.isAjax)
}
@Test
def is_modified() {
assertTrue(request.isModifiedSince(1))
mock.addHeader("If-Modified-Since", new java.util.Date(0))
assertTrue(request.isModifiedSince(System.currentTimeMillis))
}
@Test
def is_not_modified() {
mock.addHeader("If-Modified-Since", new java.util.Date(1000))
assertFalse(request.isModifiedSince(1000))
assertFalse(request.isModifiedSince(500))
}
@Test
def etag_matches() {
assertFalse(request.etagMatches("12345"))
mock.addHeader("If-None-Match", "12345")
assertTrue(request.etagMatches("12345"))
assertFalse(request.etagMatches(""))
}
@Test
def is_multipart_only_when_method_is_post() {
mock.setContentType("multipart/form-data")
mock.setMethod("POST")
assertTrue(request.isMultipart)
mock.setMethod("GET")
assertFalse(request.isMultipart)
mock.setMethod("DELETE")
assertFalse(request.isMultipart)
}
@Test
def is_multipart_when_has_mutipart_content_type() {
mock.setMethod("POST")
assertFalse(request.isMultipart)
mock.setContentType("multipart/form-data")
assertTrue(request.isMultipart)
mock.setContentType("multipart/mixed")
assertTrue(request.isMultipart)
mock.setContentType("application/xml")
assertFalse(request.isMultipart)
}
@Test
def no_files_available_when_no_multipart() {
assertFalse(request.isMultipart)
assertNone(request.files("form-file"))
}
}
| sptz45/coeus | src/test/scala/com/tzavellas/coeus/mvc/WebRequestTest.scala | Scala | apache-2.0 | 4,102 |
package object pkgTest {
}
package pkgTest {
@placebo
class Z
}
| scalamacros/paradise | tests/src/test/scala/annotations/run/PackagePackageObject.scala | Scala | bsd-3-clause | 69 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.security
import java.lang.reflect.UndeclaredThrowableException
import java.security.PrivilegedExceptionAction
import scala.util.control.NonFatal
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.ql.metadata.Hive
import org.apache.hadoop.io.Text
import org.apache.hadoop.security.{Credentials, UserGroupInformation}
import org.apache.hadoop.security.token.Token
import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.KEYTAB
import org.apache.spark.security.HadoopDelegationTokenProvider
import org.apache.spark.util.Utils
private[spark] class HiveDelegationTokenProvider
extends HadoopDelegationTokenProvider with Logging {
override def serviceName: String = "hive"
private val classNotFoundErrorStr = s"You are attempting to use the " +
s"${getClass.getCanonicalName}, but your Spark distribution is not built with Hive libraries."
private def hiveConf(hadoopConf: Configuration): Configuration = {
try {
new HiveConf(hadoopConf, classOf[HiveConf])
} catch {
case NonFatal(e) =>
logWarning("Fail to create Hive Configuration", e)
hadoopConf
case e: NoClassDefFoundError =>
logWarning(classNotFoundErrorStr)
hadoopConf
}
}
override def delegationTokensRequired(
sparkConf: SparkConf,
hadoopConf: Configuration): Boolean = {
// Delegation tokens are needed only when:
// - trying to connect to a secure metastore
// - either deploying in cluster mode without a keytab, or impersonating another user
//
// Other modes (such as client with or without keytab, or cluster mode with keytab) do not need
// a delegation token, since there's a valid kerberos TGT for the right user available to the
// driver, which is the only process that connects to the HMS.
//
// Note that this means Hive tokens are not re-created periodically by the token manager.
// This is because HMS connections are only performed by the Spark driver, and the driver
// either has a TGT, in which case it does not need tokens, or it has a token created
// elsewhere, in which case it cannot create new ones. The check for an existing token avoids
// printing an exception to the logs in the latter case.
val currentToken = UserGroupInformation.getCurrentUser().getCredentials().getToken(tokenAlias)
currentToken == null && UserGroupInformation.isSecurityEnabled &&
hiveConf(hadoopConf).getTrimmed("hive.metastore.uris", "").nonEmpty &&
(SparkHadoopUtil.get.isProxyUser(UserGroupInformation.getCurrentUser()) ||
(!Utils.isClientMode(sparkConf) && !sparkConf.contains(KEYTAB)))
}
override def obtainDelegationTokens(
hadoopConf: Configuration,
sparkConf: SparkConf,
creds: Credentials): Option[Long] = {
try {
val conf = hiveConf(hadoopConf)
val principalKey = "hive.metastore.kerberos.principal"
val principal = conf.getTrimmed(principalKey, "")
require(principal.nonEmpty, s"Hive principal $principalKey undefined")
val metastoreUri = conf.getTrimmed("hive.metastore.uris", "")
require(metastoreUri.nonEmpty, "Hive metastore uri undefined")
val currentUser = UserGroupInformation.getCurrentUser()
logDebug(s"Getting Hive delegation token for ${currentUser.getUserName()} against " +
s"$principal at $metastoreUri")
doAsRealUser {
val hive = Hive.get(conf, classOf[HiveConf])
val tokenStr = hive.getDelegationToken(currentUser.getUserName(), principal)
val hive2Token = new Token[DelegationTokenIdentifier]()
hive2Token.decodeFromUrlString(tokenStr)
logDebug(s"Get Token from hive metastore: ${hive2Token.toString}")
creds.addToken(tokenAlias, hive2Token)
}
None
} catch {
case NonFatal(e) =>
logWarning(s"Failed to get token from service $serviceName", e)
None
case e: NoClassDefFoundError =>
logWarning(classNotFoundErrorStr)
None
} finally {
Utils.tryLogNonFatalError {
Hive.closeCurrent()
}
}
}
/**
* Run some code as the real logged in user (which may differ from the current user, for
* example, when using proxying).
*/
private def doAsRealUser[T](fn: => T): T = {
val currentUser = UserGroupInformation.getCurrentUser()
val realUser = Option(currentUser.getRealUser()).getOrElse(currentUser)
// For some reason the Scala-generated anonymous class ends up causing an
// UndeclaredThrowableException, even if you annotate the method with @throws.
try {
realUser.doAs(new PrivilegedExceptionAction[T]() {
override def run(): T = fn
})
} catch {
case e: UndeclaredThrowableException => throw Option(e.getCause()).getOrElse(e)
}
}
private def tokenAlias: Text = new Text("hive.server2.delegation.token")
}
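
// Minimal usage sketch (not part of the original file); in a real deployment the provider is
// discovered and driven by Spark's delegation token manager rather than constructed directly.
object HiveDelegationTokenProviderSketch {
  def fetchIfNeeded(sparkConf: SparkConf, hadoopConf: Configuration): Credentials = {
    val creds = new Credentials()
    val provider = new HiveDelegationTokenProvider
    // Only attempt to obtain a token when a secure metastore is configured and no token exists yet.
    if (provider.delegationTokensRequired(sparkConf, hadoopConf)) {
      provider.obtainDelegationTokens(hadoopConf, sparkConf, creds)
    }
    creds
  }
}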
| caneGuy/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/security/HiveDelegationTokenProvider.scala | Scala | apache-2.0 | 5,957 |
package org.scalawiki.wlx
import org.scalawiki.dto.markup.Table
import org.scalawiki.util.TestUtils._
import org.scalawiki.wlx.dto._
import org.scalawiki.wlx.dto.lists.ListConfig
import org.specs2.mutable.Specification
class CountryListParserSpec extends Specification {
"category name parser" should {
"parse WLE" in {
CountryParser.fromCategoryName("Category:Images from Wiki Loves Earth 2015") === Some(
Contest(ContestType.WLE, NoAdmDivision, 2015)
)
}
"parse WLM" in {
CountryParser.fromCategoryName("Category:Images from Wiki Loves Monuments 2015") === Some(
Contest(ContestType.WLM, NoAdmDivision, 2015)
)
}
"parse WLE country" in {
CountryParser.fromCategoryName("Category:Images from Wiki Loves Earth 2015 in Algeria")
.map(c => c.copy(country = c.country.withoutLangCodes)) === Some(
Contest(ContestType.WLE, Country("DZ", "Algeria"), 2015)
)
}
"parse WLM country" in {
CountryParser.fromCategoryName("Category:Images from Wiki Loves Monuments 2015 in Algeria")
.map(c => c.copy(country = c.country.withoutLangCodes)) === Some(
Contest(ContestType.WLM, Country("DZ", "Algeria"), 2015)
)
}
"parse WLE UA" in {
CountryParser.fromCategoryName("Category:Images from Wiki Loves Earth 2015 in Ukraine") === Some(
Contest(ContestType.WLE, Country.Ukraine, 2015,
uploadConfigs = Seq(
UploadConfig(
campaign = "wle-ua",
listTemplate = "ВЛЗ-рядок",
fileTemplate = "UkrainianNaturalHeritageSite",
listConfig = ListConfig.WleUa
)))
)
}
"parse WLM UA" in {
CountryParser.fromCategoryName("Category:Images from Wiki Loves Monuments 2015 in Ukraine") === Some(
Contest(ContestType.WLM, Country.Ukraine, 2015,
uploadConfigs = Seq(
UploadConfig(
campaign = "wlm-ua",
listTemplate = "ВЛП-рядок",
fileTemplate = "Monument Ukraine",
listConfig = ListConfig.WlmUa
)))
)
}
}
"table parser" should {
"not parse no table" in {
CountryParser.parse("nothing useful") === Nil
}
"not parse bad table" in {
val wiki = new Table(Seq("Pigs", "Dogs"), Seq(Seq("1", "2"))).asWiki
CountryParser.parse(wiki) === Nil
}
"parse wle 2016" in {
val wiki = resourceAsString("/org/scalawiki/wlx/wle_2016_participating.wiki")
val contests = CountryParser.parse(wiki)
val countries = contests.map(_.country.withoutLangCodes)
contests.map(_.contestType).toSet == Set(ContestType.WLE)
contests.map(_.year).toSet == Set(2016)
countries === Seq(
Country("DZ", "Algeria"),
Country("AU", "Australia"),
Country("AT", "Austria"),
Country("AZ", "Azerbaijan"),
Country("BR", "Brazil"),
Country("BG", "Bulgaria"),
Country("FR", "France"),
Country("DE", "Germany"),
Country("GR", "Greece"),
Country("IQ", "Iraq"),
Country("MD", "Moldova"),
Country("MA", "Morocco"),
Country("NP", "Nepal"),
Country("PK", "Pakistan"),
Country("RU", "Russia"),
Country("RS", "Serbia"),
Country("ES", "Spain"),
Country("CH", "Switzerland"),
Country("TH", "Thailand"),
Country("TN", "Tunisia"),
Country.Ukraine.withoutLangCodes
)
}
"parse wlm 2016" in {
val wiki = resourceAsString("/org/scalawiki/wlx/wlm_2016_participating.wiki")
val contests = CountryParser.parse(wiki)
val countries = contests.map(_.country.withoutLangCodes)
contests.map(_.contestType).toSet == Set(ContestType.WLM)
contests.map(_.year).toSet == Set(2016)
countries === Seq(
Country("DZ", "Algeria"),
Country("", "Andorra and Catalan Areas"),
Country("AZ", "Azerbaijan"),
Country("BD", "Bangladesh"),
Country("", "Belgium & Luxembourg"),
Country("BR", "Brazil"),
Country("BG", "Bulgaria"),
Country("CM", "Cameroon"),
Country("GR", "Greece"),
Country("EG", "Egypt"),
Country("FR", "France"),
Country("GE", "Georgia"),
Country("GH", "Ghana"),
Country("IR", "Iran"),
Country("IE", "Ireland"),
Country("IL", "Israel"),
Country("IT", "Italy"),
Country("LV", "Latvia"),
Country("MY", "Malaysia"),
Country("NP", "Nepal"),
Country("NG", "Nigeria"),
Country("PK", "Pakistan"),
Country("PA", "Panama"),
Country("PE", "Peru"),
Country("RU", "Russia"),
Country("RS", "Serbia"),
Country("SK", "Slovakia"),
Country("KR", "South Korea"),
Country("ES", "Spain"),
Country("SE", "Sweden"),
Country("TH", "Thailand"),
Country("TN", "Tunisia"),
Country.Ukraine.withoutLangCodes,
Country("", "the United Kingdom"),
Country("", "Esperantujo")
)
}
}
}
| intracer/scalawiki | scalawiki-wlx/src/test/scala/org/scalawiki/wlx/CountryListParserSpec.scala | Scala | apache-2.0 | 5,127 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.sclearn.dataset.spark.sql.types
import io.github.sclearn.dataset.spark.annotation.InterfaceStability
/**
* The data type representing `NULL` values. Please use the singleton `DataTypes.NullType`.
*
* @since 1.3.0
*/
@InterfaceStability.Stable
class NullType private() extends DataType {
// The companion object and this class is separated so the companion object also subclasses
// this type. Otherwise, the companion object would be of type "NullType$" in byte code.
// Defined with a private constructor so the companion object is the only possible instantiation.
override def defaultSize: Int = 1
private[spark] override def asNullable: NullType = this
}
/**
* @since 1.3.0
*/
@InterfaceStability.Stable
case object NullType extends NullType
| sclearn/sclearn | sc/src/main/scala/io/github/sclearn/dataset/spark/sql/types/NullType.scala | Scala | apache-2.0 | 1,586 |
package app
import util.{LockUtil, CollaboratorsAuthenticator, JGitUtil, ReferrerAuthenticator, Notifier, Keys}
import util.Directory._
import util.Implicits._
import util.ControlUtil._
import service._
import org.eclipse.jgit.api.Git
import jp.sf.amateras.scalatra.forms._
import org.eclipse.jgit.transport.RefSpec
import scala.collection.JavaConverters._
import org.eclipse.jgit.lib.{ObjectId, CommitBuilder, PersonIdent}
import service.IssuesService._
import service.PullRequestService._
import util.JGitUtil.DiffInfo
import service.RepositoryService.RepositoryTreeNode
import util.JGitUtil.CommitInfo
import org.slf4j.LoggerFactory
import org.eclipse.jgit.merge.MergeStrategy
import org.eclipse.jgit.errors.NoMergeBaseException
class PullRequestsController extends PullRequestsControllerBase
with RepositoryService with AccountService with IssuesService with PullRequestService with MilestonesService with ActivityService
with ReferrerAuthenticator with CollaboratorsAuthenticator
trait PullRequestsControllerBase extends ControllerBase {
self: RepositoryService with AccountService with IssuesService with MilestonesService with ActivityService with PullRequestService
with ReferrerAuthenticator with CollaboratorsAuthenticator =>
private val logger = LoggerFactory.getLogger(classOf[PullRequestsControllerBase])
val pullRequestForm = mapping(
"title" -> trim(label("Title" , text(required, maxlength(100)))),
"content" -> trim(label("Content", optional(text()))),
"targetUserName" -> trim(text(required, maxlength(100))),
"targetBranch" -> trim(text(required, maxlength(100))),
"requestUserName" -> trim(text(required, maxlength(100))),
"requestBranch" -> trim(text(required, maxlength(100))),
"commitIdFrom" -> trim(text(required, maxlength(40))),
"commitIdTo" -> trim(text(required, maxlength(40)))
)(PullRequestForm.apply)
val mergeForm = mapping(
"message" -> trim(label("Message", text(required)))
)(MergeForm.apply)
case class PullRequestForm(
title: String,
content: Option[String],
targetUserName: String,
targetBranch: String,
requestUserName: String,
requestBranch: String,
commitIdFrom: String,
commitIdTo: String)
case class MergeForm(message: String)
get("/:owner/:repository/pulls")(referrersOnly { repository =>
searchPullRequests(None, repository)
})
get("/:owner/:repository/pulls/:userName")(referrersOnly { repository =>
searchPullRequests(Some(params("userName")), repository)
})
get("/:owner/:repository/pull/:id")(referrersOnly { repository =>
params("id").toIntOpt.flatMap{ issueId =>
val owner = repository.owner
val name = repository.name
getPullRequest(owner, name, issueId) map { case(issue, pullreq) =>
using(Git.open(getRepositoryDir(owner, name))){ git =>
val (commits, diffs) =
getRequestCompareInfo(owner, name, pullreq.commitIdFrom, owner, name, pullreq.commitIdTo)
pulls.html.pullreq(
issue, pullreq,
getComments(owner, name, issueId),
(getCollaborators(owner, name) ::: (if(getAccountByUserName(owner).get.isGroupAccount) Nil else List(owner))).sorted,
getMilestonesWithIssueCount(owner, name),
commits,
diffs,
hasWritePermission(owner, name, context.loginAccount),
repository)
}
}
} getOrElse NotFound
})
ajaxGet("/:owner/:repository/pull/:id/mergeguide")(collaboratorsOnly { repository =>
params("id").toIntOpt.flatMap{ issueId =>
val owner = repository.owner
val name = repository.name
getPullRequest(owner, name, issueId) map { case(issue, pullreq) =>
pulls.html.mergeguide(
checkConflictInPullRequest(owner, name, pullreq.branch, pullreq.requestUserName, name, pullreq.requestBranch, issueId),
pullreq,
s"${baseUrl}/git/${pullreq.requestUserName}/${pullreq.requestRepositoryName}.git")
}
} getOrElse NotFound
})
post("/:owner/:repository/pull/:id/merge", mergeForm)(collaboratorsOnly { (form, repository) =>
params("id").toIntOpt.flatMap { issueId =>
val owner = repository.owner
val name = repository.name
LockUtil.lock(s"${owner}/${name}/merge"){
getPullRequest(owner, name, issueId).map { case (issue, pullreq) =>
using(Git.open(getRepositoryDir(owner, name))) { git =>
// mark issue as merged and close.
val loginAccount = context.loginAccount.get
createComment(owner, name, loginAccount.userName, issueId, form.message, "merge")
createComment(owner, name, loginAccount.userName, issueId, "Close", "close")
updateClosed(owner, name, issueId, true)
// record activity
recordMergeActivity(owner, name, loginAccount.userName, issueId, form.message)
// merge
val mergeBaseRefName = s"refs/heads/${pullreq.branch}"
val merger = MergeStrategy.RECURSIVE.newMerger(git.getRepository, true)
val mergeBaseTip = git.getRepository.resolve(mergeBaseRefName)
val mergeTip = git.getRepository.resolve(s"refs/pull/${issueId}/head")
val conflicted = try {
!merger.merge(mergeBaseTip, mergeTip)
} catch {
case e: NoMergeBaseException => true
}
if (conflicted) {
throw new RuntimeException("This pull request can't merge automatically.")
}
// creates merge commit
val mergeCommit = new CommitBuilder()
mergeCommit.setTreeId(merger.getResultTreeId)
mergeCommit.setParentIds(Array[ObjectId](mergeBaseTip, mergeTip): _*)
val personIdent = new PersonIdent(loginAccount.fullName, loginAccount.mailAddress)
mergeCommit.setAuthor(personIdent)
mergeCommit.setCommitter(personIdent)
mergeCommit.setMessage(s"Merge pull request #${issueId} from ${pullreq.requestUserName}/${pullreq.requestRepositoryName}\n\n" +
form.message)
// insertObject and got mergeCommit Object Id
val inserter = git.getRepository.newObjectInserter
val mergeCommitId = inserter.insert(mergeCommit)
inserter.flush()
inserter.release()
// update refs
val refUpdate = git.getRepository.updateRef(mergeBaseRefName)
refUpdate.setNewObjectId(mergeCommitId)
refUpdate.setForceUpdate(false)
refUpdate.setRefLogIdent(personIdent)
refUpdate.setRefLogMessage("merged", true)
refUpdate.update()
val (commits, _) = getRequestCompareInfo(owner, name, pullreq.commitIdFrom,
pullreq.requestUserName, pullreq.requestRepositoryName, pullreq.commitIdTo)
commits.flatten.foreach { commit =>
if(!existsCommitId(owner, name, commit.id)){
insertCommitId(owner, name, commit.id)
}
}
// notifications
Notifier().toNotify(repository, issueId, "merge"){
Notifier.msgStatus(s"${baseUrl}/${owner}/${name}/pull/${issueId}")
}
redirect(s"/${owner}/${name}/pull/${issueId}")
}
}
}
} getOrElse NotFound
})
get("/:owner/:repository/compare")(referrersOnly { forkedRepository =>
(forkedRepository.repository.originUserName, forkedRepository.repository.originRepositoryName) match {
case (Some(originUserName), Some(originRepositoryName)) => {
getRepository(originUserName, originRepositoryName, baseUrl).map { originRepository =>
using(
Git.open(getRepositoryDir(originUserName, originRepositoryName)),
Git.open(getRepositoryDir(forkedRepository.owner, forkedRepository.name))
){ (oldGit, newGit) =>
val oldBranch = JGitUtil.getDefaultBranch(oldGit, originRepository).get._2
val newBranch = JGitUtil.getDefaultBranch(newGit, forkedRepository).get._2
redirect(s"${context.path}/${forkedRepository.owner}/${forkedRepository.name}/compare/${originUserName}:${oldBranch}...${newBranch}")
}
} getOrElse NotFound
}
case _ => {
using(Git.open(getRepositoryDir(forkedRepository.owner, forkedRepository.name))){ git =>
JGitUtil.getDefaultBranch(git, forkedRepository).map { case (_, defaultBranch) =>
redirect(s"${context.path}/${forkedRepository.owner}/${forkedRepository.name}/compare/${defaultBranch}...${defaultBranch}")
} getOrElse {
redirect(s"${context.path}/${forkedRepository.owner}/${forkedRepository.name}")
}
}
}
}
})
get("/:owner/:repository/compare/*...*")(referrersOnly { repository =>
val Seq(origin, forked) = multiParams("splat")
val (originOwner, tmpOriginBranch) = parseCompareIdentifie(origin, repository.owner)
val (forkedOwner, tmpForkedBranch) = parseCompareIdentifie(forked, repository.owner)
(getRepository(originOwner, repository.name, baseUrl),
getRepository(forkedOwner, repository.name, baseUrl)) match {
case (Some(originRepository), Some(forkedRepository)) => {
using(
Git.open(getRepositoryDir(originOwner, repository.name)),
Git.open(getRepositoryDir(forkedOwner, repository.name))
){ case (oldGit, newGit) =>
val originBranch = JGitUtil.getDefaultBranch(oldGit, originRepository, tmpOriginBranch).get._2
val forkedBranch = JGitUtil.getDefaultBranch(newGit, forkedRepository, tmpForkedBranch).get._2
val forkedId = getForkedCommitId(oldGit, newGit,
originOwner, repository.name, originBranch,
forkedOwner, repository.name, forkedBranch)
val oldId = oldGit.getRepository.resolve(forkedId)
val newId = newGit.getRepository.resolve(forkedBranch)
val (commits, diffs) = getRequestCompareInfo(
originOwner, repository.name, oldId.getName,
forkedOwner, repository.name, newId.getName)
pulls.html.compare(
commits,
diffs,
repository.repository.originUserName.map { userName =>
userName :: getForkedRepositories(userName, repository.name)
} getOrElse List(repository.owner),
originBranch,
forkedBranch,
oldId.getName,
newId.getName,
repository,
originRepository,
forkedRepository,
hasWritePermission(repository.owner, repository.name, context.loginAccount))
}
}
case _ => NotFound
}
})
ajaxGet("/:owner/:repository/compare/*...*/mergecheck")(collaboratorsOnly { repository =>
val Seq(origin, forked) = multiParams("splat")
val (originOwner, tmpOriginBranch) = parseCompareIdentifie(origin, repository.owner)
val (forkedOwner, tmpForkedBranch) = parseCompareIdentifie(forked, repository.owner)
(getRepository(originOwner, repository.name, baseUrl),
getRepository(forkedOwner, repository.name, baseUrl)) match {
case (Some(originRepository), Some(forkedRepository)) => {
using(
Git.open(getRepositoryDir(originOwner, repository.name)),
Git.open(getRepositoryDir(forkedOwner, repository.name))
){ case (oldGit, newGit) =>
val originBranch = JGitUtil.getDefaultBranch(oldGit, originRepository, tmpOriginBranch).get._2
val forkedBranch = JGitUtil.getDefaultBranch(newGit, forkedRepository, tmpForkedBranch).get._2
pulls.html.mergecheck(
checkConflict(originOwner, repository.name, originBranch, forkedOwner, repository.name, forkedBranch))
}
}
case _ => NotFound()
}
})
post("/:owner/:repository/pulls/new", pullRequestForm)(referrersOnly { (form, repository) =>
val loginUserName = context.loginAccount.get.userName
val issueId = createIssue(
owner = repository.owner,
repository = repository.name,
loginUser = loginUserName,
title = form.title,
content = form.content,
assignedUserName = None,
milestoneId = None,
isPullRequest = true)
createPullRequest(
originUserName = repository.owner,
originRepositoryName = repository.name,
issueId = issueId,
originBranch = form.targetBranch,
requestUserName = form.requestUserName,
requestRepositoryName = repository.name,
requestBranch = form.requestBranch,
commitIdFrom = form.commitIdFrom,
commitIdTo = form.commitIdTo)
// fetch requested branch
using(Git.open(getRepositoryDir(repository.owner, repository.name))){ git =>
git.fetch
.setRemote(getRepositoryDir(form.requestUserName, repository.name).toURI.toString)
.setRefSpecs(new RefSpec(s"refs/heads/${form.requestBranch}:refs/pull/${issueId}/head"))
.call
}
// record activity
recordPullRequestActivity(repository.owner, repository.name, loginUserName, issueId, form.title)
// notifications
Notifier().toNotify(repository, issueId, form.content.getOrElse("")){
Notifier.msgPullRequest(s"${baseUrl}/${repository.owner}/${repository.name}/pull/${issueId}")
}
redirect(s"/${repository.owner}/${repository.name}/pull/${issueId}")
})
/**
   * Checks whether merging would cause a conflict. Returns true if it would.
*/
private def checkConflict(userName: String, repositoryName: String, branch: String,
requestUserName: String, requestRepositoryName: String, requestBranch: String): Boolean = {
LockUtil.lock(s"${userName}/${repositoryName}/merge-check"){
using(Git.open(getRepositoryDir(requestUserName, requestRepositoryName))) { git =>
val remoteRefName = s"refs/heads/${branch}"
val tmpRefName = s"refs/merge-check/${userName}/${branch}"
withTmpRefSpec(new RefSpec(s"${remoteRefName}:${tmpRefName}").setForceUpdate(true), git) { ref =>
// fetch objects from origin repository branch
git.fetch
.setRemote(getRepositoryDir(userName, repositoryName).toURI.toString)
.setRefSpecs(ref)
.call
// merge conflict check
val merger = MergeStrategy.RECURSIVE.newMerger(git.getRepository, true)
val mergeBaseTip = git.getRepository.resolve(s"refs/heads/${requestBranch}")
val mergeTip = git.getRepository.resolve(tmpRefName)
try {
!merger.merge(mergeBaseTip, mergeTip)
} catch {
case e: NoMergeBaseException => true
}
}
}
}
}
/**
   * Checks whether merging the pull request would cause a conflict. Returns true if it would.
*/
private def checkConflictInPullRequest(userName: String, repositoryName: String, branch: String,
requestUserName: String, requestRepositoryName: String, requestBranch: String,
issueId: Int): Boolean = {
LockUtil.lock(s"${userName}/${repositoryName}/merge") {
using(Git.open(getRepositoryDir(userName, repositoryName))) { git =>
// merge
val merger = MergeStrategy.RECURSIVE.newMerger(git.getRepository, true)
val mergeBaseTip = git.getRepository.resolve(s"refs/heads/${branch}")
val mergeTip = git.getRepository.resolve(s"refs/pull/${issueId}/head")
try {
!merger.merge(mergeBaseTip, mergeTip)
} catch {
case e: NoMergeBaseException => true
}
}
}
}
/**
* Parses branch identifier and extracts owner and branch name as tuple.
*
* - "owner:branch" to ("owner", "branch")
* - "branch" to ("defaultOwner", "branch")
*/
private def parseCompareIdentifie(value: String, defaultOwner: String): (String, String) =
if(value.contains(':')){
val array = value.split(":")
(array(0), array(1))
} else {
(defaultOwner, value)
}
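  // For example (illustrative only): parseCompareIdentifie("bob:feature", "alice") yields ("bob", "feature"),
  // while parseCompareIdentifie("feature", "alice") falls back to ("alice", "feature").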
/**
   * Extracts all repository names from [[service.RepositoryService.RepositoryTreeNode]] as a flat list.
*/
private def getRepositoryNames(node: RepositoryTreeNode): List[String] =
node.owner :: node.children.map { child => getRepositoryNames(child) }.flatten
/**
* Returns the identifier of the root commit (or latest merge commit) of the specified branch.
*/
private def getForkedCommitId(oldGit: Git, newGit: Git, userName: String, repositoryName: String, branch: String,
requestUserName: String, requestRepositoryName: String, requestBranch: String): String =
JGitUtil.getCommitLogs(newGit, requestBranch, true){ commit =>
existsCommitId(userName, repositoryName, commit.getName) &&
JGitUtil.getBranchesOfCommit(oldGit, commit.getName).contains(branch)
}.head.id
private def getRequestCompareInfo(userName: String, repositoryName: String, branch: String,
requestUserName: String, requestRepositoryName: String, requestCommitId: String): (Seq[Seq[CommitInfo]], Seq[DiffInfo]) = {
using(
Git.open(getRepositoryDir(userName, repositoryName)),
Git.open(getRepositoryDir(requestUserName, requestRepositoryName))
){ (oldGit, newGit) =>
val oldId = oldGit.getRepository.resolve(branch)
val newId = newGit.getRepository.resolve(requestCommitId)
val commits = newGit.log.addRange(oldId, newId).call.iterator.asScala.map { revCommit =>
new CommitInfo(revCommit)
}.toList.splitWith{ (commit1, commit2) =>
view.helpers.date(commit1.time) == view.helpers.date(commit2.time)
}
val diffs = JGitUtil.getDiffs(newGit, oldId.getName, newId.getName, true)
(commits, diffs)
}
}
private def searchPullRequests(userName: Option[String], repository: RepositoryService.RepositoryInfo) =
defining(repository.owner, repository.name){ case (owner, repoName) =>
val filterUser = userName.map { x => Map("created_by" -> x) } getOrElse Map("all" -> "")
val page = IssueSearchCondition.page(request)
val sessionKey = Keys.Session.Pulls(owner, repoName)
// retrieve search condition
val condition = session.putAndGet(sessionKey,
if(request.hasQueryString) IssueSearchCondition(request)
else session.getAs[IssueSearchCondition](sessionKey).getOrElse(IssueSearchCondition())
)
pulls.html.list(
searchIssue(condition, filterUser, true, (page - 1) * PullRequestLimit, PullRequestLimit, owner -> repoName),
getPullRequestCountGroupByUser(condition.state == "closed", owner, Some(repoName)),
userName,
page,
countIssue(condition.copy(state = "open" ), filterUser, true, owner -> repoName),
countIssue(condition.copy(state = "closed"), filterUser, true, owner -> repoName),
countIssue(condition, Map.empty, true, owner -> repoName),
condition,
repository,
hasWritePermission(owner, repoName, context.loginAccount))
}
}
| martinx/gitbucket | src/main/scala/app/PullRequestsController.scala | Scala | apache-2.0 | 19,325 |
package com.tribbloids.spookystuff.row
import com.tribbloids.spookystuff.SpookyContext
import com.tribbloids.spookystuff.execution._
import com.tribbloids.spookystuff.extractors._
import org.apache.spark.ml.dsl.utils.refl.ScalaUDT
import org.apache.spark.sql.types.{DataType, StructField, StructType}
import scala.collection.immutable.ListMap
import scala.language.implicitConversions
//this is a special schema type (a UDT) that carries more metadata than a plain StructType
//TODO: override sqlType, serialize & deserialize to compress into InternalRow
case class SpookySchema(
ec: SpookyExecutionContext,
fieldTypes: ListMap[Field, DataType] = ListMap.empty
) extends ScalaUDT[DataRow] {
import org.apache.spark.ml.dsl.utils.refl.ScalaType._
def spooky: SpookyContext = ec.spooky
final def fields: List[Field] = fieldTypes.keys.toList
final def typedFields: List[TypedField] = fieldTypes.iterator.toList.map(tuple => TypedField(tuple._1, tuple._2))
final def indexedFields: List[IndexedField] = typedFields.zipWithIndex
final def typedFor(field: Field): Option[TypedField] = {
fieldTypes.get(field).map {
TypedField(field, _)
}
}
final def indexedFor(field: Field): Option[IndexedField] = {
indexedFields.find(_._1.self == field)
}
def filterFields(filter: Field => Boolean = _.isSelected): SpookySchema = {
this.copy(
fieldTypes = ListMap(fieldTypes.filterKeys(filter).toSeq: _*)
)
}
lazy val structFields: Seq[StructField] = fieldTypes.toSeq
.map { tuple =>
StructField(
tuple._1.name,
tuple._2.reified
)
}
lazy val structType: StructType = {
StructType(structFields)
}
def --(field: Iterable[Field]): SpookySchema = this.copy(
fieldTypes = fieldTypes -- field
)
def newResolver: Resolver = new Resolver()
class Resolver extends Serializable {
val buffer: LinkedMap[Field, DataType] = LinkedMap()
buffer ++= SpookySchema.this.fieldTypes.toSeq
// val lookup: mutable.HashMap[Extractor[_], Resolved[_]] = mutable.HashMap()
def build: SpookySchema = SpookySchema.this.copy(fieldTypes = ListMap(buffer.toSeq: _*))
private def resolveField(field: Field): Field = {
val existingOpt = buffer.keys.find(_ == field)
val crOpt = existingOpt.map { existing =>
field.effectiveConflictResolving(existing)
}
val revised = crOpt
.map(
cr => field.copy(conflictResolving = cr)
)
.getOrElse(field)
revised
}
def includeTyped(typed: TypedField*): Seq[TypedField] = {
typed.map { t =>
val resolvedField = resolveField(t.self)
mergeType(resolvedField, t.dataType)
val result = TypedField(resolvedField, t.dataType)
buffer += result.self -> result.dataType
result
}
}
private def _include[R](
ex: Extractor[R]
): Resolved[R] = {
val alias = ex match {
case a: Alias[_, _] =>
val resolvedField = resolveField(a.field)
ex withAlias resolvedField
case _ =>
val names = buffer.keys.toSeq.map(_.name)
val i = (1 to Int.MaxValue)
.find(
i => !names.contains("_c" + i)
)
.get
ex withAlias Field("_c" + i)
}
val resolved = alias.resolve(SpookySchema.this)
val dataType = alias.resolveType(SpookySchema.this)
val mergedType = mergeType(alias.field, dataType)
buffer += alias.field -> mergedType
Resolved(
resolved,
TypedField(alias.field, mergedType)
)
}
def include[R](exs: Extractor[R]*): Seq[Resolved[R]] = {
exs.map { ex =>
this._include(ex)
}
}
}
def mergeType(resolvedField: Field, dataType: DataType): DataType = {
val existingTypeOpt = SpookySchema.this.fieldTypes.get(resolvedField)
(existingTypeOpt, resolvedField.conflictResolving) match {
case (Some(existingType), Field.Overwrite) =>
if (dataType == existingType) dataType
else if (dataType.reified == existingType.reified) dataType.reified
else
throw new IllegalArgumentException(
s"""
|Overwriting field ${resolvedField.name} with inconsistent type:
|old: $existingType
|new: $dataType
|set conflictResolving=Replace to fix it
""".stripMargin
)
case _ =>
dataType
}
}
//use it after Row-based data representation
object ImplicitLookup {
implicit def fieldToTyped(field: Field): TypedField = SpookySchema.this.typedFor(field).get
implicit def fieldToIndexed(field: Field): IndexedField = SpookySchema.this.indexedFor(field).get
}
}
| tribbloid/spookystuff | core/src/main/scala/com/tribbloids/spookystuff/row/SpookySchema.scala | Scala | apache-2.0 | 4,749 |
package dhg.ccg.tag.learn
import dhg.util._
import math.{ log, exp }
import com.typesafe.scalalogging.slf4j.{ StrictLogging => Logging }
import scalaz._
import Scalaz._
import scala.collection.breakOut
import dhg.ccg.prob._
import dhg.ccg.tagdict.TagDictionary
trait TransitionDistributioner[Tag] {
type Word = String
def apply(
taggedSentences: Vector[Vector[(Word, Tag)]],
initialTagdict: TagDictionary[Tag]): ConditionalLogProbabilityDistribution[Tag, Tag]
def make(
transitionCounts: Map[Tag, Map[Tag, LogDouble]],
tagCounts: Map[Tag, LogDouble], // INCLUDING START/END
initialTagdict: TagDictionary[Tag]): ConditionalLogProbabilityDistribution[Tag, Tag]
}
abstract class AbstractTransitionDistributioner[Tag] extends TransitionDistributioner[Tag] {
def apply(
taggedSentences: Vector[Vector[(Word, Tag)]],
initialTagdict: TagDictionary[Tag]): ConditionalLogProbabilityDistribution[Tag, Tag] = {
val tagdict = initialTagdict.withWords(taggedSentences.flatten.map(_._1).toSet).withTags(taggedSentences.flatten.map(_._2).toSet)
val transitionCounts =
taggedSentences
.flatMap(s => (tagdict.startTag +: s.map(_._2) :+ tagdict.endTag).sliding2)
.filter { case (at, bt) => !tagdict.excludedTags(at) && !tagdict.excludedTags(bt) }
.groupByKey.mapVals(_.counts.mapVals(LogDouble(_)))
val tagCounts = taggedSentences.flatMap(s => (tagdict.startTag +: s.map(_._2) :+ tagdict.endTag)).counts.mapVals(LogDouble(_)) -- tagdict.excludedTags
make(transitionCounts, tagCounts, tagdict)
}
}
class UnsmoothedTransitionDistributioner[Tag]()
extends AbstractTransitionDistributioner[Tag] {
def make(
transitionCounts: Map[Tag, Map[Tag, LogDouble]],
tagCounts: Map[Tag, LogDouble], // INCLUDING START/END
tagdict: TagDictionary[Tag]) = {
new SimpleConditionalLogProbabilityDistribution((transitionCounts -- tagdict.excludedTags).mapVals(c => new SimpleLogProbabilityDistribution(c -- tagdict.excludedTags)),
LogProbabilityDistribution.empty[Tag],
Some(tagdict.excludedTags + tagdict.endTag))
}
override def toString = f"UnsmoothedTransitionDistributioner()"
}
/**
* tdcutoff 0.1, lambda=0.23
* tdcutoff 0.01, lambda=0.21
* tdcutoff 0.001, lambda=0.20
* tdcutoff 0.0, lambda=0.20
*/
class AddLambdaTransitionDistributioner[Tag](lambda: Double = 0.2)
extends AbstractTransitionDistributioner[Tag] {
def make(
transitionCounts: Map[Tag, Map[Tag, LogDouble]],
tagCounts: Map[Tag, LogDouble], // INCLUDING START/END
tagdict: TagDictionary[Tag]) = {
val allTransitionToAbleTags = Some(tagdict.allTags + tagdict.endTag)
val startTag = Some(Set(tagdict.startTag) ++ tagdict.excludedTags)
new SimpleConditionalLogProbabilityDistribution(
(tagdict.allTags.mapToVal(Map[Tag, LogDouble]()).toMap ++ transitionCounts -- tagdict.excludedTags)
.mapVals(new LaplaceLogProbabilityDistribution(_, allTransitionToAbleTags, startTag, LogDouble(lambda))) +
(tagdict.startTag -> new LaplaceLogProbabilityDistribution(transitionCounts.getOrElse(tagdict.startTag, Map.empty), allTransitionToAbleTags, Some(Set(tagdict.startTag, tagdict.endTag) ++ tagdict.excludedTags), LogDouble(lambda))) + // Start Tag can't transition to End Tag
(tagdict.endTag -> LogProbabilityDistribution.empty[Tag]), // End Tag can't transition to anything
new LaplaceLogProbabilityDistribution(Map.empty[Tag, LogDouble], allTransitionToAbleTags, startTag, LogDouble(lambda)),
Some(tagdict.excludedTags + tagdict.endTag))
}
override def toString = f"AddLambdaTransitionDistributioner($lambda)"
}
/**
* C(t_{i-1}, t_i) + a(t_{i-1}) * p(t_i)
* p(t_i | t_{i-1}) = -------------------------------------
* C(t_{i-1}) + a(t_{i-1})
* where a(t_{i-1}) = |t_i : C(t_{i-1}, t_i) = 1| + \epsilon
*/
class OneCountTransitionDistributioner[Tag](singletonEpsilon: Double, tagPriorLambda: Double, flatPrior: Boolean = false, flatSingletons: Boolean = false)
extends AbstractTransitionDistributioner[Tag] {
def make(
transitionCounts: Map[Tag, Map[Tag, LogDouble]],
tagCounts: Map[Tag, LogDouble], // INCLUDING START/END
tagdict: TagDictionary[Tag]) = {
val allTransitionToAbleTags = tagdict.allTags + tagdict.endTag
val someAllTransitionToAbleTags = Some(allTransitionToAbleTags)
val startTag = Some(Set(tagdict.startTag) ++ tagdict.excludedTags)
val seTags = Some(Set(tagdict.startTag, tagdict.endTag) ++ tagdict.excludedTags)
val singletonTransitions =
if (flatSingletons)
Map[Tag, LogDouble]().withDefaultValue(LogDouble(tagPriorLambda * someAllTransitionToAbleTags.get.size))
else
(tagdict.allTags + tagdict.startTag).mapTo { t => LogDouble(transitionCounts.get(t).fold(0)(_.count(_._2 < LogDouble(1.00000001))) + singletonEpsilon) }.toMap
val transitionToAbleTagPrior =
if (flatPrior) {
if (flatSingletons)
new DefaultedLogProbabilityDistribution(Map(), someAllTransitionToAbleTags, startTag, LogDouble(1.0 / someAllTransitionToAbleTags.get.size))
else
new DefaultedLogProbabilityDistribution(Map(), someAllTransitionToAbleTags, startTag, LogDouble(tagPriorLambda))
}
else
new LaplaceLogProbabilityDistribution(tagCounts -- tagdict.excludedTags, someAllTransitionToAbleTags, startTag, LogDouble(tagPriorLambda))
val smoothedTransitionCounts =
(tagdict.allTags + tagdict.startTag).mapTo { tPrime =>
allTransitionToAbleTags.mapTo { t =>
val c = (for (tPrimeCounts <- transitionCounts.get(tPrime); c <- tPrimeCounts.get(t)) yield c).getOrElse(LogDouble.zero)
c + singletonTransitions(tPrime) * transitionToAbleTagPrior(t)
}.toMap
}.toMap
new SimpleConditionalLogProbabilityDistribution[Tag, Tag](
smoothedTransitionCounts.mapt { (tPrime, counts) =>
tPrime -> new DefaultedLogProbabilityDistribution(counts, someAllTransitionToAbleTags, startTag, singletonTransitions(tPrime) * transitionToAbleTagPrior.defaultProb)
} +
(tagdict.startTag -> {
val startDestinationTagPrior =
if (flatPrior) {
if (flatSingletons)
new DefaultedLogProbabilityDistribution(Map.empty[Tag, LogDouble], someAllTransitionToAbleTags, startTag, LogDouble(1.0 / someAllTransitionToAbleTags.get.size))
else
new DefaultedLogProbabilityDistribution(Map.empty[Tag, LogDouble], someAllTransitionToAbleTags, startTag, LogDouble(tagPriorLambda))
}
else
new LaplaceLogProbabilityDistribution(tagCounts -- tagdict.excludedTags, someAllTransitionToAbleTags, seTags, LogDouble(tagPriorLambda))
val startSmoothedCounts = tagdict.allTags.mapTo { t => transitionCounts(tagdict.startTag).getOrElse(t, LogDouble.zero) + singletonTransitions(tagdict.startTag) * startDestinationTagPrior(t) }.toMap
val default = singletonTransitions(tagdict.startTag) * startDestinationTagPrior.defaultProb
new DefaultedLogProbabilityDistribution(startSmoothedCounts, someAllTransitionToAbleTags, seTags, default) // Start Tag can't transition to End Tag
}) +
(tagdict.endTag -> LogProbabilityDistribution.empty[Tag]), // End Tag can't transition to anything
transitionToAbleTagPrior,
Some(tagdict.excludedTags + tagdict.endTag))
}
override def toString = f"OneCountTransitionDistributioner($singletonEpsilon, $tagPriorLambda)"
}
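// Illustrative sketch (not part of the original source): a worked instance of the one-count
// smoothing formula documented above, using plain Doubles instead of LogDouble and made-up
// counts. The one-count emission distributioner below applies the same formula with
// tag/word counts in place of tag/tag counts.
object OneCountSmoothingWorkedExample {
  def main(args: Array[String]): Unit = {
    val cPrevCur = 3.0       // C(t_{i-1}, t_i): bigram count
    val cPrev    = 10.0      // C(t_{i-1}): count of the previous tag
    val a        = 2.0 + 0.1 // a(t_{i-1}) = number of singleton followers + epsilon
    val pCur     = 0.05      // prior p(t_i)
    val p = (cPrevCur + a * pCur) / (cPrev + a)
    println(f"p(t_i | t_prev) = $p%.4f") // ~= 0.2566
  }
}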
//
//
//
trait EmissionDistributioner[Tag] {
type Word = String
def apply(
taggedSentences: Vector[Vector[(Word, Tag)]],
initialTagdict: TagDictionary[Tag]): ConditionalLogProbabilityDistribution[Tag, Word]
def make(
emissionCounts: Map[Tag, Map[Word, LogDouble]],
initialTagdict: TagDictionary[Tag]): ConditionalLogProbabilityDistribution[Tag, Word]
}
abstract class AbstractEmissionDistributioner[Tag] extends EmissionDistributioner[Tag] {
def apply(
taggedSentences: Vector[Vector[(Word, Tag)]],
initialTagdict: TagDictionary[Tag]): ConditionalLogProbabilityDistribution[Tag, Word] = {
val tagdict = initialTagdict.withWords(taggedSentences.flatten.map(_._1).to[Set]).withTags(taggedSentences.flatten.map(_._2).to[Set])
val emissionCounts = taggedSentences.flatten.filter { case (w, t) => tagdict(w)(t) && !tagdict.excludedTags(t) }.map(_.swap).groupByKey.mapVals(_.counts.mapVals(LogDouble(_)))
make(emissionCounts, tagdict)
}
}
class UnsmoothedEmissionDistributioner[Tag]() extends AbstractEmissionDistributioner[Tag] {
def make(
emissionCounts: Map[Tag, Map[Word, LogDouble]],
initialTagdict: TagDictionary[Tag]): ConditionalLogProbabilityDistribution[Tag, Word] = {
val tagdict = initialTagdict.withWords(emissionCounts.values.flatMap(_.keys).to[Set]).withTags(emissionCounts.keySet)
val allWordsSet = Some(tagdict.allWords)
new SimpleConditionalLogProbabilityDistribution(
(emissionCounts -- tagdict.excludedTags).mapt((t, tcounts) => t -> new LaplaceLogProbabilityDistribution(tcounts, allWordsSet, /*Some(tagdict.entries.keySet -- tagdict.knownWordsForTag(t) +*/ Some(Set(tagdict.startWord, tagdict.endWord)), LogDouble.zero)) +
(tagdict.startTag -> new SimpleLogProbabilityDistribution(Map(tagdict.startWord -> LogDouble.one))) +
(tagdict.endTag -> new SimpleLogProbabilityDistribution(Map(tagdict.endWord -> LogDouble.one))),
LogProbabilityDistribution.empty[Word],
Some(tagdict.excludedTags))
}
override def toString = f"UnsmoothedEmissionDistributioner()"
}
/*
* tdcutoff 0.1, lambda=0.10
* tdcutoff 0.01, lambda=0.10
* tdcutoff 0.001, lambda=0.10
* tdcutoff 0.0, lambda=0.10
*/
class AddLambdaEmissionDistributioner[Tag](lambda: Double = 0.1) extends AbstractEmissionDistributioner[Tag] {
def make(
emissionCounts: Map[Tag, Map[Word, LogDouble]],
initialTagdict: TagDictionary[Tag]): ConditionalLogProbabilityDistribution[Tag, Word] = {
val tagdict = initialTagdict.withWords(emissionCounts.values.flatMap(_.keys).to[Set]).withTags(emissionCounts.keySet)
val allWordsSet = Some(tagdict.allWords)
new SimpleConditionalLogProbabilityDistribution(
(tagdict.allTags.mapToVal(Map[Word, LogDouble]()).toMap ++ emissionCounts -- tagdict.excludedTags)
.mapt((t, tcounts) => t -> new LaplaceLogProbabilityDistribution(tcounts, allWordsSet, /*Some(tagdict.entries.keySet -- tagdict.knownWordsForTag.getOrElse(t, Set.empty) +*/ Some(Set(tagdict.startWord, tagdict.endWord)), LogDouble(lambda))) +
(tagdict.startTag -> new SimpleLogProbabilityDistribution(Map(tagdict.startWord -> LogDouble.one))) +
(tagdict.endTag -> new SimpleLogProbabilityDistribution(Map(tagdict.endWord -> LogDouble.one))),
new LaplaceLogProbabilityDistribution(Map(), allWordsSet, /*Some(tagdict.entries.keySet +*/ Some(Set(tagdict.startWord, tagdict.endWord)), LogDouble(1.0)),
Some(tagdict.excludedTags))
}
override def toString = f"AddLambdaEmissionDistributioner($lambda)"
}
/**
* C(t,w) + b(t) * p(w)
* p(w | t) = --------------------
* C(t) + b(t)
* where b(t) = |w : C(t,w) = 1| + \epsilon
*/
class OneCountEmissionDistributioner[Tag](singletonEpsilon: Double, wordPriorLambda: Double) extends AbstractEmissionDistributioner[Tag] {
def make(
emissionCounts: Map[Tag, Map[Word, LogDouble]],
initialTagdict: TagDictionary[Tag]): ConditionalLogProbabilityDistribution[Tag, Word] = {
val tagdict = initialTagdict.withWords(emissionCounts.values.flatMap(_.keys).to[Set]).withTags(emissionCounts.keySet)
val allWordsSet = Some(tagdict.allWords)
val singletonEmissions = tagdict.allTags.mapTo { t => LogDouble(emissionCounts.get(t).fold(0)(_.count(_._2 < LogDouble(1.00000001))) + singletonEpsilon) }.toMap
val wordCounts = emissionCounts.values.reduce(_ |+| _)
    val wordPrior = new LaplaceLogProbabilityDistribution(wordCounts, Some(tagdict.allWords), Some(Set(tagdict.startWord, tagdict.endWord)), LogDouble(wordPriorLambda))
val smoothedEmissionCounts =
tagdict.allTags.mapTo { t =>
tagdict.allWords.mapTo { w =>
val c = (for (tCounts <- emissionCounts.get(t); c <- tCounts.get(w)) yield c).getOrElse(LogDouble.zero)
c + singletonEmissions(t) * wordPrior(w)
}.toMap
}.toMap
new SimpleConditionalLogProbabilityDistribution(
(tagdict.allTags.mapToVal(Map[Word, LogDouble]()).toMap ++ smoothedEmissionCounts -- tagdict.excludedTags)
.mapt { (t, tcounts) =>
val default = singletonEmissions(t) * wordPrior.defaultProb
          t -> new DefaultedLogProbabilityDistribution(tcounts, allWordsSet, /*Some(tagdict.entries.keySet -- tagdict.knownWordsForTag(t) +*/ Some(Set(tagdict.startWord, tagdict.endWord)), default)
} +
(tagdict.startTag -> new SimpleLogProbabilityDistribution(Map(tagdict.startWord -> LogDouble.one))) +
(tagdict.endTag -> new SimpleLogProbabilityDistribution(Map(tagdict.endWord -> LogDouble.one))),
wordPrior,
Some(tagdict.excludedTags))
}
override def toString = f"OneCountEmissionDistributioner($singletonEpsilon, $wordPriorLambda)"
}
| dhgarrette/2015-ccg-parsing | src/main/scala/dhg/ccg/tag/learn/SupHmmDistributioner.scala | Scala | apache-2.0 | 13,311 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.businessmatching.updateservice.remove
import play.api.i18n.Messages
import utils.AmlsViewSpec
import views.Fixture
import views.html.businessmatching.updateservice.remove.unable_to_remove_activity
class unable_to_remove_activitySpec extends AmlsViewSpec {
trait ViewFixture extends Fixture {
lazy val unable_to_remove_activity = app.injector.instanceOf[unable_to_remove_activity]
implicit val requestWithToken = addTokenForView()
def view = unable_to_remove_activity("test")
}
"The unable_to_remove_activity view" must {
"have the correct title" in new ViewFixture {
doc.title must startWith(Messages("businessmatching.updateservice.removeactivitiesinformation.title") + " - " + Messages("summary.updateservice"))
}
"have correct heading" in new ViewFixture {
heading.html must be(Messages("businessmatching.updateservice.removeactivitiesinformation.heading", "test", Messages("summary.updateinformation")))
}
"have the back link button" in new ViewFixture {
doc.getElementsByAttributeValue("class", "link-back") must not be empty
}
"show the correct content" in new ViewFixture {
doc.body().text() must include(Messages("businessmatching.updateservice.removeactivitiesinformation.info.3"))
doc.body().text() must include(Messages("businessmatching.updateservice.removeactivitiesinformation.info.2"))
}
}
}
| hmrc/amls-frontend | test/views/businessmatching/updateservice/remove/unable_to_remove_activitySpec.scala | Scala | apache-2.0 | 2,019 |
package frameless
import org.scalacheck.{Arbitrary, Cogen}
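// X1..X6 are case-class wrappers of increasing arity, each given ScalaCheck Arbitrary/Cogen
// and an Ordering instance, so they can serve as generic fixtures in property-based tests.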
case class X1[A](a: A)
object X1 {
implicit def arbitrary[A: Arbitrary]: Arbitrary[X1[A]] =
Arbitrary(implicitly[Arbitrary[A]].arbitrary.map(X1(_)))
implicit def cogen[A](implicit A: Cogen[A]): Cogen[X1[A]] =
A.contramap(_.a)
implicit def ordering[A: Ordering]: Ordering[X1[A]] = Ordering[A].on(_.a)
}
case class X2[A, B](a: A, b: B)
object X2 {
implicit def arbitrary[A: Arbitrary, B: Arbitrary]: Arbitrary[X2[A, B]] =
Arbitrary(Arbitrary.arbTuple2[A, B].arbitrary.map((X2.apply[A, B] _).tupled))
implicit def cogen[A, B](implicit A: Cogen[A], B: Cogen[B]): Cogen[X2[A, B]] =
Cogen.tuple2(A, B).contramap(x => (x.a, x.b))
implicit def ordering[A: Ordering, B: Ordering]: Ordering[X2[A, B]] = Ordering.Tuple2[A, B].on(x => (x.a, x.b))
}
case class X3[A, B, C](a: A, b: B, c: C)
object X3 {
implicit def arbitrary[A: Arbitrary, B: Arbitrary, C: Arbitrary]: Arbitrary[X3[A, B, C]] =
Arbitrary(Arbitrary.arbTuple3[A, B, C].arbitrary.map((X3.apply[A, B, C] _).tupled))
implicit def cogen[A, B, C](implicit A: Cogen[A], B: Cogen[B], C: Cogen[C]): Cogen[X3[A, B, C]] =
Cogen.tuple3(A, B, C).contramap(x => (x.a, x.b, x.c))
implicit def ordering[A: Ordering, B: Ordering, C: Ordering]: Ordering[X3[A, B, C]] =
Ordering.Tuple3[A, B, C].on(x => (x.a, x.b, x.c))
}
case class X3U[A, B, C](a: A, b: B, u: Unit, c: C)
object X3U {
implicit def arbitrary[A: Arbitrary, B: Arbitrary, C: Arbitrary]: Arbitrary[X3U[A, B, C]] =
Arbitrary(Arbitrary.arbTuple3[A, B, C].arbitrary.map(x => X3U[A, B, C](x._1, x._2, (), x._3)))
implicit def cogen[A, B, C](implicit A: Cogen[A], B: Cogen[B], C: Cogen[C]): Cogen[X3U[A, B, C]] =
Cogen.tuple3(A, B, C).contramap(x => (x.a, x.b, x.c))
implicit def ordering[A: Ordering, B: Ordering, C: Ordering]: Ordering[X3U[A, B, C]] =
Ordering.Tuple3[A, B, C].on(x => (x.a, x.b, x.c))
}
case class X4[A, B, C, D](a: A, b: B, c: C, d: D)
object X4 {
implicit def arbitrary[A: Arbitrary, B: Arbitrary, C: Arbitrary, D: Arbitrary]: Arbitrary[X4[A, B, C, D]] =
Arbitrary(Arbitrary.arbTuple4[A, B, C, D].arbitrary.map((X4.apply[A, B, C, D] _).tupled))
implicit def cogen[A, B, C, D](implicit A: Cogen[A], B: Cogen[B], C: Cogen[C], D: Cogen[D]): Cogen[X4[A, B, C, D]] =
Cogen.tuple4(A, B, C, D).contramap(x => (x.a, x.b, x.c, x.d))
implicit def ordering[A: Ordering, B: Ordering, C: Ordering, D: Ordering]: Ordering[X4[A, B, C, D]] =
Ordering.Tuple4[A, B, C, D].on(x => (x.a, x.b, x.c, x.d))
}
case class X5[A, B, C, D, E](a: A, b: B, c: C, d: D, e: E)
object X5 {
implicit def arbitrary[A: Arbitrary, B: Arbitrary, C: Arbitrary, D: Arbitrary, E: Arbitrary]: Arbitrary[X5[A, B, C, D, E]] =
Arbitrary(Arbitrary.arbTuple5[A, B, C, D, E].arbitrary.map((X5.apply[A, B, C, D, E] _).tupled))
implicit def cogen[A, B, C, D, E](implicit A: Cogen[A], B: Cogen[B], C: Cogen[C], D: Cogen[D], E: Cogen[E]): Cogen[X5[A, B, C, D, E]] =
Cogen.tuple5(A, B, C, D, E).contramap(x => (x.a, x.b, x.c, x.d, x.e))
implicit def ordering[A: Ordering, B: Ordering, C: Ordering, D: Ordering, E: Ordering]: Ordering[X5[A, B, C, D, E]] =
Ordering.Tuple5[A, B, C, D, E].on(x => (x.a, x.b, x.c, x.d, x.e))
}
case class X6[A, B, C, D, E, F](a: A, b: B, c: C, d: D, e: E, f: F)
object X6 {
implicit def arbitrary[A: Arbitrary, B: Arbitrary, C: Arbitrary, D: Arbitrary, E: Arbitrary, F: Arbitrary]: Arbitrary[X6[A, B, C, D, E, F]] =
Arbitrary(Arbitrary.arbTuple6[A, B, C, D, E, F].arbitrary.map((X6.apply[A, B, C, D, E, F] _).tupled))
implicit def cogen[A, B, C, D, E, F](implicit A: Cogen[A], B: Cogen[B], C: Cogen[C], D: Cogen[D], E: Cogen[E], F: Cogen[F]): Cogen[X6[A, B, C, D, E, F]] =
Cogen.tuple6(A, B, C, D, E, F).contramap(x => (x.a, x.b, x.c, x.d, x.e, x.f))
implicit def ordering[A: Ordering, B: Ordering, C: Ordering, D: Ordering, E: Ordering, F: Ordering]: Ordering[X6[A, B, C, D, E, F]] =
Ordering.Tuple6[A, B, C, D, E, F].on(x => (x.a, x.b, x.c, x.d, x.e, x.f))
} | adelbertc/frameless | dataset/src/test/scala/frameless/XN.scala | Scala | apache-2.0 | 4,055 |
/*
* Code Pulse: A real-time code coverage testing tool. For more information
* see http://code-pulse.com
*
* Copyright (C) 2014 Applied Visions - http://securedecisions.avi.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.secdec.codepulse.components.includes.snippet
import net.liftweb.http._
import scala.xml.NodeSeq
import scala.xml.Elem
import scala.xml.Attribute
import scala.xml.Text
import scala.xml.Null
import language.implicitConversions
trait IncludesRegistry {
trait Dependency { def asXml: NodeSeq }
case class Registration(key: String, dep: Dependency) extends Dependency { def asXml = dep.asXml }
/** Remembers the Registrations made by calling `register`. Can be used
* to dispatch XML from the registration keys.
*/
protected val registry = collection.mutable.Map.empty[String, Registration]
  /** Register several dependencies under the given `key`. The `dep0` and
   * `depN` arguments allow varargs syntax while preventing a zero-argument call.
*/
protected def register(key: String, dep0: Dependency, depN: Dependency*): Registration = {
val reg = Registration(key, dep0 +: depN)
registry.put(key, reg)
reg
}
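  // A minimal, hypothetical illustration (not from the original file) of the
  // "first argument plus varargs" pattern used by register(...) above:
  //   def atLeastOne(first: Int, rest: Int*): Seq[Int] = first +: rest
  //   atLeastOne(1, 2, 3)  // Seq(1, 2, 3)
  //   atLeastOne()         // does not compile: at least one argument is required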
/** A Javascript Dependency, rendered as a `<script>` tag */
case class JS(src: String) extends Dependency {
lazy val asXml = <script class="lift:with-resource-id lift:head" type="text/javascript" src={ "/" + LiftRules.resourceServerPath + "/" + src }/>
}
/** A CSS Dependency, rendered as a stylesheet `<link>` */
case class CSS(src: String) extends Dependency {
lazy val asXml = <link class="lift:with-resource-id lift:head" href={ "/" + LiftRules.resourceServerPath + "/" + src } rel="stylesheet" type="text/css"></link>
}
/** Allows a sequence of dependencies to be treated as a single dependency */
implicit class DependencySeq(seq: Seq[Dependency]) extends Dependency {
lazy val asXml: NodeSeq = seq flatMap { _.asXml }
}
}
object Includes extends DispatchSnippet with IncludesRegistry {
  /** Tells Lift's resource server to allow access to the directories that this
   * object serves. It only needs to be evaluated once; subsequent evaluations
   * have no effect.
*/
lazy val init = {
ResourceServer.allow({
case "common" :: _ => true
case "widgets" :: _ => true
case "pages" :: _ => true
case "thirdparty" :: _ => true
})
true
}
/** Snippet dispatch looks in the `registry` for each `key`, returning
* the registration's XML representation.
*/
object dispatch extends PartialFunction[String, NodeSeq => NodeSeq] {
def isDefinedAt(key: String) = registry contains key
def apply(key: String) = { _ => registry(key).asXml }
}
/*
* Third party dependencies:
*/
val bacon = register("baconjs", JS("thirdparty/bacon/Bacon-0.7.2-min.js"))
val bootstrap = register("bootstrap", JS("thirdparty/bootstrap/js/bootstrap.min.js"), CSS("thirdparty/bootstrap/css/bootstrap.min.css"))
val colorpicker = register("colorpicker", JS("thirdparty/colorpicker/colorpicker.min.js"))
val d3 = register("d3", JS("thirdparty/d3/d3.min.js"))
val fileupload = register("jqfileupload", JS("thirdparty/fileupload/jquery.ui.widget.js"),
JS("thirdparty/fileupload/jquery.iframe-transport.js"),
JS("thirdparty/fileupload/jquery.fileupload.js"))
val fontAwesome = register("FontAwesome", CSS("thirdparty/fontawesome/css/font-awesome.min.css"))
val icomoon = register("icomoon", CSS("thirdparty/icomoon/css/style.css"))
val jquery = register("jquery", JS("thirdparty/jquery/jquery-2.0.2.min.js"))
val qtip2 = register("qtip2", JS("thirdparty/qtip2/jquery.qtip.min.js"), CSS("thirdparty/qtip2/jquery.qtip.min.css"))
val spinner = register("spinner", JS("thirdparty/spin/spin.min.js"))
val timeago = register("timeago", JS("thirdparty/timeago/jquery.timeago.js"))
val codemirror = register("codemirror",
JS("thirdparty/codemirror/lib/codemirror.js"),
CSS("thirdparty/codemirror/lib/codemirror.css"),
JS("thirdparty/codemirror/addon/scroll/simplescrollbars.js"),
CSS("thirdparty/codemirror/addon/scroll/simplescrollbars.css"),
JS("thirdparty/codemirror/addon/selection/active-line.js"),
JS("thirdparty/codemirror/addon/mode/multiplex.js"),
JS("thirdparty/codemirror/mode/meta.js"),
JS("thirdparty/codemirror/mode/clike/clike.js"),
JS("thirdparty/codemirror/mode/javascript/javascript.js"),
JS("thirdparty/codemirror/mode/xml/xml.js"),
JS("thirdparty/codemirror/mode/javascript/javascript.js"),
JS("thirdparty/codemirror/mode/css/css.js"),
JS("thirdparty/codemirror/mode/htmlmixed/htmlmixed.js"),
JS("thirdparty/codemirror/mode/htmlembedded/htmlembedded.js"),
JS("thirdparty/codemirror/mode/mllike/mllike.js"))
val underscore = register("underscore", JS("thirdparty/underscore/underscore-min.js"))
val handlebars = register("handlebars", JS("thirdparty/handlebars/handlebars.js"))
/*
* Hand-crafted-with-love dependencies:
*/
val commonJs = register("commonJs", JS("common/common.js"))
val overlay = register("overlay", spinner, JS("widgets/overlay/overlay.js"), CSS("widgets/overlay/overlay.css"))
val commonStyle = register("commonStyle", CSS("common/common.css"))
val desktopStyle = register("desktopStyle", CSS("common/desktop.css"))
val projectList = register("ProjectList", JS("pages/ProjectList/ProjectList.js"))
val projectSwitcher = register("ProjectSwitcher", JS("pages/ProjectSwitcher/ProjectSwitcher.js"), CSS("pages/ProjectSwitcher/ProjectSwitcher.css"))
val codepulseCommon = register("CodePulseCommon", JS("common/CodePulse.js"))
val downloader = register("Downloader", JS("common/Downloader.js"))
val api = register("API", JS("pages/projects/API.js"))
val codeTreemap = register("codetreemap", overlay, qtip2, JS("widgets/codetreemap/treemap.js"), CSS("widgets/codetreemap/treemap.css"))
val colorpickerTooltip = register("colorpickerTooltip", colorpicker, qtip2, JS("pages/projects/colorpicker-tooltip.js"), CSS("pages/projects/colorpicker-tooltip.css"))
val notifications = register("notifications", CSS("widgets/Notifications/Notifications.css"), JS("widgets/Notifications/Notifications.js"), JS("widgets/Notifications/PieClock.js"))
val projectInputForm = register("ProjectInputForm", CSS("pages/ProjectInputForm/ProjectInputForm.css"), JS("pages/ProjectInputForm/ProjectInputForm.js"))
val brandingStyle = register("brandingStyle", CSS("common/branding.css"))
val updates = register("updates", JS("common/UpdateController.js"), CSS("widgets/updates/updates.css"), JS("widgets/updates/updates.js"))
val sourceview = register("sourceview", JS("widgets/sourceview/sourceview.js"), CSS("widgets/sourceview/sourceview.css"), JS("widgets/sourceview/SourceDataProvider.js"))
val indexPage = register("indexPage", CSS("pages/index/index.css"))
val projectsPage = register("projectsPage",
api,
JS("pages/projects/TraceDataUpdates.js"),
JS("pages/projects/TraceStatus.js"),
JS("pages/projects/TreeData.js"),
JS("pages/projects/TraceTreeData.js"),
colorpickerTooltip,
CSS("pages/projects/PackageWidget.css"),
JS("pages/projects/PackageWidget.js"),
JS("pages/projects/PackageController.js"),
CSS("pages/projects/treemap-tooltip.css"),
JS("pages/projects/Recording.js"),
JS("pages/projects/RecordingWidget.js"),
JS("pages/projects/RecordingManager.js"),
JS("pages/projects/trace-recording-controls.js"),
CSS("pages/projects/trace-recording-controls.css"),
JS("pages/projects/editable.js"),
JS("pages/projects/projects.js"),
CSS("pages/projects/projects.css"),
JS("pages/projects/DependencyCheck.js"),
JS("pages/projects/SurfaceDetector.js"),
codemirror,
underscore,
handlebars)
val traceConnectionUI = register("TraceConnectorUI",
overlay,
CSS("widgets/TraceConnectorUI/ConnectionHelpForm.css"),
JS("widgets/TraceConnectorUI/ConnectionHelpForm.js"),
CSS("widgets/TraceConnectorUI/TraceConnectorUI.css"),
JS("widgets/TraceConnectorUI/TraceConnectorUI.js"))
} | secdec/codepulse | codepulse/src/main/scala/com/secdec/codepulse/components/includes/snippet/Includes.scala | Scala | apache-2.0 | 8,450 |
package it.trenzalore.hdfs.compactor.run
import it.trenzalore.hdfs.compactor.formats.FileFormat
import org.apache.spark.sql.{ DataFrame, SparkSession }
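/** Reads a set of input files of a single format into one DataFrame. */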
trait FileFormatReader {
def getDataFrame(inputFiles: Seq[String])(implicit spark: SparkSession): DataFrame
}
object ParquetReader extends FileFormatReader {
def getDataFrame(inputFiles: Seq[String])(implicit spark: SparkSession): DataFrame = {
spark.read.parquet(inputFiles: _*)
}
}
object AvroReader extends FileFormatReader {
def getDataFrame(inputFiles: Seq[String])(implicit spark: SparkSession): DataFrame = {
spark.read.format("com.databricks.spark.avro").load(inputFiles: _*)
}
} | JunkieLand/HDFS-Compactor | hdfs-compactor/src/main/scala/it/trenzalore/hdfs/compactor/run/FileFormatReader.scala | Scala | apache-2.0 | 661 |
package cmwell.analytics.data
import com.datastax.spark.connector._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
/**
* Get an RDD[String] for all infotons with duplicated system fields.
*/
object InfotonWithDuplicatedSystemFields {
def apply()
(implicit spark: SparkSession): RDD[String] = {
val infotonRdd = spark.sparkContext.cassandraTable("data2", "infoton")
.select("uuid", "field")
.where("quad = ?", "cmwell://meta/sys") // only look at system fields
.spanBy(row => row.getString("uuid"))
// Map the grouped data to Infoton objects containing only the system fields.
infotonRdd.filter { case (_, fields) =>
val fieldNames = fields.map(row => row.getString("field")).toSeq
.filter(_ != "data") // The data field can legitimately be duplicated
fieldNames.length != fieldNames.distinct.length
}
.map { case (uuid, _) => uuid }
}
}
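// Illustrative sketch (hypothetical helper, not in the original file): the duplicate-detection
// idiom used above, applied to a plain field-name list. The "data" field is dropped first
// because it may legitimately appear more than once.
object DuplicatedSystemFieldsCheckExample {
  def hasDuplicatedSystemFields(fieldNames: Seq[String]): Boolean = {
    val relevant = fieldNames.filter(_ != "data")
    relevant.length != relevant.distinct.length
  }
  def main(args: Array[String]): Unit = {
    println(hasDuplicatedSystemFields(Seq("path", "uuid", "data", "data"))) // false
    println(hasDuplicatedSystemFields(Seq("path", "uuid", "uuid")))         // true
  }
}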
| thomsonreuters/CM-Well | tools/dataConsistencyTool/cmwell-spark-analysis/src/main/scala/cmwell/analytics/data/InfotonWithDuplicatedSystemFields.scala | Scala | apache-2.0 | 950 |
// import scalaxb.compiler.wsdl11.{Driver}
// import java.io.{File}
// import scalaxb.compiler.Config
// import scalaxb.compiler.ConfigEntry._
// object Wsdl11Soap11AsyncTest extends TestBase {
// override val module = new Driver // with Verbose
// val packageName = "genericbarcode"
// val inFile = new File("integration/src/test/resources/genericbarcode.wsdl")
// val config = Config.default.update(PackageNames(Map(None -> Some(packageName)))).
// update(Outdir(tmp)).
// update(GenerateGigahorseClient).
// update(GeneratePackageDir)
// lazy val generated = module.process(inFile, config)
// "stockquote.scala file must compile" in {
// (List("""import genericbarcode._
// import scala.concurrent._, ExecutionContext.Implicits._
// import scala.concurrent.duration._""",
// """val service = (new BarCodeSoapBindings with scalaxb.Soap11ClientsAsync with scalaxb.DispatchHttpClientsAsync {}).service
// val data = BarCodeData(120, 120, 0, 1, 1, 20, 20, true, None, None, None, 10.0f, Both, CodeEAN128B, NoneType, BottomCenter, PNG)
// println(scalaxb.toXML(data, "BarCodeParam", defaultScope))
// val fresponse = service.generateBarCode(data, Some("1234"))
// val response = Await.result(fresponse, 5.seconds)
// println(response)""",
// """response.toString.contains("iVB")"""), generated) must evaluateTo(true,
// outdir = "./tmp", usecurrentcp = true)
// }
// "stockquote.scala file must compile with Gigahorse" in {
// (List("""import genericbarcode._
// import scala.concurrent._, ExecutionContext.Implicits._
// import scala.concurrent.duration._""",
// """val service = (new BarCodeSoapBindings with scalaxb.Soap11ClientsAsync with scalaxb.GigahorseHttpClientsAsync {}).service
// val data = BarCodeData(120, 120, 0, 1, 1, 20, 20, true, None, None, None, 10.0f, Both, CodeEAN128B, NoneType, BottomCenter, PNG)
// println(scalaxb.toXML(data, "BarCodeParam", defaultScope))
// val fresponse = service.generateBarCode(data, Some("1234"))
// val response = Await.result(fresponse, 5.seconds)
// println(response)""",
// """response.toString.contains("iVB")"""), generated) must evaluateTo(true,
// outdir = "./tmp", usecurrentcp = true)
// }
// }
| eed3si9n/scalaxb | integration/src/test/scala/Wsdl11Soap11AsyncTest.scala | Scala | mit | 2,331 |
package chapter24
/**
 * 24.13 Performance Characteristics
 *
 * As the discussion so far has shown, different collections have different performance
 * characteristics. Performance is sometimes the main criterion for choosing one collection
 * over another.
*/
object c24_i13 {
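  // Illustrative addition (hypothetical, not in the original note): different collections trade
  // performance off differently, e.g. List prepends in constant time but indexes in linear time,
  // while Vector offers effectively constant-time indexed access.
  def main(args: Array[String]): Unit = {
    val list   = List.range(0, 1000000)
    val vector = Vector.range(0, 1000000)
    // list(999999) walks the whole list; vector(999999) is effectively constant time
    println(list(999999) == vector(999999))
  }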
} | seraekim/srkim-lang-scala | src/main/java/chapter24/c24_i13.scala | Scala | bsd-3-clause | 308 |
package hammock
package asynchttpclient
import cats.implicits._
import cats.effect._
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import org.asynchttpclient._
import io.netty.handler.codec.http.{DefaultHttpHeaders, HttpHeaders}
import io.netty.handler.codec.http.cookie.Cookie
import org.scalatestplus.mockito._
import AsyncHttpClientInterpreter._
import scala.jdk.CollectionConverters._
class AsyncHttpClientInterpreterSpec extends AnyWordSpec with Matchers with MockitoSugar {
implicit val client: AsyncHttpClient = new DefaultAsyncHttpClient()
"asynchttpclient" should {
"map requests correctly" in {
val hreq1 = Get(HttpRequest(uri"http://google.com", Map.empty[String, String], None))
val req1 = mapRequest[IO](hreq1).unsafeRunSync().build()
val hreq2 = Post(HttpRequest(uri"http://google.com", Map("header1" -> "value1"), None))
val req2 = mapRequest[IO](hreq2).unsafeRunSync().build()
val hreq3 = Put(
HttpRequest(
uri"http://google.com",
Map("header1" -> "value1", "header2" -> "value2"),
Some(Entity.StringEntity("the body"))))
val req3 = mapRequest[IO](hreq3).unsafeRunSync().build()
req1.getUrl shouldEqual hreq1.req.uri.show
req1.getMethod shouldEqual "GET"
req1.getHeaders shouldBe empty
req2.getUrl shouldEqual hreq2.req.uri.show
req2.getMethod shouldEqual "POST"
req2.getHeaders.asScala.size shouldEqual hreq2.req.headers.size
req2.getHeaders.asScala.find(_.getKey == "header1").map(_.getValue) shouldEqual Some("value1")
req3.getUrl shouldEqual hreq3.req.uri.show
req3.getMethod shouldEqual "PUT"
req3.getHeaders.asScala.size shouldEqual hreq3.req.headers.size
req3.getHeaders.asScala.find(_.getKey == "header1").map(_.getValue) shouldEqual Some("value1")
req3.getHeaders.asScala.find(_.getKey == "header2").map(_.getValue) shouldEqual Some("value2")
req3.getStringData shouldEqual "the body"
}
"map responses correctly" in {
def genAHCResponse(contentType: String, headers: HttpHeaders, body: String, statusCode: Int) = new Response {
def getContentType(): String = contentType
def getCookies(): java.util.List[Cookie] = ???
def getHeader(x$1: CharSequence): String = ???
def getHeaders(): HttpHeaders = headers
def getHeaders(x$1: CharSequence): java.util.List[String] = ???
def getLocalAddress(): java.net.SocketAddress = ???
def getRemoteAddress(): java.net.SocketAddress = ???
def getResponseBody(): String = body
def getResponseBody(x$1: java.nio.charset.Charset): String = ???
def getResponseBodyAsByteBuffer(): java.nio.ByteBuffer = ???
def getResponseBodyAsBytes(): Array[Byte] = body.toCharArray.map(_.toByte)
def getResponseBodyAsStream(): java.io.InputStream = ???
def getStatusCode(): Int = statusCode
def getStatusText(): String = ???
def getUri(): org.asynchttpclient.uri.Uri = ???
def hasResponseBody(): Boolean = true
def hasResponseHeaders(): Boolean = ???
def hasResponseStatus(): Boolean = ???
def isRedirected(): Boolean = ???
}
val ahcResponse1 =
genAHCResponse("text/plain", new DefaultHttpHeaders().add("header", "value"), "this is the body", 200)
val hammockResponse1 = HttpResponse(Status.OK, Map("header" -> "value"), Entity.StringEntity("this is the body"))
val ahcResponse2 =
genAHCResponse(
"application/json",
new DefaultHttpHeaders().add("Content-type", "application/json"),
"[1,2,3,4]",
200)
val hammockResponse2 =
HttpResponse(Status.OK, Map("Content-type" -> "application/json"), Entity.StringEntity("[1,2,3,4]"))
val ahcResponse3 =
genAHCResponse(
"application/octet-stream",
new DefaultHttpHeaders().add("Content-type", "application/octet-stream"),
"[1,2,3,4]",
200)
val hammockResponse3 =
HttpResponse(
Status.OK,
Map("Content-type" -> "application/octet-stream"),
Entity.ByteArrayEntity("[1,2,3,4]".toCharArray.map(_.toByte)))
val tests = List(
ahcResponse1 -> hammockResponse1,
ahcResponse2 -> hammockResponse2,
ahcResponse3 -> hammockResponse3
)
tests foreach {
case (a, h) =>
(mapResponse[IO](a).unsafeRunSync(), h) match {
case (HttpResponse(s1, h1, e1), HttpResponse(s2, h2, e2)) =>
s1 shouldEqual s2
h1 shouldEqual h2
e1.cata(showStr, showByt, showEmpty) shouldEqual e2.cata(showStr, showByt, showEmpty)
}
}
}
}
def showStr(s: Entity.StringEntity) = s.content
def showByt(b: Entity.ByteArrayEntity) = b.content.mkString("[", ",", "]")
val showEmpty = (_: Entity.EmptyEntity.type) => ""
}
| pepegar/hammock | hammock-asynchttpclient/src/test/scala/hammock/asynchttpclient/AsyncHttpClientInterpreterSpec.scala | Scala | mit | 5,369 |
package org.scalatest.examples.freespec.info
import collection.mutable
import org.scalatest._
class SetSpec extends FreeSpec with GivenWhenThen {
"A mutable Set" - {
"should allow an element to be added" in {
Given("an empty mutable Set")
val set = mutable.Set.empty[String]
When("an element is added")
set += "clarity"
Then("the Set should have size 1")
assert(set.size === 1)
And("the Set should contain the added element")
assert(set.contains("clarity"))
info("That's all folks!")
}
}
}
| hubertp/scalatest | examples/src/main/scala/org/scalatest/examples/freespec/info/SetSpec.scala | Scala | apache-2.0 | 565 |
package io.scalac.newspaper.crawler.fetching
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import akka.testkit.TestKit
import io.scalac.newspaper.crawler.fetching.FetchingFlow._
import io.scalac.newspaper.crawler.fetching.HttpFetchingFlow.FetchingConfig
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import play.api.libs.ws.StandaloneWSClient
import scala.concurrent.duration._
import scala.collection.immutable.{Seq, Set}
import scala.concurrent.{ExecutionContext, Future, TimeoutException}
class HttpFetchingFlowSpec extends TestKit(ActorSystem("test-system")) with WordSpecLike with Matchers with ScalaFutures with BeforeAndAfterAll {
implicit val materializer = ActorMaterializer()
val illegalArgumentException = new IllegalArgumentException()
override def afterAll(): Unit = TestKit.shutdownActorSystem(system)
val cut = new HttpFetchingFlow {
override def fetchingConfig: FetchingConfig = FetchingConfig(3, 10 seconds)
override def get(url: String): Future[URLFetchingResult] = url match {
case "timeout" => Future.failed(new TimeoutException())
case "exception" => Future.failed(illegalArgumentException)
case "badRequest" => Future.successful(URLNotFetched(url, 404, "BadRequest"))
case url: String => Future.successful(url2URLFetched(url))
}
override implicit val ec: ExecutionContext = system.dispatcher
override def wsClient: StandaloneWSClient = ???
}
"HttpFetchingFlow" should {
"fetch provided urls" in {
val urls = Seq("url1", "url2", "url3")
val result = Source(urls)
.via(cut.fetchURLs)
.runWith(Sink.seq)
.futureValue
result.toSet shouldEqual urls.map(url2URLFetched).toSet
}
"handle timeouts and exceptions" in {
val urls = Seq("url1", "timeout", "exception", "url2", "badRequest", "url3")
val result = Source(urls)
.via(cut.fetchURLs)
.runWith(Sink.seq)
.futureValue
result.toSet shouldEqual Set(
url2URLFetched("url1"),
url2URLFetched("url2"),
url2URLFetched("url3"),
URLNotFetched("badRequest", 404, "BadRequest"),
URLFetchingException("exception", illegalArgumentException),
URLFetchingTimeout("timeout")
)
}
}
private def url2URLFetched(url: String): URLFetched = URLFetched(url, "CONTENT")
}
| ScalaConsultants/newspaper | crawler/src/test/scala/io/scalac/newspaper/crawler/fetching/HttpFetchingFlowSpec.scala | Scala | agpl-3.0 | 2,482 |
package models
import javax.inject.{Inject, Singleton}
import models.db.Tables
import play.api.data.{Form, Mapping}
import play.api.data.Forms._
import play.api.data.validation.Constraints._
import play.api.data.validation.{Constraint, Invalid, Valid}
import services.DBService
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import utils.db.TetraoPostgresDriver.api._
case class FormDataLogin(email: String, password: String)
case class FormDataAccount(name:String, email: String, password: String, passwordAgain:String)
@Singleton
class FormData @Inject()(val database: DBService) {
val login = Form(
mapping(
"email" -> email,
"password" -> nonEmptyText
)(FormDataLogin.apply)(FormDataLogin.unapply)
)
val addMessage = Form(
mapping(
"content" -> nonEmptyText,
"tags" -> text
)(Message.formApply)(Message.formUnapply)
)
val uniqueEmail = Constraint[String] { email: String =>
    // look up whether an existing account already uses this email address
val q = Tables.Account.filter { row =>
row.email === email
}
val userFuture = database.runAsync(q.result.headOption)
Await.result(userFuture, Duration.Inf) match {
case Some(user) => Invalid("email already taken")
case None => Valid
}
}
private[this] def accountForm(passwordMapping:Mapping[String]) = Form(
mapping(
"name" -> nonEmptyText,
"email" -> email.verifying(maxLength(250), uniqueEmail),
"password" -> passwordMapping,
"passwordAgain" -> passwordMapping
)(FormDataAccount.apply)(FormDataAccount.unapply)
)
val updateAccount = accountForm(text)
val addAccount = accountForm(nonEmptyText)
}
| asciiu/halo | arbiter/app/models/FormData.scala | Scala | mit | 1,658 |
/*
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.extensions.iterativebatch.runtime
package graph
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
import java.io.File
import scala.concurrent.{ Await, Future }
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import org.apache.hadoop.conf.Configuration
import org.apache.spark.SparkConf
import com.asakusafw.runtime.directio.hadoop.HadoopDataSource
import com.asakusafw.spark.runtime._
import com.asakusafw.spark.runtime.graph.{ CacheOnce, DirectOutputSetup }
@RunWith(classOf[JUnitRunner])
class DirectOutputSetupForIterativeSpecTest extends DirectOutputSetupForIterativeSpec
class DirectOutputSetupForIterativeSpec
extends FlatSpec
with SparkForAll
with JobContextSugar
with RoundContextSugar
with TempDirForAll {
import DirectOutputSetupForIterativeSpec._
behavior of classOf[DirectOutputSetupForIterative].getSimpleName
private var root: File = _
override def configure(conf: SparkConf): SparkConf = {
root = createTempDirectoryForAll("directio-").toFile()
conf.setHadoopConf("com.asakusafw.directio.test", classOf[HadoopDataSource].getName)
conf.setHadoopConf("com.asakusafw.directio.test.path", "test")
conf.setHadoopConf("com.asakusafw.directio.test.fs.path", root.getAbsolutePath)
}
it should "delete simple" in {
implicit val jobContext = newJobContext(sc)
val rounds = 0 to 1
val files = rounds.map { round =>
val file = new File(root, s"out1_${round}/testing.bin")
file.getParentFile.mkdirs()
file.createNewFile()
file
}
val setup = new SetupOnce(new Setup(Set(("id", "test/out1_${round}", Seq("*.bin")))))
val origin = newRoundContext()
val rcs = rounds.map { round =>
newRoundContext(
stageId = s"round_${round}",
batchArguments = Map("round" -> round.toString))
}
assert(files.forall(_.exists()) === true)
Await.result(setup.perform(origin, rcs), Duration.Inf)
assert(files.exists(_.exists()) === false)
}
it should "not delete out of scope" in {
implicit val jobContext = newJobContext(sc)
val rounds = 0 to 1
val files = rounds.map { round =>
val file = new File(root, s"out2_${round}/testing.bin")
file.getParentFile.mkdirs()
file.createNewFile()
file
}
val setup = new SetupOnce(new Setup(Set(("id", "test/out2_${round}", Seq("*.txt")))))
val origin = newRoundContext()
val rcs = rounds.map { round =>
newRoundContext(
stageId = s"round_${round}",
batchArguments = Map("round" -> round.toString))
}
assert(files.forall(_.exists()) === true)
Await.result(setup.perform(origin, rcs), Duration.Inf)
assert(files.forall(_.exists()) === true)
}
it should "not delete out of scope round" in {
implicit val jobContext = newJobContext(sc)
val rounds = 0 to 1
val files = rounds.map { round =>
val file = new File(root, s"out3_${round}/testing.bin")
file.getParentFile.mkdirs()
file.createNewFile()
file
}
val setup = new SetupOnce(new Setup(Set(("id", "test/out3_${round}", Seq("*.bin")))))
val origin = newRoundContext()
val rcs = rounds.map { round =>
newRoundContext(
stageId = s"round_${round}",
batchArguments = Map("round" -> round.toString))
}
assert(files.forall(_.exists()) === true)
Await.result(setup.perform(origin, Seq(rcs.head)), Duration.Inf)
assert(files.head.exists() === false)
assert(files.tail.forall(_.exists()) === true)
Await.result(setup.perform(origin, rcs.tail), Duration.Inf)
assert(files.exists(_.exists()) === false)
}
}
object DirectOutputSetupForIterativeSpec {
class Setup(
val specs: Set[(String, String, Seq[String])])(
implicit jobContext: JobContext)
extends DirectOutputSetup with CacheAlways[RoundContext, Future[Unit]]
class SetupOnce(
setup: DirectOutputSetup)(
implicit jobContext: JobContext)
extends DirectOutputSetupForIterative(setup)
with CacheAlways[Seq[RoundContext], Future[Unit]]
}
| asakusafw/asakusafw-spark | extensions/iterativebatch/runtime/core/src/test/scala/com/asakusafw/spark/extensions/iterativebatch/runtime/graph/DirectOutputSetupForIterativeSpec.scala | Scala | apache-2.0 | 4,784 |
package moe.nightfall.srails.client.renderer.block
import moe.nightfall.srails.SRails
import net.minecraft.block.Block
import net.minecraft.client.Minecraft
import net.minecraft.client.resources.model.ModelResourceLocation
import net.minecraft.item.Item
import scala.collection.mutable
object ModelInitialization {
private val meshableItems = mutable.ArrayBuffer.empty[Item]
def init {
registerItems()
}
def registerModel(instance: Item, id: String): Unit = {
meshableItems += instance
}
def registerModel(instance: Block, id: String): Unit = {
val item = Item.getItemFromBlock(instance)
registerModel(item, id)
}
private def registerItems(): Unit = {
val modelMeshes = Minecraft.getMinecraft.getRenderItem.getItemModelMesher
for (item: Item <- meshableItems) {
      val resourceString = s"${SRails.ID}:${item.getUnlocalizedName.substring(5)}"
      modelMeshes.register(item, 0, new ModelResourceLocation(resourceString, "inventory"))
      SRails.log.info(s"registering ${item.getUnlocalizedName} $resourceString")
}
meshableItems.clear()
}
}
| Nightfall/SRails | src/main/scala/moe/nightfall/srails/client/renderer/block/ModelInitialization.scala | Scala | bsd-2-clause | 1,108 |
package com.rcirka.play.dynamodb.models.indexes
import com.rcirka.play.dynamodb.models.{ProvisionedThroughput, Projection}
import play.api.libs.json.Json
case class TableIndex (
indexName: String,
keySchema: Seq[AttributeIndex],
projection: Option[Projection] = None,
provisionedThroughput: Option[ProvisionedThroughput] = None
)
object TableIndex {
implicit val tableIndexWrites = Json.writes[TableIndex]
}
| rcirka/Play-DynamoDB | src/main/scala/com/rcirka/play/dynamodb/models/indexes/TableIndex.scala | Scala | mit | 421 |
package com.sksamuel.elastic4s.mappings
import com.sksamuel.elastic4s.anaylzers.Analyzer
import org.elasticsearch.common.xcontent.{ XContentFactory, XContentBuilder }
import scala.collection.mutable.ListBuffer
class MappingDefinition(val `type`: String) {
var _all: Option[Boolean] = None
var _source: Option[Boolean] = None
var _sourceExcludes: Iterable[String] = Nil
var date_detection: Option[Boolean] = None
var numeric_detection: Option[Boolean] = None
var _size: Option[Boolean] = None
var dynamic_date_formats: Iterable[String] = Nil
val _fields = new ListBuffer[TypedFieldDefinition]
var _analyzer: Option[String] = None
var _boostName: Option[String] = None
var _boostValue: Double = 0
var _parent: Option[String] = None
var _dynamic: Option[DynamicMapping] = None
var _meta: Map[String, Any] = Map.empty
var _routing: Option[RoutingDefinition] = None
var _timestamp: Option[TimestampDefinition] = None
var _ttl: Option[Boolean] = None
var _templates: Iterable[DynamicTemplateDefinition] = Nil
@deprecated("no longer used, simply set ttl or not", "1.5.4")
def useTtl(useTtl: Boolean): this.type = {
this
}
def all(enabled: Boolean): this.type = {
_all = Option(enabled)
this
}
def analyzer(analyzer: String): this.type = {
_analyzer = Option(analyzer)
this
}
def analyzer(analyzer: Analyzer): this.type = {
_analyzer = Option(analyzer.name)
this
}
def boost(name: String): this.type = {
_boostName = Option(name)
this
}
def boostNullValue(value: Double): this.type = {
_boostValue = value
this
}
def parent(parent: String): this.type = {
_parent = Some(parent)
this
}
def dynamic(dynamic: DynamicMapping): this.type = {
_dynamic = Option(dynamic)
this
}
@deprecated("use the DynamicMapping enum version", "1.5.5")
def dynamic(dynamic: Boolean): this.type = {
_dynamic = dynamic match {
case true => Some(DynamicMapping.Dynamic)
case false => Some(DynamicMapping.False)
}
this
}
def timestamp(enabled: Boolean,
path: Option[String] = None,
format: Option[String] = None,
default: Option[String] = None): this.type = {
this._timestamp = Some(TimestampDefinition(enabled, path, format, default))
this
}
def timestamp(timestampDefinition: TimestampDefinition): this.type = {
this._timestamp = Option(timestampDefinition)
this
}
def ttl(enabled: Boolean): this.type = {
_ttl = Option(enabled)
this
}
def dynamicDateFormats(dynamic_date_formats: String*): this.type = {
this.dynamic_date_formats = dynamic_date_formats
this
}
def meta(map: Map[String, Any]): this.type = {
this._meta = map
this
}
def routing(required: Boolean, path: Option[String] = None): this.type = {
this._routing = Some(RoutingDefinition(required, path))
this
}
def source(source: Boolean): this.type = {
this._source = Option(source)
this
}
  def sourceExcludes(excludes: String*): this.type = {
this._sourceExcludes = excludes
this
}
def dateDetection(date_detection: Boolean): this.type = {
this.date_detection = Some(date_detection)
this
}
def numericDetection(numeric_detection: Boolean): this.type = {
this.numeric_detection = Some(numeric_detection)
this
}
def fields(fields: Iterable[TypedFieldDefinition]): this.type = as(fields)
def as(iterable: Iterable[TypedFieldDefinition]): this.type = {
_fields ++= iterable
this
}
def fields(fields: TypedFieldDefinition*): this.type = as(fields: _*)
def as(fields: TypedFieldDefinition*): this.type = as(fields.toIterable)
def size(size: Boolean): this.type = {
_size = Option(size)
this
}
def dynamicTemplates(temps: Iterable[DynamicTemplateDefinition]): this.type = templates(temps)
def dynamicTemplates(temps: DynamicTemplateDefinition*): this.type = templates(temps)
def templates(temps: Iterable[DynamicTemplateDefinition]): this.type = templates(temps.toSeq:_*)
def templates(temps: DynamicTemplateDefinition*): this.type = {
_templates = temps
this
}
def build: XContentBuilder = {
val builder = XContentFactory.jsonBuilder().startObject()
build(builder)
builder.endObject()
}
def buildWithName: XContentBuilder = {
val builder = XContentFactory.jsonBuilder().startObject()
builder.startObject(`type`)
build(builder)
builder.endObject()
builder.endObject()
}
def build(json: XContentBuilder): Unit = {
for (all <- _all) json.startObject("_all").field("enabled", all).endObject()
(_source, _sourceExcludes) match{
case (_, l) if l.nonEmpty => json.startObject("_source").field("excludes", l.toArray:_*).endObject()
case (Some(source), _) => json.startObject("_source").field("enabled", source).endObject()
case _ =>
}
if (dynamic_date_formats.nonEmpty)
json.field("dynamic_date_formats", dynamic_date_formats.toArray: _*)
for (dd <- date_detection) json.field("date_detection", dd)
for (nd <- numeric_detection) json.field("numeric_detection", nd)
_dynamic.foreach(dynamic => {
json.field("dynamic", dynamic match {
case DynamicMapping.Strict => "strict"
case DynamicMapping.False => "false"
case _ => "dynamic"
})
})
_boostName.foreach(x => json.startObject("_boost").field("name", x).field("null_value", _boostValue).endObject())
_analyzer.foreach(x => json.startObject("_analyzer").field("path", x).endObject())
_parent.foreach(x => json.startObject("_parent").field("type", x).endObject())
_size.foreach(x => json.startObject("_size").field("enabled", x).endObject())
_timestamp.foreach(_.build(json))
for (ttl <- _ttl) json.startObject("_ttl").field("enabled", ttl).endObject()
if (_fields.nonEmpty) {
json.startObject("properties")
for (field <- _fields) {
field.build(json)
}
json.endObject() // end properties
}
if (_meta.nonEmpty) {
json.startObject("_meta")
for (meta <- _meta) {
json.field(meta._1, meta._2)
}
json.endObject()
}
_routing.foreach(routing => {
json.startObject("_routing").field("required", routing.required)
routing.path.foreach(path => json.field("path", path))
json.endObject()
})
if (_templates.nonEmpty) {
json.startArray("dynamic_templates")
for (template <- _templates) template.build(json)
json.endArray()
}
}
}
| tototoshi/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/mappings/MappingDefinition.scala | Scala | apache-2.0 | 6,573 |
package cgta.oscala
package util
import java.io.File
import java.io.FileOutputStream
import cgta.otest.FunSuite
//////////////////////////////////////////////////////////////
// Copyright (c) 2015 Ben Jackman
// All Rights Reserved
// please contact ben@jackman.biz
// for licensing inquiries
// Created by bjackman @ 8/12/15 3:46 PM
//////////////////////////////////////////////////////////////
object TestFileCopier extends FunSuite {
test("basic") {
val a = File.createTempFile("unit-test", "in")
a.delete()
val b = File.createTempFile("unit-test", "out")
b.delete()
def s = FileCopier.partialSync(a, b)
Closing(new FileOutputStream(a)) { os =>
s
Assert.isEquals("", Slurp.asString(b))
os.write("1".getBytesUTF8)
s
s
Assert.isEquals("1", Slurp.asString(b))
os.write("2".getBytesUTF8)
s
s
Assert.isEquals("12", Slurp.asString(b))
s
Assert.isEquals("12", Slurp.asString(b))
}
}
} | cgta/open | oscala/jvm/src/test/scala/cgta/oscala/util/TestFileCopier.scala | Scala | mit | 994 |
package aggregation
import com.github.nscala_time.time.Imports._
import com.github.tototoshi.csv.CSVReader
import spray.json._
import spray.json.DefaultJsonProtocol._
import java.io.File
import scala.collection.mutable.{Map => MuMap}
import scala.collection.immutable.{Map => ImmuMap}
import utils.DateUtils._
case class Engine(interval: Int) {
val validTimeSpan: ValidDateTimeSpan = fetchAvailableData()
val cachedData: MuMap[String, List[Itinerary]] = MuMap()
def fetchAvailableData(): ValidDateTimeSpan = {
val dataDir = new File(s"../data")
if (!dataDir.exists() || !dataDir.isDirectory()) {
      throw new IllegalStateException("No data folder present!" +
        " Make sure it exists and is a directory!")
}
    // This ensures that .DS_STORE or other files are not taken into account
val allData = dataDir.list.withFilter(
_.startsWith("2017")).map(_.split("_")(0))
if (allData.isEmpty) {
      throw new IllegalStateException("No data available! Make sure the" +
        " `data` folder is not empty!")
}
val allDates = allData.map(date => DateTime.parse(date, formatterDateOnly))
// Find min and max dates that can be queried
val firstDate :: otherDates = allDates.toList
val (minDate, maxDayDate) = otherDates.foldLeft((firstDate, firstDate)){
case ((min, max), curr) =>
(if (curr < min) curr else min, if (curr > max) curr else max)
}
val maxDate = maxDayDate.lastStartIntervalOfDay(interval)
ValidDateTimeSpan(minDate, maxDate, interval)
}
def aggregate(startDate: DateTime): JsValue = {
assert(validTimeSpan.dateIsInTimeSpan(startDate))
println("Result not yet computed, needs to compute")
val dateToLoad = startDate.toLocalDate().toString()
val itineraries =
cachedData.getOrElse(dateToLoad, computeCacheValue(dateToLoad))
val res = aggregateItineraries(itineraries, startDate)
res.toJson
}
def computeCacheValue(dateToLoad: String): List[Itinerary] = {
val stops = loadDataFile(dateToLoad)
val itineraries = computeItineraries(stops)
cachedData += (dateToLoad -> itineraries)
itineraries
}
def loadDataFile(dateToLoad: String): List[Stop] = {
println("Data for day not loaded!")
val fileName = "../data/" + dateToLoad + "_out.csv"
// we need to drop the header
CSVReader.open(new File(fileName)).all().drop(1).map(row => Stop(row))
}
def computeItineraries(stops: List[Stop]): List[Itinerary] = {
val allItineraries = for {
tripUnsorted <- stops.groupBy(s => s.tripId).values.toList
} yield {
val start :: trip = tripUnsorted.sortBy(_.departureTime)
val (itineraries, _) = trip.foldLeft(List[Itinerary](), start){
case ((acc, beg), end) =>
(acc :+ Itinerary(beg, end), end)
}
itineraries
}
allItineraries.flatten.filterNot(_.path.isEmpty)
}
def aggregateItineraries(itineraries: List[Itinerary],
intStart: DateTime): ImmuMap[String, Int] = {
val res: MuMap[String, Int] = MuMap()
val intEnd = intStart + interval.minutes
itineraries.foreach {
case Itinerary(start, stop, path, amnt) =>
if (intStart < stop && intEnd > start) {
path.foreach { seg =>
val newAmount = res.getOrElse(seg, 0) + amnt
res += (seg -> newAmount)
}
}
}
res.toMap
}
}
| tOverney/ADA-Project | aggregator/src/main/scala/aggregation/Engine.scala | Scala | apache-2.0 | 3,412 |
package com.taig.tmpltr.engine.html
import com.taig.tmpltr._
import play.api.mvc.Content
class div( val attributes: Attributes, val content: Content )
extends markup.div
with Tag.Body[div, Content]
object div
extends Tag.Body.Appliable[div, Content] | Taig/Play-Tmpltr | app/com/taig/tmpltr/engine/html/div.scala | Scala | mit | 253 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.util
import java.io.Serializable
import java.util.{PriorityQueue => JPriorityQueue}
import scala.collection.generic.Growable
import scala.collection.JavaConverters._
/**
* Bounded priority queue. This class wraps the original PriorityQueue
* class and modifies it such that only the top K elements are retained.
* The top K elements are defined by an implicit Ordering[A].
*/
class BoundedPriorityQueue[A](maxSize: Int)(implicit ord: Ordering[A])
extends Iterable[A] with Growable[A] with Serializable {
private val underlying = new JPriorityQueue[A](maxSize, ord)
override def iterator: Iterator[A] = underlying.iterator.asScala
override def ++=(xs: TraversableOnce[A]): this.type = {
xs.foreach { this += _ }
this
}
override def +=(elem: A): this.type = {
if (size < maxSize) underlying.offer(elem)
else maybeReplaceLowest(elem)
this
}
override def +=(elem1: A, elem2: A, elems: A*): this.type = {
this += elem1 += elem2 ++= elems
}
override def clear() { underlying.clear() }
private def maybeReplaceLowest(a: A): Boolean = {
val head = underlying.peek()
if (head != null && ord.gt(a, head)) {
underlying.poll()
underlying.offer(a)
} else false
}
}
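// Illustrative usage sketch (hypothetical helper, not part of the Spark API):
// keeps only the k largest elements of the input, returned in descending order.
private object BoundedPriorityQueueExample {
  def topK(xs: Seq[Int], k: Int): Seq[Int] = {
    val queue = new BoundedPriorityQueue[Int](k)
    queue ++= xs                 // elements beyond the k largest are dropped
    queue.toSeq.sorted.reverse   // heap iteration order is unspecified, so sort for output
  }
}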
| wgpshashank/spark | core/src/main/scala/spark/util/BoundedPriorityQueue.scala | Scala | apache-2.0 | 2,054 |
/*
* Copyright 2015 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.test.week6
import akka.actor.{ PoisonPill, Props, ActorPath }
import akka.event.LoggingReceive
import akka.persistence.{ AtLeastOnceDelivery, PersistentActor }
import com.test.TestSpec
import akka.pattern.ask
class AtLeastOnceDeliveryTest extends TestSpec {
/**
* - Guaranteeing delivery means retrying until successful
* - Retries are the sender's responsibility
* - The recipient needs to acknowledge receipt
* - Lost receipts lead to duplicate deliveries
   * => at-least-once: the sender delivers the message one or more times,
   * so the recipient may receive and acknowledge it one or more times
*/
/**
* - At-least-once delivery needs the sender and the receiver to collaborate
* - Retrying means taking note that the message needs to be sent
* - Acknowledgement means taking note of the receipt of the confirmation
*/
/**
* - Performing the effect and persisting that it was done cannot be atomic
* - Perform it before persisting for at-least-once semantic
* - Perform it after persisting for at-most-once semantic
*
* - The choice needs to be made based on the underlying business model.
   * - If the processing is idempotent, then using at-least-once semantics
   *   effectively achieves exactly-once processing
*/
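  // A minimal sketch of the two orderings described above (hypothetical actors, not
  // used by this test): `performEffect` stands in for an arbitrary side effect.
  private object DeliverySemanticsSketch {
    case class DoEffect(id: Long)
    abstract class AtLeastOnceActor extends PersistentActor {
      def performEffect(cmd: DoEffect): Unit
      override def receiveCommand: Receive = {
        case cmd: DoEffect ⇒
          performEffect(cmd) // perform the effect first ...
          persist(cmd)(_ ⇒ ()) // ... then persist: a crash in between repeats the effect on redelivery
      }
    }
    abstract class AtMostOnceActor extends PersistentActor {
      def performEffect(cmd: DoEffect): Unit
      override def receiveCommand: Receive = {
        case cmd: DoEffect ⇒
          persist(cmd) { persisted ⇒ // persist first ...
            performEffect(persisted) // ... perform after the write: a crash in between loses the effect
          }
      }
    }
  }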
sealed trait Protocol
case class PublishPost(text: String, id: Long) extends Protocol
case class PostPublished(id: Long) extends Protocol
sealed trait Event
case class PostCreated(text: String) extends Event
// the test thread will play a user that posts a message
sealed trait Api
case class NewPost(text: String, id: Long) extends Api
case class BlogPosted(id: Long) extends Api
case object NrPosted
case class NrPostedResponse(posted: Long)
// The userActor will instruct the publisher to publish a post,
// but the publisher will only do so, when it receives the PublishPost command
class UserActor(subscriber: ActorPath) extends PersistentActor with AtLeastOnceDelivery {
override def persistenceId: String = "userActor"
override def receiveCommand: Receive = LoggingReceive {
case NewPost(text, id) ⇒
persist(PostCreated(text)) { e ⇒
deliver(subscriber)(PublishPost(text, _))
sender() ! BlogPosted(id)
}
case PostPublished(id) ⇒
confirmDelivery(id)
persist(PostPublished(id))(_ ⇒ ())
}
override def receiveRecover: Receive = LoggingReceive {
case PostCreated(text) ⇒ deliver(subscriber)(PublishPost(text, _))
case PostPublished(id) ⇒ confirmDelivery(id)
}
}
class Publisher extends PersistentActor {
override val persistenceId: String = "publisher"
var expectedId = 1L
var nrPosted = 0L
override def receiveRecover: Receive = LoggingReceive {
case PostPublished(id) ⇒
expectedId = id + 1
nrPosted += 1
}
override def receiveCommand: Receive = LoggingReceive {
case PublishPost(text, id) if id > expectedId ⇒
// ignore the message, the sender will retry
case PublishPost(text, id) if id < expectedId ⇒
// already received, just confirm
sender() ! PostPublished(id)
case PublishPost(text, id) if id == expectedId ⇒
persist(PostPublished(id)) { e ⇒
sender() ! e
// modify the website
nrPosted += 1
expectedId += 1
}
case NrPosted ⇒ sender() ! NrPostedResponse(nrPosted)
}
}
"UserActor" should "Retry sending PublishPost command to Publisher" in {
var publisher = system.actorOf(Props(new Publisher), "publisher")
var userActor = system.actorOf(Props(new UserActor(publisher.path)), "userActor")
val tp = probe
tp watch publisher
tp watch userActor
(publisher ? NrPosted).futureValue shouldBe NrPostedResponse(posted = 0)
(userActor ? NewPost("foo", 1)).futureValue shouldBe BlogPosted(1)
(userActor ? NewPost("bar", 2)).futureValue shouldBe BlogPosted(2)
(publisher ? NrPosted).futureValue shouldBe NrPostedResponse(posted = 2)
publisher ! PoisonPill
tp.expectTerminated(publisher)
userActor ! PoisonPill
tp.expectTerminated(userActor)
publisher = system.actorOf(Props(new Publisher), "publisher")
userActor = system.actorOf(Props(new UserActor(publisher.path)), "userActor")
(publisher ? NrPosted).futureValue shouldBe NrPostedResponse(posted = 2)
}
}
| dnvriend/reactive-programming | src/test/scala/com/test/week6/AtLeastOnceDeliveryTest.scala | Scala | apache-2.0 | 5,037 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.kernel.protocol.v5.kernel.socket
import java.nio.charset.Charset
import akka.actor.{ActorSelection, ActorRef, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import akka.util.ByteString
import com.ibm.spark.communication.ZMQMessage
import com.ibm.spark.kernel.protocol.v5._
import com.ibm.spark.kernel.protocol.v5.kernel.{ActorLoader, Utilities}
import com.ibm.spark.kernel.protocol.v5Test._
import Utilities._
import com.typesafe.config.ConfigFactory
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSpecLike, Matchers}
object ShellSpec {
val config ="""
akka {
loglevel = "WARNING"
}"""
}
class ShellSpec extends TestKit(ActorSystem("ShellActorSpec", ConfigFactory.parseString(ShellSpec.config)))
with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {
describe("Shell") {
val socketFactory = mock[SocketFactory]
val actorLoader = mock[ActorLoader]
val socketProbe : TestProbe = TestProbe()
when(socketFactory.Shell(any(classOf[ActorSystem]), any(classOf[ActorRef]))).thenReturn(socketProbe.ref)
val relayProbe : TestProbe = TestProbe()
val relaySelection : ActorSelection = system.actorSelection(relayProbe.ref.path)
when(actorLoader.load(SystemActorType.KernelMessageRelay)).thenReturn(relaySelection)
val shell = system.actorOf(Props(classOf[Shell], socketFactory, actorLoader))
describe("#receive") {
it("( KernelMessage ) should reply with a ZMQMessage via the socket") {
// Use the implicit to convert the KernelMessage to ZMQMessage
val MockZMQMessage : ZMQMessage = MockKernelMessage
shell ! MockKernelMessage
socketProbe.expectMsg(MockZMQMessage)
}
it("( ZMQMessage ) should forward ZMQ Strings and KernelMessage to Relay") {
// Use the implicit to convert the KernelMessage to ZMQMessage
val MockZMQMessage : ZMQMessage = MockKernelMessage
shell ! MockZMQMessage
// Should get the last four (assuming no buffer) strings in UTF-8
val zmqStrings = MockZMQMessage.frames.map((byteString: ByteString) =>
new String(byteString.toArray, Charset.forName("UTF-8"))
).takeRight(4)
val kernelMessage: KernelMessage = MockZMQMessage
relayProbe.expectMsg((zmqStrings, kernelMessage))
}
}
}
}
| codeaudit/spark-kernel | kernel/src/test/scala/com/ibm/spark/kernel/protocol/v5/kernel/socket/ShellSpec.scala | Scala | apache-2.0 | 3,034 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.security.auth
import java.util
import java.util.concurrent.locks.ReentrantReadWriteLock
import kafka.common.{NotificationHandler, ZkNodeChangeNotificationListener}
import kafka.network.RequestChannel.Session
import kafka.security.auth.SimpleAclAuthorizer.VersionedAcls
import kafka.server.KafkaConfig
import kafka.utils.CoreUtils.{inReadLock, inWriteLock}
import kafka.utils._
import org.I0Itec.zkclient.exception.{ZkNodeExistsException, ZkNoNodeException}
import org.apache.kafka.common.security.JaasUtils
import org.apache.kafka.common.security.auth.KafkaPrincipal
import scala.collection.JavaConverters._
import org.apache.log4j.Logger
import scala.util.Random
object SimpleAclAuthorizer {
//optional override zookeeper cluster configuration where acls will be stored, if not specified acls will be stored in
//same zookeeper where all other kafka broker info is stored.
val ZkUrlProp = "authorizer.zookeeper.url"
val ZkConnectionTimeOutProp = "authorizer.zookeeper.connection.timeout.ms"
val ZkSessionTimeOutProp = "authorizer.zookeeper.session.timeout.ms"
//List of users that will be treated as super users and will have access to all the resources for all actions from all hosts, defaults to no super users.
val SuperUsersProp = "super.users"
  //If set to true, the authorizer allows access to everyone when no acls are found for a resource. Defaults to false.
val AllowEveryoneIfNoAclIsFoundProp = "allow.everyone.if.no.acl.found"
/**
* The root acl storage node. Under this node there will be one child node per resource type (Topic, Cluster, Group).
* under each resourceType there will be a unique child for each resource instance and the data for that child will contain
* list of its acls as a json object. Following gives an example:
*
* <pre>
* /kafka-acl/Topic/topic-1 => {"version": 1, "acls": [ { "host":"host1", "permissionType": "Allow","operation": "Read","principal": "User:alice"}]}
* /kafka-acl/Cluster/kafka-cluster => {"version": 1, "acls": [ { "host":"host1", "permissionType": "Allow","operation": "Read","principal": "User:alice"}]}
* /kafka-acl/Group/group-1 => {"version": 1, "acls": [ { "host":"host1", "permissionType": "Allow","operation": "Read","principal": "User:alice"}]}
* </pre>
*/
val AclZkPath = ZkUtils.KafkaAclPath
//notification node which gets updated with the resource name when acl on a resource is changed.
val AclChangedZkPath = ZkUtils.KafkaAclChangesPath
//prefix of all the change notification sequence node.
val AclChangedPrefix = "acl_changes_"
private case class VersionedAcls(acls: Set[Acl], zkVersion: Int)
}
class SimpleAclAuthorizer extends Authorizer with Logging {
private val authorizerLogger = Logger.getLogger("kafka.authorizer.logger")
private var superUsers = Set.empty[KafkaPrincipal]
private var shouldAllowEveryoneIfNoAclIsFound = false
private var zkUtils: ZkUtils = null
private var aclChangeListener: ZkNodeChangeNotificationListener = null
private val aclCache = new scala.collection.mutable.HashMap[Resource, VersionedAcls]
private val lock = new ReentrantReadWriteLock()
// The maximum number of times we should try to update the resource acls in zookeeper before failing;
// This should never occur, but is a safeguard just in case.
protected[auth] var maxUpdateRetries = 10
private val retryBackoffMs = 100
private val retryBackoffJitterMs = 50
/**
* Guaranteed to be called before any authorize call is made.
*/
override def configure(javaConfigs: util.Map[String, _]) {
val configs = javaConfigs.asScala
val props = new java.util.Properties()
configs.foreach { case (key, value) => props.put(key, value.toString) }
superUsers = configs.get(SimpleAclAuthorizer.SuperUsersProp).collect {
case str: String if str.nonEmpty => str.split(";").map(s => KafkaPrincipal.fromString(s.trim)).toSet
}.getOrElse(Set.empty[KafkaPrincipal])
shouldAllowEveryoneIfNoAclIsFound = configs.get(SimpleAclAuthorizer.AllowEveryoneIfNoAclIsFoundProp).exists(_.toString.toBoolean)
// Use `KafkaConfig` in order to get the default ZK config values if not present in `javaConfigs`. Note that this
// means that `KafkaConfig.zkConnect` must always be set by the user (even if `SimpleAclAuthorizer.ZkUrlProp` is also
// set).
val kafkaConfig = KafkaConfig.fromProps(props, doLog = false)
val zkUrl = configs.get(SimpleAclAuthorizer.ZkUrlProp).map(_.toString).getOrElse(kafkaConfig.zkConnect)
val zkConnectionTimeoutMs = configs.get(SimpleAclAuthorizer.ZkConnectionTimeOutProp).map(_.toString.toInt).getOrElse(kafkaConfig.zkConnectionTimeoutMs)
val zkSessionTimeOutMs = configs.get(SimpleAclAuthorizer.ZkSessionTimeOutProp).map(_.toString.toInt).getOrElse(kafkaConfig.zkSessionTimeoutMs)
zkUtils = ZkUtils(zkUrl,
sessionTimeout = zkSessionTimeOutMs,
connectionTimeout = zkConnectionTimeoutMs,
JaasUtils.isZkSecurityEnabled())
zkUtils.makeSurePersistentPathExists(SimpleAclAuthorizer.AclZkPath)
loadCache()
zkUtils.makeSurePersistentPathExists(SimpleAclAuthorizer.AclChangedZkPath)
aclChangeListener = new ZkNodeChangeNotificationListener(zkUtils, SimpleAclAuthorizer.AclChangedZkPath, SimpleAclAuthorizer.AclChangedPrefix, AclChangedNotificationHandler)
aclChangeListener.init()
}
override def authorize(session: Session, operation: Operation, resource: Resource): Boolean = {
val principal = session.principal
val host = session.clientAddress.getHostAddress
val acls = getAcls(resource) ++ getAcls(new Resource(resource.resourceType, Resource.WildCardResource))
//check if there is any Deny acl match that would disallow this operation.
val denyMatch = aclMatch(session, operation, resource, principal, host, Deny, acls)
//if principal is allowed to read, write or delete we allow describe by default, the reverse does not apply to Deny.
val ops = if (Describe == operation)
Set[Operation](operation, Read, Write, Delete)
else
Set[Operation](operation)
//now check if there is any allow acl that will allow this operation.
val allowMatch = ops.exists(operation => aclMatch(session, operation, resource, principal, host, Allow, acls))
    //we allow an operation if the user is a super user, or if no acls are found and the authorizer is configured
    //to allow everyone when no acls are found, or if no deny acl matches and at least one allow acl matches.
val authorized = isSuperUser(operation, resource, principal, host) ||
isEmptyAclAndAuthorized(operation, resource, principal, host, acls) ||
(!denyMatch && allowMatch)
logAuditMessage(principal, authorized, operation, resource, host)
authorized
}
def isEmptyAclAndAuthorized(operation: Operation, resource: Resource, principal: KafkaPrincipal, host: String, acls: Set[Acl]): Boolean = {
if (acls.isEmpty) {
authorizerLogger.debug(s"No acl found for resource $resource, authorized = $shouldAllowEveryoneIfNoAclIsFound")
shouldAllowEveryoneIfNoAclIsFound
} else false
}
def isSuperUser(operation: Operation, resource: Resource, principal: KafkaPrincipal, host: String): Boolean = {
if (superUsers.contains(principal)) {
authorizerLogger.debug(s"principal = $principal is a super user, allowing operation without checking acls.")
true
} else false
}
private def aclMatch(session: Session, operations: Operation, resource: Resource, principal: KafkaPrincipal, host: String, permissionType: PermissionType, acls: Set[Acl]): Boolean = {
acls.find { acl =>
acl.permissionType == permissionType &&
(acl.principal == principal || acl.principal == Acl.WildCardPrincipal) &&
(operations == acl.operation || acl.operation == All) &&
(acl.host == host || acl.host == Acl.WildCardHost)
}.exists { acl =>
authorizerLogger.debug(s"operation = $operations on resource = $resource from host = $host is $permissionType based on acl = $acl")
true
}
}
override def addAcls(acls: Set[Acl], resource: Resource) {
if (acls != null && acls.nonEmpty) {
inWriteLock(lock) {
updateResourceAcls(resource) { currentAcls =>
currentAcls ++ acls
}
}
}
}
override def removeAcls(aclsTobeRemoved: Set[Acl], resource: Resource): Boolean = {
inWriteLock(lock) {
updateResourceAcls(resource) { currentAcls =>
currentAcls -- aclsTobeRemoved
}
}
}
override def removeAcls(resource: Resource): Boolean = {
inWriteLock(lock) {
val result = zkUtils.deletePath(toResourcePath(resource))
updateCache(resource, VersionedAcls(Set(), 0))
updateAclChangedFlag(resource)
result
}
}
override def getAcls(resource: Resource): Set[Acl] = {
inReadLock(lock) {
aclCache.get(resource).map(_.acls).getOrElse(Set.empty[Acl])
}
}
override def getAcls(principal: KafkaPrincipal): Map[Resource, Set[Acl]] = {
inReadLock(lock) {
aclCache.mapValues { versionedAcls =>
versionedAcls.acls.filter(_.principal == principal)
}.filter { case (_, acls) =>
acls.nonEmpty
}.toMap
}
}
override def getAcls(): Map[Resource, Set[Acl]] = {
inReadLock(lock) {
aclCache.mapValues(_.acls).toMap
}
}
def close() {
if (aclChangeListener != null) aclChangeListener.close()
if (zkUtils != null) zkUtils.close()
}
private def loadCache() {
inWriteLock(lock) {
val resourceTypes = zkUtils.getChildren(SimpleAclAuthorizer.AclZkPath)
for (rType <- resourceTypes) {
val resourceType = ResourceType.fromString(rType)
val resourceTypePath = SimpleAclAuthorizer.AclZkPath + "/" + resourceType.name
val resourceNames = zkUtils.getChildren(resourceTypePath)
for (resourceName <- resourceNames) {
val versionedAcls = getAclsFromZk(Resource(resourceType, resourceName.toString))
updateCache(new Resource(resourceType, resourceName), versionedAcls)
}
}
}
}
def toResourcePath(resource: Resource): String = {
SimpleAclAuthorizer.AclZkPath + "/" + resource.resourceType + "/" + resource.name
}
private def logAuditMessage(principal: KafkaPrincipal, authorized: Boolean, operation: Operation, resource: Resource, host: String) {
val permissionType = if (authorized) "Allowed" else "Denied"
authorizerLogger.debug(s"Principal = $principal is $permissionType Operation = $operation from host = $host on resource = $resource")
}
/**
   * Safely updates the resource's ACLs by ensuring reads and writes respect the expected zookeeper version.
   * Continues to retry until it successfully updates zookeeper.
*
* Returns a boolean indicating if the content of the ACLs was actually changed.
*
* @param resource the resource to change ACLs for
* @param getNewAcls function to transform existing acls to new ACLs
* @return boolean indicating if a change was made
*/
private def updateResourceAcls(resource: Resource)(getNewAcls: Set[Acl] => Set[Acl]): Boolean = {
val path = toResourcePath(resource)
var currentVersionedAcls =
if (aclCache.contains(resource))
getAclsFromCache(resource)
else
getAclsFromZk(resource)
var newVersionedAcls: VersionedAcls = null
var writeComplete = false
var retries = 0
while (!writeComplete && retries <= maxUpdateRetries) {
val newAcls = getNewAcls(currentVersionedAcls.acls)
val data = Json.encode(Acl.toJsonCompatibleMap(newAcls))
val (updateSucceeded, updateVersion) =
if (newAcls.nonEmpty) {
updatePath(path, data, currentVersionedAcls.zkVersion)
} else {
trace(s"Deleting path for $resource because it had no ACLs remaining")
(zkUtils.conditionalDeletePath(path, currentVersionedAcls.zkVersion), 0)
}
if (!updateSucceeded) {
trace(s"Failed to update ACLs for $resource. Used version ${currentVersionedAcls.zkVersion}. Reading data and retrying update.")
Thread.sleep(backoffTime)
currentVersionedAcls = getAclsFromZk(resource)
retries += 1
} else {
newVersionedAcls = VersionedAcls(newAcls, updateVersion)
writeComplete = updateSucceeded
}
}
if(!writeComplete)
throw new IllegalStateException(s"Failed to update ACLs for $resource after trying a maximum of $maxUpdateRetries times")
if (newVersionedAcls.acls != currentVersionedAcls.acls) {
debug(s"Updated ACLs for $resource to ${newVersionedAcls.acls} with version ${newVersionedAcls.zkVersion}")
updateCache(resource, newVersionedAcls)
updateAclChangedFlag(resource)
true
} else {
debug(s"Updated ACLs for $resource, no change was made")
updateCache(resource, newVersionedAcls) // Even if no change, update the version
false
}
}
/**
   * Updates a zookeeper path with an expected version. If the path does not exist, it will create it.
   * Returns whether the update was successful and the new version.
*/
private def updatePath(path: String, data: String, expectedVersion: Int): (Boolean, Int) = {
try {
zkUtils.conditionalUpdatePersistentPathIfExists(path, data, expectedVersion)
} catch {
case _: ZkNoNodeException =>
try {
debug(s"Node $path does not exist, attempting to create it.")
zkUtils.createPersistentPath(path, data)
(true, 0)
} catch {
case _: ZkNodeExistsException =>
debug(s"Failed to create node for $path because it already exists.")
(false, 0)
}
}
}
private def getAclsFromCache(resource: Resource): VersionedAcls = {
aclCache.getOrElse(resource, throw new IllegalArgumentException(s"ACLs do not exist in the cache for resource $resource"))
}
private def getAclsFromZk(resource: Resource): VersionedAcls = {
val (aclJson, stat) = zkUtils.readDataMaybeNull(toResourcePath(resource))
VersionedAcls(aclJson.map(Acl.fromJson).getOrElse(Set()), stat.getVersion)
}
private def updateCache(resource: Resource, versionedAcls: VersionedAcls) {
if (versionedAcls.acls.nonEmpty) {
aclCache.put(resource, versionedAcls)
} else {
aclCache.remove(resource)
}
}
private def updateAclChangedFlag(resource: Resource) {
zkUtils.createSequentialPersistentPath(SimpleAclAuthorizer.AclChangedZkPath + "/" + SimpleAclAuthorizer.AclChangedPrefix, resource.toString)
}
private def backoffTime = {
retryBackoffMs + Random.nextInt(retryBackoffJitterMs)
}
object AclChangedNotificationHandler extends NotificationHandler {
override def processNotification(notificationMessage: String) {
val resource: Resource = Resource.fromString(notificationMessage)
inWriteLock(lock) {
val versionedAcls = getAclsFromZk(resource)
updateCache(resource, versionedAcls)
}
}
}
}
| ijuma/kafka | core/src/main/scala/kafka/security/auth/SimpleAclAuthorizer.scala | Scala | apache-2.0 | 15,877 |
import sbt._
class SBinaryProject(info: ProjectInfo) extends ParentProject(info) with NoPublish
{
// publishing
override def managedStyle = ManagedStyle.Maven
val publishTo = "Scala Tools Nexus" at "http://nexus.scala-tools.org/content/repositories/releases/"
Credentials(Path.userHome / ".ivy2" / ".credentials", log)
lazy val core = project("core", "SBinary", new CoreProject(_))
lazy val treeExample = project("examples" / "bt", "Binary Tree Example", new ExampleProject(_), core)
class ExampleProject(info: ProjectInfo) extends DefaultProject(info) with NoPublish
{
override def scratch = true
}
class CoreProject(info: ProjectInfo) extends DefaultProject(info) with TemplateProject
{
val sc =
if(buildScalaVersion.startsWith("2.7")) "org.scala-tools.testing" %% "scalacheck" % "1.6" % "test"
else "org.scala-tools.testing" % "scalacheck" % "1.7-SNAPSHOT" % "test" from("http://scalacheck.googlecode.com/files/scalacheck_2.8.0.Beta1-1.7-SNAPSHOT.jar")
override def mainResources = super.mainResources +++ "LICENSE"
}
}
trait NoPublish extends BasicManagedProject
{
override def deliverAction = publishAction
override def publishAction = task { None }
} | mikegoatly/sbinary | project/build/SBinaryProject.scala | Scala | mit | 1,215 |
package org.machine.engine.encoder.json
import org.machine.engine.graph.commands.EngineCmdResult
import org.machine.engine.graph.nodes.ElementDefinition
object ElementDefinitionJSONSerializer extends JSONSerializer[ElementDefinition]{
import net.liftweb.json._
import net.liftweb.json.JsonDSL._
def serialize(result: EngineCmdResult, results: Seq[ElementDefinition]): String = {
val json =
(
("status" -> result.status.value) ~
("errorMessage" -> result.errorMessage) ~
("ElementDefinitions" ->
results.map{ ed =>
(
("id" -> ed.id) ~
("name" -> ed.name) ~
("description" -> ed.description) ~
("properties" ->
ed.properties.map{ p =>
(
("id" -> p.id)~
("name" -> p.name)~
("type" -> p.propertyType)~
("description" -> p.description)
)
}
)
)
})
)
return prettyRender(json)
}
}
| sholloway/graph-engine | src/main/scala/org/machine/engine/encoder/json/ElementDefinitionJSONSerializer.scala | Scala | mit | 1,057 |
/*
* Copyright 2017 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.featran
import simulacrum.typeclass
import scala.annotation.implicitNotFound
/** Type class for floating point primitives. */
@implicitNotFound("Could not find an instance of FloatingPoint for ${T}")
@typeclass trait FloatingPoint[@specialized(Float, Double) T] extends Serializable {
def fromDouble(x: Double): T
}
object FloatingPoint {
implicit val floatFP: FloatingPoint[Float] = new FloatingPoint[Float] {
override def fromDouble(x: Double): Float = x.toFloat
}
implicit val doubleFP: FloatingPoint[Double] = new FloatingPoint[Double] {
override def fromDouble(x: Double): Double = x
}
/* ======================================================================== */
/* THE FOLLOWING CODE IS MANAGED BY SIMULACRUM; PLEASE DO NOT EDIT!!!! */
/* ======================================================================== */
/** Summon an instance of [[FloatingPoint]] for `T`. */
@inline def apply[T](implicit instance: FloatingPoint[T]): FloatingPoint[T] = instance
object ops {
implicit def toAllFloatingPointOps[T](target: T)(implicit tc: FloatingPoint[T]): AllOps[T] {
type TypeClassType = FloatingPoint[T]
} = new AllOps[T] {
type TypeClassType = FloatingPoint[T]
val self: T = target
val typeClassInstance: TypeClassType = tc
}
}
trait Ops[@specialized(Float, Double) T] extends Serializable {
type TypeClassType <: FloatingPoint[T]
def self: T
val typeClassInstance: TypeClassType
}
trait AllOps[@specialized(Float, Double) T] extends Ops[T]
trait ToFloatingPointOps extends Serializable {
implicit def toFloatingPointOps[T](target: T)(implicit tc: FloatingPoint[T]): Ops[T] {
type TypeClassType = FloatingPoint[T]
} = new Ops[T] {
type TypeClassType = FloatingPoint[T]
val self: T = target
val typeClassInstance: TypeClassType = tc
}
}
object nonInheritedOps extends ToFloatingPointOps
/* ======================================================================== */
/* END OF SIMULACRUM-MANAGED CODE */
/* ======================================================================== */
}
| spotify/featran | core/src/main/scala/com/spotify/featran/FloatingPoint.scala | Scala | apache-2.0 | 2,800 |
package org.fuckboi.ast
import org.objectweb.asm.MethodVisitor
import org.fuckboi.{SymbolTable}
import org.objectweb.asm.Opcodes._
import org.parboiled.errors.ParsingException
case class DeclareIntNode(variable: String, value: OperandNode) extends StatementNode {
def generate(mv: MethodVisitor, symbolTable: SymbolTable) = {
symbolTable.putVariable(variable)
value.generate(mv, symbolTable)
if (value.isInstanceOf[NumberNode] || value.isInstanceOf[VariableNode]) {
mv.visitVarInsn(ISTORE, symbolTable.getVariableAddress(variable))
}
else throw new ParsingException("CANNOT INITIALIZE INT WITH BOOLEAN VALUE")
}
} | VirenMohindra/Fuckboi | src/main/scala/org/fuckboi/ast/DeclareIntNode.scala | Scala | mit | 648 |
package colossus
import akka.util.ByteString
import colossus.metrics.MetricAddress
import org.scalatest.concurrent.Eventually
import scala.concurrent.duration._
import colossus.protocols.redis.{IntegerReply, Command}
class RedisITSpec extends BaseRedisITSpec with Eventually {
val keyPrefix = "colossusKeyIT"
"Redis key and string commands" should {
val value = ByteString("value")
"append" in {
val appKey = getKey()
val res = for {
x <- client.append(appKey, value) //should create if key doesn't exist
y <- client.append(appKey, value) //should append
} yield { (x, y) }
res.futureValue must be((5, 10))
}
"decr" in {
val key = getKey()
val res = for {
x <- client.decr(key) //should create if key doesn't exist
y <- client.decr(key)
z <- client.get(key)
} yield { (x, y, z) }
res.futureValue must be((-1, -2, Some(ByteString("-2"))))
}
"decrby" in {
val key = getKey()
val res = for {
x <- client.decrBy(key, 3) //should create if key doesn't exist
y <- client.decrBy(key, 3)
z <- client.get(key)
} yield { (x, y, z) }
res.futureValue must be((-3, -6, Some(ByteString("-6"))))
}
"del" in {
val delKey = getKey()
val res = for {
w <- client.del(delKey)
x <- client.set(delKey, value)
y <- client.del(delKey)
} yield { (w, x, y) }
res.futureValue must be((0, true, 1))
}
"exists" in {
val exKey = getKey()
val res = for {
x <- client.exists(exKey)
y <- client.set(exKey, value)
z <- client.exists(exKey)
} yield { (x, y, z) }
res.futureValue must be((false, true, true))
}
"expire" in {
val key = getKey()
val res = for {
x <- client.expire(key, 1.second) //doesn't exist, should get false
y <- client.set(key, value) //set key
z <- client.expire(key, 10.seconds) //create expiration
ttl <- client.ttl(key) //get ttl
} yield { (x, y, z, ttl >= 0) }
//not checking for the actual TTL, that value will be hard to pin down, just its existence is good
res.futureValue must be((false, true, true, true))
}
"expireat" in {
val key = getKey()
val tomorrow = (System.currentTimeMillis() / 1000) + 86400
val res = for {
x <- client.expireat(key, tomorrow)
y <- client.set(key, value)
z <- client.expireat(key, tomorrow)
ttl <- client.ttl(key)
} yield { (x, y, z, ttl >= 0) }
//not checking for the actual TTL, that value will be hard to pin down, just its existence is good
res.futureValue must be((false, true, true, true))
}
"get && set" in {
val setKey = getKey()
val res = for {
x <- client.set(setKey, value)
y <- client.get(setKey)
} yield { (x, y) }
res.futureValue must be((true, Some(value)))
}
"getOption && set" in {
val setKey = getKey()
val res = for {
x <- client.get(setKey)
y <- client.set(setKey, value)
z <- client.get(setKey)
} yield { (x, y, z) }
res.futureValue must be((None, true, Some(value)))
}
"getrange" in {
val setKey = getKey()
val res = for {
w <- client.getrange(setKey, 0, 1)
x <- client.set(setKey, value)
y <- client.getrange(setKey, 0, 1)
} yield { (w, x, y) }
res.futureValue must be((ByteString(""), true, ByteString("va")))
}
"getset" in {
val key = getKey()
val res = for {
x <- client.set(key, value)
y <- client.getset(key, ByteString("value2"))
z <- client.get(key)
} yield {
(x, y, z)
}
res.futureValue must be((true, Some(value), Some(ByteString("value2"))))
}
"incr" in {
val key = getKey()
val res = for {
x <- client.incr(key)
y <- client.incr(key)
z <- client.get(key)
} yield { (x, y, z) }
res.futureValue must be((1, 2, Some(ByteString("2"))))
}
"incrby" in {
val key = getKey()
val res = for {
x <- client.incrby(key, 10)
y <- client.incrby(key, 10)
z <- client.get(key)
} yield { (x, y, z) }
res.futureValue must be((10, 20, Some(ByteString("20"))))
}
"incrbyfloat" in {
val key = getKey()
val res = for {
x <- client.incrbyfloat(key, 10.25)
y <- client.incrbyfloat(key, 10.25)
z <- client.get(key)
} yield { (x, y, z) }
res.futureValue must be((10.25, 20.5, Some(ByteString("20.5"))))
}
"keys" in {
val key1 = getKey("colossusKeysKeysIT")
val key2 = getKey("colossusKeysKeysIT")
val res = for {
_ <- client.set(key1, value)
_ <- client.set(key2, value)
x <- client.keys(ByteString("colossusKeysKeysIT*"))
} yield {
x
}
res.futureValue.toSet must be(Set(key1, key2))
}
"mget" in {
val key1 = getKey()
val key2 = getKey()
val key3 = getKey() //intentionally left blank
val res = for {
_ <- client.set(key1, value)
_ <- client.set(key2, value)
x <- client.mget(key1, key2, key3)
} yield {
x
}
res.futureValue must be(Seq(Some(value), Some(value), None))
}
"mset" in {
val key1 = getKey()
val key2 = getKey()
val res = for {
x <- client.mset(key1, value, key2, value)
y <- client.mget(key1, key2)
} yield {
(x, y)
}
res.futureValue must be((true, Seq(Some(value), Some(value))))
}
"msetnx" in {
val key1 = getKey()
val key2 = getKey()
val key3 = getKey()
val res = for {
x <- client.msetnx(key1, value, key2, value) //should work, both keys don't exist
y <- client.msetnx(key1, value, key3, value) //should not work, one key is already set
z <- client.mget(key1, key2, key3)
} yield {
(x, y, z)
}
res.futureValue must be((true, false, Seq(Some(value), Some(value), None)))
}
"persist" in {
val key1 = getKey()
val res = for {
w <- client.persist(key1) //non existent, should be false
x <- client.setex(key1, value, 10.minutes) //set value
y <- client.persist(key1) //should remove ttl
z <- client.ttl(key1) //should not be set
} yield {
(w, x, y, z >= 0)
}
res.futureValue must be((false, true, true, false))
}
"pexpire" in {
val key = getKey()
val res = for {
x <- client.pexpire(key, 1.second) //doesn't exist, should get false
y <- client.set(key, value) //set key
z <- client.pexpire(key, 10.seconds) //create expiration
ttl <- client.ttl(key) //get ttl
} yield { (x, y, z, ttl >= 0) }
//not checking for the actual TTL, that value will be hard to pin down, just its existence is good
res.futureValue must be((false, true, true, true))
}
"pexpireat" in {
val key = getKey()
val tomorrow = System.currentTimeMillis() + 86400
val res = for {
x <- client.pexpireat(key, tomorrow)
y <- client.set(key, value)
z <- client.pexpireat(key, tomorrow)
ttl <- client.ttl(key)
} yield { (x, y, z, ttl >= 0) }
//not checking for the actual TTL, that value will be hard to pin down, just its existence is good
res.futureValue must be((false, true, true, true))
}
"psetex" in {
val setexKey = getKey()
val res = for {
w <- client.ttl(setexKey)
x <- client.psetex(setexKey, value, 10.seconds)
y <- client.get(setexKey)
z <- client.ttl(setexKey) //can't test for exact value
} yield { (w >= 0, x, y, z >= 0) }
res.futureValue must be((false, true, Some(value), true))
}
"pttl" in {
val key = getKey()
val res = for {
w <- client.pttl(key)
x <- client.setex(key, value, 10.seconds)
y <- client.get(key)
z <- client.pttl(key) //can't test for exact value
} yield { (w >= 0, x, y, z >= 0) }
res.futureValue must be((false, true, Some(value), true))
}
"randomkey" in {
val key = getKey()
val key2 = getKey()
val res = for {
_ <- client.mset(key, value, key2, value)
x <- client.randomkey()
} yield {
x.isDefined
}
res.futureValue must be(true)
}
"rename" in {
val key = getKey()
val key2 = getKey()
val res = for {
_ <- client.set(key, value)
x <- client.rename(key, key2)
y <- client.mget(key, key2)
} yield {
(x, y)
}
res.futureValue must be((true, Seq(None, Some(value))))
}
"renamenx" in {
val key = getKey()
val key2 = getKey()
val key3 = getKey()
val res = for {
_ <- client.mset(key, value, key2, value)
x <- client.renamenx(key, key2) //should fail
y <- client.renamenx(key, key3)
z <- client.mget(key, key2, key3)
} yield {
(x, y, z)
}
res.futureValue must be((false, true, Seq(None, Some(value), Some(value))))
}
"set (basic)" in {
val key = getKey()
client.set(key, value).futureValue must be(true)
}
"set (with NX)" in {
val key = getKey()
val key2 = getKey()
val res = for {
x <- client.set(key, value)
y <- client.set(key, value, notExists = true)
z <- client.set(key2, value, notExists = true)
} yield (x, y, z)
res.futureValue must be((true, false, true))
}
"set (with EX)" in {
val key = getKey()
val key2 = getKey()
val res = for {
x <- client.set(key, value)
y <- client.set(key, value, exists = true)
z <- client.set(key2, value, exists = true)
} yield (x, y, z)
res.futureValue must be((true, true, false))
}
"set (with ttl)" in {
val key = getKey()
val res = for {
x <- client.set(key, value, ttl = 10.seconds)
y <- client.ttl(key)
} yield (x, y >= 0 && y <= 10)
res.futureValue must be((true, true))
}
"setnx" in {
val setnxKey = getKey()
val res = for {
x <- client.setnx(setnxKey, value)
y <- client.get(setnxKey)
z <- client.setnx(setnxKey, value)
} yield { (x, y, z) }
res.futureValue must be((true, Some(value), false))
}
"setex && ttl" in {
val setexKey = getKey()
val res = for {
w <- client.ttl(setexKey)
x <- client.setex(setexKey, value, 10.seconds)
y <- client.get(setexKey)
z <- client.ttl(setexKey) //can't test for exact value
} yield { (w >= 0, x, y, z >= 0) }
res.futureValue must be((false, true, Some(value), true))
}
"strlen" in {
val strlenKey = getKey()
val res = for {
w <- client.strlen(strlenKey)
x <- client.set(strlenKey, value)
y <- client.strlen(strlenKey)
} yield { (w, x, y) }
      res.futureValue must be((0, true, 5))
}
"generic command" in {
      client.send(Command.c("DBSIZE")).map {
        case IntegerReply(_) => // any integer reply is acceptable
        case other => throw new Exception(s"bad response! $other")
      }.futureValue
}
"tag requests with operation" in {
client.set(getKey(), value).futureValue
eventually {
val requests = metricSystem
.collectionIntervals(1.second)
.last(MetricAddress("redis/requests/count"))
assert(requests.nonEmpty)
requests.keys.foreach { tags =>
assert(tags.contains("op"))
}
}
}
}
}
| tumblr/colossus | colossus-tests/src/it/scala/colossus/RedisITSpec.scala | Scala | apache-2.0 | 11,985 |
object MainProperties extends BuildConf {
val fileName = ".main.build.conf"
val organization = getString("main.organization", "nooostab")
val name = getString("main.name", "spark-notebook")
} | radek1st/spark-notebook | project/MainProperties.scala | Scala | apache-2.0 | 206 |
package com.twitter.util.validation
import com.twitter.util.validation.engine.ConstraintViolationHelper
import jakarta.validation.ConstraintViolation
import java.lang.annotation.Annotation
import org.junit.runner.RunWith
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.junit.JUnitRunner
import scala.reflect.runtime.universe._
object AssertViolationTest {
case class WithViolation[T](
path: String,
message: String,
invalidValue: Any,
rootBeanClazz: Class[T],
root: T,
annotation: Option[Annotation] = None)
}
@RunWith(classOf[JUnitRunner])
abstract class AssertViolationTest extends AnyFunSuite with Matchers {
import AssertViolationTest._
protected def validator: ScalaValidator
protected def assertViolations[T: TypeTag](
obj: T,
groups: Seq[Class[_]] = Seq.empty,
withViolations: Seq[WithViolation[T]] = Seq.empty
): Unit = {
val violations: Set[ConstraintViolation[T]] = validator.validate(obj, groups: _*)
assertViolations(violations, withViolations)
}
protected def assertViolations[T: TypeTag](
violations: Set[ConstraintViolation[T]],
withViolations: Seq[WithViolation[T]]
): Unit = {
violations.size should equal(withViolations.size)
val sortedViolations: Seq[ConstraintViolation[T]] =
ConstraintViolationHelper.sortViolations(violations)
for ((constraintViolation, index) <- sortedViolations.zipWithIndex) {
val withViolation = withViolations(index)
constraintViolation.getMessage should equal(withViolation.message)
if (constraintViolation.getPropertyPath == null) withViolation.path should be(null)
else constraintViolation.getPropertyPath.toString should equal(withViolation.path)
constraintViolation.getInvalidValue should equal(withViolation.invalidValue)
constraintViolation.getRootBeanClass should equal(withViolation.rootBeanClazz)
      constraintViolation.getRootBean
        .asInstanceOf[T].getClass.getName should equal(withViolation.root.getClass.getName)
constraintViolation.getLeafBean should not be null
withViolation.annotation.foreach { ann =>
constraintViolation.getConstraintDescriptor.getAnnotation should equal(ann)
}
}
}
}
| twitter/util | util-validator/src/test/scala/com/twitter/util/validation/AssertViolationTest.scala | Scala | apache-2.0 | 2,288 |
// Jubatus: Online machine learning framework for distributed environment
// Copyright (C) 2014-2015 Preferred Networks and Nippon Telegraph and Telephone Corporation.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License version 2.1 as published by the Free Software Foundation.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
package us.jubat.jubaql_server.processor
import RunMode.Development
import scala.concurrent.future
import scala.concurrent.ExecutionContext.Implicits.global
import org.apache.spark.SparkContext
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.hadoop.io.{Text, LongWritable}
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat
import org.apache.hadoop.fs.Path
import com.typesafe.scalalogging.slf4j.LazyLogging
import scala.util.matching.Regex
import org.apache.spark.streaming.dstream.{OrderedFileInputDStream, ConstantInputDStream, DStream}
import org.apache.spark.rdd.RDD
import java.io.File
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.storage.StorageLevel
import org.apache.spark.SparkContext._
import kafka.serializer.StringDecoder
import scala.collection.mutable.Queue
import org.apache.spark.sql.{SchemaRDD, SQLContext}
import org.apache.spark.sql.catalyst.types.StructType
import org.json4s.JValue
import org.json4s.native.JsonMethods._
// "struct" holding the number of processed items, runtime in ms and largest seen id
case class ProcessingInformation(itemCount: Long, runtime: Long, maxId: Option[String])
// an object describing the state of the processor
sealed trait ProcessorState
case object Initialized extends ProcessorState
case object Running extends ProcessorState
case object Finished extends ProcessorState
class HybridProcessor(sc: SparkContext,
sqlc: SQLContext,
storageLocation: String,
streamLocations: List[String],
runMode: RunMode = RunMode.Development,
checkpointDir: String = "file:///tmp/spark")
extends LazyLogging {
/*
* We want to do processing of static data first, then continue with
* stream data. Various approaches are thinkable:
* 1. Create a HybridDStream as a subclass of InputDStream,
* 2. create a HybridReceiver as a subclass of Receiver and turn
* it into a DStream by means of StreamingContext.receiveStream(),
* 3. process static and stream data one after another using two
* different StreamingContexts.
*
* A receiver must implement onStart(), onStop() and write the received data
* to Spark's pipeline from a separate thread using the store() method. This
* works nicely with the existing receivers such as KafkaReceiver, but custom
* code is necessary to work with HDFS files and it might be tough to get the
* parallel reading done right.
* <http://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.streaming.receiver.Receiver>
*
* An InputDStream must implement start(), stop(), and compute(time) to generate
* an RDD with data collected in a certain interval. However, there seems to be
* a subtle difference between an InputDStream running on a driver and a
* ReceiverInputDStream that runs a receiver on worker nodes. It seems difficult
* to write one DStream class that gets the parallelism in HDFS and stream
* processing right.
* <http://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.streaming.dstream.InputDStream>
* <http://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.streaming.dstream.ReceiverInputDStream>
*
* Therefore we use two different StreamingContexts with one doing processing
* of static data, the other one streaming data.
*/
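  /* Hedged illustration (not part of the original source): a minimal caller sketch of the
   * two-StreamingContexts design described above. The locations and the processing lambda
   * below are assumptions made up for this example.
   *
   *   val processor = new HybridProcessor(sc, sqlc,
   *     storageLocation = "hdfs:///data/static",
   *     streamLocations = List("kafka://zookeeper:2181/mytopic/mygroup"))
   *   val (stop, maxStaticId) = processor.startJValueProcessing(rdd => rdd.foreach(println))
   *   // ... later, e.g. triggered by a user request:
   *   val (staticInfo, streamInfo) = stop()
   */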
require(streamLocations.size <= 1,
"More than one stream location is not supported at the moment.")
// find the number of workers available to us.
val _runCmd = scala.util.Properties.propOrElse("sun.java.command", "")
val _master = sc.getConf.get("spark.master", "")
val numCoresRe = ".*--executor-cores ([0-9]+) --num-executors ([0-9]+).*".r
val totalNumCores = _runCmd match {
case numCoresRe(coresPerExecutor, numExecutors) =>
coresPerExecutor.toInt * numExecutors.toInt
case _ =>
0
}
if (totalNumCores > 0)
logger.debug("total number of cores: " + totalNumCores)
else
logger.warn("could not extract number of cores from run command: " + _runCmd)
/// define the STORAGE sources that we can use
// a file in the local file system (must be accessible by all executors)
val fileRe = """file://(.+)""".r
// a file in HDFS
val hdfsRe = """(hdfs://.+)""".r
// an empty data set
val emptyRe = """^empty(.?)""".r
/// define the STREAM sources that we can use
// a Kafka message broker (host:port/topic/groupid)
val kafkaRe = """kafka://([^/]+)/([^/]+)/([^/]+)$""".r
// endless dummy JSON data
val dummyRe = """^dummy(.?)""".r
val validStaticLocations: List[Regex] = emptyRe :: fileRe :: hdfsRe :: Nil
val validStreamLocations: List[Regex] = dummyRe :: kafkaRe :: Nil
// check if storageLocation matches one of the valid regexes
if (!validStaticLocations.exists(_.findFirstIn(storageLocation).isDefined)) {
throw new IllegalArgumentException(s"'$storageLocation' is not a valid storage " +
"specification")
}
// check if all given streamLocations match one of the valid regexes
val badStreamLocations = streamLocations.filter(loc =>
!validStreamLocations.exists(_.findFirstIn(loc).isDefined))
badStreamLocations collectFirst {
case loc =>
throw new IllegalArgumentException(s"'$loc' is not a valid stream specification")
}
type IdType = String
// Holds the current streaming context
protected var ssc_ : StreamingContext = null
def currentStreamingContext() = ssc_
// Flag that stores whether static data processing completed successfully
protected var staticProcessingComplete = false
// Flag that stores whether user stopped data processing manually
protected var userStoppedProcessing = false
// state of the processor
protected var _state: ProcessorState = Initialized
protected def setState(newState: ProcessorState) = synchronized {
_state = newState
}
def state: ProcessorState = synchronized {
_state
}
/**
* Start hybrid processing using the given RDD[JValue] operation.
*
* The stream data will be parsed into a JValue (if possible) and the
* transformation is expected to act on the resulting RDD[JValue].
* Note that *as opposed to* the `start(SchemaRDD => SchemaRDD)` version,
* if the input RDD is empty, the function will still be executed.
*
* @param process an RDD operation that will be performed on each batch
* @return one function to stop processing and one to get the highest IDs seen so far
*/
def startJValueProcessing(process: RDD[JValue] => Unit): (() => (ProcessingInformation, ProcessingInformation),
() => Option[IdType]) = {
val parseJsonStringIntoOption: (String => Traversable[JValue]) = line => {
val maybeJson = parseOpt(line)
if (maybeJson.isEmpty) {
// logger is not serializable, therefore use println
println("[ERROR] unparseable JSON: " + line)
}
maybeJson
}
// parse DStream[String] into DStream[JValue] item by item,
// skipping unparseable strings
val parseJsonDStream = (stream: DStream[String]) =>
stream.flatMap(parseJsonStringIntoOption)
val processJsonDStream: DStream[JValue] => Unit =
_.foreachRDD(process)
// start processing
_start(parseJsonDStream, processJsonDStream)
}
/**
* Start hybrid processing using the given SchemaRDD operation.
*
* The stream data will be equipped with a schema (either as passed
* as a parameter or as inferred by `SQLContext.jsonRDD()`) and the
* operation is expected to act on the resulting SchemaRDD.
* Note that if the RDD is empty, the given function will not be
* executed at all (not even with an empty RDD as a parameter).
*
* @param process an RDD operation that will be performed on each batch
* @return one function to stop processing and one to get the highest IDs seen so far
*/
def startTableProcessing(process: SchemaRDD => Unit,
schema: Option[StructType]): (() => (ProcessingInformation, ProcessingInformation),
() => Option[IdType]) = {
// parse DStream[String] into a row/column shaped stream
val parseJson: DStream[String] => SchemaDStream = schema match {
case Some(givenSchema) =>
SchemaDStream.fromStringStreamWithSchema(sqlc, _, givenSchema, None)
case None =>
SchemaDStream.fromStringStream(sqlc, _, None)
}
// We must only execute the process function if the RDD is non-empty.
// For inferred schema method, if the RDD is empty then the schema
// will be empty, too. For given schema method, we have to check
// the actual count (which is more expensive).
val processIfNotEmpty: SchemaRDD => Unit = schema match {
case Some(givenSchema) =>
rdd => if (rdd.count() > 0) process(rdd)
case None =>
rdd => if (rdd.schema.fields.size > 0) process(rdd)
}
val processStream: SchemaDStream => Unit =
_.foreachRDD(processIfNotEmpty)
_start[SchemaDStream](parseJson, processStream)
}
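  /* Hedged illustration (not part of the original source): one way a caller might use
   * startTableProcessing, registering each non-empty batch as a temporary table and querying
   * it with Spark SQL. The table name and query are assumptions made up for this example.
   *
   *   processor.startTableProcessing(rdd => {
   *     rdd.registerTempTable("current_batch")
   *     sqlc.sql("SELECT COUNT(*) FROM current_batch").collect()
   *   }, schema = None)
   */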
/**
* Start hybrid processing using the given SchemaRDD operation.
*
* The stream data will be equipped with a schema (either as passed
* as a parameter or as inferred by `SQLContext.jsonRDD()`) and the
* operation is expected to act on the resulting SchemaDStream.
* The function is responsible for triggering output operations.
*
* @param process a function to transform and operate on the main DStream
* @return one function to stop processing and one to get the highest IDs seen so far
*/
def startTableProcessingGeneral(process: SchemaDStream => Unit,
schema: Option[StructType],
inputStreamName: String): (() => (ProcessingInformation,
ProcessingInformation), () => Option[IdType]) = {
// parse DStream[String] into a row/column shaped stream
val parseJson: DStream[String] => SchemaDStream = schema match {
case Some(givenSchema) =>
SchemaDStream.fromStringStreamWithSchema(sqlc, _, givenSchema, Some(inputStreamName))
case None =>
SchemaDStream.fromStringStream(sqlc, _, Some(inputStreamName))
}
_start[SchemaDStream](parseJson, process)
}
/**
* Start hybrid processing using the given operation.
*
* The function passed in must operate on an RDD[String] (the stream data
* to be processed in a single batch), where each item of the RDD can be
* assumed to be JSON-encoded. The function *itself* is responsible to
* start computation (e.g. by using `rdd.foreach()` or `rdd.count()`).
* As that function can do arbitrary (nested and chained) processing, the
* notion of "number of processed items" makes only limited sense; we
* work with the "number of input items" instead.
*
* @param parseJson a function to get the input stream into something processable,
* like `DStream[String] => DStream[JValue]` or
* `DStream[String] => SchemaDStream`. "processable" means
* that there is a `foreachRDD()` method matching the
* parameter type of the `process()` function.
* (This is applied duck typing!)
* @param process the actual operations on the parsed data stream. Note that
* this function is responsible for calling an output operation.
* @tparam T the type of RDD that the parsed stream will allow processing on,
* like `RDD[JValue]` or `SchemaRDD`
* @return one function to stop processing and one to get the highest IDs seen so far
*/
protected def _start[T](parseJson: DStream[String] => T,
process: T => Unit):
(() => (ProcessingInformation, ProcessingInformation), () => Option[IdType]) = {
if (state != Initialized) {
val msg = "processor cannot be started in state " + state
logger.error(msg)
throw new RuntimeException(msg)
}
setState(Running)
logger.debug("creating StreamingContext for static data")
/* In order for updateStreamByKey() to work, we need to enable RDD checkpointing
* by setting a checkpoint directory. Note that this is different from enabling
* Streaming checkpointing (which would be needed for driver fault-tolerance),
* which would require the whole state of the application (in particular, all
* functions in stream.foreachRDD(...) calls) to be serializable. This would
* mean a rewrite of large parts of code, if it is possible at all.
* Also see <https://www.mail-archive.com/user%40spark.apache.org/msg22150.html>.
*/
sc.setCheckpointDir(checkpointDir)
ssc_ = new StreamingContext(sc, Seconds(2))
// this has to match our jubaql_timestamp inserted by fluentd
val timestampInJsonRe = """ *"jubaql_timestamp": ?"([0-9\\-:.T]+)" *""".r
// Extract a jubaql_timestamp field from a JSON-shaped string and return it.
val extractId: String => IdType = item => {
timestampInJsonRe.findFirstMatchIn(item) match {
case Some(aMatch) =>
val id = aMatch.group(1)
id
case None =>
""
}
}
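    // Illustrative note (not part of the original source): for a fluentd-annotated line such as
    //   {"age": 26, "jubaql_timestamp": "2014-11-21T15:52:21.943"}
    // extractId returns "2014-11-21T15:52:21.943"; lines without the field map to "".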
// create the static data source
val staticData: DStream[String] = storageLocation match {
/* Notes:
* 1. We have to use fileStream instead of textFileStream because we need
* to pass in newFilesOnly=false.
* 2. The implementation of fileStream will process all existing files
* in the first batch (which will maybe take a very long time). If
* a new file appears during that processing, it will be added to the
* batch of the time when it appeared. It may be worth considering a
* different implementation using a Queue that only enqueues new files
* when all previous processing is done, but we need to closely examine
* the behavior for very long batch processing times before deciding
* on that.
* 3. We have no guarantee about the order of files when using the standard
* FileInputDStream, since it uses o.a.h.f.FileSystem.listStatus() under
* the hood (that is knowledge we should not actually use) and there
* doesn't seem to be any contract about order of files. Our custom
* OrderedFileInputDStream adds that ordering.
* 4. Files that are currently being appended to seem to be read as well
* by the standard FileInputDStream. We do *not* want that, since
* such a file would be marked as "processed" and the next file that
* appears would be picked up, even though we did not process all
* its contents. Therefore we use a custom OrderedFileInputDStream
* that ignores files that received updates recently.
*/
case emptyRe(something) =>
val queue: Queue[RDD[String]] = new Queue()
ssc_.queueStream(queue)
case fileRe(filepath) =>
val realpath = if (filepath.startsWith("/")) {
filepath
} else {
(new File(".")).getAbsolutePath + "/" + filepath
}
new OrderedFileInputDStream[LongWritable, Text, TextInputFormat](ssc_,
"file://" + realpath,
(path: Path) => true,
false).map(_._2.toString)
case hdfsRe(filepath) =>
new OrderedFileInputDStream[LongWritable, Text, TextInputFormat](ssc_,
filepath,
(path: Path) => true,
false).map(_._2.toString)
}
logger.debug("static data DStream: " + staticData)
// keep track of the maximal ID seen during processing
val maxStaticId = sc.accumulator[Option[IdType]](None)(new MaxOptionAccumulatorParam[IdType])
val countStatic = sc.accumulator(0L)
val maxStreamId = sc.accumulator[Option[IdType]](None)(new MaxOptionAccumulatorParam[IdType])
val countStream = sc.accumulator(0L)
// processing of static data
val repartitionedData = if (_master == "yarn-cluster" && totalNumCores > 0) {
// We repartition by (numExecutors * executorCores) to get just the
// right level of parallelism.
logger.info(s"repartitioning for $totalNumCores workers")
staticData.repartition(totalNumCores)
} else {
logger.debug("not repartitioning")
staticData
}
// first find the maximal ID in the data and count it
repartitionedData.map(item => {
val id = extractId(item)
// update maximal ID
maxStaticId += Some(id)
id
}).foreachRDD(rdd => {
val count = rdd.count()
// we count the number of total input rows (on the driver)
countStatic += count
// stop processing of static data if there are no new files
if (count == 0) {
logger.info(s"processed $count (static) lines, looks like done")
synchronized {
staticProcessingComplete = true
}
} else {
logger.info(s"processed $count (static) lines")
}
})
// now do the actual processing
val mainStream = parseJson(repartitionedData)
process(mainStream)
// start first StreamingContext
logger.info("starting static data processing")
val staticStartTime = System.currentTimeMillis()
var staticRunTime = 0L
var streamStartTime = -1L
var streamRunTime = 0L
ssc_.start()
val staticStreamingContext = ssc_
// start one thread that waits for static data processing to complete
future {
logger.debug("hello from thread to wait for completion of static processing")
// If *either* the static data processing completed successfully,
// *or* the staticStreamingContext finished for some other reason
// (we measure this by the execution time of awaitTermination(timeout))
// we stop the streaming context.
val timeToWait = 200L
val logEveryNLoops = 5
var i = 0
var staticProcessingStillRunning = true
while (!staticProcessingComplete && staticProcessingStillRunning) {
val timeBeforeWaiting = System.currentTimeMillis()
if (i == logEveryNLoops) {
logger.debug("waiting for static data processing to complete")
i = 0
} else {
i += 1
}
staticStreamingContext.awaitTermination(timeToWait)
val timeAfterWaiting = System.currentTimeMillis()
val actuallyWaitedTime = timeAfterWaiting - timeBeforeWaiting
staticProcessingStillRunning = actuallyWaitedTime >= timeToWait
}
if (staticProcessingComplete) {
logger.info("static data processing completed successfully, " +
"stopping StreamingContext")
} else {
logger.warn("static data processing ended, but did not complete")
}
staticStreamingContext.stop(stopSparkContext = false, stopGracefully = true)
logger.debug("bye from thread to wait for completion of static processing")
} onFailure {
case error: Throwable =>
logger.error("Error while waiting for static processing end", error)
}
// start one thread that waits for the first StreamingContext to terminate
future {
// NB. This is a separate thread. In functions that will be serialized,
// you cannot necessarily use variables from outside this thread.
val localExtractId = extractId
val localCountStream = countStream
val localMaxStreamId = maxStreamId
logger.debug("hello from thread to start stream processing")
staticStreamingContext.awaitTermination()
// If we arrive here, the static processing is done, either by failure
// or user termination or because all processing was completed. We want
// to continue with real stream processing only if the static processing
// was completed successfully.
val largestStaticItemId = maxStaticId.value
staticRunTime = System.currentTimeMillis() - staticStartTime
logger.debug("static processing ended after %d items and %s ms, largest seen ID: %s".format(
countStatic.value, staticRunTime, largestStaticItemId))
logger.debug("sleeping a bit to allow Spark to settle")
runMode match {
case Development =>
Thread.sleep(200)
case _ =>
// If we don't sleep long enough here, then old/checkpointed RDDs
// won't be cleaned up in time before the next process starts. For
// some reason, this happens only with YARN.
Thread.sleep(8000)
}
if (staticProcessingComplete && !userStoppedProcessing) {
logger.info("static processing completed successfully, setting up stream")
streamLocations match {
case streamLocation :: Nil =>
// set up stream processing
logger.debug("creating StreamingContext for stream data")
ssc_ = new StreamingContext(sc, Seconds(2))
val allStreamData: DStream[(IdType, String)] = (streamLocation match {
case dummyRe(nothing) =>
// dummy JSON data emitted over and over (NB. the timestamp
// is not increasing over time)
val dummyData = sc.parallelize(
"""{"gender":"m","age":26,"jubaql_timestamp":"2014-11-21T15:52:21.943321112"}""" ::
"""{"gender":"f","age":24,"jubaql_timestamp":"2014-11-21T15:52:22"}""" ::
"""{"gender":"m","age":31,"jubaql_timestamp":"2014-11-21T15:53:21.12345"}""" ::
Nil)
new ConstantInputDStream(ssc_, dummyData)
case kafkaRe(zookeeper, topics, groupId) =>
// connect to the given Kafka instance and receive data
val kafkaParams = Map[String, String](
"zookeeper.connect" -> zookeeper, "group.id" -> groupId,
"auto.offset.reset" -> "smallest")
KafkaUtils.createStream[String, String,
StringDecoder, StringDecoder](ssc_, kafkaParams,
Map(topics -> 2),
// With MEMORY_ONLY, we seem to run out of memory quickly
// when processing is slow. Much worse: There is no space
// left for broadcast variables, so we cannot communicate
// our "runState = false" information.
StorageLevel.DISK_ONLY).map(_._2)
}).map(item => (localExtractId(item), item))
val streamData = (largestStaticItemId match {
case Some(largestId) =>
// only process items with a strictly larger id than what we
// have seen so far
logger.info("filtering for items with an id larger than " + largestId)
allStreamData.filter(itemWithId => {
itemWithId._1 > largestId
})
case None =>
// don't do any ID filtering if there is no "largest id"
logger.info("did not see any items in static processing, " +
"processing whole stream")
allStreamData
}).map(itemWithId => {
// remember the largest seen ID
localMaxStreamId += Some(itemWithId._1)
itemWithId._2
})
logger.debug("stream data DStream: " + streamData)
streamData.foreachRDD(rdd => {
val count = rdd.count()
// we count the number of total processed rows (on the driver)
localCountStream += count
logger.info(s"processed $count (stream) lines")
})
// now do the actual processing
val mainStream = parseJson(streamData)
process(mainStream)
// start stream processing
synchronized {
if (userStoppedProcessing) {
logger.info("processing was stopped by user during stream setup, " +
"not starting")
setState(Finished)
} else {
logger.info("starting stream processing")
streamStartTime = System.currentTimeMillis()
ssc_.start()
}
}
case Nil =>
logger.info("not starting stream processing " +
"(no stream source given)")
setState(Finished)
case _ =>
logger.error("not starting stream processing " +
"(multiple streams not implemented)")
setState(Finished)
}
} else if (staticProcessingComplete && userStoppedProcessing) {
logger.info("static processing was stopped by user, " +
"not setting up stream")
setState(Finished)
} else {
logger.warn("static processing did not complete successfully, " +
"not setting up stream")
setState(Finished)
}
logger.debug("bye from thread to start stream processing")
} onFailure {
case error: Throwable =>
logger.error("Error while setting up stream processing", error)
setState(Finished)
}
// return a function to stop the data processing
(() => {
logger.info("got shutdown request from user")
synchronized {
userStoppedProcessing = true
}
logger.debug("now stopping the StreamingContext")
currentStreamingContext.stop(stopSparkContext = false, stopGracefully = true)
logger.debug("done stopping the StreamingContext")
// if stream processing was not started or there was a runtime already
// computed, we don't update the runtime
if (streamStartTime > 0 && streamRunTime == 0) {
streamRunTime = System.currentTimeMillis() - streamStartTime
}
logger.info(("processed %s items in %s ms (static) and %s items in " +
"%s ms (stream)").format(countStatic.value, staticRunTime,
countStream.value, streamRunTime))
setState(Finished)
(ProcessingInformation(countStatic.value, staticRunTime, maxStaticId.value),
ProcessingInformation(countStream.value, streamRunTime, maxStreamId.value))
}, () => maxStaticId.value)
}
/**
* Allows the user to wait for termination of the processing.
* If an exception happens during processing, an exception will be thrown here.
*/
def awaitTermination() = {
logger.debug("user is waiting for termination ...")
try {
ssc_.awaitTermination()
setState(Finished)
} catch {
case e: Throwable =>
logger.warn("StreamingContext threw an exception (\\"%s\\"), shutting down".format(
e.getMessage))
// when we got an exception, clean up properly
ssc_.stop(stopSparkContext = false, stopGracefully = true)
setState(Finished)
logger.info(s"streaming context was stopped after exception")
throw e
}
}
}
| jubatus/jubaql-server | processor/src/main/scala/us/jubat/jubaql_server/processor/HybridProcessor.scala | Scala | lgpl-2.1 | 27,725 |
package com.houseofmoran.twitter.lang
import org.apache.spark.sql.{SaveMode, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
object ConsolidateSavedTweetsApp {
def main(args: Array[String]): Unit = {
val conf = new SparkConf().setAppName("SummariseSavedTweetsApp").setMaster("local[*]")
val sc = new SparkContext(conf)
val sqlContext = SQLContext.getOrCreate(sc)
val tweetsFile = sqlContext.read.parquet("tweetsN/*.parquet")
val tweetsDF = tweetsFile.coalesce(100).toDF()
println(s"Count of tweets: ${tweetsDF.count()}")
println("schema:");
println(tweetsDF.schema)
tweetsDF.write.mode(SaveMode.Overwrite).format("parquet").save("tweets.consolidated.parquet")
}
}
| mikemoraned/twitter-lang | src/main/scala/com/houseofmoran/twitter/lang/ConsolidateSavedTweetsApp.scala | Scala | apache-2.0 | 724 |
package nl.jappieklooster.gapl.lib.controller
import akka.actor.Actor
import groovy.lang.Closure
import nl.jappieklooster.gapl.Log
import nl.jappieklooster.gapl.lib.dsl.Delegator
import nl.jappieklooster.gapl.lib.dsl.execution.{BelieveExecutionDsl, GoalExecutionDsl}
import nl.jappieklooster.gapl.lib.model.{Agent, Command, Update}
/**
* Execute a single agent.
*/
class AgentController(private var agent:Agent, environment:AnyRef) extends Actor with Delegator{
val log = Log.get[AgentController]
def execute(): Unit = {
var resultingGoals = agent.goals
for((name, value) <- agent.goals){
if(goalComplete(name)){
resultingGoals -= name
}else{
delegate(value, new GoalExecutionDsl(agent, environment))
}
}
agent = agent.copy(goals = resultingGoals)
}
def goalComplete(name:String):Boolean = {
for(believeValueArray <- agent.believes get name){
if(believeValueArray.size > 1){
return false
}
for(believe <- believeValueArray.find((a:Any) => true)){
believe match{
case deduction:Closure[Boolean] =>
return delegate(deduction, new BelieveExecutionDsl(agent))
}
}
}
return false
}
private var isStopped = false
override def receive: Receive = {
case Command.Stop =>
isStopped = true
case Command.Start =>
isStopped = false
self ! Update(1)
case Update(tickNumber) => {
execute()
if(!isStopped){
self ! Update(tickNumber+1)
}
}
}
}
| jappeace/Gapl | library/src/main/scala/nl/jappieklooster/gapl/lib/controller/AgentController.scala | Scala | gpl-3.0 | 1,436 |
package net.ssanj.dabble
import scala.util.Try
import ammonite.ops._
import scalaz._
import scalaz.Id.Id
import scalaz.syntax.std.`try`._
import DabbleDslDef._
class DabbleConsoleInterpreter extends (DabbleDsl ~> Id) {
def apply[A](dsl: DabbleDsl[A]): Id[A] = dsl match {
case ReadFile(filename: String) =>
Try(read.lines(Path(filename), "UTF-8")).
toDisjunction.
leftMap(x => s"Failed to read $filename due to: ${x.getMessage}")
case WriteFile(filename: String, lines: Seq[String]) =>
Try(write.over(Path(filename), lines.mkString(newline))).
toDisjunction.
leftMap(x => s"Failed to write to $filename due to: ${x.getMessage}")
case FileExists(filename: String) =>
Try(exists(Path(filename))).toOption.fold(false)(identity)
case Log(message: String) => println(message)
case ReadInput(prompt: String) =>
println(prompt)
scala.io.StdIn.readLine
case SystemProp(key: String) =>
Try(System.getProperty(key)).
toDisjunction.
leftMap(x => s"Could not read system property: $key due to: ${x.getMessage}")
//This could return an IoError of type CallError(errorCode, throwable)
case CallProcess(filename: String, arguments: String, workingDir: String) =>
Try(%(filename, arguments)(Path(workingDir))).
toDisjunction.
leftMap(x => s"Could not run dabble due to: ${x.getMessage}. See sbt log for details.")
case Exit(er: ExecutionResult2) => System.exit(er.code.code)
case NoOp =>
}
}
| ssanj/dabble | src/main/scala/net/ssanj/dabble/DabbleConsoleInterpreter.scala | Scala | mit | 1,544 |
package home.yang.dataflow
import scala.collection.mutable.ArrayBuffer
/**
* Created by Administrator on 2016/5/2 0002.
*/
trait DataContainer{
def get(key:String):DataHolder
}
class SimpleData(var filterable: Map[String,DataHolder]) extends DataContainer{
override def get(key: String): DataHolder = {
filterable.getOrElse(key,null)
}
}
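// Hedged usage sketch (not part of the original source); the key and the DataHolder instance
// are assumptions made up for this example:
//   val container: DataContainer = new SimpleData(Map("speed" -> speedHolder))
//   container.get("speed")   // returns speedHolder
//   container.get("missing") // returns null, because getOrElse falls back to null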
| wjingyao2008/firsttry | dataflow/src/main/scala/home/yang/dataflow/SimpleData.scala | Scala | apache-2.0 | 354 |
package com.sksamuel.elastic4s.mappings
import com.sksamuel.elastic4s.JsonSugar
import com.sksamuel.elastic4s.analyzers.SpanishLanguageAnalyzer
import com.sksamuel.elastic4s.mappings.FieldType.StringType
import org.scalatest.{Matchers, WordSpec}
class DynamicTemplateDslTest extends WordSpec with Matchers with JsonSugar {
import com.sksamuel.elastic4s.ElasticDsl._
"dynamic templates" should {
"generate correct json" in {
val temp = DynamicTemplateDefinition("es",
dynamicTemplateMapping(StringType) analyzer SpanishLanguageAnalyzer
) matchMappingType "string" matching "*_es"
temp.build.string should matchJsonResource("/json/mappings/dynamic_template.json")
}
}
}
| ulric260/elastic4s | elastic4s-core-tests/src/test/scala/com/sksamuel/elastic4s/mappings/DynamicTemplateDslTest.scala | Scala | apache-2.0 | 712 |
package us.feliscat.ir.fulltext.indri.en
import us.feliscat.ir.fulltext.indri.MultiLingualTrecText
import us.feliscat.m17n.English
/**
* <pre>
* Created on 2017/02/11.
* </pre>
*
* @author K.Sakamoto
*/
object EnglishTrecText extends MultiLingualTrecText with English {
}
| ktr-skmt/FelisCatusZero-multilingual | libraries/src/main/scala/us/feliscat/ir/fulltext/indri/en/EnglishTrecText.scala | Scala | apache-2.0 | 287 |
// package turksem.iqa
// import cats.implicits._
// import akka.actor.Props
// import akka.stream.scaladsl.{Flow, Source}
// import com.amazonaws.services.mturk.model.QualificationRequirement
// import nlpdata.util.Text
// import nlpdata.util.HasTokens
// import nlpdata.util.HasTokens.ops._
// import nlpdata.util.LowerCaseStrings._
// import nlpdata.datasets.wiktionary.Inflections
// import spacro.HITInfo
// import spacro.HITType
// import spacro.HIT
// import spacro.Assignment
// import spacro.tasks.TaskConfig
// import spacro.tasks.TaskSpecification
// import spacro.tasks.TaskManager
// import spacro.tasks.HITManager
// import spacro.tasks.Server
// import spacro.tasks.Service
// import spacro.tasks.SetNumHITsActive
// import turksem._
// import turksem.util._
// import qamr.AnnotationDataService
// import upickle.default._
// import scala.concurrent.duration._
// import scala.language.postfixOps
// import AdaptiveQuestionGuesser.ops._
// /**
// * Annotation pipeline object: construct one of these to start running an annotation job.
// */
// class IQAAnnotationPipeline[SID : Reader : Writer : HasTokens](
// val _prompts: Vector[IQAPrompt[SID]],
// val initialQuestionGuesser: CountBasedQuestionGuesser,
// frozenGenerationHITTypeID: Option[String] = None)(
// implicit config: TaskConfig, // determines production/sandbox, how to store HIT data, etc.
// annotationDataService: AnnotationDataService,
// inflections: Inflections) { // inflections object constructed by the caller for all tokens in the inputs
// def getInflectedTokens(sid: SID) = {
// val tokens = sid.tokens
// PosTagger.posTag(tokens).map(w =>
// InflectionalWord(
// token = w.token,
// pos = w.pos,
// index = w.index,
// inflectedFormsOpt = inflections.getInflectedForms(tokens(w.index).lowerCase)))
// }
// val allPrompts = _prompts.filter { p =>
// QuestioningState.initFromSentence(getInflectedTokens(p.id)).triggerGroups.nonEmpty
// }
// val genHITType = HITType(
// title = s"Help write and answer a series of questions about a sentence",
// description = s"""
// You will be shown an English sentence and some simple questions about it.
// You will answer these questions, and aid an automated system in asking more questions
// and answering those as well, until you have covered as much of the sentence structure as possible.
// """.trim.replace("\\\\s+", " "),
// reward = 0.05,
// keywords = "language,english,question answering",
// qualRequirements = Array.empty[QualificationRequirement],
// autoApprovalDelay = 2592000L, // 30 days
// assignmentDuration = 600L)
// lazy val genAjaxService = new Service[IQAAjaxRequest[SID]] {
// override def processRequest(request: IQAAjaxRequest[SID]) = request match {
// case IQAAjaxRequest(id) =>
// val response = IQAAjaxResponse(
// getInflectedTokens(id),
// genManagerPeek.questionGuesser // this is bad hehe oopsie
// )
// response
// }
// }
// lazy val (taskPageHeadLinks, taskPageBodyLinks) = {
// import scalatags.Text.all._
// val headLinks = List(
// link(
// rel := "stylesheet",
// href := "https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css",
// attr("integrity") := "sha384-rwoIResjU2yc3z8GV/NPeZWAv56rSmLldC3R/AZzGRnGxQQKnKkoFVhFQhNUwEyJ",
// attr("crossorigin") := "anonymous"))
// val bodyLinks = List(
// script(
// src := "https://code.jquery.com/jquery-3.1.1.slim.min.js",
// attr("integrity") := "sha384-A7FZj7v+d/sdmMqp/nOQwliLvUsJfDHW+k9Omg/a/EheAdgtzNs3hpfag6Ed950n",
// attr("crossorigin") := "anonymous"),
// script(
// src := "https://cdnjs.cloudflare.com/ajax/libs/tether/1.4.0/js/tether.min.js",
// attr("integrity") := "sha384-DztdAPBWPRXSA/3eYEEUWrWCy7G5KFbe8fFjk5JAIxUYHKkDx6Qin1DkWx51bBrb",
// attr("crossorigin") := "anonymous"),
// script(
// src := "https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-alpha.6/js/bootstrap.min.js",
// attr("integrity") := "sha384-vBWWzlZJ8ea9aCX4pEW3rVHjgjt7zpkNpZk+02D9phzyeVkE+jo0ieGizqPLForn",
// attr("crossorigin") := "anonymous"))
// (headLinks, bodyLinks)
// }
// // this object holds the necessary information to start uploading tasks to Turk.
// // TaskSpecifications in an annotation run should be in 1-to-1 correspondence with HIT Type IDs.
// lazy val genTaskSpec = TaskSpecification.NoWebsockets[IQAPrompt[SID], IQAResponse, IQAAjaxRequest[SID]](
// iqaTaskKey, genHITType, genAjaxService, allPrompts,
// frozenHITTypeId = frozenGenerationHITTypeID,
// taskPageHeadElements = taskPageHeadLinks,
// taskPageBodyElements = taskPageBodyLinks)
// import config.actorSystem
// // this is here just so you can peek from the console into what's going on in the HIT manager.
// // do NOT mutate fields of the HIT manager through this object---that is not thread-safe!
// // Instead, define some message types and send those messages to it.
// var genManagerPeek: IQAHITManager[SID] = null
// // The HIT Manager keeps track of what's running on MTurk and reviews assignments.
// // You would implement any interesting quality control, coordination between tasks, etc., in a custom HITManager.
// // See the QAMR code for examples of more interesting HITManagers.
// // Here we are using a simple default implementation from the turkey library.
// lazy val genHelper = new HITManager.Helper(genTaskSpec)
// lazy val genManager = actorSystem.actorOf(
// Props{
// genManagerPeek = new IQAHITManager(
// helper = genHelper,
// initQuestionGuesser = initialQuestionGuesser,
// numAssignmentsForPrompt = (p: IQAPrompt[SID]) => 1,
// initNumHITsToKeepActive = 3,
// _promptSource = allPrompts.iterator)
// genManagerPeek
// })
// // instantiating this object starts the webserver that hosts the task & previews.
// lazy val server = new Server(List(genTaskSpec))
// // this actor is the way we generally communicate directly with the HIT manager (telling it to start polling turk, stop, etc.)
// lazy val genActor = actorSystem.actorOf(Props(new TaskManager(genHelper, genManager)))
// // these functions are for you to run on the console to manage the task live.
// def setGenHITsActive(n: Int) =
// genManager ! SetNumHITsActive(n)
// import TaskManager.Message._
// def start(interval: FiniteDuration = 30 seconds) = {
// server
// genActor ! Start(interval, delay = 0 seconds)
// }
// def stop() = {
// genActor ! Stop
// }
// def disable() = {
// genActor ! Delete
// }
// def expire() = {
// genActor ! Expire
// }
// def update() = {
// server
// genActor ! Update
// }
// def allGenInfos = config.hitDataService.getAllHITInfo[IQAPrompt[SID], IQAResponse](genTaskSpec.hitTypeId).get
// def workerGenInfos(workerId: String) = for {
// hi <- allGenInfos
// assignment <- hi.assignments
// if assignment.workerId == workerId
// } yield HITInfo(hi.hit, List(assignment))
// def currentGenSentences: List[(SID, String)] = {
// genHelper.activeHITInfosByPromptIterator.map(_._1.id).map(id =>
// id -> Text.render(id.tokens)
// ).toList
// }
// }
| julianmichael/mturk-semantics | turksem/jvm/src/main/scala/turksem/iqa/IQAAnnotationPipeline.scala | Scala | mit | 7,469 |
package sample
import java.util.concurrent.atomic.AtomicInteger
class TestCountingService extends CountingService {
  private val called = new AtomicInteger(0)
override def increment( count : Int) = {
called.incrementAndGet()
super.increment(count)
}
/**
* How many times we have called this service.
*/
def getNumberOfCalls() = called.get()
}
| alanktwong/typesafe_activators | akka-scala-spring/test/sample/TestCountingService.scala | Scala | mit | 360 |
package com.twitter.finatra.http.tests.response
import com.twitter.finagle.http.{
Fields,
MediaType,
Request,
Response,
Status,
Cookie => FinagleCookie
}
import com.twitter.finatra.http.marshalling.MessageBodyFlags
import com.twitter.finatra.http.modules.ResponseBuilderModule
import com.twitter.finatra.http.response.ResponseBuilder
import com.twitter.finatra.modules.FileResolverFlags
import com.twitter.inject.app.TestInjector
import com.twitter.inject.{Injector, IntegrationTest}
import com.twitter.util.mock.Mockito
import java.io.{File, FileWriter}
class ResponseBuilderTest extends IntegrationTest with Mockito {
override protected val injector: Injector =
TestInjector(
modules = Seq(ResponseBuilderModule),
flags = Map(
FileResolverFlags.LocalDocRoot -> "src/main/webapp/",
MessageBodyFlags.ResponseCharsetEnabled -> "true"
)
).create
private lazy val responseBuilder = injector.instance[ResponseBuilder]
test("handle simple response body") {
val content = "test body"
val response = responseBuilder.ok(content)
response.getContentString() should equal(content)
response.contentLengthOrElse(0) should equal(content.length)
}
test("handle simple response body with request") {
val content = "test body"
val request = Request()
val response = responseBuilder.ok(request, content)
response.getContentString() should equal(content)
}
test("handle file type as response body") {
val expectedContent = """{"id": "foo"}"""
val tempFile = File.createTempFile("temp", ".json")
tempFile.deleteOnExit()
val writer = new FileWriter(tempFile)
writer.write(expectedContent)
writer.close()
val response = responseBuilder.ok(tempFile)
response.getContentString() should equal(expectedContent)
response.contentLengthOrElse(0) should equal(expectedContent.length)
response.headerMap(Fields.ContentType) should equal(MediaType.JsonUtf8)
}
test("convert to an exception") {
val e = responseBuilder.notFound.header("foo", "bar").toException
e.response.status should equal(Status.NotFound)
e.response.headerMap("foo") should equal("bar")
}
test("cookies") {
assertFooBarCookie(responseBuilder.ok.cookie("foo", "bar"))
assertFooBarCookie(responseBuilder.ok.cookie(new FinagleCookie("foo", "bar")))
}
test("appropriate response content type") {
// we should only return the charset on appropriate content types
val bytes: Array[Byte] = Array[Byte](10, -32, 17, 22)
var response = responseBuilder.ok(bytes)
response.headerMap(Fields.ContentType) should be(
MediaType.OctetStream
) // does not include charset
response.contentLengthOrElse(0) should equal(bytes.length)
response = responseBuilder.ok("""Hello, world""")
response.headerMap(Fields.ContentType) should be(MediaType.PlainTextUtf8) // includes charset
response.contentLengthOrElse(0) should equal("""Hello, world""".length)
val toMapValue = Map("key1" -> "value1", "key2" -> "value2")
response = responseBuilder.ok(toMapValue)
response.headerMap(Fields.ContentType) should be(MediaType.JsonUtf8) // includes charset
response.contentLengthOrElse(0) > 0 should be(true)
}
test("properly return responses") {
responseBuilder.noContent.status should equal(Status.NoContent)
responseBuilder.notAcceptable.status should equal(Status.NotAcceptable)
assertResponseWithFooBody(responseBuilder.notAcceptable("foo"), Status.NotAcceptable)
responseBuilder.accepted.status should equal(Status.Accepted)
responseBuilder.movedPermanently.status should equal(Status.MovedPermanently)
assertResponseWithFooBody(responseBuilder.movedPermanently("foo"), Status.MovedPermanently)
responseBuilder.notModified.status should equal(Status.NotModified)
assertResponseWithFooBody(responseBuilder.badRequest("foo"), Status.BadRequest)
assertResponseWithFooBody(responseBuilder.notFound("foo"), Status.NotFound)
responseBuilder.gone.status should equal(Status.Gone)
assertResponseWithFooBody(responseBuilder.gone("foo"), Status.Gone)
responseBuilder.preconditionFailed.status should equal(Status.PreconditionFailed)
assertResponseWithFooBody(responseBuilder.preconditionFailed("foo"), Status.PreconditionFailed)
responseBuilder.requestEntityTooLarge.status should equal(Status.RequestEntityTooLarge)
assertResponseWithFooBody(
responseBuilder.requestEntityTooLarge("foo"),
Status.RequestEntityTooLarge
)
assertResponseWithFooBody(
responseBuilder.internalServerError("foo"),
Status.InternalServerError
)
assertResponseWithFooBody(responseBuilder.ok.html("foo"), Status.Ok)
responseBuilder.notImplemented.status should equal(Status.NotImplemented)
responseBuilder.clientClosed.statusCode should equal(499)
responseBuilder.ok
.location(1.asInstanceOf[Any])
.asInstanceOf[Response]
.location
.get should equal("1")
responseBuilder.ok
.header("Content-Type", MediaType.JsonUtf8)
.asInstanceOf[Response]
.contentType
.get should equal(MediaType.JsonUtf8)
responseBuilder.ok
.headers(Map("Content-Type" -> "Foo"))
.asInstanceOf[Response]
.contentType
.get should equal("Foo")
responseBuilder.ok
.headers(("Content-Type", "Foo"), ("A", "B"))
.asInstanceOf[Response]
.contentType
.get should equal("Foo")
await(responseBuilder.ok.toFuture).status should equal(Status.Ok)
}
def assertFooBarCookie(response: Response): Unit = {
val cookie = response.getCookies().next()
cookie.name should equal("foo")
cookie.value should equal("bar")
}
def assertResponseWithFooBody(response: Response, expectedStatus: Status): Unit = {
response.status should equal(expectedStatus)
response.contentString should equal("foo")
}
}
| twitter/finatra | http-server/src/test/scala/com/twitter/finatra/http/tests/response/ResponseBuilderTest.scala | Scala | apache-2.0 | 5,945 |
import org.specs2.mutable.Specification
import play.api.test._
import play.api.test.Helpers._
import controllers._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
/*
* User: Martin
* Date: 01.12.13
* Time: 02:05
*/
class ProfileSpec extends Specification {
"A user profile" should {
"not be accessible from a not logged in user" in {
running(FakeApplication()) {
val future = route(FakeRequest(GET, "/profile")).get
val location = Await.result(future, Duration.Inf).header.headers("Location")
val flash = Await.result(future, Duration.Inf).header.headers("Set-Cookie")
println(location)
println(flash)
"test" mustEqual "test "
}
}
}
}
| GyrosOfWar/a-z-challenge-log | test/ProfileSpec.scala | Scala | apache-2.0 | 738 |
package gitbucket.core.util
import java.net.InetAddress
import gitbucket.core.service.SystemSettingsService
import org.apache.commons.net.util.SubnetUtils
import org.apache.http.HttpHost
import org.apache.http.auth.{AuthScope, UsernamePasswordCredentials}
import org.apache.http.impl.client.{BasicCredentialsProvider, CloseableHttpClient, HttpClientBuilder}
object HttpClientUtil {
def withHttpClient[T](proxy: Option[SystemSettingsService.Proxy])(f: CloseableHttpClient => T): T = {
val builder = HttpClientBuilder.create.useSystemProperties
proxy.foreach { proxy =>
builder.setProxy(new HttpHost(proxy.host, proxy.port))
for (user <- proxy.user; password <- proxy.password) {
val credential = new BasicCredentialsProvider()
credential.setCredentials(
new AuthScope(proxy.host, proxy.port),
new UsernamePasswordCredentials(user, password)
)
builder.setDefaultCredentialsProvider(credential)
}
}
val httpClient = builder.build()
try {
f(httpClient)
} finally {
httpClient.close()
}
}
def isPrivateAddress(address: String): Boolean = {
val ipAddress = InetAddress.getByName(address)
ipAddress.isSiteLocalAddress || ipAddress.isLinkLocalAddress || ipAddress.isLoopbackAddress
}
def inIpRange(ipRange: String, ipAddress: String): Boolean = {
if (ipRange.contains('/')) {
val utils = new SubnetUtils(ipRange)
utils.setInclusiveHostCount(true)
utils.getInfo.isInRange(ipAddress)
} else {
ipRange == ipAddress
}
}
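  /* Hedged examples (not part of the original source); the addresses are made up:
   *   inIpRange("192.168.0.0/24", "192.168.0.42") // true  (CIDR match via SubnetUtils)
   *   inIpRange("192.168.0.1", "192.168.0.1")     // true  (exact match when no '/')
   *   inIpRange("10.0.0.0/8", "192.168.0.42")     // false
   */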
}
| xuwei-k/gitbucket | src/main/scala/gitbucket/core/util/HttpClientUtil.scala | Scala | apache-2.0 | 1,587 |
package dsmoq.services
import java.io.{ ByteArrayInputStream, Closeable, File, FileOutputStream, InputStream, SequenceInputStream }
import java.nio.charset.{ Charset, StandardCharsets }
import java.nio.file.{ Files, Path, Paths, StandardCopyOption }
import java.util.{ ResourceBundle, UUID }
import java.util.zip.ZipInputStream
import com.github.tototoshi.csv.CSVReader
import com.typesafe.scalalogging.LazyLogging
import dsmoq.exceptions.{ AccessDeniedException, BadRequestException, NotFoundException }
import dsmoq.logic.{ AppManager, FileManager, ImageSaveLogic, StringUtil, ZipUtil }
import dsmoq.{ AppConf, ResourceNames, persistence }
import dsmoq.persistence.PostgresqlHelper.{ PgConditionSQLBuilder, PgSQLSyntaxType }
import dsmoq.persistence.{ Annotation, Dataset, DatasetAnnotation, DatasetImage, DefaultAccessLevel, GroupAccessLevel, GroupType, OwnerType, Ownership, PresetType, UserAccessLevel, ZipedFiles }
import dsmoq.services.json.DatasetData.{ CopiedDataset, DatasetOwnership, DatasetTask, DatasetZipedFile }
import dsmoq.services.json.{ DatasetData, Image, RangeSlice, RangeSliceSummary, SearchDatasetCondition }
import org.apache.commons.io.input.BoundedInputStream
import org.joda.time.DateTime
import org.json4s.{ JBool, JInt }
import org.json4s.JsonDSL.{ jobject2assoc, pair2Assoc, string2jvalue }
import org.json4s.jackson.JsonMethods.{ compact, render }
import org.scalatra.servlet.FileItem
import org.slf4j.MarkerFactory
import scalikejdbc.interpolation.Implicits.{ scalikejdbcSQLInterpolationImplicitDef, scalikejdbcSQLSyntaxToStringImplicitDef }
import scalikejdbc.{ ConditionSQLBuilder, DB, DBSession, SQLSyntax, SelectSQLBuilder, SubQuery, delete, select, sqls, update, withSQL }
import scala.collection.mutable.{ ArrayBuffer, HashSet }
import scala.math.BigInt.int2bigInt
import scala.util.{ Failure, Success, Try }
/**
 * Service class that handles dataset-related operations.
 *
 * @param resource resource bundle instance
*/
class DatasetService(resource: ResourceBundle) extends LazyLogging {
/**
   * Log marker
*/
val LOG_MARKER = MarkerFactory.getMarker("DATASET_LOG")
private val datasetImageDownloadRoot = AppConf.imageDownloadRoot + "datasets/"
  /** Default search limit */
val DEFALUT_LIMIT = 20
/**
   * Creates a new dataset.
   *
   * @param files list of files to add to the dataset
   * @param saveLocal whether to store the dataset locally
   * @param saveS3 whether to store the dataset on S3
   * @param name dataset name
   * @param user user information
   * @return
   *        Success(DatasetData.Dataset) the created dataset object on success
   *        Failure(NullPointerException) if any argument is null
*/
def create(
files: Seq[FileItem],
saveLocal: Boolean,
saveS3: Boolean,
name: String,
user: User
): Try[DatasetData.Dataset] = {
Try {
CheckUtil.checkNull(files, "files")
CheckUtil.checkNull(saveLocal, "saveLocal")
CheckUtil.checkNull(saveS3, "saveS3")
CheckUtil.checkNull(name, "name")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
val myself = persistence.User.find(user.id).get
val myGroup = getPersonalGroup(myself.id).get
val datasetId = UUID.randomUUID().toString
val timestamp = DateTime.now()
val f = files.map { f =>
          // the check includes the extension and is case-insensitive
val isZip = f.getName.toLowerCase.endsWith(".zip")
val fileId = UUID.randomUUID.toString
val historyId = UUID.randomUUID.toString
FileManager.uploadToLocal(datasetId, fileId, historyId, f)
val path = Paths.get(AppConf.fileDir, datasetId, fileId, historyId)
val file = persistence.File.create(
id = fileId,
datasetId = datasetId,
historyId = historyId,
name = f.name,
description = "",
fileType = 0,
fileMime = "application/octet-stream",
fileSize = f.size,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp,
localState = if (saveLocal) { SaveStatus.SAVED } else { SaveStatus.DELETING },
s3State = if (saveS3) { SaveStatus.SYNCHRONIZING } else { SaveStatus.NOT_SAVED }
)
val realSize = if (isZip) {
createZipedFiles(path, historyId, timestamp, myself).getOrElse {
            // register a new error record tied to the newly issued file history ID
persistence.FileHistoryError.create(
id = UUID.randomUUID().toString,
historyId = historyId,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
            // the ZIP file could not be expanded, so keep the original file size as-is
f.size
}
} else {
f.size
}
val histroy = persistence.FileHistory.create(
id = historyId,
fileId = file.id,
fileType = 0,
fileMime = "application/octet-stream",
filePath = "/" + datasetId + "/" + file.id + "/" + historyId,
fileSize = f.size,
isZip = isZip,
realSize = realSize,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
(file, histroy)
}
val dataset = persistence.Dataset.create(
id = datasetId,
name = if (name.isEmpty) { f.head._1.name } else { name },
description = "",
licenseId = AppConf.defaultLicenseId,
filesCount = f.length,
filesSize = f.map(x => x._2.fileSize).sum,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp,
localState = if (saveLocal) { SaveStatus.SAVED } else { SaveStatus.DELETING },
s3State = if (saveS3) { SaveStatus.SYNCHRONIZING } else { SaveStatus.NOT_SAVED }
)
if (saveS3 && !f.isEmpty) {
createTask(datasetId, MoveToStatus.S3, myself.id, timestamp, saveLocal)
}
val ownership = persistence.Ownership.create(
id = UUID.randomUUID.toString,
datasetId = datasetId,
groupId = myGroup.id,
accessLevel = persistence.UserAccessLevel.Owner,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
val datasetImage = persistence.DatasetImage.create(
id = UUID.randomUUID.toString,
datasetId = dataset.id,
imageId = AppConf.defaultDatasetImageId,
isPrimary = true,
isFeatured = true,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
for (id <- AppConf.defaultFeaturedImageIds) {
persistence.DatasetImage.create(
id = UUID.randomUUID.toString,
datasetId = dataset.id,
imageId = id,
isPrimary = false,
isFeatured = false,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
}
DatasetData.Dataset(
id = dataset.id,
meta = DatasetData.DatasetMetaData(
name = dataset.name,
description = dataset.description,
license = dataset.licenseId,
attributes = Seq.empty
),
filesCount = dataset.filesCount,
filesSize = dataset.filesSize,
files = f.map { x =>
DatasetData.DatasetFile(
id = x._1.id,
name = x._1.name,
description = x._1.description,
size = Some(x._2.fileSize),
url = Some(AppConf.fileDownloadRoot + datasetId + "/" + x._1.id),
createdBy = Some(user),
createdAt = timestamp.toString(),
updatedBy = Some(user),
updatedAt = timestamp.toString(),
isZip = x._2.isZip,
zipedFiles = Seq.empty,
zipCount = if (x._2.isZip) {
getZippedFileAmounts(Seq(x._2.id)).headOption.map(x => x._2).getOrElse(0)
} else {
0
}
)
},
images = Seq(Image(
id = AppConf.defaultDatasetImageId,
url = datasetImageDownloadRoot + datasetId + "/" + AppConf.defaultDatasetImageId
)),
primaryImage = AppConf.defaultDatasetImageId,
featuredImage = AppConf.defaultDatasetImageId,
ownerships = Seq(DatasetData.DatasetOwnership(
id = myself.id,
name = myself.name,
fullname = myself.fullname,
organization = myself.organization,
title = myself.title,
description = myself.description,
image = AppConf.imageDownloadRoot + "user/" + myself.id + "/" + myself.imageId,
accessLevel = ownership.accessLevel,
ownerType = OwnerType.User
)),
defaultAccessLevel = persistence.DefaultAccessLevel.Deny,
permission = ownership.accessLevel,
accessCount = 0,
localState = dataset.localState,
s3State = dataset.s3State,
fileLimit = AppConf.fileLimit
)
}
}
}
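  /* Hedged caller sketch (not part of the original source); the FileItem, dataset name and
   * user below are assumptions made up for this example:
   *
   *   val created: Try[DatasetData.Dataset] =
   *     datasetService.create(files = Seq(uploadedItem), saveLocal = true, saveS3 = false,
   *       name = "sensor readings", user = loginUser)
   */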
/**
   * Registers and creates the file entries contained in a ZIP file.
   *
   * @param path file path
   * @param historyId file history ID
   * @param timestamp timestamp
   * @param myself logged-in user object
   * @return
   *        Success(Long) the total uncompressed size on success
*/
private def createZipedFiles(
path: Path,
historyId: String,
timestamp: DateTime,
myself: persistence.User
)(implicit s: DBSession): Try[Long] = {
Try {
val zipInfos = ZipUtil.read(path)
val zfs = for {
zipInfo <- zipInfos.filter(!_.fileName.endsWith("/"))
} yield {
val centralHeader = zipInfo.centralHeader.clone
        // Each entry is handled as a standalone ZIP file at download time, so the reference
        // to the Local Header inside the Central Header must be rewritten to point to the start.
centralHeader(42) = 0
centralHeader(43) = 0
centralHeader(44) = 0
centralHeader(45) = 0
persistence.ZipedFiles.create(
id = UUID.randomUUID().toString,
historyId = historyId,
name = zipInfo.fileName,
description = "",
fileSize = zipInfo.uncompressSize,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp,
cenSize = zipInfo.centralHeader.length,
dataStart = zipInfo.localHeaderOffset,
dataSize = zipInfo.dataSizeWithLocalHeader,
cenHeader = centralHeader
)
}
zfs.map(_.fileSize).sum
}.recoverWith {
case e: Exception =>
logger.warn(LOG_MARKER, "error occurred in createZipedFiles.", e)
Failure(e)
}
}
/**
   * Creates a task.
   *
   * @param datasetId dataset ID
   * @param commandType command value representing a change of storage destination to Local or S3 (@see dsmoq.services.MoveToStatus)
   * @param userId user ID
   * @param timestamp timestamp
   * @param isSave whether to keep the directory at the previous location after the move
   * @param s DB session
   * @return the task ID
*/
private def createTask(
datasetId: String,
commandType: Int,
userId: String,
timestamp: DateTime,
isSave: Boolean
)(implicit s: DBSession): String = {
val id = UUID.randomUUID.toString
persistence.Task.create(
id = id,
taskType = 0,
parameter = compact(
render(
("commandType" -> JInt(commandType))
~ ("datasetId" -> datasetId)
~ ("withDelete" -> JBool(!isSave))
)
),
executeAt = timestamp,
status = 0,
createdBy = userId,
createdAt = timestamp,
updatedBy = userId,
updatedAt = timestamp
)
id
}
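  /* Hedged illustration (not part of the original source): the task's parameter column ends up
   * holding a compact JSON string along the lines of
   *   {"commandType":<MoveToStatus value>,"datasetId":"<dataset uuid>","withDelete":false}
   * where the concrete numeric value of the MoveToStatus constant is not shown here.
   */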
/**
   * Searches datasets and returns the list of matching datasets.
   *
   * @param query search condition
   * @param limit search limit
   * @param offset search offset
   * @param user user information
   * @return
   *        Success(RangeSlice[DatasetData.DatasetsSummary]) the search result on success
   *        Failure(NullPointerException) if any argument is null
*/
def search(
query: SearchDatasetCondition,
limit: Option[Int],
offset: Option[Int],
user: User
): Try[RangeSlice[DatasetData.DatasetsSummary]] = {
Try {
CheckUtil.checkNull(query, "query")
CheckUtil.checkNull(limit, "limit")
CheckUtil.checkNull(offset, "offset")
CheckUtil.checkNull(user, "user")
val d = persistence.Dataset.d
val o = persistence.Ownership.o
val limit_ = limit.getOrElse(DEFALUT_LIMIT)
val offset_ = offset.getOrElse(0)
DB.readOnly { implicit s =>
val joinedGroups = getJoinedGroups(user) :+ AppConf.guestGroupId
val datasetIds = findMatchedDatasetIds(query, joinedGroups).distinct
val count = datasetIds.size
val records =
if (count == 0) {
Seq.empty
} else {
val ds = withSQL {
select(d.result.*)
.from(persistence.Dataset as d)
.where.in(d.id, datasetIds.map(x => sqls.uuid(x)))
.orderBy(d.updatedAt.desc)
.offset(offset_)
.limit(limit_)
}.map(persistence.Dataset(d.resultName)).list.apply()
toDataset(ds, joinedGroups)
}
RangeSlice(RangeSliceSummary(count, limit_, offset_), records)
}
}
}
def findMatchedDatasetIds(query: SearchDatasetCondition, joinedGroups: Seq[String])(implicit s: DBSession): Seq[String] = {
query match {
case SearchDatasetCondition.Query(value, contains) => {
        // basic search
findBasicMatchedDatasetIds(value, contains, joinedGroups)
}
case SearchDatasetCondition.Container(operator, value) => {
        // advanced search
findAdvancedMatchedDatasetIds(operator, value, joinedGroups)
}
case _ => {
        // unexpected condition type, so throw
throw new RuntimeException
}
}
}
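  /* Hedged caller sketch (not part of the original source); the keyword, the boolean flag and
   * the surrounding session handling are assumptions made up for this example:
   *
   *   DB.readOnly { implicit s =>
   *     val groups = getJoinedGroups(user) :+ AppConf.guestGroupId
   *     findMatchedDatasetIds(SearchDatasetCondition.Query("climate", contains = true), groups)
   *   }
   */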
def createFindMatchedDatasetIdsFromQueryBlank(joinedGroups: Seq[String]): SQLSyntax = {
    // show everything (no filter)
val d = persistence.Dataset.d
val o = persistence.Ownership.o
val q = select(d.id)
.from(persistence.Dataset as d)
.innerJoin(persistence.Ownership as o)
.on(sqls.eq(o.datasetId, d.id)
.and.isNull(o.deletedAt)
.and.gt(o.accessLevel, GroupAccessLevel.Deny)
.and.append(sqls"${o.groupId} in ( ${sqls.join(joinedGroups.map(x => sqls.uuid(x)), sqls",")} )"))
.where.isNull(d.deletedAt)
.toSQLSyntax
q
}
def createFindMatchedDatasetIdsFromQueryLike(value: String, joinedGroups: Seq[String]): SQLSyntax = {
    // keyword search
val d = persistence.Dataset.d
val o = persistence.Ownership.o
val g = persistence.Group.g
val f = persistence.File.f
val fh = persistence.FileHistory.fh
val zf = persistence.ZipedFiles.zf
val q: SQLSyntax =
select(d.id)
.from(persistence.Dataset as d)
.innerJoin(persistence.Ownership as o)
.on(sqls.eq(o.datasetId, d.id)
.and.isNull(o.deletedAt)
.and.gt(o.accessLevel, GroupAccessLevel.Deny)
.and.append(sqls"${o.groupId} in ( ${sqls.join(joinedGroups.map(x => sqls.uuid(x)), sqls",")} )"))
.where.isNull(d.deletedAt)
.and.append(sqls"(UPPER(${d.name}) like CONCAT('%', UPPER(${value}), '%') or UPPER(${d.description}) like CONCAT('%', UPPER(${value}), '%'))")
.union(
select(d.id)
.from(persistence.Dataset as d)
.innerJoin(persistence.Ownership as o)
.on(sqls.eq(o.datasetId, d.id)
.and.isNull(o.deletedAt)
.and.gt(o.accessLevel, GroupAccessLevel.Deny)
.and.append(sqls"${o.groupId} in ( ${sqls.join(joinedGroups.map(x => sqls.uuid(x)), sqls",")} )"))
.innerJoin(persistence.File as f)
.on(sqls.eq(f.datasetId, d.id)
.and.isNull(f.deletedAt)
.and.append(sqls"UPPER(${f.name}) like CONCAT('%', UPPER(${value}), '%')"))
.where.isNull(d.deletedAt)
)
.union(
select(d.id)
.from(persistence.Dataset as d)
.innerJoin(persistence.Ownership as o)
.on(sqls.eq(o.datasetId, d.id)
.and.isNull(o.deletedAt)
.and.gt(o.accessLevel, GroupAccessLevel.Deny)
.and.append(sqls"${o.groupId} in ( ${sqls.join(joinedGroups.map(x => sqls.uuid(x)), sqls",")} )"))
.innerJoin(persistence.File as f)
.on(sqls.eq(f.datasetId, d.id)
.and.isNull(f.deletedAt))
.innerJoin(persistence.FileHistory as fh)
.on(sqls.eq(fh.fileId, f.id)
.and.isNull(fh.deletedAt))
.innerJoin(persistence.ZipedFiles as zf)
.on(sqls.eq(zf.historyId, fh.id)
.and.isNull(zf.deletedAt)
.and.append(sqls"UPPER(${zf.name}) like CONCAT('%', UPPER(${value}), '%')"))
.where.isNull(d.deletedAt)
)
.toSQLSyntax
q
}
def createFindMatchedDatasetIdsFromOwner(value: String, joinedGroups: Seq[String]): SQLSyntax = {
    // Owner search
val d = persistence.Dataset.d
    val o_access = persistence.Ownership.syntax("o1") // used to check whether the user has view permission
    val o_group = persistence.Ownership.syntax("o2") // used to join with the group
val g = persistence.Group.g
val m = persistence.Member.m
val u = persistence.User.u
val q: SQLSyntax =
select(d.id)
.from(persistence.Dataset as d)
.innerJoin(persistence.Ownership as o_access)
.on(sqls.eq(o_access.datasetId, d.id)
.and.isNull(o_access.deletedAt)
.and.gt(o_access.accessLevel, GroupAccessLevel.Deny)
.and.append(sqls"${o_access.groupId} in ( ${sqls.join(joinedGroups.map(x => sqls.uuid(x)), sqls",")} )"))
.innerJoin(persistence.Ownership as o_group)
.on(sqls.eq(o_group.datasetId, d.id)
.and.isNull(o_group.deletedAt)
.and.eq(o_group.accessLevel, UserAndGroupAccessLevel.OWNER_OR_PROVIDER))
.innerJoin(persistence.Group as g)
.on(sqls.eq(g.id, o_group.groupId)
.and.isNull(g.deletedAt)
.and.eq(g.groupType, GroupType.Public)
.and.eq(g.name, value))
.where.isNull(d.deletedAt)
.union(
select(d.id)
.from(persistence.Dataset as d)
.innerJoin(persistence.Ownership as o_access)
.on(sqls.eq(o_access.datasetId, d.id)
.and.isNull(o_access.deletedAt)
.and.gt(o_access.accessLevel, GroupAccessLevel.Deny)
.and.append(sqls"${o_access.groupId} in ( ${sqls.join(joinedGroups.map(x => sqls.uuid(x)), sqls",")} )"))
.innerJoin(persistence.Ownership as o_group)
.on(sqls.eq(o_group.datasetId, d.id)
.and.isNull(o_group.deletedAt)
.and.eq(o_group.accessLevel, UserAndGroupAccessLevel.OWNER_OR_PROVIDER))
.innerJoin(persistence.Group as g)
.on(sqls.eq(g.id, o_group.groupId)
.and.isNull(g.deletedAt)
.and.eq(g.groupType, GroupType.Personal))
.innerJoin(persistence.Member as m)
.on(sqls.eq(m.groupId, g.id)
.and.isNull(m.deletedAt))
.innerJoin(persistence.User as u)
.on(sqls.eq(u.id, m.userId)
.and.eq(u.disabled, false)
.and.eq(u.name, value))
.where.isNull(d.deletedAt)
)
.toSQLSyntax
q
}
def createFindMatchedDatasetIdsFromTag(value: String, joinedGroups: Seq[String]): SQLSyntax = {
    // Tag search
val d = persistence.Dataset.d
val o = persistence.Ownership.o
val a = persistence.Annotation.a
val da = persistence.DatasetAnnotation.da
val q: SQLSyntax =
select(d.id)
.from(persistence.Dataset as d)
.innerJoin(persistence.Ownership as o)
.on(sqls.eq(o.datasetId, d.id)
.and.isNull(o.deletedAt)
.and.gt(o.accessLevel, GroupAccessLevel.Deny)
.and.append(sqls"${o.groupId} in ( ${sqls.join(joinedGroups.map(x => sqls.uuid(x)), sqls",")} )"))
.innerJoin(persistence.DatasetAnnotation as da)
.on(sqls.eq(da.datasetId, d.id)
.and.isNull(da.deletedAt)
.and.eq(da.data, "$tag"))
.innerJoin(persistence.Annotation as a)
.on(sqls.eq(a.id, da.annotationId)
.and.isNull(a.deletedAt)
.and.eq(a.name, value))
.where.isNull(d.deletedAt)
.toSQLSyntax
q
}
def createFindMatchedDatasetIdsFromAttribute(key: String, value: String, joinedGroups: Seq[String]): SQLSyntax = {
    // Attribute search
val d = persistence.Dataset.d
val o = persistence.Ownership.o
val a = persistence.Annotation.a
val da = persistence.DatasetAnnotation.da
val q: SQLSyntax =
select(d.id)
.from(persistence.Dataset as d)
.innerJoin(persistence.Ownership as o)
.on(sqls.eq(o.datasetId, d.id)
.and.isNull(o.deletedAt)
.and.gt(o.accessLevel, GroupAccessLevel.Deny)
.and.append(sqls"${o.groupId} in ( ${sqls.join(joinedGroups.map(x => sqls.uuid(x)), sqls",")} )"))
.innerJoin(persistence.DatasetAnnotation as da)
.on(sqls.eq(da.datasetId, d.id)
.and.isNull(da.deletedAt))
.innerJoin(persistence.Annotation as a)
.on(sqls.eq(a.id, da.annotationId)
.and.isNull(a.deletedAt))
.where(
sqls.toAndConditionOpt(
Some(sqls.eq(da.datasetId, d.id)),
if (key.isEmpty) None else Some(sqls.eq(a.name, key)),
if (value.isEmpty) None else Some(sqls.eq(da.data, value)),
Some(sqls.isNull(d.deletedAt))
)
)
.toSQLSyntax
q
}
def createFindMatchedDatasetIdsFromTotalSize(compare: SearchDatasetCondition.Operators.Compare, value: Double, unit: SearchDatasetCondition.SizeUnit, joinedGroups: Seq[String]): SQLSyntax = {
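    // Search by total file size of the dataset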
val d = persistence.Dataset.d
val o = persistence.Ownership.o
val size = (value * unit.magnification).toLong
val q =
select(d.id)
.from(persistence.Dataset as d)
.innerJoin(persistence.Ownership as o)
.on(sqls.eq(o.datasetId, d.id)
.and.isNull(o.deletedAt)
.and.gt(o.accessLevel, GroupAccessLevel.Deny)
.and.append(sqls"${o.groupId} in ( ${sqls.join(joinedGroups.map(x => sqls.uuid(x)), sqls",")} )"))
compare match {
case SearchDatasetCondition.Operators.Compare.GE => {
q.where.ge(d.filesSize, size)
.and.isNull(d.deletedAt)
.toSQLSyntax
}
case SearchDatasetCondition.Operators.Compare.LE => {
q.where.le(d.filesSize, size)
.and.isNull(d.deletedAt)
.toSQLSyntax
}
}
}
def createFindMatchedDatasetIdsFromNumOfFiles(compare: SearchDatasetCondition.Operators.Compare, value: Int, joinedGroups: Seq[String]): SQLSyntax = {
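    // Search by number of files in the dataset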
val d = persistence.Dataset.d
val o = persistence.Ownership.o
val q =
select(d.id)
.from(persistence.Dataset as d)
.innerJoin(persistence.Ownership as o)
.on(sqls.eq(o.datasetId, d.id)
.and.isNull(o.deletedAt)
.and.gt(o.accessLevel, GroupAccessLevel.Deny)
.and.append(sqls"${o.groupId} in ( ${sqls.join(joinedGroups.map(x => sqls.uuid(x)), sqls",")} )"))
compare match {
case SearchDatasetCondition.Operators.Compare.GE => {
q.where.ge(d.filesCount, value)
.and.isNull(d.deletedAt)
.toSQLSyntax
}
case SearchDatasetCondition.Operators.Compare.LE => {
q.where.le(d.filesCount, value)
.and.isNull(d.deletedAt)
.toSQLSyntax
}
}
}
def createFindMatchedDatasetIdsFromPublic(value: Boolean, joinedGroups: Seq[String]): SQLSyntax = {
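    // Search by public/private visibility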
val d = persistence.Dataset.d
    val o_access = persistence.Ownership.syntax("o1") // used to check whether the user has view permission
    val o_public = persistence.Ownership.syntax("o2") // used to check public/private visibility
val q =
select(d.id)
.from(persistence.Dataset as d)
.innerJoin(persistence.Ownership as o_access)
.on(sqls.eq(o_access.datasetId, d.id)
.and.isNull(o_access.deletedAt)
.and.gt(o_access.accessLevel, GroupAccessLevel.Deny)
.and.append(sqls"${o_access.groupId} in ( ${sqls.join(joinedGroups.map(x => sqls.uuid(x)), sqls",")} )"))
if (value) {
q.innerJoin(persistence.Ownership as o_public)
.on(sqls.eq(o_public.datasetId, d.id)
.and.isNull(o_public.deletedAt)
.and.gt(o_public.accessLevel, GroupAccessLevel.Deny)
.and.eq(o_public.groupId, sqls.uuid(AppConf.guestGroupId)))
.where.isNull(d.deletedAt)
.toSQLSyntax
} else {
q.where(
sqls.notExists(
select
.from(persistence.Ownership as o_public)
.where.eq(o_public.datasetId, d.id)
.and.isNull(o_public.deletedAt)
.and.gt(o_public.accessLevel, GroupAccessLevel.Deny)
.and.eqUuid(o_public.groupId, AppConf.guestGroupId)
.toSQLSyntax
)
).and.isNull(d.deletedAt).toSQLSyntax
}
}
def findBasicMatchedDatasetIds(keyword: String, contains: Boolean, joinedGroups: Seq[String])(implicit s: DBSession): Seq[String] = {
    // Basic search
    // Only the following structure is expected here:
    // Query([keyword], true)
val q: SQLSyntax = (keyword, contains) match {
case ("", _) => {
        // Show everything
createFindMatchedDatasetIdsFromQueryBlank(joinedGroups)
}
case (value, true) => {
        // Keyword search
createFindMatchedDatasetIdsFromQueryLike(value, joinedGroups)
}
case _ => {
throw new RuntimeException
}
}
withSQL {
new SelectSQLBuilder(q)
}.map(_.string(1)).list().apply()
}
def findInternalMatchedDatasetIds(subCondition: SearchDatasetCondition, joinedGroups: Seq[String])(implicit s: DBSession): Seq[String] = {
subCondition match {
case SearchDatasetCondition.Query("", _) => {
        // Keyword search with no keyword specified, so show everything
val findIds = withSQL {
new SelectSQLBuilder(createFindMatchedDatasetIdsFromQueryBlank(joinedGroups))
}.map(_.string(1)).list().apply()
findIds
}
case SearchDatasetCondition.Query(value, true) => {
        // Keyword search (LIKE)
val findIds = withSQL {
new SelectSQLBuilder(createFindMatchedDatasetIdsFromQueryLike(value, joinedGroups))
}.map(_.string(1)).list().apply()
findIds
}
case SearchDatasetCondition.Query(value, false) => {
        // All datasets visible to the user
val allIds = withSQL {
new SelectSQLBuilder(createFindMatchedDatasetIdsFromQueryBlank(joinedGroups))
}.map(_.string(1)).list().apply()
        // Datasets containing the keyword
val ignoreIds = withSQL {
new SelectSQLBuilder(createFindMatchedDatasetIdsFromQueryLike(value, joinedGroups))
}.map(_.string(1)).list().apply()
        // Exclude the datasets matching the search condition from those the user can view
val findIds = allIds.filterNot(v => ignoreIds.contains(v))
findIds
}
case SearchDatasetCondition.Owner(value, true) => {
        // Owner search
val findIds = withSQL {
new SelectSQLBuilder(createFindMatchedDatasetIdsFromOwner(value, joinedGroups))
}.map(_.string(1)).list().apply()
findIds
}
case SearchDatasetCondition.Owner(value, false) => {
        // All datasets visible to the user
val allIds = withSQL {
new SelectSQLBuilder(createFindMatchedDatasetIdsFromQueryBlank(joinedGroups))
}.map(_.string(1)).list().apply()
        // Owner search
val ignoreIds = withSQL {
new SelectSQLBuilder(createFindMatchedDatasetIdsFromOwner(value, joinedGroups))
}.map(_.string(1)).list().apply()
        // Exclude the datasets matching the search condition from those the user can view
val findIds = allIds.filterNot(v => ignoreIds.contains(v))
findIds
}
case SearchDatasetCondition.Tag(value) => {
        // Tag search
val findIds = withSQL {
new SelectSQLBuilder(createFindMatchedDatasetIdsFromTag(value, joinedGroups))
}.map(_.string(1)).list().apply()
findIds
}
case SearchDatasetCondition.Attribute("", "") => {
        // Attribute search with nothing specified, so show everything
val findIds = withSQL {
new SelectSQLBuilder(createFindMatchedDatasetIdsFromQueryBlank(joinedGroups))
}.map(_.string(1)).list().apply()
findIds
}
case SearchDatasetCondition.Attribute(key, value) => {
        // Attribute search
val findIds = withSQL {
new SelectSQLBuilder(createFindMatchedDatasetIdsFromAttribute(key, value, joinedGroups))
}.map(_.string(1)).list().apply()
findIds
}
case SearchDatasetCondition.TotalSize(compare, value, unit) => {
        // Search by the total file size of the dataset
val findIds = withSQL {
new SelectSQLBuilder(createFindMatchedDatasetIdsFromTotalSize(compare, value, unit, joinedGroups))
}.map(_.string(1)).list().apply()
findIds
}
case SearchDatasetCondition.NumOfFiles(compare, value) => {
        // Search by the total number of files in the dataset
val findIds = withSQL {
new SelectSQLBuilder(createFindMatchedDatasetIdsFromNumOfFiles(compare, value, joinedGroups))
}.map(_.string(1)).list().apply()
findIds
}
case SearchDatasetCondition.Public(value) => {
        // Public / private
val findIds = withSQL {
new SelectSQLBuilder(createFindMatchedDatasetIdsFromPublic(value, joinedGroups))
}.map(_.string(1)).list().apply()
findIds
}
case _ => {
throw new IllegalArgumentException
}
}
}
def findAdvancedMatchedDatasetIds(operator: SearchDatasetCondition.Operators.Container, conditions: Seq[SearchDatasetCondition], joinedGroups: Seq[String])(implicit s: DBSession): Seq[String] = {
    // Advanced search
    // Only the following structure is expected here (one parent OR container holding multiple child AND containers).
    // Any other structure results in an exception.
    //  Container(OR,List(
    //    Container(AND,List( // the List contains no nested Container
// Query(json,true),
// Query(json,false), ...
// )), Container(AND,List(
// Query(java,true),
// Query(java,false), ...
// )), Container(AND,List(
// Query(expo,true),
// Query(expo,false), ...
// )), ...
// ))
if (SearchDatasetCondition.Operators.Container.AND.equals(operator)) {
      // Unsupported structure, throw an exception
throw new IllegalArgumentException
}
    // The parent container is OR, so this Set accumulates the IDs matched by each child AND container for the return value
val ids: HashSet[String] = HashSet.empty
conditions foreach {
condition =>
condition match {
case SearchDatasetCondition.Container(SearchDatasetCondition.Operators.Container.AND, subConditions) => {
var subIds: Set[String] = null
var alreadyAddedSubIds = false
            // Keep only the IDs that match all of the following conditions
subConditions foreach {
subCondition =>
(alreadyAddedSubIds, (subIds == null || subIds.size == 0)) match {
case (false, _) => {
                    // Nothing searched yet, so search and keep all results
subIds = findInternalMatchedDatasetIds(subCondition, joinedGroups).toSet
alreadyAddedSubIds = true
}
case (true, true) => {
                    // Already searched and the matched ID set is empty, so skip further searching
}
case (true, false) => {
                    // Already searched and there are matched IDs, so search again and keep only the IDs already in the set
subIds = findInternalMatchedDatasetIds(subCondition, joinedGroups)
.toStream.filter(v => subIds.contains(v)).toSet
}
}
}
            // Add the dataset IDs matched by this AND condition
subIds.foreach(v => ids += v)
}
case _ => {
throw new IllegalArgumentException
}
}
}
ids.toList
}
  /**
   * Builds dataset search results from DB query results.
   *
   * @param ds DB query results
   * @param joinedGroups groups the user belongs to
   * @return dataset search results
   */
def toDataset(
ds: Seq[persistence.Dataset],
joinedGroups: Seq[String]
)(implicit s: DBSession): Seq[DatasetData.DatasetsSummary] = {
CheckUtil.checkNull(ds, "ds")
CheckUtil.checkNull(joinedGroups, "joinedGroups")
val ids = ds.map(_.id)
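    // Guests never see ownership information, so the owner lookup is skipped for guest users.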
val isGuest = joinedGroups.filter(_ != AppConf.guestGroupId).isEmpty
val ownerMap = if (isGuest) Map.empty[String, Seq[DatasetData.DatasetOwnership]] else getOwnerMap(ids)
val accessLevelMap = getAccessLevelMap(ids, joinedGroups)
val guestAccessLevelMap = getGuestAccessLevelMap(ids)
val imageIdMap = getImageIdMap(ids)
val featuredImageIdMap = getFeaturedImageIdMap(ids)
val attributeMap = getAttributeMap(ids)
ds.map { d =>
val imageUrl = imageIdMap.get(d.id).map { x =>
datasetImageDownloadRoot + d.id + "/" + x
}.getOrElse("")
val featuredImageUrl = featuredImageIdMap.get(d.id).map { x =>
datasetImageDownloadRoot + d.id + "/" + x
}.getOrElse("")
DatasetData.DatasetsSummary(
id = d.id,
name = d.name,
description = d.description,
image = imageUrl,
featuredImage = featuredImageUrl,
attributes = attributeMap.getOrElse(d.id, Seq.empty),
ownerships = ownerMap.getOrElse(d.id, Seq.empty),
files = d.filesCount,
dataSize = d.filesSize,
defaultAccessLevel = guestAccessLevelMap.getOrElse(d.id, DefaultAccessLevel.Deny),
permission = accessLevelMap.getOrElse(d.id, DefaultAccessLevel.Deny),
localState = d.localState,
s3State = d.s3State
)
}
}
  /**
   * Searches datasets and returns the list of matching datasets.
   *
   * @param query search string
   * @param owners owners
   * @param groups groups to search
   * @param attributes attributes to search
   * @param limit maximum number of results
   * @param offset search offset
   * @param orderby string specifying the sort order
   * @param user user information
   * @return
   *     Success(RangeSlice[DatasetData.DatasetsSummary]) the search results on success
   *     Failure(NullPointerException) if any argument is null
   */
def search(
query: Option[String],
owners: Seq[String],
groups: Seq[String],
attributes: Seq[DataSetAttribute],
limit: Option[Int],
offset: Option[Int],
orderby: Option[String],
user: User
): Try[RangeSlice[DatasetData.DatasetsSummary]] = {
Try {
CheckUtil.checkNull(query, "query")
CheckUtil.checkNull(owners, "owners")
CheckUtil.checkNull(groups, "groups")
CheckUtil.checkNull(attributes, "attributes")
CheckUtil.checkNull(limit, "limit")
CheckUtil.checkNull(offset, "offset")
CheckUtil.checkNull(orderby, "orderby")
CheckUtil.checkNull(user, "user")
val limit_ = limit.getOrElse(DEFALUT_LIMIT)
val offset_ = offset.getOrElse(0)
DB.readOnly { implicit s =>
val joinedGroups = getJoinedGroups(user)
val ids = for {
userGroupIds <- getGroupIdsByUserName(owners)
groupIds <- getGroupIdsByGroupName(groups)
} yield {
(userGroupIds, groupIds)
}
ids match {
case None => {
RangeSlice(RangeSliceSummary(0, limit_, offset_), Seq.empty)
}
case Some((userGroupIds, groupIds)) => {
val count = countDataSets(joinedGroups, query, userGroupIds, groupIds, attributes)
val records = if (count > 0) {
findDataSets(joinedGroups, query, userGroupIds, groupIds, attributes, limit_, offset_, orderby, user)
} else {
Seq.empty
}
RangeSlice(RangeSliceSummary(count, limit_, offset_), records)
}
}
}
}
}
  /**
   * Gets the personal group IDs corresponding to the given user account names.
   *
   * @param names list of user account names
   * @param s DB session
   * @return the matching group IDs (None if a non-empty name list matched nothing)
   */
private def getGroupIdsByUserName(names: Seq[String])(implicit s: DBSession): Option[Seq[String]] = {
if (names.nonEmpty) {
val g = persistence.Group.g
val m = persistence.Member.m
val u = persistence.User.u
val groups = withSQL {
select.apply(g.id)
.from(persistence.Group as g)
.innerJoin(persistence.Member as m).on(m.groupId, g.id)
.innerJoin(persistence.User as u).on(u.id, m.userId)
.where
.in(u.name, names)
.and
.eq(g.groupType, GroupType.Personal)
.and
.isNull(g.deletedAt)
.and
.isNull(m.deletedAt)
.and
.eq(u.disabled, false)
}.map(rs => rs.string(1)).list.apply()
if (groups.nonEmpty) {
Some(groups)
} else {
None
}
} else {
Some(Seq.empty)
}
}
  /**
   * Gets group IDs from group names.
   *
   * @param names list of group names
   * @param s DB session
   * @return the matching group IDs (None if a non-empty name list matched nothing)
   */
private def getGroupIdsByGroupName(names: Seq[String])(implicit s: DBSession): Option[Seq[String]] = {
if (names.nonEmpty) {
val g = persistence.Group.g
val groups = withSQL {
select.apply(g.id)
.from(persistence.Group as g)
.where
.in(g.name, names)
.and
.eq(g.groupType, GroupType.Public)
.and
.isNull(g.deletedAt)
}.map(rs => rs.string(1)).list.apply()
if (groups.nonEmpty) {
Some(groups)
} else {
None
}
} else {
Some(Seq.empty)
}
}
  /**
   * Counts the datasets matching the search.
   *
   * @param joindGroups list of group IDs the logged-in user belongs to
   * @param query search string
   * @param ownerUsers list of owner user IDs
   * @param ownerGroups list of provider group IDs
   * @param attributes list of attributes
   * @param s DB session
   * @return the number of datasets
   */
private def countDataSets(
joindGroups: Seq[String],
query: Option[String],
ownerUsers: Seq[String],
ownerGroups: Seq[String],
attributes: Seq[DataSetAttribute]
)(implicit s: DBSession): Int = {
withSQL {
createDatasetSql(
select.apply[Int](sqls.countDistinct(persistence.Dataset.d.id)),
joindGroups, query, ownerUsers, ownerGroups, attributes
)
}.map(implicit rs => rs.int(1)).single.apply().get
}
  /**
   * Searches datasets.
   *
   * @param joindGroups list of group IDs the logged-in user belongs to
   * @param query search string
   * @param ownerUsers list of owner user IDs
   * @param ownerGroups list of provider group IDs
   * @param attributes list of attributes
   * @param limit maximum number of results
   * @param offset search offset
   * @param orderby string specifying the sort order
   * @param user user information
   * @param s DB session
   * @return the search results
   */
private def findDataSets(
joindGroups: Seq[String],
query: Option[String],
ownerUsers: Seq[String],
ownerGroups: Seq[String],
attributes: Seq[DataSetAttribute],
limit: Int,
offset: Int,
orderby: Option[String],
user: User
)(implicit s: DBSession): Seq[DatasetData.DatasetsSummary] = {
val ds = persistence.Dataset.d
val o = persistence.Ownership.o
val a = persistence.Annotation.a
val da = persistence.DatasetAnnotation.syntax("da")
val xda2 = SubQuery.syntax("xda2", da.resultName, a.resultName)
val selects = orderby match {
case Some(ord) if ord == "attribute" => {
select.apply[Any](
ds.resultAll,
sqls.max(o.accessLevel).append(sqls"access_level"),
xda2(da).data
)
}
case _ => {
select.apply[Any](ds.resultAll, sqls.max(o.accessLevel).append(sqls"access_level"))
}
}
val datasets = orderby match {
case Some(ord) if ord == "attribute" => {
withSQL {
createDatasetSql(selects, joindGroups, query, ownerUsers, ownerGroups, attributes)
.groupBy(ds.id, xda2(da).data)
.orderBy(xda2(da).data)
.offset(offset)
.limit(limit)
}.map(rs => (persistence.Dataset(ds.resultName)(rs), rs.int("access_level"))).list.apply()
}
case _ => {
withSQL {
createDatasetSql(selects, joindGroups, query, ownerUsers, ownerGroups, attributes)
.groupBy(ds.id)
.orderBy(ds.updatedAt).desc
.offset(offset)
.limit(limit)
}.map(rs => (persistence.Dataset(ds.resultName)(rs), rs.int("access_level"))).list.apply()
}
}
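    // Batch-load owners, guest access levels and images for the whole page to avoid per-dataset queries.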
val datasetIds = datasets.map(_._1.id)
val ownerMap = getOwnerMap(datasetIds)
val guestAccessLevelMap = getGuestAccessLevelMap(datasetIds)
val imageIdMap = getImageIdMap(datasetIds)
val featuredImageIdMap = getFeaturedImageIdMap(datasetIds)
datasets.map { x =>
val ds = x._1
val permission = x._2
val imageUrl = imageIdMap.get(ds.id).map { x =>
datasetImageDownloadRoot + ds.id + "/" + x
}.getOrElse("")
val featuredImageUrl = featuredImageIdMap.get(ds.id).map { x =>
datasetImageDownloadRoot + ds.id + "/" + x
}.getOrElse("")
val accessLevel = guestAccessLevelMap.get(ds.id).getOrElse(DefaultAccessLevel.Deny)
DatasetData.DatasetsSummary(
id = ds.id,
name = ds.name,
description = ds.description,
image = imageUrl,
featuredImage = featuredImageUrl,
        attributes = getAttributes(ds.id), // TODO inefficient: issues one query per dataset
ownerships = if (user.isGuest) { Seq.empty } else { ownerMap.get(ds.id).getOrElse(Seq.empty) },
files = ds.filesCount,
dataSize = ds.filesSize,
defaultAccessLevel = accessLevel,
permission = permission,
localState = ds.localState,
s3State = ds.s3State
)
}
}
  /**
   * Builds the SQL used to search datasets.
   *
   * @param selectSql SQL for the select clause
   * @param joinedGroups list of group IDs the logged-in user belongs to
   * @param query search string
   * @param ownerUsers list of owner user IDs
   * @param ownerGroups list of provider group IDs
   * @param attributes list of attributes
   * @return the search SQL
   */
private def createDatasetSql[A](
selectSql: SelectSQLBuilder[A],
joinedGroups: Seq[String],
query: Option[String],
ownerUsers: Seq[String],
ownerGroups: Seq[String],
attributes: Seq[DataSetAttribute]
): ConditionSQLBuilder[A] = {
val ds = persistence.Dataset.d
val g = persistence.Group.g
val o = persistence.Ownership.o
val xo = SubQuery.syntax("xo", o.resultName, g.resultName)
val a = persistence.Annotation.a
val da = persistence.DatasetAnnotation.syntax("da")
val xda = SubQuery.syntax("xda", da.resultName, a.resultName)
val xda2 = SubQuery.syntax("xda2", da.resultName, a.resultName)
val f = persistence.File.f
val fh = persistence.FileHistory.fh
val zf = persistence.ZipedFiles.zf
val xf = SubQuery.syntax("xf", ds.resultName, f.resultName, fh.resultName, zf.resultName)
selectSql
.from(persistence.Dataset as ds)
.innerJoin(persistence.Ownership as o).on(o.datasetId, ds.id)
.map { sql =>
query match {
case Some(q) => {
sql.innerJoin(
select(ds.id)
.from(persistence.Dataset as ds)
.where
.withRoundBracket(s => s.upperLikeQuery(ds.name, q).or.upperLikeQuery(ds.description, q))
.union(
select(f.datasetId.append(sqls" id"))
.from(persistence.File as f)
.where
.upperLikeQuery(f.name, q)
).union(
select(f.datasetId.append(sqls" id"))
.from(persistence.File as f)
.innerJoin(persistence.FileHistory as fh).on(fh.fileId, f.id)
.innerJoin(persistence.ZipedFiles as zf).on(zf.historyId, fh.id)
.where
.upperLikeQuery(zf.name, q)
)
.as(xf)
).on(sqls"xf.id", ds.id)
}
case None => sql
}
}
.map { sql =>
if (ownerUsers.nonEmpty || ownerGroups.nonEmpty) {
val ownerAccesses = Seq(
ownerUsers.map {
sqls.eqUuid(o.groupId, _).and.eq(o.accessLevel, UserAccessLevel.Owner)
},
ownerGroups.map {
sqls.eqUuid(o.groupId, _).and.eq(o.accessLevel, GroupAccessLevel.Provider)
}
).flatten
sql.innerJoin(
select.apply[String](o.result.datasetId)
.from(persistence.Ownership as o)
.innerJoin(persistence.Group as g).on(o.groupId, g.id)
.where
.isNull(o.deletedAt).and.isNull(g.deletedAt)
.and
.withRoundBracket(
_.map { q =>
q.append(
sqls.join(ownerAccesses, sqls"or")
)
}
)
.groupBy(o.datasetId)
.having(sqls.eq(sqls.count(o.datasetId), ownerUsers.length + ownerGroups.length))
.as(xo)
).on(ds.id, xo(o).datasetId)
} else {
sql
}
}
.map { sql =>
if (attributes.nonEmpty) {
sql.innerJoin(
select.apply(da.result.datasetId)
.from(persistence.Annotation as a)
.innerJoin(persistence.DatasetAnnotation as da).on(da.annotationId, a.id)
.where
.isNull(a.deletedAt).and.isNull(da.deletedAt)
.and
.withRoundBracket(
_.append(sqls.join(attributes.map { x =>
if (x.value.isEmpty) {
sqls.eq(a.name, x.name)
} else {
sqls.eq(a.name, x.name).and.eq(da.data, x.value)
}
}, sqls"or"))
)
.groupBy(da.datasetId).having(sqls.eq(sqls.count(da.datasetId), attributes.length))
.as(xda)
).on(ds.id, xda(da).datasetId)
} else {
sql
}
}
.map { sql =>
if (attributes.nonEmpty) {
sql.leftJoin(
select.apply(da.result.datasetId, da.result.data)
.from(persistence.Annotation as a)
.innerJoin(persistence.DatasetAnnotation as da).on(da.annotationId, a.id)
.where
.isNull(a.deletedAt).and.isNull(da.deletedAt)
.and
.withRoundBracket(
_.append(sqls.join(attributes.map { x =>
sqls.eq(a.name, x.name)
}, sqls"or"))
)
.as(xda2)
).on(ds.id, xda2(da).datasetId)
} else {
sql
}
}
.where
.inUuid(o.groupId, Seq.concat(joinedGroups, Seq(AppConf.guestGroupId)))
.and
.gt(o.accessLevel, GroupAccessLevel.Deny)
.and
.isNull(ds.deletedAt)
}
  /**
   * Gets the access level that a user belonging to the given groups has on each of the given datasets.
   *
   * @param datasetIds dataset IDs
   * @param joinedGroups groups the user belongs to
   * @return access levels per dataset
   */
private def getAccessLevelMap(
datasetIds: Seq[String],
joinedGroups: Seq[String]
)(implicit s: DBSession): Map[String, Int] = {
if (datasetIds.isEmpty) {
return Map.empty
}
val o = persistence.Ownership.syntax("o")
withSQL {
select(o.result.datasetId, sqls.max(o.accessLevel))
.from(persistence.Ownership as o)
.where
.inUuid(o.datasetId, datasetIds)
.and
.inUuid(o.groupId, joinedGroups)
.and
.isNull(o.deletedAt)
.groupBy(o.datasetId)
}.map { rs =>
(rs.string(o.resultName.datasetId), rs.int(2))
}.list.apply().toMap
}
  /**
   * Gets the guest access level of the given datasets.
   *
   * @param datasetIds dataset IDs
   * @return guest access levels per dataset
   */
private def getGuestAccessLevelMap(datasetIds: Seq[String])(implicit s: DBSession): Map[String, Int] = {
if (datasetIds.nonEmpty) {
val o = persistence.Ownership.syntax("o")
withSQL {
select(o.result.datasetId, o.result.accessLevel)
.from(persistence.Ownership as o)
.where
.inUuid(o.datasetId, datasetIds)
.and
.eq(o.groupId, sqls.uuid(AppConf.guestGroupId))
.and
.isNull(o.deletedAt)
}.map { rs =>
(
rs.string(o.resultName.datasetId),
rs.int(o.resultName.accessLevel)
)
}.list.apply().toMap
} else {
Map.empty
}
}
  /**
   * Builds a map of dataset icon images.
   *
   * @param datasetIds list of dataset IDs
   * @param s DB session
   * @return map from dataset ID to icon image ID
   */
private def getImageIdMap(datasetIds: Seq[String])(implicit s: DBSession): Map[String, String] = {
if (datasetIds.nonEmpty) {
val di = persistence.DatasetImage.syntax("di")
withSQL {
select(di.result.datasetId, di.result.imageId)
.from(persistence.DatasetImage as di)
.where
.inUuid(di.datasetId, datasetIds)
.and
.eq(di.isPrimary, true)
.and
.isNull(di.deletedAt)
}.map { rs =>
(
rs.string(di.resultName.datasetId),
rs.string(di.resultName.imageId)
)
}.list.apply().toMap
} else {
Map.empty
}
}
  /**
   * Builds a map of dataset featured images.
   *
   * @param datasetIds list of dataset IDs
   * @param s DB session
   * @return map from dataset ID to featured image ID
   */
private def getFeaturedImageIdMap(datasetIds: Seq[String])(implicit s: DBSession): Map[String, String] = {
if (datasetIds.nonEmpty) {
val di = persistence.DatasetImage.syntax("di")
withSQL {
select(di.result.datasetId, di.result.imageId)
.from(persistence.DatasetImage as di)
.where
.inUuid(di.datasetId, datasetIds)
.and
.eq(di.isFeatured, true)
.and
.isNull(di.deletedAt)
}.map { rs =>
(
rs.string(di.resultName.datasetId),
rs.string(di.resultName.imageId)
)
}.list.apply().toMap
} else {
Map.empty
}
}
  /**
   * Builds a map of the Owner/Provider ownerships of the datasets.
   *
   * @param datasetIds list of dataset IDs
   * @param s DB session
   * @return map from dataset ID to its Owner/Provider ownerships
   */
private def getOwnerMap(
datasetIds: Seq[String]
)(implicit s: DBSession): Map[String, Seq[DatasetData.DatasetOwnership]] = {
if (datasetIds.nonEmpty) {
val o = persistence.Ownership.o
val g = persistence.Group.g
val m = persistence.Member.m
val u = persistence.User.u
val gi = persistence.GroupImage.syntax("gi")
val i = persistence.Image.i
withSQL {
select.apply(o.result.datasetId, g.result.id, g.result.groupType,
u.result.id, u.result.column("name"), u.result.fullname, g.result.column("name"))
.from(persistence.Ownership as o)
.innerJoin(persistence.Group as g).on(o.groupId, g.id)
.leftJoin(persistence.Member as m).on(sqls.eq(m.groupId, g.id).and.eq(g.groupType, 1))
.leftJoin(persistence.User as u).on(m.userId, u.id)
.where
.inUuid(o.datasetId, datasetIds)
.and
.append(sqls"case ${g.groupType}")
.append(sqls"when 0 then").append(sqls.eq(o.accessLevel, persistence.GroupAccessLevel.Provider))
.append(sqls"when 1 then").append(sqls.joinWithAnd(
sqls.eq(o.accessLevel, persistence.UserAccessLevel.Owner),
sqls.isNotNull(u.id),
sqls.isNull(m.deletedAt),
sqls.eq(u.disabled, false)
))
.append(sqls"end")
}.map(rs => {
val datasetId = rs.string(o.resultName.datasetId)
if (rs.int(g.resultName.groupType) == GroupType.Personal) {
(datasetId, DatasetData.DatasetOwnership(
id = rs.string(u.resultName.id),
name = rs.string(u.resultName.name),
fullname = rs.string(u.resultName.fullname),
image = "",
accessLevel = UserAccessLevel.Owner,
ownerType = OwnerType.User,
description = "",
title = "",
organization = ""
))
} else {
(datasetId, DatasetData.DatasetOwnership(
id = rs.string(g.resultName.id),
name = rs.string(g.resultName.name),
fullname = "",
image = "",
accessLevel = GroupAccessLevel.Provider,
ownerType = OwnerType.Group,
description = "",
title = "",
organization = ""
))
}
}).list.apply().groupBy(_._1).mapValues(_.map(_._2))
} else {
Map.empty
}
}
  /**
   * Gets the attributes of the given datasets.
   *
   * @param datasetIds dataset IDs
   * @return attributes per dataset
   */
private def getAttributeMap(
datasetIds: Seq[String]
)(implicit s: DBSession): Map[String, Seq[DatasetData.DatasetAttribute]] = {
if (datasetIds.isEmpty) {
return Map.empty
}
val da = persistence.DatasetAnnotation.da
val a = persistence.Annotation.a
withSQL {
select(da.result.*, a.result.*)
.from(persistence.DatasetAnnotation as da)
.innerJoin(persistence.Annotation as a)
.on(sqls.eq(a.id, da.annotationId).and.isNull(a.deletedAt))
.where
.inUuid(da.datasetId, datasetIds)
.and
.isNull(da.deletedAt)
}.map { rs =>
val datasetAnnotaion = persistence.DatasetAnnotation(da.resultName)(rs)
val annotation = persistence.Annotation(a.resultName)(rs)
val attribute = DatasetData.DatasetAttribute(
name = annotation.name,
value = datasetAnnotaion.data
)
(datasetAnnotaion.datasetId, attribute)
}.list.apply().groupBy(_._1).mapValues(_.map(_._2))
}
  /**
   * Checks that the given dataset exists.
   * @param datasetId dataset ID
   * @param session DB session
   * @return the retrieved dataset
   * @throws NotFoundException if the dataset does not exist
   */
private def checkDatasetExisitence(datasetId: String)(implicit session: DBSession): persistence.Dataset = {
getDataset(datasetId).getOrElse {
      // Throw an exception if the dataset does not exist
throw new NotFoundException
}
}
  /**
   * Checks that the file exists, then returns the given dataset.
   *
   * @param datasetId dataset ID
   * @param fileId file ID
   * @param s DB session
   * @return the dataset
   * @throws NotFoundException if the dataset or the file does not exist
   */
private def checkDatasetWithFile(datasetId: String, fileId: String)(implicit s: DBSession): persistence.Dataset = {
val dataset = checkDatasetExisitence(datasetId)
if (!existsFile(datasetId, fileId)) {
throw new NotFoundException
}
dataset
}
  /**
   * Checks whether the user has management permission on the given dataset.
   *
   * @param datasetId dataset ID
   * @param user user information
   * @throws AccessDeniedException if the user lacks management permission
   */
private def checkOwnerAccess(datasetId: String, user: User)(implicit s: DBSession): Unit = {
if (!isOwner(user.id, datasetId)) {
throw new AccessDeniedException(resource.getString(ResourceNames.ONLY_ALLOW_DATASET_OWNER), Some(user))
}
}
  /**
   * Checks the read permission on a dataset.
   *
   * @param datasetId dataset ID
   * @param user user information
   * @return the role value
   * @throws AccessDeniedException if no permission applies
   * @throws NullPointerException if any argument is null
   */
def checkReadPermission(datasetId: String, user: User)(implicit session: DBSession): Int = {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(user, "user")
CheckUtil.checkNull(session, "session")
val groups = getJoinedGroups(user)
val permission = getPermission(datasetId, groups)
    // FIXME When checking, use the UserAccessLevel constants for user permissions and the GroupAccessLevel constants for groups
    // (and remove the UserAndGroupAccessLevel.DENY constant)
if (permission == UserAndGroupAccessLevel.DENY) {
throw new AccessDeniedException(resource.getString(ResourceNames.NO_ACCESS_PERMISSION), Some(user))
}
permission
}
  /**
   * Gets the detailed information of the given dataset.
   *
   * @param id dataset ID
   * @param user user information
   * @return
   *     Success(DatasetData.Dataset) the dataset information on success
   *     Failure(NullPointerException) if any argument is null
   *     Failure(NotFoundException) if the dataset is not found
   *     Failure(AccessDeniedException) if the logged-in user lacks read permission
   */
def get(id: String, user: User): Try[DatasetData.Dataset] = {
Try {
CheckUtil.checkNull(id, "id")
CheckUtil.checkNull(user, "user")
DB.readOnly { implicit s =>
val dataset = checkDatasetExisitence(id)
val permission = checkReadPermission(id, user)
val guestAccessLevel = getGuestAccessLevel(id)
val owners = getAllOwnerships(id, user)
val attributes = getAttributes(id)
val images = getImages(id)
val primaryImage = getPrimaryImageId(id).getOrElse(AppConf.defaultDatasetImageId)
val featuredImage = getFeaturedImageId(id).getOrElse(AppConf.defaultFeaturedImageIds(0))
val count = getAccessCount(id)
val dsApp = getApp(id)
val daAppUrl = getAppUrl(id, user)
DatasetData.Dataset(
id = dataset.id,
files = Seq.empty,
filesCount = dataset.filesCount,
filesSize = dataset.filesSize,
meta = DatasetData.DatasetMetaData(
name = dataset.name,
description = dataset.description,
license = dataset.licenseId,
attributes = attributes
),
images = images,
primaryImage = primaryImage,
featuredImage = featuredImage,
ownerships = if (user.isGuest) { Seq.empty } else { owners },
defaultAccessLevel = guestAccessLevel,
permission = permission,
accessCount = count,
localState = dataset.localState,
s3State = dataset.s3State,
fileLimit = AppConf.fileLimit,
app = dsApp,
appUrl = daAppUrl.getOrElse("")
)
}
}
}
  /**
   * Adds files to the given dataset.
   *
   * @param id dataset ID
   * @param files list of files
   * @param user user information
   * @return
   *     Success(DatasetData.DatasetAddFiles) the added file data objects on success
   *     Failure(NullPointerException) if any argument is null
   *     Failure(NotFoundException) if the dataset is not found
   *     Failure(AccessDeniedException) if the logged-in user lacks edit permission
   */
def addFiles(id: String, files: Seq[FileItem], user: User): Try[DatasetData.DatasetAddFiles] = {
Try {
CheckUtil.checkNull(id, "id")
CheckUtil.checkNull(files, "files")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
val dataset = checkDatasetExisitence(id)
checkOwnerAccess(id, user)
val myself = persistence.User.find(user.id).get
val timestamp = DateTime.now()
val f = files.map { f =>
          // Match on the file extension, case-insensitively
val isZip = f.getName.toLowerCase.endsWith(".zip")
val fileId = UUID.randomUUID.toString
val historyId = UUID.randomUUID.toString
FileManager.uploadToLocal(id, fileId, historyId, f)
val path = Paths.get(AppConf.fileDir, id, fileId, historyId)
val file = persistence.File.create(
id = fileId,
datasetId = id,
historyId = historyId,
name = f.name,
description = "",
fileType = 0,
fileMime = "application/octet-stream",
fileSize = f.size,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp,
localState = dataset.localState match {
case SaveStatus.SAVED => SaveStatus.SAVED
case SaveStatus.SYNCHRONIZING => SaveStatus.SAVED
case _ => SaveStatus.DELETING
},
s3State = dataset.s3State match {
case SaveStatus.NOT_SAVED => SaveStatus.NOT_SAVED
case SaveStatus.DELETING => SaveStatus.DELETING
case _ => SaveStatus.SYNCHRONIZING
}
)
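          // For ZIP files, record the expanded size of the archive; otherwise use the raw file size.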
val realSize = if (isZip) {
createZipedFiles(path, historyId, timestamp, myself).getOrElse {
              // Register a new error record tied to the newly issued file history ID
persistence.FileHistoryError.create(
id = UUID.randomUUID().toString,
historyId = historyId,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
              // The ZIP file cannot be expanded, so keep the original size
f.size
}
} else {
f.size
}
val history = persistence.FileHistory.create(
id = historyId,
fileId = file.id,
fileType = 0,
fileMime = "application/octet-stream",
filePath = "/" + id + "/" + file.id + "/" + historyId,
fileSize = f.size,
isZip = isZip,
realSize = realSize,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
(file, history)
}
if (dataset.s3State == SaveStatus.SAVED || dataset.s3State == SaveStatus.SYNCHRONIZING) {
createTask(id, MoveToStatus.S3, myself.id, timestamp, dataset.localState == SaveStatus.SAVED)
}
        // Update files_size and files_count on the datasets table
updateDatasetFileStatus(id, myself.id, timestamp)
DatasetData.DatasetAddFiles(
files = f.map { x =>
DatasetData.DatasetFile(
id = x._1.id,
name = x._1.name,
description = x._1.description,
size = Some(x._2.fileSize),
url = Some(AppConf.fileDownloadRoot + id + "/" + x._1.id),
createdBy = Some(user),
createdAt = timestamp.toString(),
updatedBy = Some(user),
updatedAt = timestamp.toString(),
isZip = x._2.isZip,
zipedFiles = Seq.empty,
zipCount = if (x._2.isZip) {
getZippedFileAmounts(Seq(x._2.id)).headOption.map(x => x._2).getOrElse(0)
} else {
0
}
)
}
)
}
}
}
  /**
   * Updates the given file.
   *
   * @param datasetId dataset ID
   * @param fileId file ID
   * @param file the file to update with
   * @param user user information
   * @return
   *     Success(DatasetData.DatasetFile) the updated file data object on success
   *     Failure(NullPointerException) if any argument is null
   *     Failure(NotFoundException) if the dataset or the file is not found
   *     Failure(AccessDeniedException) if the logged-in user lacks edit permission
   */
def updateFile(datasetId: String, fileId: String, file: FileItem, user: User): Try[DatasetData.DatasetFile] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(fileId, "fileId")
CheckUtil.checkNull(file, "file")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
val dataset = checkDatasetWithFile(datasetId, fileId)
checkOwnerAccess(datasetId, user)
val myself = persistence.User.find(user.id).get
val timestamp = DateTime.now()
val historyId = UUID.randomUUID.toString
updateFileNameAndSize(
fileId = fileId,
historyId = historyId,
file = file,
userId = myself.id,
timestamp = timestamp,
s3State = dataset.s3State match {
case SaveStatus.NOT_SAVED => SaveStatus.NOT_SAVED
case SaveStatus.DELETING => SaveStatus.DELETING
case _ => SaveStatus.SYNCHRONIZING
},
localState = dataset.localState match {
case SaveStatus.SAVED => SaveStatus.SAVED
case SaveStatus.SYNCHRONIZING => SaveStatus.SAVED
case _ => SaveStatus.DELETING
}
)
        // Match on the file extension, case-insensitively
val isZip = file.getName.toLowerCase.endsWith(".zip")
FileManager.uploadToLocal(datasetId, fileId, historyId, file)
val path = Paths.get(AppConf.fileDir, datasetId, fileId, historyId)
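        // As in addFiles, compute the real (expanded) size for ZIP uploads.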
val realSize = if (isZip) {
createZipedFiles(path, historyId, timestamp, myself).getOrElse {
            // Fetch the error ID tied to the previous file history ID
val prevErrorId = withSQL {
val fh = persistence.FileHistory.fh
val fhe = persistence.FileHistoryError.fhe
select(fhe.id)
.from(persistence.FileHistory as fh)
.innerJoin(persistence.FileHistoryError as fhe)
.on(sqls.eq(fhe.historyId, fh.id)
.and.isNull(fhe.deletedAt))
.where.eqUuid(fh.fileId, fileId)
.and.isNull(fh.deletedAt)
.orderBy(fh.updatedAt.desc, fhe.updatedAt.desc)
.limit(1)
}.map(_.string(1)).single().apply()
if (prevErrorId.isDefined) {
            // An error record exists for the previous file history ID, so logically delete it
withSQL {
val fhe = persistence.FileHistoryError.column
update(persistence.FileHistoryError)
.set(
fhe.deletedBy -> sqls.uuid(myself.id),
fhe.deletedAt -> timestamp
)
.where
.eq(fhe.id, sqls.uuid(prevErrorId.get))
}.update.apply()
}
            // Register a new error record tied to the newly issued file history ID
persistence.FileHistoryError.create(
id = UUID.randomUUID().toString,
historyId = historyId,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
            // The ZIP file cannot be expanded, so keep the original size
file.size
}
} else {
file.size
}
val history = persistence.FileHistory.create(
id = historyId,
fileId = fileId,
fileType = 0,
fileMime = "application/octet-stream",
filePath = "/" + datasetId + "/" + fileId + "/" + historyId,
fileSize = file.size,
isZip = isZip,
realSize = realSize,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
FileManager.uploadToLocal(datasetId, fileId, history.id, file)
if (dataset.s3State == SaveStatus.SAVED || dataset.s3State == SaveStatus.SYNCHRONIZING) {
createTask(
datasetId,
MoveToStatus.S3,
myself.id,
timestamp,
dataset.localState == SaveStatus.SAVED
)
          // While the dataset is on S3, downloads must come from local storage until the upload completes
withSQL {
val d = persistence.Dataset.column
update(persistence.Dataset)
.set(
d.localState -> SaveStatus.DELETING,
d.s3State -> SaveStatus.SYNCHRONIZING
)
.where
.eq(d.id, sqls.uuid(datasetId))
}.update.apply()
}
        // Update files_size and files_count on the datasets table
updateDatasetFileStatus(datasetId, myself.id, timestamp)
getFiles(datasetId, Seq(fileId), UserAndGroupAccessLevel.ALLOW_DOWNLOAD).head
}
}
}
  /**
   * Updates a file's name, size and storage states.
   *
   * @param fileId file ID
   * @param historyId file history ID
   * @param file the file used for the update
   * @param userId ID of the updating user
   * @param timestamp timestamp
   * @param s3State S3 save state
   * @param localState local save state
   * @param s DB session
   * @return the number of updated rows
   */
private def updateFileNameAndSize(
fileId: String,
historyId: String,
file: FileItem,
userId: String,
timestamp: DateTime,
s3State: Int,
localState: Int
)(implicit s: DBSession): Int = {
withSQL {
val f = persistence.File.column
update(persistence.File)
.set(
f.name -> file.getName,
f.fileSize -> file.getSize,
f.updatedBy -> sqls.uuid(userId),
f.updatedAt -> timestamp,
f.historyId -> sqls.uuid(historyId),
f.s3State -> s3State,
f.localState -> localState
)
.where
.eq(f.id, sqls.uuid(fileId))
}.update.apply()
}
  /**
   * Updates the metadata of the given file.
   *
   * @param datasetId dataset ID
   * @param fileId file ID
   * @param filename file name
   * @param description description
   * @param user logged-in user information
   * @return
   *     Success(DatasetData.DatasetFile) the updated file data object on success
   *     Failure(NullPointerException) if any argument is null
   *     Failure(NotFoundException) if the dataset or the file is not found
   *     Failure(AccessDeniedException) if the logged-in user lacks edit permission
   */
def updateFileMetadata(
datasetId: String,
fileId: String,
filename: String,
description: String,
user: User
): Try[DatasetData.DatasetFile] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(fileId, "fileId")
CheckUtil.checkNull(filename, "filename")
CheckUtil.checkNull(description, "description")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
checkDatasetWithFile(datasetId, fileId)
checkOwnerAccess(datasetId, user)
val myself = persistence.User.find(user.id).get
val timestamp = DateTime.now()
updateFileNameAndDescription(fileId, datasetId, filename, description, myself.id, timestamp)
getFiles(datasetId, Seq(fileId), UserAndGroupAccessLevel.ALLOW_DOWNLOAD).head
}
}
}
  /**
   * Updates a file's name and description.
   *
   * @param fileId file ID
   * @param datasetId dataset ID
   * @param fileName file name
   * @param description description
   * @param userId ID of the updating user
   * @param timestamp timestamp
   * @param s DB session
   * @return the number of updated rows
   */
private def updateFileNameAndDescription(
fileId: String,
datasetId: String,
fileName: String,
description: String,
userId: String,
timestamp: DateTime
)(implicit s: DBSession): Int = {
withSQL {
val f = persistence.File.column
update(persistence.File)
.set(
f.name -> fileName,
f.description -> description,
f.updatedBy -> sqls.uuid(userId),
f.updatedAt -> timestamp
)
.where
.eq(f.id, sqls.uuid(fileId))
.and
.eq(f.datasetId, sqls.uuid(datasetId))
}.update.apply()
}
  /**
   * Deletes the given file.
   *
   * @param datasetId dataset ID
   * @param fileId file ID
   * @param user logged-in user information
   * @return
   *     Success(Unit) on successful deletion
   *     Failure(NullPointerException) if any argument is null
   *     Failure(NotFoundException) if the dataset or the file is not found
   *     Failure(AccessDeniedException) if the logged-in user lacks edit permission
   */
def deleteDatasetFile(datasetId: String, fileId: String, user: User): Try[Unit] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(fileId, "fileId")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
checkDatasetWithFile(datasetId, fileId)
checkOwnerAccess(datasetId, user)
val myself = persistence.User.find(user.id).get
val timestamp = DateTime.now()
deleteFile(datasetId, fileId, myself.id, timestamp)
        // Update files_size and files_count on the datasets table
updateDatasetFileStatus(datasetId, myself.id, timestamp)
}
}
}
  /**
   * Logically deletes a file.
   *
   * @param datasetId dataset ID
   * @param fileId file ID
   * @param userId ID of the updating user
   * @param timestamp timestamp
   * @param s DB session
   */
private def deleteFile(
datasetId: String,
fileId: String,
userId: String,
timestamp: DateTime
)(implicit s: DBSession): Unit = {
withSQL {
val f = persistence.File.column
update(persistence.File)
.set(
f.deletedBy -> sqls.uuid(userId),
f.deletedAt -> timestamp,
f.updatedBy -> sqls.uuid(userId),
f.updatedAt -> timestamp
)
.where
.eq(f.id, sqls.uuid(fileId))
.and
.eq(f.datasetId, sqls.uuid(datasetId))
.and
.isNull(f.deletedAt)
}.update.apply()
}
  /**
   * Changes the storage location of a dataset.
   *
   * @param id dataset ID
   * @param saveLocal whether to keep the dataset on local storage
   * @param saveS3 whether to keep the dataset on S3
   * @param user user object
   * @return
   *     Success(DatasetTask) the storage-move task information on success
   *     Failure(NullPointerException) if any argument is null
   *     Failure(NotFoundException) if the dataset or a file is not found
   *     Failure(AccessDeniedException) if the logged-in user lacks edit permission
   */
def modifyDatasetStorage(id: String, saveLocal: Boolean, saveS3: Boolean, user: User): Try[DatasetTask] = {
Try {
CheckUtil.checkNull(id, "id")
CheckUtil.checkNull(saveLocal, "saveLocal")
CheckUtil.checkNull(saveS3, "saveS3")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
val myself = persistence.User.find(user.id).get
val timestamp = DateTime.now()
val dataset = checkDatasetExisitence(id)
checkOwnerAccess(id, user)
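        // Decide the storage transition (and whether a move task is needed) from the requested flags and the current local/S3 states.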
val taskId = (saveLocal, saveS3, dataset.localState, dataset.s3State) match {
case (true, _, SaveStatus.NOT_SAVED, SaveStatus.SAVED)
| (true, _, SaveStatus.NOT_SAVED, SaveStatus.SYNCHRONIZING) => {
// S3 to local
updateDatasetStorage(
dataset,
myself.id,
timestamp,
SaveStatus.SYNCHRONIZING,
if (saveS3) { SaveStatus.SAVED } else { SaveStatus.DELETING }
)
createTask(id, MoveToStatus.LOCAL, myself.id, timestamp, saveS3)
}
case (_, true, SaveStatus.SAVED, SaveStatus.NOT_SAVED)
| (_, true, SaveStatus.SYNCHRONIZING, SaveStatus.NOT_SAVED) => {
// local to S3
updateDatasetStorage(
dataset,
myself.id,
timestamp,
if (saveLocal) { SaveStatus.SAVED } else { SaveStatus.DELETING },
SaveStatus.SYNCHRONIZING
)
createTask(id, MoveToStatus.S3, myself.id, timestamp, saveLocal)
}
case _ => {
def savedOrSynchronizing(state: Int): Boolean = {
state match {
case SaveStatus.SAVED => true
case SaveStatus.SYNCHRONIZING => true
case _ => false
}
}
if (saveLocal != saveS3
&& savedOrSynchronizing(dataset.localState)
&& savedOrSynchronizing(dataset.s3State)) {
            // Delete either the local or the S3 copy
updateDatasetStorage(
dataset,
myself.id,
timestamp,
if (saveLocal) { SaveStatus.SAVED } else { SaveStatus.DELETING },
if (saveS3) { SaveStatus.SAVED } else { SaveStatus.DELETING }
)
val moveToStatus = if (saveS3) { MoveToStatus.S3 } else { MoveToStatus.LOCAL }
createTask(id, moveToStatus, myself.id, timestamp, false)
} else {
// no taskId
"0"
}
}
}
DatasetTask(taskId)
}
}
}
  /**
   * Updates the save states of a dataset.
   *
   * @param ds dataset object
   * @param userId ID of the updating user
   * @param timestamp timestamp
   * @param localState local save state
   * @param s3State S3 save state
   * @return the updated dataset object
   */
private def updateDatasetStorage(
ds: Dataset,
userId: String,
timestamp: DateTime,
localState: Int,
s3State: Int
)(implicit s: DBSession): persistence.Dataset = {
persistence.Dataset(
id = ds.id,
name = ds.name,
description = ds.description,
licenseId = ds.licenseId,
filesCount = ds.filesCount,
filesSize = ds.filesSize,
createdBy = ds.createdBy,
createdAt = ds.createdAt,
updatedBy = userId,
updatedAt = timestamp,
localState = localState,
s3State = s3State
).save()
}
  /**
   * Updates the metadata of the given dataset.
   *
   * @param id dataset ID
   * @param name dataset name
   * @param description dataset description
   * @param license dataset license
   * @param attributes list of dataset attributes
   * @param user user information
   * @return
   *     Success(DatasetData.DatasetMetaData) the updated dataset metadata on success
   *     Failure(NullPointerException) if any argument is null
   *     Failure(NotFoundException) if the dataset or a file is not found
   *     Failure(AccessDeniedException) if the logged-in user lacks edit permission
   *     Failure(BadRequestException) if the license ID is invalid
   */
def modifyDatasetMeta(
id: String,
name: String,
description: Option[String],
license: String,
attributes: Seq[DataSetAttribute],
user: User
): Try[DatasetData.DatasetMetaData] = {
Try {
CheckUtil.checkNull(id, "id")
CheckUtil.checkNull(name, "name")
CheckUtil.checkNull(description, "description")
CheckUtil.checkNull(license, "license")
CheckUtil.checkNull(attributes, "attributes")
CheckUtil.checkNull(user, "user")
val checkedDescription = description.getOrElse("")
val trimmedAttributes = attributes.map(x => x.name -> StringUtil.trimAllSpaces(x.value))
DB.localTx { implicit s =>
checkDatasetExisitence(id)
checkOwnerAccess(id, user)
if (persistence.License.find(license).isEmpty) {
val message = resource.getString(ResourceNames.INVALID_LICENSEID).format(license)
throw new BadRequestException(message)
}
val myself = persistence.User.find(user.id).get
val timestamp = DateTime.now()
updateDatasetDetail(id, name, checkedDescription, license, myself.id, timestamp)
        // First fetch the annotations (names) related to the dataset, to diff against later
val oldAnnotations = getAnnotationsRelatedByDataset(id)
        // Delete all existing DatasetAnnotation rows
deleteDatasetAnnotation(id)
        // Check whether each annotation (name) already exists; create it if not
val annotationMap = getAvailableAnnotations.toMap
trimmedAttributes.foreach { x =>
if (x._1.length != 0) {
val annotationId = if (annotationMap.keySet.contains(x._1.toLowerCase)) {
annotationMap(x._1.toLowerCase)
} else {
val annotationId = UUID.randomUUID().toString
persistence.Annotation.create(
id = annotationId,
name = x._1,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
annotationId
}
            // Recreate the DatasetAnnotation
persistence.DatasetAnnotation.create(
id = UUID.randomUUID().toString,
datasetId = id,
annotationId = annotationId,
data = x._2,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
}
}
        // Check whether each pre-update annotation name is still used elsewhere; delete it if not
oldAnnotations.foreach { x =>
if (!trimmedAttributes.map(_._1.toLowerCase).contains(x._1)) {
val datasetAnnotations = getDatasetAnnotations(x._2)
if (datasetAnnotations.size == 0) {
deleteAnnotation(x._2)
}
}
}
}
DatasetData.DatasetMetaData(
name = name,
description = checkedDescription,
license = license,
attributes = trimmedAttributes.map {
case (name, value) => DatasetData.DatasetAttribute(name, value)
}
)
}
}
  /**
   * Gets the IDs of the non-deleted DatasetAnnotations for an annotation.
   *
   * @param id annotation ID
   * @param s DB session
   * @return the retrieved IDs
   */
private def getDatasetAnnotations(id: String)(implicit s: DBSession): Seq[String] = {
val da = persistence.DatasetAnnotation.da
withSQL {
select(da.result.id)
.from(persistence.DatasetAnnotation as da)
.where
.eq(da.annotationId, sqls.uuid(id))
.and
.isNull(da.deletedAt)
}.map(rs => rs.string(da.resultName.id)).list.apply()
}
  /**
   * Gets the names and IDs of the non-deleted Annotations.
   *
   * @param s DB session
   * @return pairs of lower-cased annotation name and annotation ID
   */
private def getAvailableAnnotations(implicit s: DBSession): Seq[(String, String)] = {
val a = persistence.Annotation.a
withSQL {
select(a.result.*)
.from(persistence.Annotation as a)
.where
.isNull(a.deletedAt)
}.map { rs =>
(
rs.string(a.resultName.name).toLowerCase,
rs.string(a.resultName.id)
)
}.list.apply()
}
  /**
   * Gets the names and IDs of the non-deleted Annotations related to a dataset.
   *
   * @param id dataset ID
   * @param s DB session
   * @return pairs of lower-cased annotation name and annotation ID
   */
private def getAnnotationsRelatedByDataset(id: String)(implicit s: DBSession): Seq[(String, String)] = {
val a = persistence.Annotation.a
val da = persistence.DatasetAnnotation.da
withSQL {
select(a.result.*)
.from(persistence.Annotation as a)
.innerJoin(persistence.DatasetAnnotation as da)
.on(sqls.eq(da.annotationId, a.id).and.isNull(da.deletedAt))
.where
.eq(da.datasetId, sqls.uuid(id))
.and
.isNull(a.deletedAt)
}.map(rs => (rs.string(a.resultName.name).toLowerCase, rs.string(a.resultName.id))).list.apply()
}
  /**
   * Physically deletes an Annotation.
   *
   * @param id annotation ID
   * @param s DB session
   * @return the number of deleted rows
   */
private def deleteAnnotation(id: String)(implicit s: DBSession): Int = {
withSQL {
val a = persistence.Annotation.a
delete.from(persistence.Annotation as a)
.where
.eq(a.id, sqls.uuid(id))
}.update.apply()
}
  /**
   * Physically deletes the DatasetAnnotations of a dataset.
   *
   * @param id dataset ID
   * @param s DB session
   * @return the number of deleted rows
   */
private def deleteDatasetAnnotation(id: String)(implicit s: DBSession): Int = {
val da = persistence.DatasetAnnotation.da
withSQL {
delete.from(persistence.DatasetAnnotation as da)
.where
.eq(da.datasetId, sqls.uuid(id))
}.update.apply()
}
  /**
   * Updates the details of a dataset.
   *
   * @param id dataset ID
   * @param name dataset name
   * @param description description
   * @param licenseId license ID
   * @param userId ID of the updating user
   * @param timestamp timestamp
   * @param s DB session
   * @return the number of updated rows
   */
private def updateDatasetDetail(
id: String,
name: String,
description: String,
licenseId: String,
userId: String,
timestamp: DateTime
)(implicit s: DBSession): Int = {
withSQL {
val d = persistence.Dataset.column
update(persistence.Dataset)
.set(
d.name -> name,
d.description -> description,
d.licenseId -> sqls.uuid(licenseId),
d.updatedBy -> sqls.uuid(userId),
d.updatedAt -> timestamp
)
.where
.eq(d.id, sqls.uuid(id))
}.update.apply()
}
  /**
   * Adds images to the given dataset.
   *
   * @param datasetId dataset ID
   * @param images list of images to add
   * @param user user information
   * @return
   *     Success(DatasetData.DatasetAddImages) the added image objects on success
   *     Failure(NullPointerException) if any argument is null
   *     Failure(NotFoundException) if the dataset is not found
   *     Failure(AccessDeniedException) if the logged-in user lacks edit permission
   */
def addImages(datasetId: String, images: Seq[FileItem], user: User): Try[DatasetData.DatasetAddImages] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(images, "images")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
checkDatasetExisitence(datasetId)
checkOwnerAccess(datasetId, user)
val myself = persistence.User.find(user.id).get
val timestamp = DateTime.now()
val primaryImage = getPrimaryImageId(datasetId)
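        // Only the first uploaded image becomes primary, and only when the dataset has no primary image yet.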
var isFirst = true
val addedImages = images.map(i => {
val imageId = UUID.randomUUID().toString
val bufferedImage = javax.imageio.ImageIO.read(i.getInputStream)
val image = persistence.Image.create(
id = imageId,
name = i.getName,
width = bufferedImage.getWidth,
            height = bufferedImage.getHeight,
filePath = "/" + ImageSaveLogic.uploadPath + "/" + imageId,
presetType = PresetType.Default,
createdBy = myself.id,
createdAt = DateTime.now,
updatedBy = myself.id,
updatedAt = DateTime.now
)
val datasetImage = persistence.DatasetImage.create(
id = UUID.randomUUID.toString,
datasetId = datasetId,
imageId = imageId,
isPrimary = isFirst && primaryImage.isEmpty,
isFeatured = false,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
isFirst = false
// write image
ImageSaveLogic.writeImageFile(imageId, i)
(image, datasetImage.isPrimary)
})
DatasetData.DatasetAddImages(
images = addedImages.map {
case (image, isPrimary) =>
DatasetData.DatasetGetImage(
id = image.id,
name = image.name,
url = datasetImageDownloadRoot + datasetId + "/" + image.id,
isPrimary = isPrimary
)
},
primaryImage = getPrimaryImageId(datasetId).getOrElse("")
)
}
}
}
  /**
   * Changes the primary image of the given dataset.
   *
   * @param datasetId dataset ID
   * @param imageId image ID
   * @param user logged-in user information
   * @return
   *     Success(DatasetData.ChangeDatasetImage) the new image ID on success
   *     Failure(NullPointerException) if any argument is null
   *     Failure(NotFoundException) if the dataset or the image is not found
   *     Failure(AccessDeniedException) if the logged-in user lacks edit permission
   */
def changePrimaryImage(datasetId: String, imageId: String, user: User): Try[DatasetData.ChangeDatasetImage] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(imageId, "imageId")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
checkDatasetExisitence(datasetId)
if (!existsImage(datasetId, imageId)) {
throw new NotFoundException
}
checkOwnerAccess(datasetId, user)
val myself = persistence.User.find(user.id).get
val timestamp = DateTime.now()
        // Set the target image as primary
turnImageToPrimary(datasetId, imageId, myself, timestamp)
        // Unset primary on all other images
turnOffPrimaryOtherImage(datasetId, imageId, myself, timestamp)
DatasetData.ChangeDatasetImage(imageId)
}
}
}
  /**
   * Clears the primary (icon) flag on all dataset images other than the given one.
   *
   * @param datasetId dataset ID
   * @param imageId image ID to keep as primary
   * @param myself logged-in user information
   * @param timestamp timestamp
   * @param s DB session
   * @return the number of updated rows
   */
private def turnOffPrimaryOtherImage(
datasetId: String,
imageId: String,
myself: persistence.User,
timestamp: DateTime
)(implicit s: DBSession): Int = {
withSQL {
val di = persistence.DatasetImage.column
update(persistence.DatasetImage)
.set(di.isPrimary -> false, di.updatedBy -> sqls.uuid(myself.id), di.updatedAt -> timestamp)
.where
.ne(di.imageId, sqls.uuid(imageId))
.and
.eq(di.datasetId, sqls.uuid(datasetId))
.and
.isNull(di.deletedAt)
}.update.apply()
}
  /**
   * Sets the given image as the dataset's primary (icon) image.
   *
   * @param datasetId dataset ID
   * @param imageId image ID
   * @param myself logged-in user information
   * @param timestamp timestamp
   * @param s DB session
   * @return the number of updated rows
   */
private def turnImageToPrimary(
datasetId: String,
imageId: String,
myself: persistence.User,
timestamp: DateTime
)(implicit s: DBSession): Int = {
withSQL {
val di = persistence.DatasetImage.column
update(persistence.DatasetImage)
.set(di.isPrimary -> true, di.updatedBy -> sqls.uuid(myself.id), di.updatedAt -> timestamp)
.where
.eq(di.imageId, sqls.uuid(imageId))
.and
.eq(di.datasetId, sqls.uuid(datasetId))
.and
.isNull(di.deletedAt)
}.update.apply()
}
/**
* Deletes the specified image from the dataset.
*
* @param datasetId dataset ID
* @param imageId image ID
* @param user logged-in user information
* @return
* Success(DatasetData.DatasetDeleteImage) information on the deleted image, on success
* Failure(NullPointerException) if an argument is null
* Failure(NotFoundException) if the dataset or image is not found
* Failure(AccessDeniedException) if the logged-in user has no edit permission
* Failure(BadRequestException) if the target is a default image
*/
def deleteImage(datasetId: String, imageId: String, user: User): Try[DatasetData.DatasetDeleteImage] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(imageId, "imageId")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
checkDatasetExisitence(datasetId)
if (!existsImage(datasetId, imageId)) {
throw new NotFoundException
}
checkOwnerAccess(datasetId, user)
val cantDeleteImages = Seq(AppConf.defaultDatasetImageId) ++ AppConf.defaultFeaturedImageIds
if (cantDeleteImages.contains(imageId)) {
throw new BadRequestException(resource.getString(ResourceNames.CANT_DELETE_DEFAULTIMAGE))
}
val myself = persistence.User.find(user.id).get
val timestamp = DateTime.now()
deleteDatasetImage(datasetId, imageId, myself, timestamp)
val primaryImageId = getPrimaryImageId(datasetId).getOrElse({
// Replace the primary image:
// fetch the image that becomes the new primary image
val primaryImage = findNextImage(datasetId)
primaryImage match {
case Some(x) =>
turnImageToPrimaryById(x._1, myself, timestamp)
x._2
case None => ""
}
})
val featuredImageId = getFeaturedImageId(datasetId).getOrElse {
val featuredImage = findNextImage(datasetId)
featuredImage match {
case Some(x) =>
turnImageToFeaturedById(x._1, myself, timestamp)
x._2
case None => ""
}
}
DatasetData.DatasetDeleteImage(
primaryImage = primaryImageId,
featuredImage = featuredImageId
)
}
}
}
/**
* Sets the DatasetImage with the given ID as the primary (icon) image.
*
* @param id DatasetImage ID
* @param myself logged-in user
* @param timestamp timestamp
* @param s DB session
* @return number of updated rows
*/
private def turnImageToPrimaryById(
id: String,
myself: persistence.User,
timestamp: DateTime
)(implicit s: DBSession): Int = {
val di = persistence.DatasetImage.column
withSQL {
update(persistence.DatasetImage)
.set(di.isPrimary -> true, di.updatedBy -> sqls.uuid(myself.id), di.updatedAt -> timestamp)
.where
.eq(di.id, sqls.uuid(id))
}.update.apply()
}
/**
* Sets the DatasetImage with the given ID as the featured image.
*
* @param id DatasetImage ID
* @param myself logged-in user
* @param timestamp timestamp
* @param s DB session
* @return number of updated rows
*/
private def turnImageToFeaturedById(
id: String,
myself: persistence.User,
timestamp: DateTime
)(implicit s: DBSession): Int = {
val di = persistence.DatasetImage.column
withSQL {
update(persistence.DatasetImage)
.set(di.isFeatured -> true, di.updatedBy -> sqls.uuid(myself.id), di.updatedAt -> timestamp)
.where
.eq(di.id, sqls.uuid(id))
}.update.apply()
}
/**
* Fetches one DatasetImage ID / Image ID pair associated with the dataset.
*
* @param datasetId dataset ID
* @param s DB session
* @return the (DatasetImage ID, Image ID) pair, if any
*/
private def findNextImage(datasetId: String)(implicit s: DBSession): Option[(String, String)] = {
val di = persistence.DatasetImage.di
val i = persistence.Image.i
withSQL {
select(di.result.id, i.result.id)
.from(persistence.Image as i)
.innerJoin(persistence.DatasetImage as di).on(i.id, di.imageId)
.where
.eq(di.datasetId, sqls.uuid(datasetId))
.and
.isNull(di.deletedAt)
.and
.isNull(i.deletedAt)
.orderBy(di.createdAt).asc
.limit(1)
}.map(rs => (rs.string(di.resultName.id), rs.string(i.resultName.id))).single.apply()
}
/**
* Logically deletes a DatasetImage.
*
* @param datasetId dataset ID
* @param imageId image ID
* @param myself logged-in user
* @param timestamp timestamp
* @param s DB session
* @return number of updated rows
*/
private def deleteDatasetImage(
datasetId: String,
imageId: String,
myself: persistence.User,
timestamp: DateTime
)(implicit s: DBSession): Int = {
withSQL {
val di = persistence.DatasetImage.column
update(persistence.DatasetImage)
.set(
di.deletedBy -> sqls.uuid(myself.id),
di.deletedAt -> timestamp,
di.isPrimary -> false,
di.updatedBy -> sqls.uuid(myself.id),
di.updatedAt -> timestamp
)
.where
.eq(di.datasetId, sqls.uuid(datasetId))
.and
.eq(di.imageId, sqls.uuid(imageId))
.and
.isNull(di.deletedAt)
}.update.apply()
}
/**
* Sets the access control entries of the specified dataset.
*
* @param datasetId dataset ID
* @param acl list of access control change objects
* @param user user object
* @return
* Success(Seq[DatasetData.DatasetOwnership]) the changed access control entries, on success
* Failure(NullPointerException) if an argument is null
* Failure(NotFoundException) if the dataset is not found
* Failure(AccessDeniedException) if the logged-in user has no edit permission
* Failure(BadRequestException) if the update would leave the dataset without an owner
* Failure(BadRequestException) if a disabled user is specified
* Failure(BadRequestException) if a non-existent group is specified
*/
def setAccessControl(
datasetId: String,
acl: Seq[DataSetAccessControlItem],
user: User
): Try[Seq[DatasetData.DatasetOwnership]] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(acl, "acl")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
checkDatasetExisitence(datasetId)
checkOwnerAccess(datasetId, user)
val ownerChanges = acl.filter { x =>
x.ownerType == OwnerType.User && x.accessLevel == UserAndGroupAccessLevel.OWNER_OR_PROVIDER
}.map(_.id).toSet
val notOwnerChanges = acl.filter { x =>
x.ownerType == OwnerType.User && x.accessLevel != UserAndGroupAccessLevel.OWNER_OR_PROVIDER
}.map(_.id).toSet
// Number of currently configured owners that are not changed to non-owner by this request
val ownerCountRemains = getOwners(datasetId).count(x => !notOwnerChanges.contains(x.id))
// The owner count after the update is the remaining owner count plus the number of users changed to owner by this request
val ownerCountAfterUpdated = ownerCountRemains + ownerChanges.size
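// Worked example (illustrative only): if the dataset currently has owners {A, B} and the
// request demotes B to read access while promoting C to owner, then notOwnerChanges = {B},
// ownerChanges = {C}, ownerCountRemains = 1 (only A remains), and ownerCountAfterUpdated = 2,
// so the request is accepted.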
if (ownerCountAfterUpdated == 0) {
throw new BadRequestException(resource.getString(ResourceNames.NO_OWNER))
}
acl.map { x =>
x.ownerType match {
case OwnerType.User =>
val groupId = findGroupIdByUserId(x.id).getOrElse {
throw new BadRequestException(resource.getString(ResourceNames.DISABLED_USER))
}
saveOrCreateOwnerships(user, datasetId, groupId, x.accessLevel)
val user_ = persistence.User.find(x.id).get
DatasetData.DatasetOwnership(
id = user_.id,
name = user_.name,
fullname = user_.fullname,
organization = user.organization,
title = user.title,
description = user.description,
image = AppConf.imageDownloadRoot + "user/" + user_.id + "/" + user_.imageId,
accessLevel = x.accessLevel,
ownerType = OwnerType.User
)
case OwnerType.Group =>
val group = persistence.Group.find(x.id).getOrElse {
throw new BadRequestException(resource.getString(ResourceNames.INVALID_GROUP))
}
saveOrCreateOwnerships(user, datasetId, x.id, x.accessLevel)
DatasetData.DatasetOwnership(
id = x.id,
name = group.name,
fullname = "",
organization = "",
title = "",
description = group.description,
image = "",
accessLevel = x.accessLevel,
ownerType = OwnerType.Group
)
}
}
}
}
}
/**
* Gets the list of owners of a dataset.
*
* @param datasetId dataset ID
* @param s DB session
* @return list of owner user objects
* @throws NullPointerException if an argument is null
*/
private def getOwners(datasetId: String)(implicit s: DBSession): Seq[persistence.User] = {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(s, "s")
// Alias for the users table
val u = persistence.User.u
// Alias for the members table
val m = persistence.Member.m
// Alias for the groups table
val g = persistence.Group.g
// Alias for the ownerships table
val o = persistence.Ownership.o
withSQL {
select(u.result.*)
.from(persistence.Ownership as o)
.innerJoin(persistence.Group as g).on(sqls.eq(g.id, o.groupId).and.eq(g.groupType, GroupType.Personal))
.innerJoin(persistence.Member as m).on(sqls.eq(m.groupId, g.id))
.innerJoin(persistence.User as u).on(sqls.eq(u.id, m.userId))
.where.eq(o.datasetId, sqls.uuid(datasetId))
.and.eq(o.accessLevel, UserAndGroupAccessLevel.OWNER_OR_PROVIDER)
}.map(persistence.User(u.resultName)).list.apply()
}
/**
* Gets the Personal group ID for a user ID.
*
* @param userId user ID
* @param s DB session
* @return the group ID, if found
*/
def findGroupIdByUserId(userId: String)(implicit s: DBSession): Option[String] = {
val u = persistence.User.u
val m = persistence.Member.m
val g = persistence.Group.g
withSQL {
select(g.result.id)
.from(persistence.Group as g)
.innerJoin(persistence.Member as m).on(sqls.eq(g.id, m.groupId).and.isNull(m.deletedAt))
.innerJoin(persistence.User as u).on(sqls.eq(u.id, m.userId).and.eq(u.disabled, false))
.where
.eq(u.id, sqls.uuid(userId))
.and
.eq(g.groupType, GroupType.Personal)
.and
.isNull(g.deletedAt)
.and
.isNull(m.deletedAt)
.limit(1)
}.map(rs => rs.string(g.resultName.id)).single.apply()
}
/**
* Sets the guest access level of the specified dataset.
*
* @param datasetId dataset ID
* @param accessLevel guest access level to set
* @param user user information
* @return
* Success(DatasetData.DatasetGuestAccessLevel) the guest access level that was set, on success
* Failure(NullPointerException) if an argument is null
* Failure(NotFoundException) if the dataset is not found
* Failure(AccessDeniedException) if the logged-in user has no edit permission
*/
def setGuestAccessLevel(
datasetId: String,
accessLevel: Int,
user: User
): Try[DatasetData.DatasetGuestAccessLevel] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(accessLevel, "accessLevel")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
checkDatasetExisitence(datasetId)
checkOwnerAccess(datasetId, user)
findGuestOwnership(datasetId) match {
case Some(x) =>
if (accessLevel != x.accessLevel) {
persistence.Ownership(
id = x.id,
datasetId = x.datasetId,
groupId = AppConf.guestGroupId,
accessLevel = accessLevel,
createdBy = x.createdBy,
createdAt = x.createdAt,
updatedBy = user.id,
updatedAt = DateTime.now
).save()
}
case None =>
if (accessLevel > 0) {
val ts = DateTime.now
persistence.Ownership.create(
id = UUID.randomUUID.toString,
datasetId = datasetId,
groupId = AppConf.guestGroupId,
accessLevel = accessLevel,
createdBy = user.id,
createdAt = ts,
updatedBy = user.id,
updatedAt = ts
)
}
}
DatasetData.DatasetGuestAccessLevel(accessLevel)
}
}
}
/**
* Gets the Ownership row for the guest user.
*
* @param datasetId dataset ID
* @param s DB session
* @return the ownership, if found
*/
private def findGuestOwnership(datasetId: String)(implicit s: DBSession): Option[persistence.Ownership] = {
val o = persistence.Ownership.o
withSQL(
select(o.result.*)
.from(persistence.Ownership as o)
.where
.eq(o.datasetId, sqls.uuid(datasetId))
.and
.eq(o.groupId, sqls.uuid(AppConf.guestGroupId))
).map(persistence.Ownership(o.resultName)).single.apply()
}
/**
* Deletes the specified dataset.
*
* @param datasetId dataset ID
* @param user logged-in user information
* @return
* Success(Unit) on successful deletion
* Failure(NullPointerException) if an argument is null
* Failure(NotFoundException) if the dataset is not found
* Failure(AccessDeniedException) if the logged-in user has no edit permission
*/
def deleteDataset(datasetId: String, user: User): Try[Unit] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
checkDatasetExisitence(datasetId)
checkOwnerAccess(datasetId, user)
deleteDatasetById(datasetId, user)
deleteApp(datasetId, user)
}
}
}
/**
* Logically deletes a dataset.
*
* @param datasetId dataset ID
* @param user logged-in user information
* @param s DB session
* @return number of updated rows
*/
private def deleteDatasetById(datasetId: String, user: User)(implicit s: DBSession): Int = {
val timestamp = DateTime.now()
val d = persistence.Dataset.column
withSQL {
update(persistence.Dataset)
.set(d.deletedAt -> timestamp, d.deletedBy -> sqls.uuid(user.id))
.where
.eq(d.id, sqls.uuid(datasetId))
}.update.apply()
}
/**
* Determines whether the given user is an owner of the dataset.
*
* @param userId user ID
* @param datasetId dataset ID
* @param s DB session
* @return true if the user is an owner, false otherwise
*/
private def isOwner(userId: String, datasetId: String)(implicit s: DBSession): Boolean = {
val o = persistence.Ownership.o
val g = persistence.Group.g
val m = persistence.Member.m
val u = persistence.User.u
val d = persistence.Dataset.d
withSQL {
select(sqls"1")
.from(persistence.Ownership as o)
.innerJoin(persistence.Group as g).on(sqls.eq(o.groupId, g.id).and.isNull(g.deletedAt))
.innerJoin(persistence.Member as m).on(sqls.eq(g.id, m.groupId).and.isNull(m.deletedAt))
.innerJoin(persistence.User as u).on(sqls.eq(u.id, m.userId).and.eq(u.disabled, false))
.innerJoin(persistence.Dataset as d).on(sqls.eq(o.datasetId, d.id).and.isNull(d.deletedAt))
.where
.eq(u.id, sqls.uuid(userId))
.and
.eq(d.id, sqls.uuid(datasetId))
.and
.eq(g.groupType, persistence.GroupType.Personal)
.and
.eq(o.accessLevel, persistence.UserAccessLevel.Owner)
.and
.isNull(o.deletedAt)
.limit(1)
}.map(x => true).single.apply().getOrElse(false)
}
/**
* Gets the groups (Personal and Public alike) the user belongs to.
*
* @param user logged-in user information
* @param s DB session
* @return list of IDs of the groups the user belongs to
*/
def getJoinedGroups(user: User)(implicit s: DBSession): Seq[String] = {
if (user.isGuest) {
Seq.empty
} else {
val g = persistence.Group.syntax("g")
val m = persistence.Member.syntax("m")
withSQL {
select(g.id)
.from(persistence.Group as g)
.innerJoin(persistence.Member as m).on(m.groupId, g.id)
.where
.eq(m.userId, sqls.uuid(user.id))
.and
.isNull(g.deletedAt)
.and
.isNull(m.deletedAt)
}.map(_.string("id")).list.apply()
}
}
/**
* Trait representing the result of a file lookup.
*
* @param file file object
* @param path file path
*/
sealed trait FileResult {
val file: persistence.File
val path: String
}
/**
* File lookup result: case class representing a regular file.
*
* @param file file object
* @param path file path
*/
case class FileResultNormal(
file: persistence.File,
path: String
) extends FileResult
/**
* File lookup result: case class representing a file inside a zip archive.
*
* @param file file object
* @param path file path
* @param zipFile zipped file object
*/
case class FileResultZip(
file: persistence.File,
path: String,
zipFile: persistence.ZipedFiles
) extends FileResult
/**
* Finds a file.
*
* @param fileId file ID
* @param session DB session
* @return the lookup result, if found
*/
def findFile(fileId: String)(implicit session: DBSession): Option[FileResult] = {
val file = for {
file <- persistence.File.find(fileId)
if file.deletedAt.isEmpty
history <- persistence.FileHistory.find(file.historyId)
if history.deletedAt.isEmpty
} yield {
FileResultNormal(file, history.filePath)
}
lazy val zipedFile = for {
zipFile <- persistence.ZipedFiles.find(fileId)
if zipFile.deletedAt.isEmpty
history <- persistence.FileHistory.find(zipFile.historyId)
if history.deletedAt.isEmpty
file <- persistence.File.find(history.fileId)
if file.deletedAt.isEmpty
} yield {
FileResultZip(file, history.filePath, zipFile)
}
file orElse zipedFile
}
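// Note: the fileId passed to findFile may identify either a regular File row or a ZipedFiles
// entry; zipped entries are resolved back to their parent File through FileHistory. A minimal
// usage sketch (assuming an implicit DBSession is in scope; handlers are illustrative):
//
//   findFile(fileId) match {
//     case Some(FileResultNormal(file, path))        => // serve the file stored at `path`
//     case Some(FileResultZip(file, path, zipEntry)) => // extract `zipEntry` from the zip at `path`
//     case None                                      => // not found or logically deleted
//   }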
/**
* Checks whether the given user has permission to download the given dataset.
* @param user user information
* @param datasetId dataset ID
* @return Failure wrapping an AccessDeniedException if the user has no download permission
* @throws NullPointerException if an argument is null
*/
def requireAllowDownload(user: User, datasetId: String)(implicit s: DBSession): Try[Unit] = {
CheckUtil.checkNull(user, "user")
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(s, "s")
val permission = if (user.isGuest) {
getGuestAccessLevel(datasetId)
} else {
val groups = getJoinedGroups(user)
// FIXME For this check, user permissions should use the UserAccessLevel constants and
// group permissions the GroupAccessLevel constants.
// The old specification granted the same levels to users and groups, but the current
// specification treats them differently (groups are not granted edit permission).
// This has not been addressed due to implementation time constraints, and the current
// implementation still behaves correctly because the constant values are identical.
getPermission(datasetId, groups)
}
if (permission < UserAndGroupAccessLevel.ALLOW_DOWNLOAD) {
return Failure(new AccessDeniedException(resource.getString(ResourceNames.NO_DOWNLOAD_PERMISSION), Some(user)))
}
Success(())
}
/**
* Converts an Option into a Try.
*
* @tparam T type of the optional value
* @param opt optional value
* @return
* Success(T) if the optional value is present
* Failure(NotFoundException) if the optional value is absent
*/
def found[T](opt: Option[T]): Try[T] = {
opt match {
case Some(x) => Success(x)
case None => Failure(new NotFoundException)
}
}
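// A minimal usage sketch: `found` lets Option-returning lookups participate in a Try-based
// for-comprehension (the bindings below are illustrative):
//
//   for {
//     file    <- found(findFile(fileId))        // Failure(NotFoundException) when None
//     dataset <- found(getDataset(datasetId))
//   } yield (dataset, file)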
/**
* Determines whether a zipped file entry is password protected.
*
* @param zipedFile zipped file object
* @return true if the entry is password protected, false otherwise
*/
def hasPassword(zipedFile: persistence.ZipedFiles): Boolean = {
(zipedFile.cenHeader(8) & 0x01) == 1
}
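// The check above reads byte 8 of the stored central directory header, i.e. the low byte of
// the ZIP "general purpose bit flag"; bit 0 of that flag marks an encrypted (password
// protected) entry. Illustrative sketch of the same test on a raw header array:
//
//   val flagLowByte = cenHeader(8)            // offset 8 in the central directory file header
//   val encrypted = (flagLowByte & 0x01) == 1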
/**
* Verifies that a file is not password protected.
*
* @param file file lookup result object
* @return
* Success(Unit) if the file is not a zip entry
* Success(Unit) if the file is a zip entry without a password
* Failure(NotFoundException) if the file is a password-protected zip entry
*/
def requireNotWithPassword(file: FileResult): Try[Unit] = {
file match {
case FileResultNormal(_, _) => Success(())
case FileResultZip(_, _, zipedFile) => {
if (hasPassword(zipedFile)) {
Failure(new NotFoundException)
} else {
Success(())
}
}
}
}
/**
* Creates an InputStream over the given byte range of a file.
*
* @param path file path
* @param offset start position of the range
* @param limit length of the range
* @return the created InputStream
*/
def createRangeInputStream(path: Path, offset: Long, limit: Long): InputStream = {
val is = Files.newInputStream(path)
try {
is.skip(offset)
new BoundedInputStream(is, limit)
} catch {
case e: Exception => {
is.close()
throw e
}
}
}
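// A minimal usage sketch (path and range values are illustrative): stream 1 KiB starting at
// byte 100 of a file, closing the stream when done.
//
//   val in = createRangeInputStream(Paths.get("/tmp/example.bin"), offset = 100L, limit = 1024L)
//   try {
//     // read from `in` ...
//   } finally {
//     in.close()
//   }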
/**
* Creates a decompressing InputStream from an InputStream.
*
* @param data InputStream over the compressed entry data
* @param centralHeader zip central directory header
* @param dataSize data size
* @param encoding encoding used when decompressing
* @return the decompressing InputStream
*/
def createUnzipInputStream(
data: InputStream,
centralHeader: Array[Byte],
dataSize: Long,
encoding: Charset
): InputStream = {
val footer = createFooter(centralHeader, dataSize)
val sis = new SequenceInputStream(data, new ByteArrayInputStream(footer))
val zis = new ZipInputStream(sis, encoding)
zis.getNextEntry
zis
}
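// A minimal usage sketch (assuming `zippedFile: persistence.ZipedFiles` describes one entry):
// the raw entry bytes are read with createRangeInputStream and a synthetic central directory /
// end-of-central-directory footer is appended, so ZipInputStream sees a well-formed
// single-entry archive and getNextEntry positions the stream at the entry's content.
//
//   val raw = createRangeInputStream(path, zippedFile.dataStart, zippedFile.dataSize)
//   val entryStream = createUnzipInputStream(raw, zippedFile.cenHeader, zippedFile.dataSize, StandardCharsets.UTF_8)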
/**
* Builds a zip-format footer.
*
* @param centralHeader zip central directory header
* @param dataSize data size
* @return the constructed footer as a byte array
*/
def createFooter(centralHeader: Array[Byte], dataSize: Long): Array[Byte] = {
val centralHeaderSize = centralHeader.length
val zip64EndOfCentralDirectoryRecord = if (dataSize < 0x00000000FFFFFFFFL) {
Array.empty[Byte]
} else {
Array.concat(
Array[Byte](
0x50, 0x4b, 0x06, 0x06, // sig
44, 0, 0, 0, 0, 0, 0, 0, // size of this record - 12
45, 0, 45, 0, // version
0, 0, 0, 0, 0, 0, 0, 0, // disk
1, 0, 0, 0, 0, 0, 0, 0, // total entity num on this disk
1, 0, 0, 0, 0, 0, 0, 0 // total entity num
),
longToByte8(centralHeaderSize),
longToByte8(dataSize)
)
}
val zip64EndOfCentralDirectoryLocator = if (dataSize < 0x00000000FFFFFFFFL) {
Array.empty[Byte]
} else {
Array.concat(
Array[Byte](
0x50, 0x4b, 0x06, 0x07, // sig
0, 0, 0, 0 // disk
),
longToByte8(dataSize + centralHeaderSize),
Array[Byte](
1, 0, 0, 0 // total disk num
)
)
}
val endOfCentralDirectoryRecord = Array.concat(
Array[Byte](
0x50, 0x4b, 0x05, 0x06,
0, 0, 0, 0,
1, 0, 1, 0
),
longToByte4(centralHeaderSize),
longToByte4(scala.math.min(dataSize, 0x00000000FFFFFFFFL)),
Array[Byte](0, 0)
)
Array.concat(
centralHeader,
zip64EndOfCentralDirectoryRecord,
zip64EndOfCentralDirectoryLocator,
endOfCentralDirectoryRecord
)
}
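// Layout of the synthetic footer built above (the ZIP64 parts are emitted only when dataSize
// does not fit in 32 bits):
//
//   central directory header(s)
//   [ZIP64 end of central directory record]   signature 0x06064b50
//   [ZIP64 end of central directory locator]  signature 0x07064b50
//   end of central directory record           signature 0x06054b50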
/**
* Determines whether a string is representable in the SJIS encoding.
*
* @param str string to test
* @return true if the string round-trips through SHIFT_JIS unchanged, false otherwise
*/
private def isSJIS(str: String): Boolean = {
try {
val encoded = new String(str.getBytes("SHIFT_JIS"), "SHIFT_JIS")
encoded.equals(str)
} catch {
case e: Exception => false
}
}
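// Illustrative behavior: a string that survives the SHIFT_JIS round trip (e.g. "データ.txt")
// yields true, while a string containing characters outside SHIFT_JIS is altered by the
// conversion and yields false.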
/**
* Converts a Long value to a 4-element byte array.
*
* @param num Long value
* @return the conversion result
*/
def longToByte4(num: Long): Array[Byte] = {
Array[Long](
(num & 0x00000000000000FFL),
(num & 0x000000000000FF00L) >> 8,
(num & 0x0000000000FF0000L) >> 16,
(num & 0x00000000FF000000L) >> 24
).map(_.toByte)
}
/**
* Converts a Long value to an 8-element byte array.
*
* @param num Long value
* @return the conversion result
*/
def longToByte8(num: Long): Array[Byte] = {
Array[Long](
(num & 0x00000000000000FFL),
(num & 0x000000000000FF00L) >> 8,
(num & 0x0000000000FF0000L) >> 16,
(num & 0x00000000FF000000L) >> 24,
(num & 0x000000FF00000000L) >> 32,
(num & 0x0000FF0000000000L) >> 40,
(num & 0x00FF000000000000L) >> 48,
(num & 0xFF00000000000000L) >> 56
).map(_.toByte)
}
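// Both helpers emit little-endian byte order, as the ZIP format requires. Illustrative
// example: longToByte4(0x11223344L) yields Array(0x44, 0x33, 0x22, 0x11).map(_.toByte).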
/**
* Gets the non-deleted FileHistory.
*
* @param fileId file ID
* @param s DB session
* @return the file path stored in the FileHistory
*/
private def getFileHistory(fileId: String)(implicit s: DBSession): Option[String] = {
val fh = persistence.FileHistory.syntax("fh")
val filePath = withSQL {
select(fh.result.filePath)
.from(persistence.FileHistory as fh)
.where
.eq(fh.fileId, sqls.uuid(fileId))
.and
.isNull(fh.deletedAt)
}.map(rs => rs.string(fh.resultName.filePath)).single.apply()
filePath
}
/**
* Gets the Personal group for a user ID.
*
* @param userId user ID
* @param s DB session
* @return the group, if found
*/
private def getPersonalGroup(userId: String)(implicit s: DBSession): Option[persistence.Group] = {
val g = persistence.Group.syntax("g")
val m = persistence.Member.syntax("m")
withSQL {
select(g.result.*)
.from(persistence.Group as g)
.innerJoin(persistence.Member as m).on(g.id, m.groupId)
.where
.eq(m.userId, sqls.uuid(userId))
.and
.eq(g.groupType, persistence.GroupType.Personal)
.limit(1)
}.map(rs => persistence.Group(g.resultName)(rs)).single.apply()
}
/**
* Gets a non-deleted dataset.
*
* @param id dataset ID
* @param s DB session
* @return the dataset, if found
*/
private def getDataset(id: String)(implicit s: DBSession): Option[Dataset] = {
if (StringUtil.isUUID(id)) {
val d = persistence.Dataset.syntax("d")
withSQL {
select(d.result.*)
.from(persistence.Dataset as d)
.where
.eq(d.id, sqls.uuid(id))
.and
.isNull(d.deletedAt)
}.map(persistence.Dataset(d.resultName)).single.apply()
} else {
None
}
}
/**
* Gets the strongest permission the given groups have on the target dataset.
*
* @param id dataset ID
* @param groups list of group IDs
* @param s DB session
* @return the strongest access level value on the target dataset
* @throws NullPointerException if an argument is null
*/
private def getPermission(id: String, groups: Seq[String])(implicit s: DBSession): Int = {
CheckUtil.checkNull(id, "id")
CheckUtil.checkNull(groups, "groups")
CheckUtil.checkNull(s, "s")
val o = persistence.Ownership.syntax("o")
val g = persistence.Group.g
val permissions = withSQL {
select(o.result.accessLevel, g.result.groupType)
.from(persistence.Ownership as o)
.innerJoin(persistence.Group as g).on(o.groupId, g.id)
.where
.eq(o.datasetId, sqls.uuid(id))
.and
.inUuid(o.groupId, Seq.concat(groups, Seq(AppConf.guestGroupId)))
}.map(rs => (rs.int(o.resultName.accessLevel), rs.int(g.resultName.groupType))).list.apply()
// The SQL above cannot fetch the guest user's level, so it is fetched separately
val guestPermission = (getGuestAccessLevel(id), GroupType.Personal)
(guestPermission :: permissions).map {
case (accessLevel, groupType) =>
// A group with Provider permission cannot write
if (accessLevel == GroupAccessLevel.Provider && groupType == GroupType.Public) {
UserAndGroupAccessLevel.ALLOW_DOWNLOAD
} else {
accessLevel
}
}.max
}
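// Worked example (illustrative only): with ownership rows (accessLevel = Provider, groupType =
// Public) and (accessLevel = Owner, groupType = Personal) plus a guest level of 0, the Provider
// row is capped to ALLOW_DOWNLOAD and the overall permission is the maximum of the three values.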
/**
* Gets the guest access level of a dataset.
*
* @param datasetId dataset ID
* @param s DB session
* @return guest access level
*/
private def getGuestAccessLevel(datasetId: String)(implicit s: DBSession): Int = {
val o = persistence.Ownership.syntax("o")
withSQL {
select(o.result.accessLevel)
.from(persistence.Ownership as o)
.where
.eq(o.datasetId, sqls.uuid(datasetId))
.and
.eq(o.groupId, sqls.uuid(AppConf.guestGroupId))
.and
.isNull(o.deletedAt)
}.map(_.int(o.resultName.accessLevel)).single.apply().getOrElse(0)
}
/**
* Gets the list of Owner users and Provider groups.
*
* @param datasetIds list of dataset IDs
* @param userInfo logged-in user information
* @param s DB session
* @return the ownership entries keyed by dataset ID
*/
private def getOwnerGroups(
datasetIds: Seq[String],
userInfo: User
)(implicit s: DBSession): Map[String, Seq[DatasetData.DatasetOwnership]] = {
if (datasetIds.nonEmpty) {
val o = persistence.Ownership.o
val g = persistence.Group.g
val m = persistence.Member.m
val u = persistence.User.u
val gi = persistence.GroupImage.gi
val owners = withSQL {
select(o.result.*, g.result.*, u.result.*, gi.result.*)
.from(persistence.Ownership as o)
.innerJoin(persistence.Group as g)
.on(sqls.eq(g.id, o.groupId).and.isNull(g.deletedAt))
.leftJoin(persistence.Member as m)
.on(sqls.eq(g.id, m.groupId)
.and.eq(g.groupType, persistence.GroupType.Personal)
.and.eq(m.role, persistence.GroupMemberRole.Manager)
.and.isNull(m.deletedAt))
.leftJoin(persistence.User as u)
.on(sqls.eq(m.userId, u.id).and.eq(u.disabled, false))
.leftJoin(persistence.GroupImage as gi)
.on(sqls.eq(g.id, gi.groupId).and.eq(gi.isPrimary, true).and.isNull(gi.deletedAt))
.where
.inUuid(o.datasetId, datasetIds)
.and
.append(sqls"(")
.append(sqls"(")
.eq(g.groupType, GroupType.Personal)
.and
.eq(o.accessLevel, UserAccessLevel.Owner)
.append(sqls")")
.or
.append(sqls"(")
.eq(g.groupType, GroupType.Public)
.and
.eq(o.accessLevel, GroupAccessLevel.Provider)
.append(sqls")")
.append(sqls")")
.and
.isNull(o.deletedAt)
}.map { rs =>
(
rs.string(o.resultName.datasetId),
DatasetData.DatasetOwnership(
id = rs.stringOpt(u.resultName.id).getOrElse(rs.string(g.resultName.id)),
name = rs.stringOpt(u.resultName.name).getOrElse(rs.string(g.resultName.name)),
fullname = rs.stringOpt(u.resultName.fullname).getOrElse(""),
organization = rs.stringOpt(u.resultName.organization).getOrElse(""),
title = rs.stringOpt(u.resultName.title).getOrElse(""),
description = rs.stringOpt(u.resultName.description).getOrElse(""),
image = AppConf.imageDownloadRoot +
(if (rs.stringOpt(u.resultName.id).isEmpty) { "groups/" } else { "user/" }) +
rs.stringOpt(u.resultName.id).getOrElse(rs.string(g.resultName.id)) + "/" +
rs.stringOpt(u.resultName.imageId).getOrElse(rs.string(gi.resultName.imageId)),
accessLevel = rs.int(o.resultName.accessLevel),
ownerType = rs.stringOpt(u.resultName.id) match {
case Some(x) => OwnerType.User
case None => OwnerType.Group
}
)
)
}.list.apply()
.groupBy(_._1)
.map(x => (x._1, x._2.map(_._2)))
// Sort: groups first, then the logged-in user (if present), then the other users
// Uses a mutable map
val sortedOwners = scala.collection.mutable.Map.empty[String, Seq[DatasetData.DatasetOwnership]]
owners.foreach { x =>
val groups = x._2.filter(_.ownerType == OwnerType.Group).sortBy(_.fullname)
val loginUser = x._2.filter(_.id == userInfo.id)
val other = x._2.diff(groups).diff(loginUser).sortBy(_.fullname)
sortedOwners.put(x._1, groups ++ loginUser ++ other)
}
// mutable -> immutable
sortedOwners.toMap
} else {
Map.empty
}
}
/**
* Gets all access rights of a dataset.
*
* @param datasetId dataset ID
* @param userInfo logged-in user information
* @param s DB session
* @return the list of ownerships
*/
private def getAllOwnerships(
datasetId: String,
userInfo: User
)(implicit s: DBSession): Seq[DatasetData.DatasetOwnership] = {
// Guest account information is not stored in the owners table, so it is not returned by this method
val o = persistence.Ownership.o
val g = persistence.Group.g
val m = persistence.Member.m
val u = persistence.User.u
val gi = persistence.GroupImage.gi
val owners = withSQL {
select(o.result.*, g.result.*, u.result.*, gi.result.*)
.from(persistence.Ownership as o)
.innerJoin(persistence.Group as g)
.on(sqls.eq(g.id, o.groupId).and.isNull(g.deletedAt))
.leftJoin(persistence.Member as m)
.on(
sqls.eq(g.id, m.groupId)
.and.eq(g.groupType, persistence.GroupType.Personal)
.and.eq(m.role, persistence.GroupMemberRole.Manager)
.and.isNull(m.deletedAt)
)
.leftJoin(persistence.User as u)
.on(sqls.eq(m.userId, u.id))
.leftJoin(persistence.GroupImage as gi)
.on(sqls.eq(g.id, gi.groupId).and.eq(gi.isPrimary, true).and.isNull(gi.deletedAt))
.where
.eq(o.datasetId, sqls.uuid(datasetId))
.and
.withRoundBracket { sql =>
sql.withRoundBracket { sql =>
sql
.eq(g.groupType, GroupType.Personal)
.and
.gt(o.accessLevel, UserAccessLevel.Deny)
}
.or
.withRoundBracket { sql =>
sql
.eq(g.groupType, GroupType.Public)
.and
.gt(o.accessLevel, GroupAccessLevel.Deny)
}
}
.and
.isNull(o.deletedAt)
.and
.withRoundBracket { sql =>
sql
.eq(u.disabled, false)
.or
.isNull(u.disabled)
}
}.map { rs =>
DatasetData.DatasetOwnership(
id = rs.stringOpt(u.resultName.id).getOrElse(rs.string(g.resultName.id)),
name = rs.stringOpt(u.resultName.name).getOrElse(rs.string(g.resultName.name)),
fullname = rs.stringOpt(u.resultName.fullname).getOrElse(""),
organization = rs.stringOpt(u.resultName.organization).getOrElse(""),
title = rs.stringOpt(u.resultName.title).getOrElse(""),
description = rs.stringOpt(u.resultName.description).getOrElse(""),
image = AppConf.imageDownloadRoot +
(if (rs.stringOpt(u.resultName.id).isEmpty) { "groups/" } else { "user/" }) +
rs.stringOpt(u.resultName.id).getOrElse(rs.string(g.resultName.id)) + "/" +
rs.stringOpt(u.resultName.imageId).getOrElse(rs.string(gi.resultName.imageId)),
accessLevel = rs.int(o.resultName.accessLevel),
ownerType = rs.stringOpt(u.resultName.id) match {
case Some(x) => OwnerType.User
case None => OwnerType.Group
}
)
}.list.apply()
// Sort: if the logged-in user is an owner it comes first; the rest are ordered descending by access level, then by owner type
// Split the ownerships into the logged-in user's and the others
val owner = owners.filter(x => x.id == userInfo.id && x.accessLevel == UserAccessLevel.Owner)
// Compute a sort weight from accessLevel and ownerType, then sort
val sortedPartial = owners.diff(owner)
.map(x => (x, x.accessLevel * 10 - x.ownerType))
.sortBy(s => (s._2, s._1.fullname)).reverse.map(_._1)
owner ++ sortedPartial
}
/**
* Gets all attributes of a dataset.
*
* @param datasetId dataset ID
* @param s DB session
* @return the list of attributes
*/
private def getAttributes(datasetId: String)(implicit s: DBSession): Seq[DatasetData.DatasetAttribute] = {
val da = persistence.DatasetAnnotation.syntax("da")
val a = persistence.Annotation.syntax("a")
withSQL {
select(da.result.*, a.result.*)
.from(persistence.DatasetAnnotation as da)
.innerJoin(persistence.Annotation as a)
.on(sqls.eq(da.annotationId, a.id).and.isNull(a.deletedAt))
.where
.eq(da.datasetId, sqls.uuid(datasetId))
.and
.isNull(da.deletedAt)
}.map { rs =>
(
persistence.DatasetAnnotation(da.resultName)(rs),
persistence.Annotation(a.resultName)(rs)
)
}.list.apply().map { x =>
DatasetData.DatasetAttribute(
name = x._2.name,
value = x._1.data
)
}
}
/**
* Gets all images of a dataset.
*
* @param datasetId dataset ID
* @param s DB session
* @return the list of images
*/
private def getImages(datasetId: String)(implicit s: DBSession): Seq[Image] = {
val di = persistence.DatasetImage.di
val i = persistence.Image.i
withSQL {
select(di.result.*, i.result.*)
.from(persistence.DatasetImage as di)
.innerJoin(persistence.Image as i).on(di.imageId, i.id)
.where
.eq(di.datasetId, sqls.uuid(datasetId))
.and
.isNull(di.deletedAt)
.and
.isNull(i.deletedAt)
.orderBy(i.name, i.createdAt)
}.map { rs =>
(
persistence.DatasetImage(di.resultName)(rs),
persistence.Image(i.resultName)(rs)
)
}.list.apply().map { x =>
Image(
id = x._2.id,
url = datasetImageDownloadRoot + datasetId + "/" + x._2.id
)
}
}
/**
* Gets the list of files.
*
* @param datasetId dataset ID
* @param fileIds file IDs to select; if empty, all files of the dataset are targeted
* @param permission access level of the logged-in user
* @param limit maximum number of items to fetch
* @param offset fetch start position
* @param s DB session
* @return the list of files
*/
private def getFiles(
datasetId: String,
fileIds: Seq[String] = Seq.empty,
permission: Int,
limit: Int = AppConf.fileLimit,
offset: Int = 0
)(implicit s: DBSession): Seq[DatasetData.DatasetFile] = {
val f = persistence.File.f
val u1 = persistence.User.syntax("u1")
val u2 = persistence.User.syntax("u2")
val ma1 = persistence.MailAddress.syntax("ma1")
val ma2 = persistence.MailAddress.syntax("ma2")
val fh = persistence.FileHistory.fh
val results: ArrayBuffer[(Boolean, persistence.File, Option[User], Option[User])] = ArrayBuffer.empty
withSQL {
select(
fh.result.isZip,
f.result.*,
u1.result.*,
u2.result.*,
ma1.result.address,
ma2.result.address
)
.from(persistence.File as f)
.leftJoin(persistence.User as u1).on(sqls.eq(f.createdBy, u1.id).and.eq(u1.disabled, false))
.leftJoin(persistence.User as u2).on(sqls.eq(f.updatedBy, u2.id).and.eq(u2.disabled, false))
.leftJoin(persistence.MailAddress as ma1).on(u1.id, ma1.userId)
.leftJoin(persistence.MailAddress as ma2).on(u2.id, ma2.userId)
.innerJoin(persistence.FileHistory as fh).on(
sqls.eq(fh.id, f.historyId)
.and.isNull(fh.deletedAt)
)
.where(sqls.toAndConditionOpt(
Some(sqls.eqUuid(f.datasetId, datasetId)),
Some(sqls.isNull(f.deletedAt)),
if (fileIds == null || fileIds.size == 0) {
None
} else {
Some(sqls.inUuid(f.id, fileIds))
}
))
.orderBy(f.name, f.createdAt)
.offset(offset)
.limit(limit)
}.map { rs =>
(
rs.boolean(1),
persistence.File(f.resultName)(rs),
rs.stringOpt(u1.resultName.id).map { _ =>
persistence.User(u1.resultName)(rs)
},
rs.stringOpt(u2.resultName.id).map { _ =>
persistence.User(u2.resultName)(rs)
},
rs.stringOpt(ma1.resultName.address),
rs.stringOpt(ma2.resultName.address)
)
}.list.apply().foreach {
case (fileIsZip, file, createdUser, updatedUser, createdUserMail, updatedUserMail) => {
results += ((fileIsZip, file, createdUser.map(u => User(u, createdUserMail.getOrElse(""))), updatedUser.map(u => User(u, updatedUserMail.getOrElse("")))))
}
}
val zippedFileAmounts = getZippedFileAmounts(
results.filter(x => x._1).map(x => x._2.historyId).toSeq
)
results.map {
case (fileIsZip, file, createdUser, updatedUser) => {
val canDownload = permission >= UserAndGroupAccessLevel.ALLOW_DOWNLOAD
DatasetData.DatasetFile(
id = file.id,
name = file.name,
description = file.description,
url = if (canDownload) Some(AppConf.fileDownloadRoot + datasetId + "/" + file.id) else None,
size = if (canDownload) Some(file.fileSize) else None,
createdBy = createdUser,
createdAt = file.createdAt.toString(),
updatedBy = updatedUser,
updatedAt = file.updatedAt.toString(),
isZip = fileIsZip,
zipedFiles = Seq.empty,
zipCount = if (fileIsZip) {
zippedFileAmounts.filter(x => x._1 == file.historyId).headOption.map(x => x._2).getOrElse(0)
} else {
0
}
)
}
}.toSeq
}
/**
* Gets the number of files a dataset holds.
*
* @param datasetId dataset ID
* @param s DB session
* @return number of files
*/
def getFileAmount(datasetId: String)(implicit s: DBSession): Int = {
val f = persistence.File.f
val u1 = persistence.User.syntax("u1")
val u2 = persistence.User.syntax("u2")
val ma1 = persistence.MailAddress.syntax("ma1")
val ma2 = persistence.MailAddress.syntax("ma2")
withSQL {
select(sqls.count)
.from(persistence.File as f)
.leftJoin(persistence.User as u1).on(sqls.eq(f.createdBy, u1.id).and.eq(u1.disabled, false))
.leftJoin(persistence.User as u2).on(sqls.eq(f.updatedBy, u2.id).and.eq(u2.disabled, false))
.leftJoin(persistence.MailAddress as ma1).on(u1.id, ma1.userId)
.leftJoin(persistence.MailAddress as ma2).on(u2.id, ma2.userId)
.where
.eq(f.datasetId, sqls.uuid(datasetId))
.and
.isNull(f.deletedAt)
}.map(_.int(1)).single.apply().getOrElse(0)
}
/**
* Updates a dataset's file statistics (file count and total size).
*
* @param datasetId dataset ID
* @param userId ID of the updating user
* @param timestamp timestamp
* @param s DB session
* @return number of updated rows
*/
private def updateDatasetFileStatus(
datasetId: String,
userId: String,
timestamp: DateTime
)(implicit s: DBSession): Int = {
val f = persistence.File.f
val allFiles = withSQL {
select(f.result.*)
.from(persistence.File as f)
.where
.eq(f.datasetId, sqls.uuid(datasetId))
.and
.isNull(f.deletedAt)
}.map(persistence.File(f.resultName)).list.apply()
val totalFileSize = allFiles.foldLeft(0L)((a: Long, b: persistence.File) => a + b.fileSize)
withSQL {
val d = persistence.Dataset.column
update(persistence.Dataset)
.set(d.filesCount -> allFiles.size, d.filesSize -> totalFileSize,
d.updatedBy -> sqls.uuid(userId), d.updatedAt -> timestamp)
.where
.eq(d.id, sqls.uuid(datasetId))
}.update.apply()
}
/**
* Gets the primary (icon) image ID of a dataset.
*
* @param datasetId dataset ID
* @param s DB session
* @return the image ID, if found
*/
private def getPrimaryImageId(datasetId: String)(implicit s: DBSession): Option[String] = {
val di = persistence.DatasetImage.syntax("di")
val i = persistence.Image.syntax("i")
withSQL {
select(i.result.id)
.from(persistence.Image as i)
.innerJoin(persistence.DatasetImage as di).on(i.id, di.imageId)
.where
.eq(di.datasetId, sqls.uuid(datasetId))
.and
.eq(di.isPrimary, true)
.and
.isNull(di.deletedAt)
.and
.isNull(i.deletedAt)
}.map(rs => rs.string(i.resultName.id)).single.apply()
}
/**
* Gets the featured image ID of a dataset.
*
* @param datasetId dataset ID
* @param s DB session
* @return the image ID, if found
*/
private def getFeaturedImageId(datasetId: String)(implicit s: DBSession): Option[String] = {
val di = persistence.DatasetImage.syntax("di")
val i = persistence.Image.syntax("i")
withSQL {
select(i.result.id)
.from(persistence.Image as i)
.innerJoin(persistence.DatasetImage as di).on(i.id, di.imageId)
.where
.eq(di.datasetId, sqls.uuid(datasetId))
.and
.eq(di.isFeatured, true)
.and
.isNull(di.deletedAt)
.and
.isNull(i.deletedAt)
}.map(rs => rs.string(i.resultName.id)).single.apply()
}
/**
* Gets the access count of a dataset.
*
* @param datasetId dataset ID
* @param s DB session
* @return access count
*/
private def getAccessCount(datasetId: String)(implicit s: DBSession): Long = {
val dal = persistence.DatasetAccessLog.dal
persistence.DatasetAccessLog.countBy(sqls.eqUuid(dal.datasetId, datasetId))
}
/**
* Determines whether the given image exists and is associated with the dataset.
*
* @param datasetId dataset ID
* @param imageId image ID
* @param s DB session
* @return true if the image exists and is associated with the dataset, false otherwise
*/
private def existsImage(datasetId: String, imageId: String)(implicit s: DBSession): Boolean = {
val i = persistence.Image.syntax("i")
val di = persistence.DatasetImage.syntax("di")
withSQL {
select(i.result.id)
.from(persistence.Image as i)
.innerJoin(persistence.DatasetImage as di).on(i.id, di.imageId)
.where
.eq(di.datasetId, sqls.uuid(datasetId))
.and
.eq(i.id, sqls.uuid(imageId))
.and
.isNull(di.deletedAt)
.and
.isNull(i.deletedAt)
}.map(rs => rs.string(i.resultName.id)).single.apply().isDefined
}
/**
* Determines whether the given file exists in the dataset.
*
* @param datasetId dataset ID
* @param fileId file ID
* @param s DB session
* @return true if the file exists, false otherwise
*/
private def existsFile(datasetId: String, fileId: String)(implicit s: DBSession): Boolean = {
val f = persistence.File.syntax("f")
val d = persistence.Dataset.syntax("d")
withSQL {
select(f.result.id)
.from(persistence.File as f)
.innerJoin(persistence.Dataset as d).on(d.id, f.datasetId)
.where
.eq(f.datasetId, sqls.uuid(datasetId))
.and
.eq(f.id, sqls.uuid(fileId))
.and
.isNull(f.deletedAt)
.and
.isNull(d.deletedAt)
}.map(rs => rs.string(f.resultName.id)).single.apply().isDefined
}
/**
* Updates the Ownership if it already exists, otherwise creates it.
*
* @param userInfo logged-in user information
* @param datasetId dataset ID
* @param groupId group ID
* @param accessLevel access level
* @param s DB session
*/
private def saveOrCreateOwnerships(
userInfo: User,
datasetId: String,
groupId: String,
accessLevel: Int
)(implicit s: DBSession): Unit = {
val myself = persistence.User.find(userInfo.id).get
val timestamp = DateTime.now()
val o = persistence.Ownership.o
val ownership = withSQL {
select(o.result.*)
.from(persistence.Ownership as o)
.where
.eq(o.datasetId, sqls.uuid(datasetId))
.and
.eq(o.groupId, sqls.uuid(groupId))
}.map(persistence.Ownership(o.resultName)).single.apply()
ownership match {
case Some(x) => {
if (accessLevel != x.accessLevel) {
persistence.Ownership(
id = x.id,
datasetId = x.datasetId,
groupId = x.groupId,
accessLevel = accessLevel,
createdBy = myself.id,
createdAt = x.createdAt,
updatedBy = myself.id,
updatedAt = timestamp
).save()
}
}
case None => {
if (accessLevel > 0) {
persistence.Ownership.create(
id = UUID.randomUUID.toString,
datasetId = datasetId,
groupId = groupId,
accessLevel = accessLevel,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
}
}
}
}
/**
* Creates a copy of a dataset.
*
* @param datasetId dataset ID
* @param user logged-in user information
* @return
* Success(CopiedDataset) information on the copied dataset, on success
* Failure(NullPointerException) if an argument is null
* Failure(NotFoundException) if the dataset is not found
* Failure(AccessDeniedException) if the logged-in user has no edit permission
*/
def copyDataset(datasetId: String, user: User): Try[CopiedDataset] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
checkDatasetExisitence(datasetId)
checkOwnerAccess(datasetId, user)
val myself = persistence.User.find(user.id).get
val timestamp = DateTime.now()
val newDatasetId = UUID.randomUUID.toString
val dataset = persistence.Dataset.find(datasetId).get
persistence.Dataset.create(
id = newDatasetId,
name = "Copy of " + dataset.name,
description = dataset.description,
licenseId = dataset.licenseId,
filesCount = 0,
filesSize = 0,
localState = dataset.localState,
s3State = dataset.s3State,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
val da = persistence.DatasetAnnotation.da
val annotations = withSQL {
select
.from(DatasetAnnotation as da)
.where
.eq(da.datasetId, sqls.uuid(datasetId))
}.map(persistence.DatasetAnnotation(da.resultName)).list.apply()
annotations.foreach { annotation =>
persistence.DatasetAnnotation.create(
id = UUID.randomUUID().toString,
datasetId = newDatasetId,
annotationId = annotation.annotationId,
data = annotation.data,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
}
val di = persistence.DatasetImage.di
val images = withSQL {
select
.from(DatasetImage as di)
.where
.eq(di.datasetId, sqls.uuid(datasetId))
}.map(persistence.DatasetImage(di.resultName)).list.apply()
images.foreach { image =>
persistence.DatasetImage.create(
id = UUID.randomUUID().toString,
datasetId = newDatasetId,
imageId = image.imageId,
isPrimary = image.isPrimary,
isFeatured = image.isFeatured,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
}
val o = persistence.Ownership.o
val ownerships = withSQL {
select
.from(Ownership as o)
.where
.eq(o.datasetId, sqls.uuid(datasetId))
}.map(persistence.Ownership(o.resultName)).list.apply()
ownerships.foreach { ownership =>
persistence.Ownership.create(
id = UUID.randomUUID().toString,
datasetId = newDatasetId,
groupId = ownership.groupId,
accessLevel = ownership.accessLevel,
createdBy = myself.id,
createdAt = timestamp,
updatedBy = myself.id,
updatedAt = timestamp
)
}
CopiedDataset(newDatasetId)
}
}
}
/**
* Imports attributes into a dataset.
*
* @param datasetId dataset ID
* @param file import file
* @param user logged-in user information
* @return
* Success(Unit) on successful import
* Failure(NullPointerException) if an argument is null
* Failure(NotFoundException) if the dataset is not found
* Failure(AccessDeniedException) if the logged-in user has no edit permission
*/
def importAttribute(datasetId: String, file: FileItem, user: User): Try[Unit] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(file, "file")
CheckUtil.checkNull(user, "user")
val tmpFile = File.createTempFile("coi_", null)
val csv = try {
// Save to a temporary file first to spread the load
val tmpPath = Paths.get(tmpFile.getAbsolutePath)
Files.copy(file.getInputStream, tmpPath, StandardCopyOption.REPLACE_EXISTING)
val charset = parseCharset(tmpPath)
use(Files.newBufferedReader(tmpPath, charset)) { in =>
CSVReader.open(in).all().collect {
case name :: value :: _ => (name, value)
}
}
} finally {
tmpFile.delete()
}
DB.localTx { implicit s =>
checkDatasetExisitence(datasetId)
checkOwnerAccess(datasetId, user)
val a = persistence.Annotation.a
val da = persistence.DatasetAnnotation.da
val exists = withSQL {
select
.from(Annotation as a)
.where
.in(a.name, csv.map(_._1))
}.map { rs =>
val annotation = persistence.Annotation(a.resultName)(rs)
(annotation.name, annotation.id)
}.list.apply().toMap
val timestamp = DateTime.now()
val names = csv.map(_._1).toSet
val annotations = names.map { name =>
val id = exists.getOrElse(name, {
val id = UUID.randomUUID().toString
persistence.Annotation.create(
id = id,
name = name,
createdBy = user.id,
createdAt = timestamp,
updatedBy = user.id,
updatedAt = timestamp
)
id
})
(name, id)
}.toMap
csv.foreach {
case (name, value) =>
persistence.DatasetAnnotation.create(
id = UUID.randomUUID().toString,
datasetId = datasetId,
annotationId = annotations(name),
data = value,
createdBy = user.id,
createdAt = timestamp,
updatedBy = user.id,
updatedAt = timestamp
)
}
}
}
}
/**
* Handles a resource with the loan pattern.
*
* @tparam T1 type of the resource
* @tparam T2 result type of the operation on the resource
* @param resource the resource to handle
* @param f the operation to perform on the resource
* @return the result of the operation
*/
private def use[T1 <: Closeable, T2](resource: T1)(f: T1 => T2): T2 = {
try {
f(resource)
} finally {
try {
resource.close()
} catch {
case e: Exception =>
}
}
}
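// A minimal usage sketch of the loan-pattern helper (file name is illustrative): the reader is
// closed even if the body throws.
//
//   val lineCount = use(Files.newBufferedReader(Paths.get("attributes.csv"), StandardCharsets.UTF_8)) { reader =>
//     Iterator.continually(reader.readLine()).takeWhile(_ != null).size
//   }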
/**
* Detects the character encoding of the specified file.
*
* @param path path to the file to inspect
* @return the detected charset
*/
private def parseCharset(path: Path): Charset = {
val charsets = Seq("ISO-2022-JP", "EUC-JP", "MS932", "UTF-8", "UTF-16")
.map { c =>
try {
Files.readAllLines(path, Charset.forName(c))
Some(c)
} catch {
case _: Throwable => None
}
}
.filter(_.nonEmpty)
if (charsets.nonEmpty) Charset.forName(charsets.head.get)
else StandardCharsets.UTF_8
}
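// Detection above is best-effort: the candidate charsets are tried in order and the first one
// that can read the whole file without a decoding error is used, falling back to UTF-8 when
// none succeed. For example, a pure-ASCII CSV is reported as ISO-2022-JP simply because it
// decodes cleanly under every candidate and that charset is tried first.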
/**
* Gets the attributes in CSV format.
*
* @param datasetId dataset ID
* @param user user information
* @return
* Success(File) the CSV file, on success
* Failure(NullPointerException) if an argument is null
* Failure(NotFoundException) if the dataset is not found
* Failure(AccessDeniedException) if the logged-in user has no read permission
*/
def exportAttribute(datasetId: String, user: User): Try[java.io.File] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(user, "user")
DB.readOnly { implicit s =>
checkDatasetExisitence(datasetId)
checkReadPermission(datasetId, user)
val a = persistence.Annotation.a
val da = persistence.DatasetAnnotation.da
val attributes = withSQL {
select
.from(Annotation as a)
.join(DatasetAnnotation as da).on(a.id, da.annotationId)
.where
.eq(da.datasetId, sqls.uuid(datasetId))
}.map { rs =>
val line = Seq(rs.string(a.resultName.name), rs.string(da.resultName.data)).mkString(",")
line + System.getProperty("line.separator")
}.list.apply()
val file = Paths.get(AppConf.tempDir, "export.csv").toFile
use(new FileOutputStream(file)) { out =>
attributes.foreach { x => out.write(x.getBytes) }
}
file
}
}
}
/**
* Gets the list of access levels of a dataset.
*
* @param datasetId dataset ID
* @param limit maximum number of results
* @param offset search start position
* @param user user information
* @return
* Success(RangeSlice[DatasetOwnership]) the list of access levels (with offset and limit), on success
* Failure(NullPointerException) if an argument is null
* Failure(NotFoundException) if the dataset is not found
* Failure(AccessDeniedException) if the logged-in user has no read permission
*/
def searchOwnerships(
datasetId: String,
offset: Option[Int],
limit: Option[Int],
user: User
): Try[RangeSlice[DatasetOwnership]] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(offset, "offset")
CheckUtil.checkNull(limit, "limit")
CheckUtil.checkNull(user, "user")
DB.readOnly { implicit s =>
checkDatasetExisitence(datasetId)
checkReadPermission(datasetId, user)
val o = persistence.Ownership.o
val u = persistence.User.u
val g = persistence.Group.g
val m = persistence.Member.m
val gi = persistence.GroupImage.gi
val count = withSQL {
select(sqls.countDistinct(g.id))
.from(persistence.Ownership as o)
.innerJoin(persistence.Group as g)
.on(sqls.eq(o.groupId, g.id).and.eq(g.groupType, GroupType.Public))
.innerJoin(persistence.GroupImage as gi).on(gi.groupId, g.id)
.where
.eq(o.datasetId, sqls.uuid(datasetId))
.and
.isNull(o.deletedBy)
.and
.isNull(o.deletedAt)
.and
.gt(o.accessLevel, 0)
.union(
select(sqls.countDistinct(u.id))
.from(persistence.Ownership as o)
.innerJoin(persistence.Group as g)
.on(sqls.eq(o.groupId, g.id).and.eq(g.groupType, GroupType.Personal))
.innerJoin(persistence.Member as m).on(sqls.eq(g.id, m.groupId).and.isNull(m.deletedAt))
.innerJoin(persistence.User as u).on(sqls.eq(u.id, m.userId).and.eq(u.disabled, false))
.where
.eq(o.datasetId, sqls.uuid(datasetId))
.and
.isNull(o.deletedBy)
.and
.isNull(o.deletedAt)
.and
.gt(o.accessLevel, 0)
)
}.map(rs => rs.int(1)).list.apply().foldLeft(0)(_ + _)
val list = withSQL {
select(
g.id, o.accessLevel, g.name, gi.imageId, g.description,
sqls"null as fullname, '2' as type, null as organization, null as title, false as own"
)
.from(persistence.Ownership as o)
.innerJoin(persistence.Group as g)
.on(sqls.eq(o.groupId, g.id).and.eq(g.groupType, GroupType.Public))
.innerJoin(persistence.GroupImage as gi)
.on(sqls.eq(gi.groupId, g.id).and.eq(gi.isPrimary, true).and.isNull(gi.deletedBy))
.where
.eq(o.datasetId, sqls.uuid(datasetId))
.and
.isNull(o.deletedBy)
.and
.isNull(o.deletedAt)
.and
.gt(o.accessLevel, 0)
.union(
select(
u.id, o.accessLevel, u.name, u.imageId, u.description, u.fullname,
sqls"'1' as type",
u.organization, u.title,
sqls.eqUuid(u.id, user.id).and.eq(o.accessLevel, 3).append(sqls"own")
)
.from(persistence.Ownership as o)
.innerJoin(persistence.Group as g)
.on(sqls.eq(o.groupId, g.id).and.eq(g.groupType, GroupType.Personal))
.innerJoin(persistence.Member as m).on(sqls.eq(g.id, m.groupId).and.isNull(m.deletedAt))
.innerJoin(persistence.User as u).on(sqls.eq(u.id, m.userId).and.eq(u.disabled, false))
.where
.eq(o.datasetId, sqls.uuid(datasetId))
.and
.isNull(o.deletedBy)
.and
.isNull(o.deletedAt)
.and
.gt(o.accessLevel, 0)
)
.orderBy(sqls"own desc")
.offset(offset.getOrElse(0))
.limit(limit.getOrElse(DEFALUT_LIMIT))
}.map { rs =>
(
rs.string("id"),
rs.int("access_level"),
rs.string("name"),
rs.string("image_id"),
rs.string("description"),
rs.string("fullname"),
rs.int("type"),
rs.string("organization"),
rs.string("title")
)
}.list.apply().map { o =>
val image = AppConf.imageDownloadRoot + (if (o._7 == 1) "user/" else "groups/") + o._1 + "/" + o._4
DatasetOwnership(
id = o._1,
name = o._3,
fullname = o._6,
image = image,
accessLevel = o._2,
ownerType = o._7,
description = o._5,
organization = o._8,
title = o._9
)
}.toSeq
RangeSlice(
summary = RangeSliceSummary(
total = count,
offset = offset.getOrElse(0),
count = limit.getOrElse(DEFALUT_LIMIT)
),
results = list
)
}
}
}
/**
* Gets the list of images of a dataset.
*
* @param datasetId dataset ID
* @param limit maximum number of results
* @param offset search start position
* @param user user information
* @return
* Success(RangeSlice[DatasetGetImage]) the dataset's images (with total count, limit and offset), on success
* Failure(NullPointerException) if an argument is null
* Failure(NotFoundException) if the dataset is not found
* Failure(AccessDeniedException) if the logged-in user has no read permission
*/
def getImages(
datasetId: String,
offset: Option[Int],
limit: Option[Int],
user: User
): Try[RangeSlice[DatasetData.DatasetGetImage]] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(offset, "offset")
CheckUtil.checkNull(limit, "limit")
CheckUtil.checkNull(user, "user")
DB.readOnly { implicit s =>
checkDatasetExisitence(datasetId)
checkReadPermission(datasetId, user)
val di = persistence.DatasetImage.di
val i = persistence.Image.i
val totalCount = withSQL {
select(sqls"count(1)")
.from(persistence.DatasetImage as di)
.innerJoin(persistence.Image as i).on(di.imageId, i.id)
.where
.eqUuid(di.datasetId, datasetId)
.and
.isNull(di.deletedBy)
.and
.isNull(di.deletedAt)
}.map(rs => rs.int(1)).single.apply()
val result = withSQL {
select(i.result.*, di.result.isPrimary)
.from(persistence.DatasetImage as di)
.innerJoin(persistence.Image as i).on(di.imageId, i.id)
.where
.eqUuid(di.datasetId, datasetId)
.and
.isNull(di.deletedBy)
.and
.isNull(di.deletedAt)
.offset(offset.getOrElse(0))
.limit(limit.getOrElse(DEFALUT_LIMIT))
}.map { rs =>
(
rs.string(i.resultName.id),
rs.string(i.resultName.name),
rs.boolean(di.resultName.isPrimary)
)
}.list.apply().map { x =>
DatasetData.DatasetGetImage(
id = x._1,
name = x._2,
url = datasetImageDownloadRoot + datasetId + "/" + x._1,
isPrimary = x._3
)
}
RangeSlice(
RangeSliceSummary(
total = totalCount.getOrElse(0),
count = limit.getOrElse(DEFALUT_LIMIT),
offset = offset.getOrElse(0)
),
result
)
}
}
}
/**
* Changes the featured image of the specified dataset.
*
* @param datasetId dataset ID
* @param imageId image ID
* @param user logged-in user information
* @return
* Success(DatasetData.ChangeDatasetImage) the image ID after the change
* Failure(NullPointerException) if an argument is null
* Failure(NotFoundException) if the dataset or image is not found
* Failure(AccessDeniedException) if the logged-in user has no edit permission
*/
def changeFeaturedImage(
datasetId: String,
imageId: String,
user: User
): Try[DatasetData.ChangeDatasetImage] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(imageId, "imageId")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
checkDatasetExisitence(datasetId)
if (!existsImage(datasetId, imageId)) {
throw new NotFoundException
}
checkOwnerAccess(datasetId, user)
val myself = persistence.User.find(user.id).get
val timestamp = DateTime.now()
// Mark the target image as featured
turnImageToFeatured(datasetId, imageId, myself, timestamp)
// Unmark all other images as featured
turnOffFeaturedOtherImage(datasetId, imageId, myself, timestamp)
DatasetData.ChangeDatasetImage(imageId)
}
}
}
/**
* Clears the featured flag on all dataset images other than the specified one.
*
* @param datasetId dataset ID
* @param imageId image ID to keep as featured
* @param myself logged-in user
* @param timestamp timestamp
* @param s DB session
* @return number of updated rows
*/
private def turnOffFeaturedOtherImage(
datasetId: String,
imageId: String,
myself: persistence.User,
timestamp: DateTime
)(implicit s: DBSession): Int = {
withSQL {
val di = persistence.DatasetImage.column
update(persistence.DatasetImage)
.set(di.isFeatured -> false, di.updatedBy -> sqls.uuid(myself.id), di.updatedAt -> timestamp)
.where
.ne(di.imageId, sqls.uuid(imageId))
.and
.eq(di.datasetId, sqls.uuid(datasetId))
.and
.isNull(di.deletedAt)
}.update.apply()
}
/**
* Sets the specified image as the dataset's featured image.
*
* @param datasetId dataset ID
* @param imageId image ID
* @param myself logged-in user
* @param timestamp timestamp
* @param s DB session
* @return number of updated rows
*/
private def turnImageToFeatured(
datasetId: String,
imageId: String,
myself: persistence.User,
timestamp: DateTime
)(implicit s: DBSession): Int = {
withSQL {
val di = persistence.DatasetImage.column
update(persistence.DatasetImage)
.set(di.isFeatured -> true, di.updatedBy -> sqls.uuid(myself.id), di.updatedAt -> timestamp)
.where
.eq(di.imageId, sqls.uuid(imageId))
.and
.eq(di.datasetId, sqls.uuid(datasetId))
.and
.isNull(di.deletedAt)
}.update.apply()
}
/**
* Gets file information used to dispatch the internal processing.
*
* @param datasetId dataset ID
* @param fileId file ID
* @param user user information
* @return case object holding the file information
*/
private def getFileInfo(datasetId: String, fileId: String, user: User): Try[DatasetService.FileInfo] = {
logger.trace(LOG_MARKER, "Called getFileInfo, datasetId={}, fileId={}, user={}", datasetId, fileId, user)
val findResult = DB.readOnly { implicit s =>
for {
file <- found(findFile(fileId))
_ <- found(getDataset(datasetId))
_ <- requireAllowDownload(user, datasetId)
} yield {
file
}
}
for {
fileInfo <- findResult
_ <- requireNotWithPassword(fileInfo)
} yield {
val isFileExistsOnLocal = fileInfo.file.localState == SaveStatus.SAVED
val isFileSync = fileInfo.file.localState == SaveStatus.DELETING
val isFileExistsOnS3 = fileInfo.file.s3State == SaveStatus.SYNCHRONIZING
val isDownloadFromLocal = isFileExistsOnLocal || (isFileExistsOnS3 && isFileSync)
(fileInfo, isDownloadFromLocal) match {
case (FileResultNormal(file, path), true) => {
DatasetService.FileInfoLocalNormal(file, path)
}
case (FileResultNormal(file, path), false) => {
DatasetService.FileInfoS3Normal(file, path)
}
case (FileResultZip(file, path, zippedFile), true) => {
DatasetService.FileInfoLocalZipped(file, path, zippedFile)
}
case (FileResultZip(file, path, zippedFile), false) => {
DatasetService.FileInfoS3Zipped(file, path, zippedFile)
}
case _ => {
logger.error(
LOG_MARKER,
"Unknown file info, fileInfo={}, isDownloadFromLocal={}",
fileInfo,
isDownloadFromLocal.toString
)
throw new UnsupportedOperationException
}
}
}
}
/**
* Returns a case object for file download.
*
* @param fileInfo file information
* @param requireData true if a stream returning the file content is required
* @return case object holding the items required for file download
*/
private def getDownloadFileByFileInfo(
fileInfo: DatasetService.FileInfo,
requireData: Boolean = true
): Try[DatasetService.DownloadFile] = {
logger.trace(
LOG_MARKER,
"Called getDownloadFileByFileInfo, fileInfo={}, requireData={}",
fileInfo,
requireData.toString
)
Try {
fileInfo match {
case DatasetService.FileInfoLocalNormal(file, path) => {
val downloadFile = FileManager.downloadFromLocal(path.substring(1))
val is = if (requireData) { Files.newInputStream(downloadFile.toPath) } else { null }
DatasetService.DownloadFileLocalNormal(is, file.name, file.fileSize)
}
case DatasetService.FileInfoS3Normal(file, path) => {
val url = FileManager.generateS3PresignedURL(path.substring(1), file.name, !requireData)
DatasetService.DownloadFileS3Normal(url)
}
case DatasetService.FileInfoLocalZipped(file, path, zippedFile) => {
val is = if (requireData) {
createRangeInputStream(
path = Paths.get(AppConf.fileDir, path.substring(1)),
offset = zippedFile.dataStart,
limit = zippedFile.dataSize
)
} else { null }
val encoding = if (isSJIS(zippedFile.name)) {
Charset.forName("Shift-JIS")
} else {
Charset.forName("UTF-8")
}
try {
val zis = if (requireData) {
createUnzipInputStream(
data = is,
centralHeader = zippedFile.cenHeader,
dataSize = zippedFile.dataSize,
encoding = encoding
)
} else { null }
DatasetService.DownloadFileLocalZipped(zis, zippedFile.name, zippedFile.fileSize)
} catch {
case e: Exception => {
logger.error(LOG_MARKER, "Error occurred.", e)
is.close()
throw e
}
}
}
case DatasetService.FileInfoS3Zipped(file, path, zippedFile) => {
val is = if (requireData) {
FileManager.downloadFromS3(
filePath = path.substring(1),
start = zippedFile.dataStart,
end = zippedFile.dataStart + zippedFile.dataSize - 1
)
} else { null }
val encoding = if (isSJIS(zippedFile.name)) {
Charset.forName("Shift-JIS")
} else {
Charset.forName("UTF-8")
}
try {
val zis = if (requireData) {
createUnzipInputStream(
data = is,
centralHeader = zippedFile.cenHeader,
dataSize = zippedFile.dataSize,
encoding = encoding
)
} else { null }
DatasetService.DownloadFileS3Zipped(zis, zippedFile.name, zippedFile.fileSize)
} catch {
case e: Exception => {
logger.error(LOG_MARKER, "Error occurred.", e)
is.close()
throw e
}
}
}
}
}
}
/**
* Returns a case object for file download.
* The returned case object holds a stream of the file content.
*
* @param datasetId dataset ID
* @param fileId file ID
* @param user user information
* @return case object holding the items required for file download
*/
def getDownloadFileWithStream(
datasetId: String,
fileId: String,
user: User
): Try[DatasetService.DownloadFile] = {
val fileInfo = getFileInfo(datasetId, fileId, user)
fileInfo.flatMap(getDownloadFileByFileInfo(_, true))
}
/**
* Returns a case object for file download.
* The returned case object does not hold a stream of the file content.
*
* @param datasetId dataset ID
* @param fileId file ID
* @param user user information
* @return case object holding the items required for file download
*/
def getDownloadFileWithoutStream(
datasetId: String,
fileId: String,
user: User
): Try[DatasetService.DownloadFile] = {
val fileInfo = getFileInfo(datasetId, fileId, user)
fileInfo.flatMap(getDownloadFileByFileInfo(_, false))
}
  /**
   * Returns the list of file information held by the specified dataset.
   *
   * @param datasetId dataset ID
   * @param limit maximum number of results
   * @param offset search start position
   * @param user user information
   * @return
   *        Success(RangeSlice[DatasetData.DatasetFile])
   *          on success, the list of file information held by the dataset (with total count, limit and offset)
   *        Failure(NullPointerException) if any argument is null
   *        Failure(NotFoundException) if the dataset is not found
   *        Failure(AccessDeniedException) if the logged-in user has no read permission
   */
def getDatasetFiles(
datasetId: String,
limit: Option[Int],
offset: Option[Int],
user: User
): Try[RangeSlice[DatasetData.DatasetFile]] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(limit, "limit")
CheckUtil.checkNull(offset, "offset")
CheckUtil.checkNull(user, "user")
DB.readOnly { implicit s =>
val dataset = checkDatasetExisitence(datasetId)
val permission = checkReadPermission(datasetId, user)
val validatedLimit = limit.map { x =>
if (x < 0) { 0 } else { x }
}.getOrElse(AppConf.fileLimit)
val validatedOffset = offset.getOrElse(0)
val count = getFileAmount(datasetId)
        // Return an empty list when offset is less than 0
if (validatedOffset < 0) {
RangeSlice(RangeSliceSummary(count, 0, validatedOffset), Seq.empty[DatasetData.DatasetFile])
} else {
val files = getFiles(datasetId, Seq.empty, permission, validatedLimit, validatedOffset)
RangeSlice(RangeSliceSummary(count, files.size, validatedOffset), files)
}
}
}
}
  /**
   * Returns the list of file information contained in a Zip file of the specified dataset.
   *
   * @param datasetId dataset ID
   * @param fileId file ID (of the zip file)
   * @param limit maximum number of results
   * @param offset search start position
   * @param user user information
   * @return
   *        Success(RangeSlice[DatasetData.DatasetZipedFile])
   *          on success, the list of file information contained in the Zip file (with total count, limit and offset)
   *        Failure(NullPointerException) if any argument is null
   *        Failure(NotFoundException) if the dataset is not found
   *        Failure(AccessDeniedException) if the logged-in user has no read permission
   *        Failure(BadRequestException) if the file is not found
   *        Failure(BadRequestException) if invoked on a file that is not a Zip file
   */
def getDatasetZippedFiles(
datasetId: String,
fileId: String,
limit: Option[Int],
offset: Option[Int],
user: User
): Try[RangeSlice[DatasetData.DatasetZipedFile]] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(fileId, "fileId")
CheckUtil.checkNull(limit, "limit")
CheckUtil.checkNull(offset, "offset")
CheckUtil.checkNull(user, "user")
DB.readOnly { implicit s =>
val dataset = checkDatasetExisitence(datasetId)
val file = persistence.File.find(fileId)
val fileHistory = file.flatMap(file => persistence.FileHistory.find(file.historyId))
val history = fileHistory match {
case None => {
throw new BadRequestException(resource.getString(ResourceNames.FILE_NOT_FOUND))
}
case Some(x) if !x.isZip => {
throw new BadRequestException(resource.getString(ResourceNames.CANT_TAKE_OUT_BECAUSE_NOT_ZIP))
}
case Some(x) if x.isZip => {
x
}
}
val permission = checkReadPermission(datasetId, user)
val validatedLimit = limit.map { x =>
if (x < 0) { 0 } else { x }
}.getOrElse(AppConf.fileLimit)
val validatedOffset = offset.getOrElse(0)
val zipAmounts = {
if (fileHistory.get.isZip) {
getZippedFileAmounts(Seq(history.id)).headOption.map(x => x._2).getOrElse(0)
} else {
0
}
}
        // Return an empty list when offset is less than 0
if (validatedOffset < 0) {
RangeSlice(
RangeSliceSummary(zipAmounts, 0, validatedOffset),
Seq.empty[DatasetData.DatasetZipedFile]
)
} else {
val files = getZippedFiles(datasetId, history.id, permission, validatedLimit, validatedOffset)
RangeSlice(RangeSliceSummary(zipAmounts, files.size, validatedOffset), files)
}
}
}
}
  /**
   * Gets the files contained in a Zip file.
   *
   * @param datasetId dataset ID
   * @param historyId file history ID
   * @param permission access level of the logged-in user
   * @param limit maximum number of results
   * @param offset search start position
   * @param s DB session
   * @return the retrieved result
   */
private def getZippedFiles(
datasetId: String,
historyId: String,
permission: Int,
limit: Int,
offset: Int
)(implicit s: DBSession): Seq[DatasetZipedFile] = {
val zf = persistence.ZipedFiles.zf
val zipedFiles = withSQL {
select
.from(ZipedFiles as zf)
.where
.eq(zf.historyId, sqls.uuid(historyId))
.orderBy(zf.name.asc)
.offset(offset)
.limit(limit)
}.map(persistence.ZipedFiles(zf.resultName)).list.apply()
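    // Entries of password-protected zip archives are not exposed; return nothing if any entry is protected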
if (zipedFiles.exists(hasPassword)) {
return Seq.empty
}
val canDownload = permission >= UserAndGroupAccessLevel.ALLOW_DOWNLOAD
zipedFiles.map { x =>
DatasetZipedFile(
id = x.id,
name = x.name,
size = if (canDownload) Some(x.fileSize) else None,
url = if (canDownload) Some(AppConf.fileDownloadRoot + datasetId + "/" + x.id) else None
)
}.toSeq
}
  /**
   * Gets the number of files contained in Zip files.
   *
   * @param historyIds list of file history IDs
   * @param s DB session
   * @return the retrieved result
   */
private def getZippedFileAmounts(historyIds: Seq[String])(implicit s: DBSession): Seq[(String, Int)] = {
val zf = persistence.ZipedFiles.zf
withSQL {
select(zf.result.historyId, sqls.count(zf.id))
.from(ZipedFiles as zf)
.where.in(
zf.historyId,
historyIds.map(x => sqls.uuid(x))
)
.groupBy(zf.historyId)
}.map { rs =>
(
rs.string(1),
rs.int(2)
)
}.list().apply()
}
  /**
   * Adds an app to the specified dataset.
   *
   * @param datasetId dataset ID
   * @param description description of the app
   * @param file JAR file of the app
   * @param user user information
   * @return
   *        Success(App) on success, the app information
   *        Failure(NullPointerException) if datasetId, file or user is null
   *        Failure(NotFoundException) if the dataset does not exist
   *        Failure(AccessDeniedException) if the user has no permission
   *        Failure(IOException) if an I/O error occurs while saving the file
   */
def addApp(datasetId: String, description: String, file: FileItem, user: User): Try[DatasetData.App] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(file, "file")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
checkDatasetExisitence(datasetId)
checkOwnerAccess(datasetId, user)
val timestamp = DateTime.now()
val appId = UUID.randomUUID.toString
val fileName = file.getName
persistence.App.create(
id = appId,
name = appNameOf(fileName),
description = Option(description),
datasetId = Some(datasetId),
createdBy = user.id,
createdAt = timestamp,
updatedBy = user.id,
updatedAt = timestamp
)
AppManager.upload(appId, file)
getApp(datasetId).getOrElse(throw new NotFoundException())
}
}
}
  /**
   * Gets the app information of the specified dataset.
   *
   * @param datasetId dataset ID
   * @param user user information
   * @return
   *        Success(App) on success, the app information
   *        Failure(NullPointerException) if datasetId or user is null
   *        Failure(AccessDeniedException) if the user has no administration permission
   */
def getApp(datasetId: String, user: User): Try[RangeSlice[DatasetData.App]] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(user, "user")
DB.readOnly { implicit s =>
checkDatasetExisitence(datasetId)
checkOwnerAccess(datasetId, user)
getApp(datasetId).fold(RangeSlice(RangeSliceSummary(0, 0), Seq.empty[DatasetData.App])) { app =>
RangeSlice(RangeSliceSummary(1, 1), Seq(app))
}
}
}
}
  /**
   * Updates the specified app.
   *
   * @param datasetId dataset ID
   * @param appId app ID
   * @param description description of the app
   * @param file JAR file of the app
   * @param user user information
   * @return
   *        Success(App) on success, the app information
   *        Failure(NullPointerException) if datasetId, appId, file or user is null
   *        Failure(NotFoundException) if the dataset or the app does not exist
   *        Failure(AccessDeniedException) if the user has no administration permission
   *        Failure(IOException) if an I/O error occurs while saving the file
   */
def updateApp(
datasetId: String, appId: String, description: String, file: Option[FileItem], user: User
): Try[DatasetData.App] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(appId, "appId")
CheckUtil.checkNull(user, "user")
DB.localTx { implicit s =>
checkDatasetExisitence(datasetId)
getApp(datasetId).orElse(throw new NotFoundException())
checkOwnerAccess(datasetId, user)
val timestamp = DateTime.now()
withSQL {
val a = persistence.App.column
if (file.isEmpty) {
          // When no file is specified
update(persistence.App)
.set(
a.description -> description,
a.datasetId -> sqls.uuid(datasetId),
a.updatedBy -> sqls.uuid(user.id),
a.updatedAt -> timestamp
)
.where
.eq(a.id, sqls.uuid(appId))
} else {
          // When a file is specified
val fileName = file.get.getName
update(persistence.App)
.set(
a.name -> appNameOf(fileName),
a.description -> description,
a.datasetId -> sqls.uuid(datasetId),
a.updatedBy -> sqls.uuid(user.id),
a.updatedAt -> timestamp
)
.where
.eq(a.id, sqls.uuid(appId))
}
}.update.apply()
if (file.nonEmpty) AppManager.upload(appId, file.get)
getApp(datasetId).getOrElse(throw new NotFoundException())
}
}
}
  /**
   * Physically deletes the specified app.
   *
   * @param datasetId dataset ID
   * @param user user information
   * @return
   *        Success(Unit) on successful deletion
   *        Failure(NullPointerException) if datasetId or user is null
   *        Failure(NotFoundException) if the dataset or the app does not exist
   *        Failure(AccessDeniedException) if the user has no administration permission
   */
def deleteApp(datasetId: String, user: User): Try[Unit] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(user, "user")
val appId = DB.localTx { implicit s =>
checkDatasetExisitence(datasetId)
val app = getApp(datasetId).getOrElse(throw new NotFoundException())
checkOwnerAccess(datasetId, user)
withSQL {
val a = persistence.App.column
delete
.from(persistence.App)
.where
.eq(a.id, sqls.uuid(app.id))
.and
.isNull(a.deletedAt)
}.update.apply()
app.id
}
      // Delete the JAR file
AppManager.delete(appId)
}
}
  /**
   * Gets the app name from a file name.
   *
   * @param fileName file name
   * @return the app name
   */
def appNameOf(fileName: String): String = {
fileName
}
  /**
   * Gets the app information.
   *
   * @param datasetId dataset ID
   * @param s DB session
   * @return the app information
   */
private def getApp(datasetId: String)(implicit s: DBSession): Option[DatasetData.App] = {
val a = persistence.App.syntax("a")
withSQL {
select(a.result.*)
.from(persistence.App as a)
.where
.eq(a.datasetId, sqls.uuid(datasetId))
.and
.isNull(a.deletedAt)
}.map { rs =>
val app = persistence.App(a.resultName)(rs)
DatasetData.App(
id = app.id,
name = app.name,
description = app.description.getOrElse(""),
datasetId = datasetId,
lastModified = app.updatedAt
)
}.single.apply()
}
  /**
   * Gets the URL of the app configured for the dataset.
   *
   * @param datasetId dataset ID
   * @param user user information
   * @return
   *        Some(String) URL of the app's JNLP file
   *        None if no app is configured, the user has no API key, the user has no permission, or the dataset does not exist
   */
private def getAppUrl(datasetId: String, user: User): Option[String] = {
Try {
DB.readOnly { implicit s =>
checkDatasetExisitence(datasetId)
for {
_ <- requireAllowDownload(user, datasetId).toOption
_ <- getApp(datasetId)
_ <- getUserKey(user)
} yield {
AppManager.getJnlpUrl(datasetId, user.id)
}
}
}.getOrElse(None)
}
  /**
   * Gets the user's API key information.
   *
   * @param user user
   * @param s DB session
   * @return the user's API key information, or None if none is set
   */
private def getUserKey(user: User)(implicit s: DBSession): Option[DatasetService.AppUser] = {
val ak = persistence.ApiKey.syntax("ak")
val u = persistence.User.syntax("u")
withSQL {
select(ak.result.*)
.from(persistence.ApiKey as ak)
.where(
sqls.toAndConditionOpt(
Some(sqls.eq(ak.userId, sqls.uuid(user.id))),
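          // Guest users have no account row to check; for regular users, require that the owning account is not disabled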
if (user.isGuest) {
None
} else {
Some(
sqls.exists(
select
.from(persistence.User as u)
.where
.eq(u.id, ak.userId)
.and
.eq(u.disabled, false)
.toSQLSyntax
)
)
},
Some(sqls.isNull(ak.deletedAt)),
Some(sqls.isNull(ak.deletedBy))
)
)
.limit(1)
}.map { rs =>
DatasetService.AppUser(
id = user.id,
apiKey = rs.string(ak.resultName.apiKey),
secretKey = rs.string(ak.resultName.secretKey)
)
}.single.apply()
}
  /**
   * Gets the JNLP file information of the specified app.
   *
   * @param datasetId dataset ID
   * @param userId user ID
   * @return
   *        Success(AppJnlp) on success, the app's JNLP file information
   *        Failure(NullPointerException) if datasetId or userId is null
   *        Failure(NotFoundException)
   *            if the user does not exist or is disabled,
   *            the dataset does not exist,
   *            no app is configured for the dataset,
   *            or the user has no API key
   *        Failure(AccessDeniedException) if the user has no download permission
   */
def getAppJnlp(datasetId: String, userId: String): Try[DatasetData.AppJnlp] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(userId, "userId")
val db = DB.readOnly { implicit s =>
for {
user <- found(getUser(userId))
dataset <- found(getDataset(datasetId))
app <- found(getApp(datasetId))
uk <- found(getUserKey(user))
_ <- requireAllowDownload(user, datasetId)
} yield {
(dataset, app, uk)
}
}
for {
(dataset, app, uk) <- db
} yield {
val content = AppManager.getJnlp(
datasetId = datasetId,
userId = userId,
apiKey = uk.apiKey,
secretKey = uk.secretKey
)
DatasetData.AppJnlp(
id = app.id,
name = dataset.name,
datasetId = datasetId,
lastModified = app.lastModified,
content = content
)
}
}.flatMap(x => x)
}
  /**
   * Gets the JAR file information of the specified app.
   *
   * @param datasetId dataset ID
   * @param userId user ID
   * @return
   *        Success(AppFile) on success, the app's JAR file information
   *        Failure(NullPointerException) if datasetId or userId is null
   *        Failure(NotFoundException)
   *            if the user does not exist or is disabled,
   *            the dataset does not exist,
   *            no app is configured for the dataset,
   *            or the user has no API key
   *        Failure(AccessDeniedException) if the user has no download permission
   */
def getAppFile(datasetId: String, userId: String): Try[DatasetData.AppFile] = {
Try {
CheckUtil.checkNull(datasetId, "datasetId")
CheckUtil.checkNull(userId, "userId")
val db = DB.readOnly { implicit s =>
for {
user <- found(getUser(userId))
dataset <- found(getDataset(datasetId))
app <- found(getApp(datasetId))
_ <- found(getUserKey(user))
_ <- requireAllowDownload(user, datasetId)
} yield {
(dataset, app)
}
}
for {
(dataset, app) <- db
} yield {
val file = AppManager.download(app.id)
val size = file.length
val content = Files.newInputStream(file.toPath)
DatasetData.AppFile(
appId = app.id,
lastModified = app.lastModified,
size = size,
content = content
)
}
}.flatMap(x => x)
}
  /**
   * Gets the user with the specified ID.
   *
   * @param id user ID
   * @return the retrieved user, or None if the user does not exist or is disabled
   */
private def getUser(id: String)(implicit s: DBSession): Option[User] = {
if (id == AppConf.guestUser.id) {
return Some(AppConf.guestUser)
}
val u = persistence.User.u
val ma = persistence.MailAddress.ma
withSQL {
select(u.result.*, ma.result.address)
.from(persistence.User as u)
.innerJoin(persistence.MailAddress as ma).on(u.id, ma.userId)
.where
.eq(u.id, sqls.uuid(id))
.and
.eq(u.disabled, false)
}.map { rs =>
val user = persistence.User(u.resultName)(rs)
val address = rs.string(ma.resultName.address)
User(user, address)
}.single.apply()
}
  /**
   * Gets the list of errors of the specified dataset.
   *
   * @param id dataset ID
   * @param user user information
   * @return
   *        Success(Seq[DatasetData.DatasetFile]) on success, the list of errors
   *        Failure(NullPointerException) if any argument is null
   *        Failure(NotFoundException) if the dataset is not found
   *        Failure(AccessDeniedException) if the logged-in user has no read permission
   */
def getFileHistoryErrors(id: String, user: User): Try[Seq[DatasetData.DatasetFile]] = {
val ret = Try {
CheckUtil.checkNull(id, "id")
CheckUtil.checkNull(user, "user")
DB.readOnly { implicit s =>
val dataset = checkDatasetExisitence(id)
val permission = checkReadPermission(id, user)
val histories = findFileHistoryErrors(id, user)
histories.getOrElse(List.empty).map {
x =>
DatasetData.DatasetFile(
id = x.id,
name = x.name,
description = x.description,
size = Some(x.fileSize),
url = Some(AppConf.fileDownloadRoot + id + "/" + x.id),
createdBy = getUser(x.createdBy),
createdAt = x.createdAt.toString,
updatedBy = getUser(x.updatedBy),
updatedAt = x.updatedAt.toString,
isZip = false,
zipedFiles = Seq.empty,
zipCount = 0
)
}
}
}
    // Log the result
ret match {
case Success(_) => {
        logger.debug("Succeeded getFileHistoryErrors, id = {}, user = {}", id, user)
}
case Failure(x) => {
logger.info("Failed getFileHistoryErrors, id = {}, user = {}, errorClass = {}, errorMessage = {}", id, user, x.getClass, x.getMessage)
}
}
ret
}
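  /**
   * Finds the files of the specified dataset whose file histories have an associated error record.
   *
   * @param id dataset ID
   * @param user user information
   * @return the list of files with file history errors
   */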
private def findFileHistoryErrors(id: String, user: User): Try[List[persistence.File]] = {
Try {
DB.localTx { implicit s =>
val fhe = persistence.FileHistoryError.fhe
val fh = persistence.FileHistory.fh
val f = persistence.File.f
withSQL {
select
.from(persistence.File as f)
.innerJoin(persistence.FileHistory as fh)
.on(
sqls.eq(fh.fileId, f.id)
.and.isNull(fh.deletedAt)
)
.innerJoin(persistence.FileHistoryError as fhe)
.on(
sqls.eq(fhe.historyId, fh.id)
.and.isNull(fhe.deletedAt)
)
.where.eq(f.datasetId, sqls.uuid(id))
.and.isNull(f.deletedAt)
}.map(persistence.File(f.resultName)).list.apply()
}
}
}
}
object DatasetService {
  /**
   * Case object used to distinguish processing paths within DatasetService.
   * Holds file information.
   */
sealed trait FileInfo
  /**
   * File info: a regular file stored locally
   *
   * @param file file information
   * @param path file path
   */
case class FileInfoLocalNormal(file: persistence.File, path: String) extends FileInfo
  /**
   * File info: a regular file stored on S3
   *
   * @param file file information
   * @param path file path
   */
case class FileInfoS3Normal(file: persistence.File, path: String) extends FileInfo
  /**
   * File info: an individual file inside a ZIP file stored locally
   *
   * @param file file information
   * @param path file path
   * @param zippedFile information of the file entry inside the ZIP archive
   */
case class FileInfoLocalZipped(
file: persistence.File,
path: String,
zippedFile: persistence.ZipedFiles
) extends FileInfo
  /**
   * File info: an individual file inside a ZIP file stored on S3
   *
   * @param file file information
   * @param path file path
   * @param zippedFile information of the file entry inside the ZIP archive
   */
case class FileInfoS3Zipped(
file: persistence.File,
path: String,
zippedFile: persistence.ZipedFiles
) extends FileInfo
  /**
   * Case object holding the items required for file download
   */
sealed trait DownloadFile
  /**
   * File download: a regular file stored locally
   *
   * @param fileData stream over the file contents
   * @param fileName file name
   * @param fileSize file size
   */
case class DownloadFileLocalNormal(
fileData: InputStream,
fileName: String,
fileSize: Long
) extends DownloadFile
  /**
   * File download: an individual file inside a ZIP file stored locally
   *
   * @param fileData stream over the file contents
   * @param fileName file name
   * @param fileSize file size
   */
case class DownloadFileLocalZipped(
fileData: InputStream,
fileName: String,
fileSize: Long
) extends DownloadFile
  /**
   * File download: a regular file stored on S3
   *
   * @param redirectUrl redirect URL to the file on S3
   */
case class DownloadFileS3Normal(redirectUrl: String) extends DownloadFile
  /**
   * File download: an individual file inside a ZIP file stored on S3
   *
   * @param fileData stream over the file contents
   * @param fileName file name
   * @param fileSize file size
   */
case class DownloadFileS3Zipped(
fileData: InputStream,
fileName: String,
fileSize: Long
) extends DownloadFile
  /** Deletion states used when searching for apps */
  object GetAppDeletedTypes {
    /** Exclude logically deleted apps */
    val LOGICAL_DELETED_EXCLUDE = 0
    /** Include logically deleted apps */
    val LOGICAL_DELETED_INCLUDE = 1
    /** Logically deleted apps only */
    val LOGICAL_DELETED_ONLY = 2
  }
  /** Default deletion state used when searching for apps */
  val DEFAULT_GET_APP_DELETED_TYPE = GetAppDeletedTypes.LOGICAL_DELETED_EXCLUDE
  /**
   * A user's API key information
   *
   * @param id user ID
   * @param apiKey API key
   * @param secretKey secret key
   */
case class AppUser(
id: String,
apiKey: String,
secretKey: String
)
}
| nkawa/dsmoq | server/apiServer/src/main/scala/dsmoq/services/DatasetService.scala | Scala | apache-2.0 | 202,087 |
package sample.stream
import java.net.InetSocketAddress
import scala.concurrent.duration._
import scala.util.Failure
import scala.util.Success
import akka.actor.ActorSystem
import akka.pattern.ask
import akka.io.IO
import akka.stream.FlowMaterializer
import akka.stream.MaterializerSettings
import akka.stream.io.StreamTcp
import akka.stream.scaladsl.Flow
import akka.util.ByteString
import akka.util.Timeout
object TcpClient {
/**
*
* Use parameters `127.0.0.1 6001` to start client connecting to
* server on 127.0.0.1:6001.
*
*/
def main(args: Array[String]): Unit = {
val serverAddress =
      if (args.length == 2) new InetSocketAddress(args(0), args(1).toInt)
else new InetSocketAddress("127.0.0.1", 6000)
val system = ActorSystem("Client")
client(system, serverAddress)
}
def client(system: ActorSystem, serverAddress: InetSocketAddress): Unit = {
implicit val sys = system
implicit val ec = system.dispatcher
val settings = MaterializerSettings()
val materializer = FlowMaterializer(settings)
implicit val timeout = Timeout(5.seconds)
val clientFuture = (IO(StreamTcp) ? StreamTcp.Connect(remoteAddress = serverAddress, settings = settings))
clientFuture.onSuccess {
case clientBinding: StreamTcp.OutgoingTcpConnection =>
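        // Send the letters a-z to the server, fold the echoed bytes back into characters and print the result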
val testInput = ('a' to 'z').map(ByteString(_))
Flow(testInput).toProducer(materializer).produceTo(clientBinding.outputStream)
Flow(clientBinding.inputStream).fold(Vector.empty[Char]) { (acc, in) ⇒ acc ++ in.map(_.asInstanceOf[Char])}.
foreach(result => println(s"Result: " + result.mkString("[", ", ", "]"))).
onComplete(materializer) {
case Success(_) =>
println("Shutting down client")
system.shutdown()
case Failure(e) =>
println("Failure: " + e.getMessage)
system.shutdown()
}
}
clientFuture.onFailure {
case e: Throwable =>
println(s"Client could not connect to $serverAddress: ${e.getMessage}")
system.shutdown()
}
}
} | retroryan/streams-workshop | src/examples/sample/stream/TcpClient.scala | Scala | cc0-1.0 | 2,100 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.ui
import org.apache.spark.SparkException
import org.apache.spark.internal.Logging
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.ui.{SparkUI, SparkUITab}
/**
* Spark Web UI tab that shows statistics of a streaming job.
* This assumes the given SparkContext has enabled its SparkUI.
*/
private[spark] class StreamingTab(val ssc: StreamingContext)
extends SparkUITab(StreamingTab.getSparkUI(ssc), "streaming") with Logging {
import StreamingTab._
private val STATIC_RESOURCE_DIR = "org/apache/spark/streaming/ui/static"
val parent = getSparkUI(ssc)
val listener = ssc.progressListener
ssc.addStreamingListener(listener)
ssc.sc.addSparkListener(listener)
parent.setStreamingJobProgressListener(listener)
attachPage(new StreamingPage(this))
attachPage(new BatchPage(this))
def attach() {
getSparkUI(ssc).attachTab(this)
getSparkUI(ssc).addStaticHandler(STATIC_RESOURCE_DIR, "/static/streaming")
}
def detach() {
getSparkUI(ssc).detachTab(this)
getSparkUI(ssc).detachHandler("/static/streaming")
}
}
private object StreamingTab {
def getSparkUI(ssc: StreamingContext): SparkUI = {
ssc.sc.ui.getOrElse {
throw new SparkException("Parent SparkUI to attach this tab to not found!")
}
}
}
| bravo-zhang/spark | streaming/src/main/scala/org/apache/spark/streaming/ui/StreamingTab.scala | Scala | apache-2.0 | 2,120 |
package scala.util.concurrent.locks
import java.util.concurrent.TimeUnit
import java.util.concurrent.locks.{Lock => JLock, Condition => JCondition}
import util.concurrent.Duration
class AbstractJavaCondition(cond: => Boolean, val underlying: JCondition)
extends AbstractCondition {
override def condition = cond
override def signal() = underlying.signal
override def signalAll() = underlying.signalAll
override lazy val interruptible: Condition =
new JavaCondition(cond, underlying) with InnerCondition {
override lazy val interruptible = this
}
override lazy val uninterruptible: Condition =
new UninterruptibleJavaCondition(cond, underlying) with InnerCondition {
override lazy val uninterruptible = this
}
override def attemptFor(duration: Duration): TryingCondition =
new TryingJavaCondition(duration, cond, underlying) with InnerCondition
trait InnerCondition extends AbstractJavaCondition {
override lazy val interruptible: Condition =
AbstractJavaCondition.this.interruptible
override lazy val uninterruptible: Condition =
AbstractJavaCondition.this.uninterruptible
}
}
class JavaCondition(cond: => Boolean, override val underlying: JCondition)
extends AbstractJavaCondition(cond, underlying) with Condition {
override def await() = underlying.await
override lazy val interruptible = this
}
class UninterruptibleJavaCondition(cond: => Boolean,
override val underlying: JCondition)
extends AbstractJavaCondition(cond, underlying) with Condition {
override def await() = underlying.awaitUninterruptibly
override lazy val uninterruptible = this
}
class TryingJavaCondition(duration: Duration, cond: => Boolean,
override val underlying: JCondition)
extends AbstractJavaCondition(cond, underlying) with TryingCondition {
override def await() = underlying.await(duration.toJavaNanos, TimeUnit.NANOSECONDS)
}
| joshcough/Sweet | src/main/scala/scala/util/concurrent/locks/JavaCondition.scala | Scala | lgpl-2.1 | 1,899 |
package com.blinkboxbooks.mimir.export
import java.sql.Date
import org.squeryl.Schema
import org.squeryl.PrimitiveTypeMode._
//
// Objects for schemas.
//
case class Book(id: String, publisherId: String, discount: Float, publicationDate: Date,
title: String, description: Option[String], languageCode: Option[String], numberOfSections: Int) {
def this() = this("", "", 0, new Date(0), "", None, None, 0)
}
case class Publisher(id: Int, name: String,
implementsAgencyPricingModel: Boolean, countryCode: Option[String]) {
def this() = this(0, "", false, None)
}
case class Genre(id: Int, parentId: Option[Int], bisacCode: Option[String], name: Option[String]) {
def this() = this(0, None, None, None)
}
case class MapBookToGenre(isbn: String, genreId: Int) {
def this() = this("", 0)
}
case class CurrencyRate(fromCurrency: String, toCurrency: String, rate: BigDecimal) {
def this() = this("", "", 0)
}
case class Contributor(id: Int, fullName: String, firstName: Option[String], lastName: Option[String], guid: String, imageUrl: Option[String]) {
def this() = this(0, "", None, None, "", None)
}
case class MapBookToContributor(contributorId: Int, isbn: String, role: Int) {
def this() = this(0, "", 0)
}
case class BookMedia(id: Int, isbn: String, url: Option[String], kind: Int){
def this() = this(0, "", Some(""), 0)
}
// Enriched Output Classes
// They contain additional fields that are not in the source data.
case class OutputBook(id: String, publisherId: String, discount: Float, publicationDate: Date, title: String, description: Option[String],
languageCode: Option[String], numberOfSections: Int, coverUrl: Option[String]) {
def this() = this("", "", 0, new Date(0), "", None, None, 0, None)
}
case class OutputContributor(id: Int, fullName: String, firstName: Option[String], lastName: Option[String], guid: String, imageUrl: Option[String], url: Option[String]) {
def this() = this(0, "", None, None, "", None, None)
}
// Database enum values
object BookMedia {
import java.net.URL
val BOOK_COVER_MEDIA_ID = 0
val FULL_EPUB_MEDIA_ID = 1
val SAMPLE_EPUB_MEDIA_ID = 2
def fullsizeJpgUrl(mediaUrl: Option[String]):Option[String] = mediaUrl.map { url =>
try { new URL(url) } catch {
case ex: Exception =>
return None
}
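    // URLs already ending in .jpg are used as-is; otherwise a "params;v=0" segment is inserted after the host and ".jpg" is appended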
url.takeRight(4) match {
case ".jpg" => url
case _ => url.replaceFirst("([^/])/([^/])", "$1/params;v=0/$2") + ".jpg"
}
}
}
object Contributor {
import java.text.Normalizer
def generateContributorUrl(baseUrl: String, guid: String, fullName: String): Option[String] = {
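    // Build a URL slug from the full name: strip diacritics, lowercase, hyphenate spaces and drop other characters;
    // fall back to "details" when nothing remains, and cap the resulting URL at 255 characters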
val normalizedName = Normalizer.normalize(fullName, java.text.Normalizer.Form.NFD)
.toLowerCase()
.replaceAll(" ", "-")
.replaceAll("[^a-z-]+", "")
val normalizedNameWithDefault = if (normalizedName.isEmpty) "details" else normalizedName
Some(Array(baseUrl, guid, normalizedNameWithDefault).mkString("/").take(255))
}
}
//
// Input schema definitions.
//
// These are not full definitions of the corresponding tables, they just specify the columns
// that we read and the info needed to read them (e.g. column name but not details about column types).
//
object ShopSchema extends Schema {
val bookData = table[Book]("dat_book")
on(bookData)(b => declare(
b.id is (named("isbn")),
b.publisherId is (named("publisher_id")),
b.discount is (named("discount")),
b.publicationDate is (named("publication_date")),
  // The shop DB has a larger size for this field than the reporting DB, hence replicate this in tests.
b.description is dbType(s"varchar(65535)"),
b.languageCode is (named("language_code")),
b.numberOfSections is (named("num_sections"))))
val publisherData = table[Publisher]("dat_publisher")
on(publisherData)(p => declare(
p.implementsAgencyPricingModel is (named("implements_agency_pricing_model")),
p.countryCode is (named("country_code"))))
val contributorData = table[Contributor]("dat_contributor")
on(contributorData)(c => declare(
c.guid is (named("guid")),
c.fullName is (named("full_name")),
c.imageUrl is (named("photo")),
c.firstName is (named("first_name")),
c.lastName is (named("last_name"))))
val mapBookContributorData = table[MapBookToContributor]("map_book_contributor")
on(mapBookContributorData)(m => declare(
m.contributorId is (named("contributor_id"))))
val genreData = table[Genre]("dat_genre")
on(genreData)(g => declare(
g.parentId is (named("parent_id")),
g.bisacCode is (named("bisac_code"))))
val bookGenreData = table[MapBookToGenre]("map_book_genre")
on(bookGenreData)(g => declare(
g.genreId is (named("genre_id"))))
val currencyRateData = table[CurrencyRate]("dat_currency_rate")
on(currencyRateData)(e => declare(
e.fromCurrency is (named("from_currency")),
e.toCurrency is (named("to_currency"))))
val bookMediaData = table[BookMedia]("dat_book_media")
on(bookMediaData)(m => declare(
m.id is named("id"),
m.isbn is named("isbn"),
m.url is named("url"),
m.kind is named("type")
))
}
case class Clubcard(id: Int, cardNumber: String) {
def this() = this(0, "")
}
case class ClubcardUser(id: Int, userId: String) {
def this() = this(0, "")
}
case class ClubcardForUser(cardId: Int, userId: Int) {
def this() = this(0, 0)
}
object ClubcardSchema extends Schema {
val clubcards = table[Clubcard]("club_card")
on(clubcards)(c => declare(
c.cardNumber is (named("card_number"))))
val users = table[ClubcardUser]("users")
on(users)(u => declare(
u.userId is (named("user_id"))))
val clubcardUsers = table[ClubcardForUser]("club_card_users")
on(clubcardUsers)(cu => declare(
cu.cardId is (named("card_id")),
cu.userId is (named("user_id"))))
}
//
// Output schema definitions.
//
case class UserClubcardInfo(cardId: String, userId: Int) {
def this() = this("", 0)
}
object ReportingSchema extends Schema {
val MAX_DESCRIPTION_LENGTH = 20000
val booksOutput = table[OutputBook]("books")
on(booksOutput)(b => declare(
b.id is (named("isbn")),
b.publisherId is (named("publisher_id")),
b.discount is (named("discount")),
b.publicationDate is (named("publication_date"), dbType("DATE")),
b.title is dbType("VARCHAR(255)"),
b.description is dbType(s"varchar($MAX_DESCRIPTION_LENGTH)"),
b.languageCode is (named("language_code"), dbType("CHAR(2)")),
b.numberOfSections is (named("number_of_sections")),
b.coverUrl is named("cover_url")))
val publishersOutput = table[Publisher]("publishers")
on(publishersOutput)(p => declare(
p.implementsAgencyPricingModel is (named("implements_agency_pricing_model")),
p.name is (dbType("VARCHAR(128)")),
p.countryCode is (named("country_code"), dbType("VARCHAR(4)"))))
val userClubcardsOutput = table[UserClubcardInfo]("user_clubcards")
on(userClubcardsOutput)(c => declare(
c.cardId is (named("clubcard_id"), dbType("VARCHAR(20)")),
c.userId is (named("user_id"))))
val currencyRatesOutput = table[CurrencyRate]("currency_rates")
on(currencyRatesOutput)(e => declare(
e.fromCurrency is (named("from_currency"), dbType("VARCHAR(5)")),
e.toCurrency is (named("to_currency"), dbType("VARCHAR(5)"))))
val contributorsOutput = table[OutputContributor]("contributors")
on(contributorsOutput)(c => declare(
c.fullName is (named("full_name"), dbType("VARCHAR(256)")),
c.firstName is (named("first_name"), dbType("VARCHAR(256)")),
c.lastName is (named("last_name"), dbType("VARCHAR(256)")),
c.guid is (named("guid"), dbType("VARCHAR(256)")),
c.url is (named("url"), dbType("VARCHAR(256)")),
c.imageUrl is (named("image_url"), dbType("VARCHAR(256)"))))
val contributorRolesOutput = table[MapBookToContributor]("contributor_roles")
on(contributorRolesOutput)(m => declare(
m.contributorId is (named("contributor_id"))))
val genresOutput = table[Genre]("genres")
on(genresOutput)(g => declare(
g.parentId is (named("parent_id")),
g.bisacCode is (named("bisac_code"), dbType("VARCHAR(8)"))))
val bookGenresOutput = table[MapBookToGenre]("book_genres")
on(bookGenresOutput)(g => declare(
g.genreId is (named("genre_id")),
g.isbn is (dbType("VARCHAR(13)"))))
// printDdl(str => println(str))
}
| blinkboxbooks/data-exporter-service.scala | src/main/scala/com/blinkboxbooks/mimir/export/Schemas.scala | Scala | mit | 8,332 |
// $Id$
//package scala.concurrent
package com.biosimilarity.lift.lib.concurrent
import java.util.concurrent.atomic._
import jsr166y._
import jsr166y.forkjoin._
trait FJTaskRunners extends TaskRunnersBase {
type TaskRunner = FJTaskRunner
def numWorkers: Int = java.lang.Runtime.getRuntime().availableProcessors()
class FJTaskRunner(n: Int) extends AbstractTaskRunner {
val pool = new ForkJoinPool(n)
def submitTask(f:Task) {
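      // Wrap the task in a RecursiveAction so it can run on the current fork-join thread or be handed off to the pool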
FJTaskWrapper.runOnCurrentThreadOrPool(new RecursiveAction {
def compute() = try {
f()
} catch { case e : Throwable => e.printStackTrace() }
// TODO: exception handling
}, pool)
}
def waitUntilFinished() {
// Thread.sleep(24*60*60*1000)
pool.awaitTermination(scala.Long.MaxValue,java.util.concurrent.TimeUnit.SECONDS)
// FIXME: doesn't seem to work (?)
}
}
def createDefaultTaskRunner() = new FJTaskRunner(numWorkers)
}
| leithaus/strategies | src/main/scala/com/biosimilarity/lib/concurrent/FJTaskRunners.scala | Scala | cc0-1.0 | 960 |
package s3.website
import s3.website.model.Files.listSiteFiles
import s3.website.model._
import s3.website.Ruby.rubyRegexMatches
import scala.concurrent.{ExecutionContextExecutor, Future}
import scala.util.{Failure, Success, Try}
import java.io.File
object UploadHelper {
type FutureUploads = Future[Either[ErrorReport, Seq[Upload]]]
def resolveUploads(s3FilesFuture: Future[Either[ErrorReport, Seq[S3File]]])
(implicit site: Site, pushOptions: PushOptions, logger: Logger, executor: ExecutionContextExecutor): FutureUploads =
resolveUploadsAgainstGetBucketResponse(s3FilesFuture)
private def resolveUploadsAgainstGetBucketResponse(s3FilesFuture: Future[Either[ErrorReport, Seq[S3File]]])
(implicit site: Site,
pushOptions: PushOptions,
logger: Logger,
executor: ExecutionContextExecutor): FutureUploads =
s3FilesFuture.map { errorOrS3Files =>
errorOrS3Files.right.flatMap { s3Files =>
Try {
val s3KeyIndex = s3Files.map(_.s3Key).toSet
val s3Md5Index = s3Files.map(_.md5).toSet
val siteFiles = listSiteFiles
val existsOnS3 = (f: File) => s3KeyIndex contains site.resolveS3Key(f)
val isChangedOnS3 = (upload: Upload) => !(s3Md5Index contains upload.md5.get)
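          // A file is new when its S3 key is absent from the bucket; existing keys are re-uploaded only when their
          // MD5 is not found remotely (or unconditionally when the force option is set)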
val newUploads = siteFiles collect {
case file if !existsOnS3(file) => Upload(file, NewFile)
}
val changedUploads = siteFiles collect {
case file if existsOnS3(file) => Upload(file, FileUpdate)
} filter (if (pushOptions.force) selectAllFiles else isChangedOnS3)
newUploads ++ changedUploads
} match {
case Success(ok) => Right(ok)
case Failure(err) => Left(ErrorReport(err))
}
}
}
val selectAllFiles = (upload: Upload) => true
def resolveDeletes(s3Files: Future[Either[ErrorReport, Seq[S3File]]], redirects: Seq[Redirect])
(implicit site: Site, logger: Logger, executor: ExecutionContextExecutor): Future[Either[ErrorReport, Seq[S3Key]]] =
if (site.config.ignore_on_server.contains(Left(DELETE_NOTHING_MAGIC_WORD))) {
logger.debug(s"Ignoring all files on the bucket, since the setting $DELETE_NOTHING_MAGIC_WORD is on.")
Future(Right(Nil))
} else {
val localS3Keys = listSiteFiles.map(site resolveS3Key)
s3Files map { s3Files: Either[ErrorReport, Seq[S3File]] =>
for {
remoteS3Keys <- s3Files.right.map(_ map (_.s3Key)).right
} yield {
val keysToRetain = (localS3Keys ++ (redirects map { _.s3Key })).toSet
remoteS3Keys filterNot { s3Key =>
val ignoreOnServer = site.config.ignore_on_server.exists(_.fold(
(ignoreRegex: String) => rubyRegexMatches(s3Key, ignoreRegex),
(ignoreRegexes: Seq[String]) => ignoreRegexes.exists(rubyRegexMatches(s3Key, _))
))
if (ignoreOnServer) logger.debug(s"Ignoring $s3Key on server")
(keysToRetain contains s3Key) || ignoreOnServer
}
}
}
}
val DELETE_NOTHING_MAGIC_WORD = "_DELETE_NOTHING_ON_THE_S3_BUCKET_"
} | SivagnanamCiena/s3_website | src/main/scala/s3/website/UploadHelper.scala | Scala | mit | 3,340 |
package events
/**
* Created by daz on 12/09/2016.
*/
object RoomEventType extends Enumeration {
type roomEvent = Value
val MESSAGE, JOIN, LEAVE, CREATE, DELETE, LIST = Value
}
| dazito/messengr | app/events/RoomEventType.scala | Scala | apache-2.0 | 186 |
/*
* Copyright 2010-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package http
package rest
import scala.xml._
import org.specs2.mutable.Specification
import org.specs2.matcher.Matcher
import common._
import util.Helpers.secureXML
import util.ControlHelpers.tryo
/**
* System under specification for XMLApi.
*/
class XmlApiSpec extends Specification {
"XMLApi Specification".title
object XMLApiExample extends XMLApiHelper {
// Define our root tag
def createTag(contents : NodeSeq) : Elem = <api>{contents}</api>
// This method exists to test the non-XML implicit conversions on XMLApiHelper
def produce (in : Any) : LiftResponse = in match {
// Tests boolToResponse
case "true" => true
case "false" => false
// Tests canBoolToResponse
case s : String => tryo[Boolean] { s.toInt > 5 }
// Tests pairToResponse
case i : Int if i == 42 => (true,"But what is the question?")
// These test the listElemToResponse conversion
case f : Float if f == 42f => (<float>perfect</float> : Elem)
case f : Float if f == 0f => (<float>zero</float> : Node)
case f : Float if f > 0f => (<float>positive</float> : NodeSeq)
case f : Float if f < 0f => (<float>negative</float> : Seq[Node])
}
// This method tests the XML implicit conversions on XMLApiHelper
def calculator : LiftRules.DispatchPF = {
case r @ Req(List("api","sum"), _, GetRequest) => () => doSum(r)
case r @ Req(List("api","product"), _, GetRequest) => () => doProduct(r)
case r @ Req(List("api","max"), _, GetRequest) => () => doMax(r)
case r @ Req(List("api","min"), _, GetRequest) => () => doMin(r)
// Tests putResponseInBox
case Req("api" :: _, _, _) => () => BadRequestResponse()
}
// ===== Handler methods =====
def reduceOp (operation : (Int,Int) => Int)(r : Req) : Box[Elem] = tryo {
(r.param("args").map {
args => <result>{args.split(",").map(_.toInt).reduceLeft(operation)}</result>
}) ?~ "Missing args"
} match {
case Full(x) => x
case f : Failure => f
case Empty => Empty
}
// We specify the LiftResponse return type to force use of the implicit
// canNodeToResponse conversion
def doSum (r : Req) : LiftResponse = reduceOp(_ + _)(r)
def doProduct (r : Req) : LiftResponse = (reduceOp(_ * _)(r) : Box[Node])
def doMax (r : Req) : LiftResponse = (reduceOp(_ max _)(r) : Box[NodeSeq])
def doMin (r : Req) : LiftResponse = (reduceOp(_ min _)(r) : Box[Node])
//def doMin (r : Req) : LiftResponse = (reduceOp(_ min _)(r) : Box[Seq[Node]])
}
// A helper to simplify the specs matching
case class matchXmlResponse(expected : Node) extends Matcher[LiftResponse] {
def apply[T <: LiftResponse](response : org.specs2.matcher.Expectable[T]) = response.value match {
case x : XmlResponse => {
/* For some reason, the UnprefixedAttributes that Lift uses to merge in
* new attributes makes comparison fail. Instead, we simply stringify and
* reparse the response contents and that seems to fix the issue. */
val converted = secureXML.loadString(x.xml.toString)
result(converted == expected,
"%s matches %s".format(converted,expected),
"%s does not match %s".format(converted, expected),
response)
}
case other => result(false,"matches","not an XmlResponse", response)
}
}
"XMLApiHelper" should {
import XMLApiExample.produce
/* In all of these tests we include the <xml:group/> since that's what Lift
* inserts for content in non-content responses.
*/
"Convert booleans to LiftResponses" in {
produce("true") must matchXmlResponse(<api success="true"><xml:group/></api>)
produce("false") must matchXmlResponse(<api success="false"><xml:group/></api>)
}
"Convert Boxed booleans to LiftResponses" in {
produce("42") must matchXmlResponse(<api success="true"><xml:group/></api>)
produce("1") must matchXmlResponse(<api success="false"><xml:group/></api>)
val failure = produce("invalidInt")
failure must haveClass[XmlResponse]
failure match {
case x : XmlResponse => {
x.xml.attribute("success").map(_.text) must_== Some("false")
x.xml.attribute("msg").isDefined must_== true
}
}
}
"Convert Pairs to responses" in {
produce(42) must matchXmlResponse(<api success="true" msg="But what is the question?"><xml:group/></api>)
}
"Convert various XML types to a response" in {
produce(0f) must matchXmlResponse(<api success="true"><float>zero</float></api>)
produce(-1f) must matchXmlResponse(<api success="true"><float>negative</float></api>)
produce(1f) must matchXmlResponse(<api success="true"><float>positive</float></api>)
produce(42f) must matchXmlResponse(<api success="true"><float>perfect</float></api>)
}
}
}
| lift/framework | web/webkit/src/test/scala/net/liftweb/http/rest/XMLApiSpec.scala | Scala | apache-2.0 | 5,553 |
/*
* Copyright 2015 Michael Gnatz.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.mg.tt.service
import java.util.Date
import de.mg.tt.model.Category
/**
* Created by gnatz on 12/31/14.
*/
case class FilterCriteria(from: Date,
to: Date,
categories: Set[Category] = Set.empty)
| micgn/timetracker | TimeTrackerWeb/src/main/scala/de/mg/tt/service/FilterCriteria.scala | Scala | apache-2.0 | 862 |
package test
import org.specs2.mutable.Specification
class AvroTypeProvider2ArityHeteroTest extends Specification {
"A case class with an `Int` field coexisting with a non-`Int` field" should {
"serialize and deserialize correctly" in {
val record = AvroTypeProviderTest48(1, "bonjour")
TestUtil.verifyRead(record)
}
  }
  "A case class with a `String` field coexisting with a non-`String` field" should {
"serialize and deserialize correctly" in {
val record = AvroTypeProviderTest49("bueno", 2)
TestUtil.verifyRead(record)
}
}
"A case class with an `Option[String]` field coexisting with an `Option[Int]` field" should {
"serialize and deserialize correctly" in {
val record = AvroTypeProviderTest50(Some("tropics"), Some(3))
TestUtil.verifyRead(record)
}
}
"A case class with an `Option[Int]` field coexisting with an `Option[String]` field" should {
"serialize and deserialize correctly" in {
val record = AvroTypeProviderTest51(Some(4), Some("level"))
TestUtil.verifyRead(record)
}
}
"A case class with a `List[String]` field coexisting with a `List[Int]` field" should {
"serialize and deserialize correctly" in {
val record = AvroTypeProviderTest52(List("am.pm"), List(5,6))
TestUtil.verifyRead(record)
}
  }
  "A case class with a `List[Int]` field coexisting with a `List[String]` field" should {
"serialize and deserialize correctly" in {
val record = AvroTypeProviderTest53(List(7, 8), List("bon.sois"))
TestUtil.verifyRead(record)
}
  }
  "A case class with an `Option[List[Option[String]]]` field coexisting with an `Option[List[Option[Int]]]` field" should {
"serialize and deserialize correctly" in {
val record = AvroTypeProviderTest54(Some(List(Some("bronco"), None)), Some(List(Some(9), None)))
TestUtil.verifyRead(record)
}
  }
  "A case class with an `Option[List[Option[Int]]]` field coexisting with an `Option[List[Option[String]]]` field" should {
"serialize and deserialize correctly" in {
val record = AvroTypeProviderTest55(Some(List(Some(10), None)), Some(List(Some("bronca"), None)))
TestUtil.verifyRead(record)
}
  }
  "A case class with a `List[Option[List[Option[String]]]]` field coexisting with a `List[Option[List[Option[Int]]]]` field" should {
"serialize and deserialize correctly" in {
val record = AvroTypeProviderTest56(List(Some(List(Some("tibetan"), None)), None), List(Some(List(Some(11), None)), None))
TestUtil.verifyRead(record)
}
  }
  "A case class with a `List[Option[List[Option[Int]]]]` field coexisting with a `List[Option[List[Option[String]]]]` field" should {
"serialize and deserialize correctly" in {
val record = AvroTypeProviderTest57(List(Some(List(Some(12), None)), None), List(Some(List(Some("fire"), None)), None))
TestUtil.verifyRead(record)
}
}
}
| julianpeeters/avro-scala-macro-annotations | tests/src/test/scala/AvroTypeProviderTests/datatypetests/AvroTypeProvider2ArityHeteroTest.scala | Scala | apache-2.0 | 2,883 |
/*******************************************************************************
* Copyright (C) 2012 Łukasz Szpakowski.
*
* This library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package org.lkbgraph.algorithm.spec
import org.scalatest.Spec
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.lkbgraph._
import org.lkbgraph.immutable._
import org.lkbgraph.algorithm._
@RunWith(classOf[JUnitRunner])
class MinSpanningTreeSpec extends Spec with MinSpanningTreeBehaviors[Graph]
{
override def graphFactory = Graph
override def minSpanningTreeStrategy = Kruskal
describe("A Kruskal") {
it should behave like minSpanningTree
}
}
| luckboy/LkbGraph | src/test/org/lkbgraph/algorithm/spec/KruskalSpec.scala | Scala | lgpl-3.0 | 1,372 |
package org.jetbrains.plugins.scala
package testingSupport.test.specs2
import com.intellij.testIntegration.TestFramework
import org.jetbrains.plugins.scala.testingSupport.test.AbstractTestFramework.TestFrameworkSetupInfo
import org.jetbrains.plugins.scala.testingSupport.test.{AbstractTestFramework, TestFrameworkSetupSupportBase}
final class Specs2TestFramework extends AbstractTestFramework with TestFrameworkSetupSupportBase {
override def getName: String = "Specs2"
override def testFileTemplateName = "Specs2 Class"
override def getMarkerClassFQName: String = "org.specs2.mutable.Specification"
override def getDefaultSuperClass: String = "org.specs2.mutable.Specification"
override def baseSuitePaths: Seq[String] = Seq(
"org.specs2.specification.SpecificationStructure",
"org.specs2.specification.core.SpecificationStructure"
)
override def frameworkSetupInfo(scalaVersion: Option[String]): TestFrameworkSetupInfo =
TestFrameworkSetupInfo(
Seq(""""org.specs2" %% "specs2-core" % "latest.integration" % "test""""),
Seq(""""-Yrangepos"""")
)
}
object Specs2TestFramework {
@deprecated("use `apply` instead", "2020.3")
def instance: Specs2TestFramework = apply()
def apply(): Specs2TestFramework =
TestFramework.EXTENSION_NAME.findExtension(classOf[Specs2TestFramework])
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/testingSupport/test/specs2/Specs2TestFramework.scala | Scala | apache-2.0 | 1,340 |
package models.db.retrieve
import anorm.~
import anorm.Id
import anorm.Pk
import anorm.RowParser
import anorm.SQL
import anorm.SqlParser.get
import models.abstracts.Player
import models.abstracts.MatchPlayer
import models.abstracts.Match
import utils.db.BooleanHandling
import utils.db.DbFinder
import utils.db.PerformancesQuery
class DbRetrievePlayer(
iden: Pk[Long],
val surname: String,
val initial: String,
val isHighfield: Boolean
) extends Player {
val id: Long = iden.get
lazy val matches: Seq[Match] = {
performances(new PerformancesQuery(id, None)).map{perf => perf.matchIn}
}
def performances(query: PerformancesQuery): Seq[MatchPlayer] = {
DbRetrieveMatchPlayer.findByPerformanceParams(query)
}
lazy val delete: Unit = DbRetrievePlayer.deleteById(id.toString)
}
object DbRetrievePlayer extends DbFinder[DbRetrievePlayer] {
// Required by DbFinder
val tableName: String = "Player"
val idName: String = "player_id"
// Other Fields
val nameName: String = "name"
val initial: String = "init"
val isHighfieldName = "is_highfield"
val allFieldsParser: RowParser[DbRetrievePlayer] = {
get[Long](idName) ~ get[String](nameName) ~ get[String](initial) ~ get[Int](isHighfieldName) map {
case id ~ name ~ initial ~ isHighfield =>
new DbRetrievePlayer(Id(id), name, initial, BooleanHandling.intToBool(isHighfield))
}
}
} | ishakir/cric-stat | app/models/db/retrieve/DbRetrievePlayer.scala | Scala | epl-1.0 | 1,419 |
package com.typesafe.sbt.packager.jdkpackager
import com.typesafe.sbt.SbtNativePackager
import com.typesafe.sbt.packager.jdkpackager.JDKPackagerPlugin.autoImport._
import sbt._
import scala.xml.Node
/**
* Keys specific to deployment via the `javapackger` tool.
*
* @author <a href="mailto:fitch@datamininglab.com">Simeon H.K. Fitch</a>
* @since 2/11/15
*/
trait JDKPackagerKeys {
val jdkPackagerBasename: SettingKey[String] =
settingKey[String]("Filename sans extension for generated installer package.")
val jdkPackagerType: SettingKey[String] = settingKey[String](
"""Value passed as the `native` attribute to `fx:deploy` task.
|Per `javapackager` documentation, this may be one of the following:
|
| * `all`: Runs all of the installers for the platform on which it is running,
| and creates a disk image for the application.
| * `installer`: Runs all of the installers for the platform on which it is running.
| * `image`: Creates a disk image for the application. On macOS, the image is
| the .app file. On Linux, the image is the directory that gets installed.
| * `dmg`: Generates a DMG file for macOS.
| * `pkg`: Generates a .pkg package for macOS.
| * `mac.appStore`: Generates a package for the Mac App Store.
| * `rpm`: Generates an RPM package for Linux.
| * `deb`: Generates a Debian package for Linux.
| * `exe`: Generates a Windows .exe package.
| * `msi`: Generates a Windows Installer package.
|
| Default: `installer`.
| Details:
| http://docs.oracle.com/javase/8/docs/technotes/guides/deploy/javafx_ant_task_reference.html#CIABIFCI
""".stripMargin
)
val jdkPackagerToolkit: SettingKey[JDKPackagerToolkit] =
settingKey[JDKPackagerToolkit]("GUI toolkit used in app. Either `JavaFXToolkit` (default) or `SwingToolkit`")
val jdkPackagerJVMArgs: SettingKey[Seq[String]] = settingKey[Seq[String]](
"""Sequence of arguments to pass to the JVM.
|Default: `Seq("-Xmx768m")`.
|Details:
| http://docs.oracle.com/javase/8/docs/technotes/guides/deploy/javafx_ant_task_reference.html#CIAHJIJG
""".stripMargin
)
val jdkPackagerAppArgs: SettingKey[Seq[String]] = settingKey[Seq[String]](
"""List of command line arguments to pass to the application on launch.
|Default: `Seq.empty`
|Details:
| http://docs.oracle.com/javase/8/docs/technotes/guides/deploy/javafx_ant_task_reference.html#CACIJFHB
|
""".stripMargin
)
val jdkPackagerProperties: SettingKey[Map[String, String]] = settingKey[Map[String, String]](
"""Map of `System` properties to define in application.
|Default: `Map.empty`
|Details:
| http://docs.oracle.com/javase/8/docs/technotes/guides/deploy/javafx_ant_task_reference.html#CIAHCIFJ
""".stripMargin
)
val jdkAppIcon: SettingKey[Option[File]] = settingKey[Option[File]]("""Path to platform-specific application icon:
| * `icns`: MacOS
| * `ico`: Windows
| * `png`: Linux
|
| Defaults to generic Java icon.
""".stripMargin)
val jdkPackagerAssociations: SettingKey[Seq[FileAssociation]] = settingKey[Seq[FileAssociation]](
"""Set of application file associations to register for the application.
      |Example: `jdkPackagerAssociations := Seq(FileAssociation("foo", "application/x-foo", "Foo Data File", iconPath))`
|Default: `Seq.empty`
|Note: Requires JDK >= 8 build 40.
|Details:
| http://docs.oracle.com/javase/8/docs/technotes/guides/deploy/javafx_ant_task_reference.html#CIAIDHBJ
""".stripMargin
)
/** Config for scoping keys outside of Global . */
val JDKPackager: Configuration = config("jdkPackager") extend SbtNativePackager.Universal
// ------------------------------------------
// Keys to be defined in JDKPackager config.
// ------------------------------------------
val antPackagerTasks: SettingKey[Option[File]] = settingKey[Option[File]](
"Path to `ant-javafx.jar` library in JDK. By plugin attempts to find location based on `java.home` property. Specifying `JAVA_HOME` or `JDK_HOME` can help."
)
val antBuildDefn: TaskKey[Node] =
taskKey[xml.Node]("Generates a Ant XML DOM defining package generating build for JDK provided Ant task.")
val writeAntBuild: TaskKey[File] =
taskKey[File]("Write the Ant `build.xml` file to the jdkpackager target directory")
val antExtraClasspath: SettingKey[Seq[File]] =
settingKey[Seq[File]]("Additional classpath entries for the JavaFX Ant task beyond `antPackagerTasks`")
}
| fsat/sbt-native-packager | src/main/scala/com/typesafe/sbt/packager/jdkpackager/JDKPackagerKeys.scala | Scala | bsd-2-clause | 4,663 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.cassandra
import com.spotify.scio.values.SCollection
import com.spotify.scio.ScioContext
import com.spotify.scio.io.{EmptyTap, EmptyTapOf, ScioIO, Tap}
import com.spotify.scio.io.TapT
final case class CassandraIO[T](opts: CassandraOptions) extends ScioIO[T] {
override type ReadP = Nothing
override type WriteP = CassandraIO.WriteParam[T]
override val tapT: TapT.Aux[T, Nothing] = EmptyTapOf[T]
override protected def read(sc: ScioContext, params: ReadP): SCollection[T] =
throw new UnsupportedOperationException("Can't read from Cassandra")
/**
* Save this SCollection as a Cassandra table.
*
* Cassandra `org.apache.cassandra.hadoop.cql3.CqlBulkRecordWriter` is used to perform bulk writes
* for better throughput. The [[com.spotify.scio.values.SCollection SCollection]] is grouped by
* the table partition key before written to the cluster. Therefore writes only occur at the end
* of each window in streaming mode. The bulk writer writes to all nodes in a cluster so remote
* nodes in a multi-datacenter cluster may become a bottleneck.
*/
override protected def write(data: SCollection[T], params: WriteP): Tap[Nothing] = {
val bulkOps = new BulkOperations(opts, params.parallelism)
data
.map(params.outputFn.andThen(bulkOps.serializeFn))
.groupBy(bulkOps.partitionFn)
.map(bulkOps.writeFn)
EmptyTap
}
override def tap(params: ReadP): Tap[Nothing] =
EmptyTap
}
object CassandraIO {
object WriteParam {
private[cassandra] val DefaultPar = 0
}
final case class WriteParam[T] private (
outputFn: T => Seq[Any],
parallelism: Int = WriteParam.DefaultPar
)
}
| spotify/scio | scio-cassandra/cassandra3/src/main/scala/com/spotify/scio/cassandra/CassandraIO.scala | Scala | apache-2.0 | 2,288 |
object Main extends App {
println("Hello, world")
} | josketres/gradle-scala-skeleton | src/main/scala/Main.scala | Scala | apache-2.0 | 53 |
package org.datacleaner.result.html
import org.scalatest.junit.AssertionsForJUnit
import org.junit.Test
import org.datacleaner.result.SimpleAnalysisResult
import org.datacleaner.configuration.AnalyzerBeansConfiguration
import org.datacleaner.configuration.DataCleanerConfigurationImpl
import java.io.StringWriter
import org.junit.Assert
import scala.xml.XML
class HtmlAnalysisResultWriterTest extends AssertionsForJUnit {
@Test
def testEmptyRendering = {
val writer = new HtmlAnalysisResultWriter();
val analysisResult = new SimpleAnalysisResult(new java.util.HashMap());
val configuration = new DataCleanerConfigurationImpl();
val stringWriter = new StringWriter();
writer.write(analysisResult, configuration, stringWriter);
val html = stringWriter.toString();
Assert.assertEquals("""<!DOCTYPE html>
<html>
<head>
<title>Analysis result</title> <script type="text/javascript" src="http://eobjects.org/resources/datacleaner-html-rendering/analysis-result.js"></script>
<link rel="shortcut icon" href="http://eobjects.org/resources/datacleaner-html-rendering/analysis-result-icon.png" />
<script type="text/javascript">//<![CDATA[
var analysisResult = {};
//]]>
</script>
</head><body>
<div class="analysisResultContainer">
<div class="analysisResultHeader"><ul class="analysisResultToc"></ul></div></div>
</body></html>""".replaceAll("\\r\\n", "\\n"), html.replaceAll("\\r\\n", "\\n"));
}
}
| anandswarupv/DataCleaner | components/html-rendering/src/test/scala/org/datacleaner/result/html/HtmlAnalysisResultWriterTest.scala | Scala | lgpl-3.0 | 1,431 |
package com.mec.scala
object MatchingDeep {
def main(args: Array[String]) ={
for(person <- Seq(alice, bob, charlie)){
person match {
case Person("Alice", 25, Address(_, "Chicago", _)) => println("Hi, Alice")
case Person("Bob", 29, Address("2 Java Ave.", "Miami", "USA")) => println("Hi, Bob!")
case Person(name, age, _) => println(s"Who are you, $age year-old person named $name?")
}
}
}
val alice = Person("Alice", 25, Address("1 Scala Lane", "Chicago", "USA"))
val bob = Person("Bob", 29, Address("2 Java Ave.", "Miami", "USA"))
val charlie = Person("Charlie", 32, Address("3 Python Ct1", "Boston", "USA"))
}
case class Address(street: String, city: String, country: String)
case class Person(name: String, age: Int, address: Address)
| mectest1/HelloScala | HelloWorld/src/com/mec/scala/MatchingDeep.scala | Scala | gpl-3.0 | 799 |
import com.alanjz.meerkat.moves.PseudoLegalMover
import com.alanjz.meerkat.position.mutable.MaskNode
import com.alanjz.meerkat.util.position.mutable.NodeStringBuilder
import scala.io.Source
import scala.util.Random
/**
* Created by alan on 12/15/14.
*/
object RandomMoveTest extends App {
val node = MaskNode.initialPosition
val rand = new Random
var moves = new PseudoLegalMover(node).getMoves
while(moves.nonEmpty) {
println(node)
println(moves.mkString(" "))
val line = Source.stdin.bufferedReader().readLine()
if(line == "z") {
node.unmake()
}
else {
val move = moves(rand.nextInt(moves.size))
println(node.active + " chose " + move + ".")
node.make(move)
}
moves = new PseudoLegalMover(node).getMoves
}
}
| spacenut/meerkat-chess | scripts/RandomMoveTest.scala | Scala | gpl-2.0 | 781 |
import scala.language.reflectiveCalls
// Scala class:
class ScalaVarArgs extends J_1 {
// -- no problem on overriding it using ordinary class
def method(s: String*) { println(s) }
}
object Test {
def main(args: Array[String]) {
//[1] Ok - no problem using inferred type
val varArgs = new J_1 {
def method(s: String*) { println(s) }
}
varArgs.method("1", "2")
//[2] Ok -- no problem when explicit set its type after construction
val b: J_1 = varArgs
b.method("1", "2")
//[3] Ok -- no problem on calling its method
(new ScalaVarArgs).method("1", "2")
(new ScalaVarArgs: J_1).method("1", "2")
    //[4] Not Ok -- error when assigning anonymous class to an explicitly typed val
// Compiler error: object creation impossible, since method method in trait VarArgs of type (s: <repeated...>[java.lang.String])Unit is not defined
val tagged: J_1 = new J_1 {
def method(s: String*) { println(s) }
}
}
}
| folone/dotty | tests/pending/run/t4729/S_2.scala | Scala | bsd-3-clause | 971 |
package com.sksamuel.elastic4s.searches.queries
case class HasParentQuery(`type`: String,
query: Query,
score: Boolean,
boost: Option[Double] = None,
ignoreUnmapped: Option[Boolean] = None,
innerHit: Option[InnerHit] = None,
queryName: Option[String] = None)
extends Query {
def boost(boost: Double): HasParentQuery = copy(boost = Some(boost))
def ignoreUnmapped(ignoreUnmapped: Boolean): HasParentQuery = copy(ignoreUnmapped = Some(ignoreUnmapped))
def innerHit(innerHit: InnerHit): HasParentQuery = copy(innerHit = Some(innerHit))
def queryName(queryName: String): HasParentQuery = copy(queryName = Some(queryName))
}
| Tecsisa/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/queries/HasParentQuery.scala | Scala | apache-2.0 | 831 |
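Because every builder method above returns a copy, a query can be assembled fluently. A small sketch (the `titleQuery` parameter stands in for any concrete `Query` built elsewhere, e.g. with elastic4s's query DSL):

```scala
import com.sksamuel.elastic4s.searches.queries.{HasParentQuery, Query}

// Illustrative only: matches child documents whose parent of type "blogpost"
// satisfies the given query, scoring children by the parent match.
def byParentTitle(titleQuery: Query): HasParentQuery =
  HasParentQuery("blogpost", titleQuery, score = true)
    .boost(1.5)
    .ignoreUnmapped(true)
    .queryName("by-parent-title")
```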
package edu.vanderbilt.hiplab.securema
import org.jboss.netty.channel._
import org.jboss.netty.bootstrap.ServerBootstrap
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory
import java.util.concurrent.Executors
import java.net.InetSocketAddress
import java.math.BigInteger
import org.jboss.netty.handler.codec.serialization.{ClassResolvers, ObjectDecoder, ObjectEncoder}
import Program.EstimateNClient
/**
* Refer to README for details.
* Author: Wei Xie
* Version:
*/
object ClientService {
var client: EstimateNClient = _
class ClientHandler extends SimpleChannelUpstreamHandler {
override def messageReceived(ctx: ChannelHandlerContext, e: MessageEvent) {
try {
val args = e.getMessage.asInstanceOf[Array[String]]
println("Got: " + args(0))
client.setInputs(new BigInteger("5"))
client.runOnline()
} catch { case e: Exception =>
e.printStackTrace()
}
}
}
class ServiceClient(port: Int) {
def run() = {
val bootstrap = new ServerBootstrap(
new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(),
Executors.newCachedThreadPool()
)
)
bootstrap.setPipelineFactory(new ChannelPipelineFactory {
def getPipeline: ChannelPipeline = Channels.pipeline(
new ObjectEncoder(),
new ObjectDecoder(ClassResolvers.cacheDisabled(getClass.getClassLoader)),
new ClientHandler()
)
})
bootstrap.bind(new InetSocketAddress(port))
}
}
def main(args: Array[String]): Unit = {
if (args.length < 1) {
println("ERROR: need to provide PORT number for AliceService.")
} else {
//val port = try { args(0).toInt } catch { case _: Exception => 3491 }
val port = 3491
client = new EstimateNClient(80, 80)
client.runOffline()
println("Now accepting new requests...")
new ServiceClient(port).run()
}
}
}
| XieConnect/SecureMA | src/main/scala/edu/vanderbilt/hiplab/securema/ClientService.scala | Scala | mit | 1,975 |
package com.fourseasapp.facebookads.model
import com.fourseasapp.facebookads.network.{APINode, APINodeCompanion}
/**
* Created by hailegia on 3/28/2016.
*/
case class ConnectionObject(id: String,
app_installs_tracked: Option[Boolean] = None,
is_game: Option[Boolean] = None,
name: Option[String] = None,
name_with_location_descriptor: Option[String] = None,
native_app_targeting_ids: Option[Map[String, String]] = None,
object_store_urls: Option[Map[String, String]] = None,
picture: Option[String] = None,
supported_platforms: Option[Seq[Int]] = None,
tabs: Option[Map[String, String]] = None,
`type`: Option[Int] = None,
url: Option[String] = None) extends APINode[ConnectionObject] {
type Fields = ConnectionObject.Fields
override def companion = ConnectionObject
}
object ConnectionObject extends APINodeCompanion[ConnectionObject] {
import enumeratum._
import org.cvogt.play.json.Jsonx
import play.api.libs.json.Format
val APPLICATION = 2
val DOMAIN = 7
val EVENT = 3
val PAGE = 6
val PLACE = 1
sealed trait Fields extends EnumEntry
object Fields extends Enum[Fields] with PlayJsonEnum[Fields] {
val values = findValues
case object id extends Fields
case object app_installs_tracked extends Fields
case object is_game extends Fields
case object name extends Fields
case object name_with_location_descriptor extends Fields
case object native_app_store_ids extends Fields
case object native_app_targeting_ids extends Fields
case object object_store_urls extends Fields
case object picture extends Fields
case object supported_platforms extends Fields
case object tabs extends Fields
case object `type` extends Fields
case object url extends Fields
}
override implicit val format: Format[ConnectionObject] = Jsonx.formatCaseClass[ConnectionObject]
}
| hailg/facebook-scala-ads-sdk | src/main/scala/com/fourseasapp/facebookads/model/ConnectionObject.scala | Scala | mit | 2,160 |
/*
* Copyright (c) 2016 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.kinesistee.config
import java.util
import awscala.dynamodbv2.{AttributeValue, DynamoDB}
import com.amazonaws.services.dynamodbv2.model.{QueryRequest, QueryResult}
import org.specs2.mock.Mockito
import org.specs2.mutable.Specification
class ConfigurationBuilderSpec extends Specification with Mockito {
val sampleGoodConfig = scala.io.Source.fromURL(getClass.getResource("/sample_self_describing_config.json")).mkString
val sampleConfig = Configuration(name = "My Kinesis Tee example",
targetStream = TargetStream("my-target-stream", None),
transformer = Some(Transformer(BuiltIn.SNOWPLOW_TO_NESTED_JSON)),
filter = None)
"A valid configuration" should {
"generate the correct case class" in {
ConfigurationBuilder.build(sampleGoodConfig) mustEqual sampleConfig
}
}
"An invalid JSON configuration" should {
"throw an exception" in {
ConfigurationBuilder.build("banana") must throwA[IllegalArgumentException]
}
}
"A configuration that doesn't match the given schema" should {
"throw an exception" in {
ConfigurationBuilder.build(
"""
|{
| "schema": "com.thing",
| "data": { "foo":"bar" }
|}
""".stripMargin) must throwA(new IllegalArgumentException("Invalid configuration"))
}
}
"Loading from DynamoDB" should {
val sampleConfigTableName = "config-table-sample-name"
"load a configuration using dynamodb and the specified table name" in {
implicit val dynamoDB = mock[DynamoDB]
val res = mock[QueryResult]
val items:util.List[java.util.Map[java.lang.String,com.amazonaws.services.dynamodbv2.model.AttributeValue]] = new util.ArrayList()
val one:util.Map[String,com.amazonaws.services.dynamodbv2.model.AttributeValue] = new util.HashMap()
one.put("id", new AttributeValue(Some("with-id")))
one.put("configuration", new AttributeValue(Some(sampleGoodConfig)))
items.add(one)
res.getItems returns items
dynamoDB.query(any[QueryRequest]) returns res
ConfigurationBuilder.build(sampleConfigTableName, "with-id") mustEqual sampleConfig
}
"give a good error if the table doesn't have a matching entry" in {
implicit val dynamoDB = mock[DynamoDB]
val res = mock[QueryResult]
val items:util.List[java.util.Map[java.lang.String,com.amazonaws.services.dynamodbv2.model.AttributeValue]] = new util.ArrayList()
res.getItems returns items
dynamoDB.query(any[QueryRequest]) returns res
ConfigurationBuilder.build(sampleConfigTableName, "with-id") must throwA(new IllegalStateException(s"No configuration in table '$sampleConfigTableName' for lambda 'with-id'!"))
}
"give a good error if the table doesn't have the right keys (id and configuration)" in {
implicit val dynamoDB = mock[DynamoDB]
val res = mock[QueryResult]
val items:util.List[java.util.Map[java.lang.String,com.amazonaws.services.dynamodbv2.model.AttributeValue]] = new util.ArrayList()
val one:util.Map[String,com.amazonaws.services.dynamodbv2.model.AttributeValue] = new util.HashMap()
one.put("id", new AttributeValue(Some("with-id")))
one.put("this-is-not-config", new AttributeValue(Some("abc")))
items.add(one)
res.getItems returns items
dynamoDB.query(any[QueryRequest]) returns res
ConfigurationBuilder.build(sampleConfigTableName, "with-id") must throwA(new IllegalStateException(s"Config table '${sampleConfigTableName}' for lambda 'with-id' is missing a 'configuration' field!"))
}
"do something reasonable if ddb errors" in {
implicit val dynamoDB = mock[DynamoDB]
val exception = new IllegalArgumentException("Query exploded")
dynamoDB.query(any[QueryRequest]) throws exception
// NB IllegalArgumentException is rethrown as IllegalStateException
ConfigurationBuilder.build(sampleConfigTableName, "with-id") must throwA[IllegalStateException](message = "Query exploded")
}
}
}
| snowplow/kinesis-tee | src/test/scala/com/snowplowanalytics/kinesistee/config/ConfigurationBuilderSpec.scala | Scala | apache-2.0 | 4,846 |
package controllers
import javax.inject._
import play.api._
import play.api.mvc._
import services.ApplicationService
import play.api.libs.json.Json
import services.users.AuthenticationService
import scala.collection.JavaConverters._
import services.users.UserService
import play.api.libs.json.JsError
import services.PaymentMethodsService
import controllers.input.PaymentMethodInput
@Singleton
class PaymentMethodsController @Inject() (authenticationService: AuthenticationService, usersService: UserService, paymentMethodsService: PaymentMethodsService)
extends BaseController(authenticationService, usersService) {
def index = SecuredAction { request =>
ok(paymentMethodsService.getAll())
}
def get(id: Int) = SecuredAction { request =>
paymentMethodsService.get(id).map { paymentMethod =>
ok(paymentMethod)
}.getOrElse {
notFound("Payment method does not exist")
}
}
def save(id: Int) = SecuredAction(BodyParsers.parse.json) { request =>
val paymentMethodInputResult = request.body.validate[PaymentMethodInput]
paymentMethodInputResult.fold(
errors => {
badRequest("JSON parsing error: " + JsError.toJson(errors))
},
paymentMethodInput => {
paymentMethodsService.validationErrorOnUpdate(paymentMethodInput).map { error =>
badRequest(error)
}.getOrElse {
paymentMethodsService.get(id).map { paymentMethod =>
ok(paymentMethodsService.update(paymentMethod, paymentMethodInput))
}.getOrElse {
notFound("Payment method does not exist")
}
}
}
)
}
def create = SecuredAction(BodyParsers.parse.json) { request =>
val paymentMethodInputResult = request.body.validate[PaymentMethodInput]
paymentMethodInputResult.fold(
errors => {
badRequest("JSON parsing error: " + JsError.toJson(errors))
},
paymentMethodInput => {
paymentMethodsService.validationErrorOnCreate(paymentMethodInput).map { error =>
badRequest(error)
}.getOrElse {
ok(paymentMethodsService.create(paymentMethodInput))
}
}
)
}
def delete(id: Int) = SecuredAction { request =>
paymentMethodsService.get(id).map { paymentMethod =>
ok(paymentMethodsService.delete(paymentMethod))
}.getOrElse {
notFound("Payment method does not exist")
}
}
} | marcin-lawrowski/felicia | app/controllers/PaymentMethodsController.scala | Scala | gpl-3.0 | 2,265 |
/*
* sbt-haxe
* Copyright 2014 深圳岂凡网络有限公司 (Shenzhen QiFun Network Corp., LTD)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.microbuilder.sbtHaxe
import sbt._
import java.io.File
import scala.util.parsing.json.JSONObject
trait HaxeKeys {
final val haxeMacros = SettingKey[Seq[String]]("haxe-macros", "--macro command-line options for Haxe compiler.")
final val haxeOptions = SettingKey[Seq[String]]("haxe-options", "Additional command-line options for Haxe compiler.")
final val haxeCommand = SettingKey[String]("haxe-command", "The Haxe executable.")
final val haxelibCommand = SettingKey[String]("haxelib-command", "The haxelib executable")
final val haxePlatformName = SettingKey[String]("haxe-platform-name", "The name of the haxe platform")
final val haxe = TaskKey[Seq[File]]("haxe", "Convert Haxe source code to target source code.")
final val haxeOutputPath = SettingKey[Option[File]]("haxe-output-path", "The path where the Haxe code will be compiled to.")
final val haxeXmls = TaskKey[Seq[File]]("haxe-xmls", "Generate Haxe xmls.")
final val doxRegex = TaskKey[Seq[String]]("dox-regex", "The Regex that used to generate Haxe documentation.")
final val haxeXml = TaskKey[Seq[File]]("haxeXml", "Generate Haxe xml.")
type DependencyVersion = com.thoughtworks.microbuilder.sbtHaxe.DependencyVersion
final val DependencyVersion = com.thoughtworks.microbuilder.sbtHaxe.DependencyVersion
final val haxelibContributors = SettingKey[Seq[String]]("haxelib-contributors", "Contributors in haxelib.json")
final val haxelibSubmitUsername = SettingKey[String]("haxelib-submit-username", "The username for `haxelib submit`")
final val haxelibSubmitPassword = SettingKey[String]("haxelib-submit-password", "The password for `haxelib submit`")
final val haxelibReleaseNote = SettingKey[String]("haxelib-release-note", "The release note in haxelib.json")
final val haxelibTags = SettingKey[Seq[String]]("haxelib-tags", "Tags in haxelib.json")
final val haxelibDependencies = SettingKey[Map[String, DependencyVersion]]("haxelib-dependencies", "Additional dependencies in haxelib.json")
final val haxelibInstallDependencies = TaskKey[Unit]("haxelib-install-dependencies", "Install additional dependencies in haxelib.json")
final val haxelibJson = SettingKey[JSONObject]("haxelib-json", "The file content of haxelib.json")
final val makeHaxelibJson = TaskKey[File]("make-haxelib-json", "Create haxelib.json")
final val haxeExtraParams = SettingKey[Seq[String]]("haxe-extra-params", "The extra haxe flags in extraParams.hxml")
final val makeExtraParamsHxml = TaskKey[Option[File]]("make-extra-params-hxml", "Create extraParams.hxml")
final val haxeNativeDependencyOptions = TaskKey[Seq[String]]("haxe-native-dependency-options", "-java-lib or -net-lib options for Haxe compiler.")
  final val isLibrary = SettingKey[Boolean]("is-library", "Indicate whether the current Haxe project is a library or an executable. The Haxe compiler generates a DLL for C# target, static library for C++ target and SWC for Flash target, if `isLibrary` is true.")
}
final object HaxeKeys extends HaxeKeys
| ThoughtWorksInc/sbt-haxe | src/main/scala/com/thoughtworks/microbuilder/sbtHaxe/HaxeKeys.scala | Scala | apache-2.0 | 3,701 |
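A hypothetical `build.sbt` fragment showing how a project might set a few of the keys above (all values are illustrative; enabling the plugin that wires these keys into compilation, or importing `HaxeKeys._`, is assumed):

```scala
// Illustrative settings only – concrete values depend on the project and target platform.
haxeCommand := "haxe"
haxelibCommand := "haxelib"
haxePlatformName := "java"
haxeOptions ++= Seq("-D", "no-traces")
haxelibTags := Seq("microbuilder", "json")
isLibrary := true
```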
package loom.models.admin
import scala.language.implicitConversions
import scala.collection.JavaConversions._
import scala.beans.BeanProperty
import play.api.db.DB
import play.api.Play.current
import play.api.Logger
import anorm.SQL
import anorm.~
import anorm.SqlParser._
import java.util.Date
import loom.utils.Memcached
import loom.Global
import loom.models._
/**
* Admin (1) -> (N) Role (1) -> (N) APermission
*/
/**
* user & role
*/
case class AdminRole(userId: Long, roleIds: List[Long]) {
private lazy val rolesSet = roleIds.toSet
def contains(roleId: Long) = rolesSet.contains(roleId)
}
object AdminRole {
def create(userId: Long, roleIds: List[Long]) = {
DB.withConnection { implicit conn =>
SQL(
"""insert into opt_admin_role VALUES ({userId}, {roleIds})""").on(
'userId -> userId,
'roleIds -> roleIds.mkString(Model.semicolon)
).executeInsert()
}
}
def _update(userId: Long, roleIds: List[Long]) = {
DB.withConnection { implicit conn =>
SQL( """update opt_admin_role set roleIds = {roleIds} where userId = {userId}""").on(
'userId -> userId,
'roleIds -> roleIds.mkString(Model.semicolon)
).executeUpdate()
}
}
def update(userId: Long, roleIds: List[Long]) = {
cleanCache(userId)
_update(userId, roleIds)
}
private def _findOne(userId: Long): Option[AdminRole] = {
DB.withConnection { implicit conn =>
val ret = SQL(
"""select * from opt_admin_role where userId = {userId}"""
).on(
'userId -> userId
).as(
{
long("userId") ~ str("roleIds") map {
case uId ~ rIds => {
val roleIds = if (rIds.length == 0) Nil else rIds.split(Model.semicolon).map(_.toLong).toList
AdminRole(uId, roleIds)
}
}
}.singleOpt
)
ret
}
}
private def cleanCache(userId: Long) {
Memcached.delete(userRoleCacheKey(userId))
}
private def userRoleCacheKey(userId: Long) = {
Global.cacheNameSpace + "m/AdminRole/0/" + userId
}
def findOne(userId: Long): Option[AdminRole] = {
val ret: AdminRole = Memcached.getOrElse(userRoleCacheKey(userId)) {
_findOne(userId).getOrElse(null)
}
Option(ret)
}
}
/**
* opt role info.
*/
case class Role(@BeanProperty id: Long, @BeanProperty name: String, permissions: List[APermission.Type], status: Int, @BeanProperty visible: Boolean, createDate: Date) {
def enable() = { status == Role.Status_Enable }
def disable() = { status == Role.Status_Disable }
private lazy val permissionsSet = Set() ++ permissions
def hasPermissions(psVal: APermission.Type) = {
permissionsSet.contains(psVal)
}
def getPermissions(): java.util.List[APermission.Type] = {
permissions
}
}
object Role {
val Admin_Role_Id = 1
val Status_Disable = 1
val Status_Enable = 2
val nameMinLen = 2
val nameMaxLen = 8
/** list cache timestamp. update when create role */
@volatile private var listTimes = System.currentTimeMillis()
// -- Parsers
/**
* Parse a Role from a ResultSet
*/
private val simple = {
get[Long]("id") ~
get[String]("name") ~
get[String]("permissions") ~
get[Int]("status") ~
get[Boolean]("visiable") ~
get[Date]("createDate") map { case id ~ name ~ permissions ~ status ~ visiable ~ createDate =>
val pslist = permissions.split(Model.semicolon).map { pid =>
try {
APermission.get(pid.toInt)
} catch {
case e: Exception => Logger.error("roleId %s PVal %s".format(id, e.getMessage))
null
}
}.filter(_ != null).toList
Role(id, name, pslist, status, visiable, createDate)
}
}
/**
*
* @param name
* @param psList
* @return (success: Boolean, role: Role, i18nMsg: String)
*/
def create(name: String, psList: List[APermission.PVal]): (Boolean, Role, String) = {
val permissions = psList.map(_.id).mkString(Model.semicolon)
val status = Role.Status_Enable
val visiable = true
val createDate = new java.util.Date
val id: Option[Long] = DB.withConnection { implicit conn =>
SQL(
"""insert into opt_role(name, permissions, status, visiable,
| createDate)
| values ({name}, {permissions}, {status}, {visiable},
| {createDate})""".stripMargin).on(
'name -> name,
'permissions -> permissions,
'status -> status,
'visiable -> visiable,
'createDate -> createDate
).executeInsert()
}
id match {
case Some(i) =>
listTimes = System.currentTimeMillis()
val r = Role(i, name, psList, status, visiable, createDate)
(true, r, "common.create.success")
case None => (false, null, "common.error.unknown")
}
}
private def _findOne(roleId: Long): Option[Role] = {
DB.withConnection {
implicit c =>
val ret = SQL( """select * from opt_role where id = {id}"""
).on(
'id -> roleId
).as(
simple.singleOpt
)
ret
}
}
def findOne(roleId: Long): Option[Role] = {
val role = Memcached.getOrElse(roleCacheKey(roleId)) {
_findOne(roleId).getOrElse(null)
}
Option(role)
}
def roles(accountId: Long): List[Option[Role]] = {
val ur = AdminRole.findOne(accountId)
ur match {
case Some(userRole) => userRole.roleIds.map(rId => Role.findOne(rId))
case None => Nil
}
}
private def _updateRole(role: Role) = {
val pms = role.permissions.map(_.id).mkString(Model.semicolon)
DB.withConnection { implicit conn =>
SQL( """update opt_role set name = {name},
| permissions = {permissions}, status = {status}
| where id = {id}
""".stripMargin).on(
'name -> role.name,
'permissions -> pms,
'status -> role.status,
'id -> role.id
).executeUpdate()
}
}
private def updateRole(role: Role) = {
cleanCache(role.id)
_updateRole(role)
}
def updateRole(role: Role, name: String, permissions: List[APermission.PVal]) {
val newr = role.copy(name = name, permissions = permissions)
updateRole(newr)
}
private def roleCacheKey(roleId: Long) = {
Global.cacheNameSpace + "m/Role/0/" + roleId
}
private def permissionsCacheKey(userId: Long) = {
Global.cacheNameSpace + "m/APermission/0/" + userId
}
private def countCacheKey() = {
Global.cacheNameSpace + "m/Role/listcount/0/ts/" + listTimes
}
private def listCacheKey(pageNo: Int, pageSize: Int) = {
Global.cacheNameSpace + "m/Role/list/0/pn/" + pageNo + "/ps/" + pageSize + "/ts/" + listTimes
}
private def cleanCache(roleId: Long) {
Memcached.delete(roleCacheKey(roleId))
}
def permissions(userId: Long): List[APermission.Type] = {
Memcached.getOrElse(permissionsCacheKey(userId)) {
val rs = Role.roles(userId).flatten
rs.flatMap(role => role.permissions)
}
}
private def _list(page: PageRequest): PageImpl[Long] = {
val limit = if (Global.isDbH2) {
"order by id desc limit {rowcount} offset {offset}"
} else if (Global.isDbMysql) {
"order by id desc limit {offset} , {rowcount}"
} else {
""
}
val sql = "select id from opt_role " + limit
val list: List[Long] = DB.withConnection { implicit conn =>
SQL(sql).on(
'offset -> page.offset,
'rowcount -> page.pageSize).as(long("id").*)
}
val c: Long = Memcached.getOrElse(countCacheKey()) {
DB.withConnection { implicit conn =>
SQL("select count(*) from opt_role").as(scalar[Long].single)
}
}
PageImpl(page.pageNo, page.pageSize, list, c)
}
def list(page: PageRequest): PageImpl[Role] = {
val idsPage = Memcached.getOrElse(listCacheKey(page.pageNo, page.pageSize)) {
_list(page)
}
val rlist = idsPage.result.map(id => findOne(id)).flatten
PageImpl(page.pageNo, page.pageSize, rlist, idsPage.total)
}
def toggleStatus(roleId: Long): (Boolean, String, String) = {
findOne(roleId) match {
case Some(role) =>
val newrole = if (role.disable())
("enable", role.copy(status = Role.Status_Enable))
else
("disable", role.copy(status = Role.Status_Disable))
updateRole(newrole._2)
(true, newrole._1, "common.ok")
case None => (false, "none", "common.not.found")
}
}
}
| chaosky/loom | app/loom/models/admin/Role.scala | Scala | mit | 8,500 |
package rocks.muki.example
import java.time.Instant
import io.circe._
/**
* You can put custom circe codecs here so they will be picked up as long as you put this into the
* `graphqlCodegenImports` sbt setting.
*/
object codecs {
/**
* This codec encodes `java.time.Instant` as unix epoch millis instead of iso8601 string (circe default).
*
* This has to match the server-side implementation for a custom "Instant" type.
*/
implicit val instantCodec: Codec[Instant] = Codec.from(
Decoder[Long].map(Instant.ofEpochMilli),
Encoder[Long].contramap(_.toEpochMilli)
)
}
| muuki88/sbt-graphql | test-project/client/src/main/scala/rocks/muki/example/codecs.scala | Scala | apache-2.0 | 605 |
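A small round-trip sketch of the codec above (assumes circe-parser is on the classpath; the timestamp value is arbitrary):

```scala
import java.time.Instant
import io.circe.parser.decode
import io.circe.syntax._
import rocks.muki.example.codecs._

val t = Instant.ofEpochMilli(1500000000000L)
val json = t.asJson.noSpaces        // "1500000000000" – epoch millis, not an ISO-8601 string
val back = decode[Instant](json)    // Right(2017-07-14T02:40:00Z)
```

Because the codec is brought into lexical scope by the import, it is the instance picked up here rather than circe's default ISO-8601 `Instant` instances.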
/*
* Copyright (C) 2017 LREN CHUV for Human Brain Project
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package ch.chuv.lren.woken.api.swagger
import javax.ws.rs.{ GET, POST, Path }
import akka.http.scaladsl.server.{ Directives, Route }
import ch.chuv.lren.woken.messages.query._
import io.swagger.v3.oas.annotations.Operation
import io.swagger.v3.oas.annotations.media.{ Content, Schema }
import io.swagger.v3.oas.annotations.parameters.RequestBody
import io.swagger.v3.oas.annotations.responses.ApiResponse
// This trait documents the API, tries not to pollute the code with annotations
/**
* Operations for data mining
*/
@Path("/mining")
trait MiningServiceApi extends Directives {
import queryProtocol._
@Path("/job")
@Operation(
summary = "Run a data mining job",
description = "Run a data mining job for a single algorithm",
tags = Array("api"),
requestBody = new RequestBody(
description = "Data mining query to execute",
content = Array(
new Content(mediaType = "application/json",
schema = new Schema(implementation = classOf[MiningQuery]))
),
required = true
),
responses = Array(
new ApiResponse(
responseCode = "200",
description = "Success - Result of the data mining query",
content = Array(
new Content(mediaType = "application/json",
schema = new Schema(implementation = classOf[QueryResult]))
)
),
new ApiResponse(responseCode = "401",
description = "Authentication required",
content = Array(new Content(mediaType = "text/plain"))),
new ApiResponse(responseCode = "403",
description = "Authentication failed",
content = Array(new Content(mediaType = "text/plain"))),
new ApiResponse(responseCode = "500",
description = "Internal server error",
content = Array(new Content(mediaType = "text/plain")))
)
)
@POST
def runMiningJob: Route
@Path("/experiment")
@Operation(
summary = "Run a data mining experiment",
description = "Run a data mining experiment and return id",
tags = Array("api"),
requestBody = new RequestBody(
description = "Experiment to execute",
content = Array(
new Content(mediaType = "application/json",
schema = new Schema(implementation = classOf[ExperimentQuery]))
),
required = true
),
responses = Array(
new ApiResponse(
responseCode = "200",
description = "Success - Result of the experiment query",
content = Array(
new Content(mediaType = "application/json",
schema = new Schema(implementation = classOf[QueryResult]))
)
),
new ApiResponse(responseCode = "401",
description = "Authentication required",
content = Array(new Content(mediaType = "text/plain"))),
new ApiResponse(responseCode = "403",
description = "Authentication failed",
content = Array(new Content(mediaType = "text/plain"))),
new ApiResponse(responseCode = "500",
description = "Internal server error",
content = Array(new Content(mediaType = "text/plain")))
)
)
def runExperiment: Route
@Path("/algorithms")
@Operation(
summary = "Get complete catalog of mining methods (algorithms)",
description = "Get catalog containing available mining methods (algorithms)",
tags = Array("api"),
responses = Array(
new ApiResponse(
responseCode = "200",
description = "Success - returns the list of algorithms",
content = Array(
new Content(mediaType = "application/json",
schema = new Schema(implementation = classOf[MethodsResponse]))
)
),
new ApiResponse(responseCode = "401",
description = "Authentication required",
content = Array(new Content(mediaType = "text/plain"))),
new ApiResponse(responseCode = "403",
description = "Authentication failed",
content = Array(new Content(mediaType = "text/plain"))),
new ApiResponse(responseCode = "500",
description = "Internal server error",
content = Array(new Content(mediaType = "text/plain")))
)
)
@GET
def listAlgorithms: Route
}
| LREN-CHUV/workflow | src/main/scala/ch/chuv/lren/woken/api/swagger/MiningServiceApi.scala | Scala | apache-2.0 | 5,213 |
import sbt._
import Keys._
import com.jsuereth.sbtpgp.PgpKeys
import sbtrelease.ReleasePlugin.autoImport._
import sbtrelease.ReleaseStateTransformations._
object Publish {
pomExtra in Global := {
<developers>
<developer>
<id>ceedubs</id>
<name>Cody Allen</name>
<email>ceedubs@gmail.com</email>
</developer>
<developer>
<id>kailuowang</id>
<name>Kailuo Wang</name>
<email>kailuo.wang@gmail.com</email>
</developer>
</developers>
}
val publishingSettings = Seq(
ThisBuild / organization := "com.iheart",
publishMavenStyle := true,
licenses := Seq("MIT" -> url("http://www.opensource.org/licenses/mit-license.html")),
homepage := Some(url("http://iheartradio.github.io/ficus")),
scmInfo := Some(
ScmInfo(
url("https://github.com/iheartradio/ficus"),
"git@github.com:iheartradio/ficus.git",
Some("git@github.com:iheartradio/ficus.git")
)
),
pomIncludeRepository := { _ => false },
Test / publishArtifact := false,
publishTo := {
val nexus = "https://oss.sonatype.org/"
if (isSnapshot.value)
Some("Snapshots" at nexus + "content/repositories/snapshots")
else
Some("Releases" at nexus + "service/local/staging/deploy/maven2")
},
pomExtra := (
<developers>
<developer>
<id>ceedubs</id>
<name>Cody Allen</name>
<email>ceedubs@gmail.com</email>
</developer>
<developer>
<id>kailuowang</id>
<name>Kailuo Wang</name>
<email>kailuo.wang@gmail.com</email>
</developer>
</developers>
),
releaseCrossBuild := true,
releasePublishArtifactsAction := PgpKeys.publishSigned.value,
releaseProcess := Seq[ReleaseStep](
checkSnapshotDependencies,
inquireVersions,
runClean,
runTest,
setReleaseVersion,
commitReleaseVersion,
tagRelease,
publishArtifacts,
setNextVersion,
commitNextVersion,
ReleaseStep(action = Command.process("sonatypeReleaseAll", _)),
pushChanges
)
)
val settings = publishingSettings
}
| mdedetrich/ficus | project/Publish.scala | Scala | mit | 2,376 |
object test_rational {
def main(args: Array[String]) {
// Put code here
}
}
| LoyolaChicagoBooks/introcs-scala-examples | test_rational/test_rational.scala | Scala | gpl-3.0 | 84 |
package utils
import akka.actor.ActorRef
/**
* Created by marcin on 6/17/17.
*/
object Jade {
def nodeNames(implicit nodeMap: Map[String, ActorRef]): (String, List[String]) = "nodeNames" -> nodeMap.keys.toList
def error(implicit reason: String): (String, String) = "error" -> reason
def servletName(implicit servletName: String): (String, String) = "servletName" -> servletName
def node(nodeName: String): (String, String) = "nodeName" -> nodeName
}
| mprzewie/cloudia-client | src/main/scala/utils/Jade.scala | Scala | mit | 467 |
/*
* Copyright 2012-2014 Joscha Feth, Steve Chaloner, Anton Fedchenko
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.kompot.play2sec.authentication.service
import play.api.mvc.Request
import com.github.kompot.play2sec.authentication.user.{AuthUserIdentity, AuthUser}
import scala.concurrent.Future
trait UserService {
type UserClass
/**
* Saves auth provider/id combination to a local user
* @param authUser
* @return
*/
def save(authUser: AuthUser): Future[Option[UserClass]]
/**
* Returns the local identifying object if the auth provider/id combination
* has been linked to a local user account already or null if not.
* This gets called on any login to check whether the session user still
* has a valid corresponding local user
*
* @param identity
* @return
*/
def getByAuthUserIdentity(identity: AuthUserIdentity): Future[Option[UserClass]]
/**
* Merges two user accounts after a login with an auth provider/id that
* is linked to a different account than the login from before
* Returns the user to generate the session information from
*
* @param newUser
* @param oldUser
* @return
*/
def merge(newUser: AuthUser, oldUser: Option[AuthUser]): Future[AuthUser]
/**
* Links a new account to an existing local user.
* Returns the auth user to log in with
*
* @param oldUser
* @param newUser
* @return
*/
def link(oldUser: Option[AuthUser], newUser: AuthUser): Future[AuthUser]
/**
* Unlinks account from an existing local user.
* Returns the auth user to log in with
*
* @param currentUser
* @param provider
* @return
*/
def unlink(currentUser: Option[AuthUser], provider: String): Future[Option[AuthUser]]
/**
* Gets called when a user logs in - you might make profile updates here with
* data coming from the login provider or bump a last-logged-in date
*
* @param knownUser
* @return
*/
def whenLogin[A](knownUser: AuthUser, request: Request[A]): AuthUser
}
| kompot/play2sec | app/com/github/kompot/play2sec/authentication/service/UserService.scala | Scala | apache-2.0 | 2,556 |
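A minimal in-memory sketch of an implementation, for illustration only. It assumes that `AuthUser` extends `AuthUserIdentity` (as in play-authenticate, on which this library is based) and that identities compare equal on provider/id; a real service would persist users and implement `merge`/`link` against its own user model:

```scala
import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import play.api.mvc.Request
import com.github.kompot.play2sec.authentication.service.UserService
import com.github.kompot.play2sec.authentication.user.{AuthUser, AuthUserIdentity}

// Illustrative stub: stores auth users in memory, keyed by their identity.
class InMemoryUserService extends UserService {
  type UserClass = AuthUser

  private val store = TrieMap.empty[AuthUserIdentity, AuthUser]

  def save(authUser: AuthUser): Future[Option[UserClass]] = {
    store.put(authUser, authUser) // assumes AuthUser is itself an AuthUserIdentity
    Future.successful(Some(authUser))
  }

  def getByAuthUserIdentity(identity: AuthUserIdentity): Future[Option[UserClass]] =
    Future.successful(store.get(identity))

  def merge(newUser: AuthUser, oldUser: Option[AuthUser]): Future[AuthUser] =
    Future.successful(newUser)

  def link(oldUser: Option[AuthUser], newUser: AuthUser): Future[AuthUser] =
    Future.successful(newUser)

  def unlink(currentUser: Option[AuthUser], provider: String): Future[Option[AuthUser]] =
    Future.successful(currentUser)

  def whenLogin[A](knownUser: AuthUser, request: Request[A]): AuthUser = knownUser
}
```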
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.parquet
import java.net.URI
import java.util.TimeZone
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.parquet.filter2.compat.FilterCompat
import org.apache.parquet.filter2.predicate.{FilterApi, FilterPredicate}
import org.apache.parquet.format.converter.ParquetMetadataConverter.SKIP_ROW_GROUPS
import org.apache.parquet.hadoop.{ParquetFileReader, ParquetInputFormat, ParquetInputSplit, ParquetRecordReader}
import org.apache.spark.TaskContext
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.connector.read.{InputPartition, PartitionReader}
import org.apache.spark.sql.execution.datasources.{PartitionedFile, RecordReaderIterator}
import org.apache.spark.sql.execution.datasources.parquet._
import org.apache.spark.sql.execution.datasources.v2._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.types.{AtomicType, StructType}
import org.apache.spark.sql.vectorized.ColumnarBatch
import org.apache.spark.util.SerializableConfiguration
/**
* A factory used to create Parquet readers.
*
* @param sqlConf SQL configuration.
* @param broadcastedConf Broadcast serializable Hadoop Configuration.
* @param dataSchema Schema of Parquet files.
* @param readDataSchema Required schema of Parquet files.
* @param partitionSchema Schema of partitions.
* @param filters Filters to be pushed down in the batch scan.
*/
case class ParquetPartitionReaderFactory(
sqlConf: SQLConf,
broadcastedConf: Broadcast[SerializableConfiguration],
dataSchema: StructType,
readDataSchema: StructType,
partitionSchema: StructType,
filters: Array[Filter]) extends FilePartitionReaderFactory with Logging {
private val isCaseSensitive = sqlConf.caseSensitiveAnalysis
private val resultSchema = StructType(partitionSchema.fields ++ readDataSchema.fields)
private val enableOffHeapColumnVector = sqlConf.offHeapColumnVectorEnabled
private val enableVectorizedReader: Boolean = sqlConf.parquetVectorizedReaderEnabled &&
resultSchema.forall(_.dataType.isInstanceOf[AtomicType])
private val enableRecordFilter: Boolean = sqlConf.parquetRecordFilterEnabled
private val timestampConversion: Boolean = sqlConf.isParquetINT96TimestampConversion
private val capacity = sqlConf.parquetVectorizedReaderBatchSize
private val enableParquetFilterPushDown: Boolean = sqlConf.parquetFilterPushDown
private val pushDownDate = sqlConf.parquetFilterPushDownDate
private val pushDownTimestamp = sqlConf.parquetFilterPushDownTimestamp
private val pushDownDecimal = sqlConf.parquetFilterPushDownDecimal
private val pushDownStringStartWith = sqlConf.parquetFilterPushDownStringStartWith
private val pushDownInFilterThreshold = sqlConf.parquetFilterPushDownInFilterThreshold
override def supportColumnarReads(partition: InputPartition): Boolean = {
sqlConf.parquetVectorizedReaderEnabled && sqlConf.wholeStageEnabled &&
resultSchema.length <= sqlConf.wholeStageMaxNumFields &&
resultSchema.forall(_.dataType.isInstanceOf[AtomicType])
}
override def buildReader(file: PartitionedFile): PartitionReader[InternalRow] = {
val reader = if (enableVectorizedReader) {
createVectorizedReader(file)
} else {
createRowBaseReader(file)
}
val fileReader = new PartitionReader[InternalRow] {
override def next(): Boolean = reader.nextKeyValue()
override def get(): InternalRow = reader.getCurrentValue.asInstanceOf[InternalRow]
override def close(): Unit = reader.close()
}
new PartitionReaderWithPartitionValues(fileReader, readDataSchema,
partitionSchema, file.partitionValues)
}
override def buildColumnarReader(file: PartitionedFile): PartitionReader[ColumnarBatch] = {
val vectorizedReader = createVectorizedReader(file)
vectorizedReader.enableReturningBatches()
new PartitionReader[ColumnarBatch] {
override def next(): Boolean = vectorizedReader.nextKeyValue()
override def get(): ColumnarBatch =
vectorizedReader.getCurrentValue.asInstanceOf[ColumnarBatch]
override def close(): Unit = vectorizedReader.close()
}
}
private def buildReaderBase[T](
file: PartitionedFile,
buildReaderFunc: (
ParquetInputSplit, InternalRow, TaskAttemptContextImpl, Option[FilterPredicate],
Option[TimeZone]) => RecordReader[Void, T]): RecordReader[Void, T] = {
val conf = broadcastedConf.value.value
val filePath = new Path(new URI(file.filePath))
val split =
new org.apache.parquet.hadoop.ParquetInputSplit(
filePath,
file.start,
file.start + file.length,
file.length,
Array.empty,
null)
lazy val footerFileMetaData =
ParquetFileReader.readFooter(conf, filePath, SKIP_ROW_GROUPS).getFileMetaData
// Try to push down filters when filter push-down is enabled.
val pushed = if (enableParquetFilterPushDown) {
val parquetSchema = footerFileMetaData.getSchema
val parquetFilters = new ParquetFilters(parquetSchema, pushDownDate, pushDownTimestamp,
pushDownDecimal, pushDownStringStartWith, pushDownInFilterThreshold, isCaseSensitive)
filters
// Collects all converted Parquet filter predicates. Notice that not all predicates can be
// converted (`ParquetFilters.createFilter` returns an `Option`). That's why a `flatMap`
// is used here.
.flatMap(parquetFilters.createFilter)
.reduceOption(FilterApi.and)
} else {
None
}
// PARQUET_INT96_TIMESTAMP_CONVERSION says to apply timezone conversions to int96 timestamps'
// *only* if the file was created by something other than "parquet-mr", so check the actual
// writer here for this file. We have to do this per-file, as each file in the table may
// have different writers.
// Define isCreatedByParquetMr as function to avoid unnecessary parquet footer reads.
def isCreatedByParquetMr: Boolean =
footerFileMetaData.getCreatedBy().startsWith("parquet-mr")
val convertTz =
if (timestampConversion && !isCreatedByParquetMr) {
Some(DateTimeUtils.getTimeZone(conf.get(SQLConf.SESSION_LOCAL_TIMEZONE.key)))
} else {
None
}
val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
// Try to push down filters when filter push-down is enabled.
// Notice: This push-down is RowGroups level, not individual records.
if (pushed.isDefined) {
ParquetInputFormat.setFilterPredicate(hadoopAttemptContext.getConfiguration, pushed.get)
}
val reader =
buildReaderFunc(split, file.partitionValues, hadoopAttemptContext, pushed, convertTz)
reader.initialize(split, hadoopAttemptContext)
reader
}
private def createRowBaseReader(file: PartitionedFile): RecordReader[Void, InternalRow] = {
buildReaderBase(file, createRowBaseParquetReader)
}
private def createRowBaseParquetReader(
split: ParquetInputSplit,
partitionValues: InternalRow,
hadoopAttemptContext: TaskAttemptContextImpl,
pushed: Option[FilterPredicate],
convertTz: Option[TimeZone]): RecordReader[Void, InternalRow] = {
logDebug(s"Falling back to parquet-mr")
val taskContext = Option(TaskContext.get())
// ParquetRecordReader returns InternalRow
val readSupport = new ParquetReadSupport(convertTz, enableVectorizedReader = false)
val reader = if (pushed.isDefined && enableRecordFilter) {
val parquetFilter = FilterCompat.get(pushed.get, null)
new ParquetRecordReader[InternalRow](readSupport, parquetFilter)
} else {
new ParquetRecordReader[InternalRow](readSupport)
}
val iter = new RecordReaderIterator(reader)
// SPARK-23457 Register a task completion listener before `initialization`.
taskContext.foreach(_.addTaskCompletionListener[Unit](_ => iter.close()))
reader
}
private def createVectorizedReader(file: PartitionedFile): VectorizedParquetRecordReader = {
val vectorizedReader = buildReaderBase(file, createParquetVectorizedReader)
.asInstanceOf[VectorizedParquetRecordReader]
vectorizedReader.initBatch(partitionSchema, file.partitionValues)
vectorizedReader
}
private def createParquetVectorizedReader(
split: ParquetInputSplit,
partitionValues: InternalRow,
hadoopAttemptContext: TaskAttemptContextImpl,
pushed: Option[FilterPredicate],
convertTz: Option[TimeZone]): VectorizedParquetRecordReader = {
val taskContext = Option(TaskContext.get())
val vectorizedReader = new VectorizedParquetRecordReader(
convertTz.orNull, enableOffHeapColumnVector && taskContext.isDefined, capacity)
val iter = new RecordReaderIterator(vectorizedReader)
// SPARK-23457 Register a task completion listener before `initialization`.
taskContext.foreach(_.addTaskCompletionListener[Unit](_ => iter.close()))
logDebug(s"Appending $partitionSchema $partitionValues")
vectorizedReader
}
}
| jkbradley/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/parquet/ParquetPartitionReaderFactory.scala | Scala | apache-2.0 | 10,198 |
package org.jetbrains.plugins.scala
package findUsages.factory
import com.intellij.find.findUsages.JavaFindUsagesOptions
import com.intellij.openapi.project.Project
/**
* Nikolay.Tropin
* 2014-09-15
*/
class ScalaMemberFindUsagesOptions(project: Project) extends JavaFindUsagesOptions(project) {
isSearchForTextOccurrences = false
}
class ScalaParameterFindUsagesOptions(project: Project) extends JavaFindUsagesOptions(project) {
isSearchForTextOccurrences = false
}
| LPTK/intellij-scala | src/org/jetbrains/plugins/scala/findUsages/factory/ScalaMemberFindUsagesOptions.scala | Scala | apache-2.0 | 477 |
package com.tajpure.scheme.compiler.value
class FunType {
} | tajpure/SoScheme | src/main/scala/com/tajpure/scheme/compiler/value/FunType.scala | Scala | gpl-3.0 | 61 |
package lib.generator
import com.bryzek.apidoc.generator.v0.models.Generator
case class CodeGenTarget(metaData: Generator, status: Status, codeGenerator: Option[CodeGenerator])
| krschultz/apidoc-generator | lib/src/main/scala/generator/CodeGenTarget.scala | Scala | mit | 180 |
package rere.sasl.scram
import java.text.Normalizer
import rere.sasl.util.BinaryString
package object crypto {
val CLIENT_KEY = "Client Key"
val SERVER_KEY = "Server Key"
// Naive implementation, but it's better than nothing
def normalize(str: String): String = {
Normalizer.normalize(str, Normalizer.Form.NFKC)
}
def xor(a: BinaryString, b: BinaryString): Either[String, BinaryString] = {
if (a.length != b.length) {
Left("Mismatch of keys length.")
} else {
val result = new Array[Byte](a.length)
var i = result.length - 1
while (i >= 0) {
result(i) = (a(i) ^ b(i)).toByte
i -= 1
}
Right(result)
}
}
}
| pbaun/rere | modules/sasl/src/main/scala/rere/sasl/scram/crypto/package.scala | Scala | apache-2.0 | 691 |
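A tiny usage sketch of the two helpers above. It treats `BinaryString` as an alias for `Array[Byte]`, which is what the implementation implies (an assumption about `rere.sasl.util.BinaryString`):

```scala
import rere.sasl.scram.crypto._

val a: Array[Byte] = Array[Byte](0x0f, 0x33)
val b: Array[Byte] = Array[Byte](0x01, 0x0f)

xor(a, b)                  // Right(Array(0x0e, 0x3c))
xor(a, Array[Byte](0x01))  // Left("Mismatch of keys length.")
normalize("\u2460")        // "1" – NFKC maps CIRCLED DIGIT ONE to the plain digit
```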